chore: add local pre-CI patch report generation for backend and frontend coverage

- Implemented a new script `local-patch-report.sh` to generate a local patch report.
- The report computes patch coverage based on changes from the current branch against `origin/main`.
- Integrated backend and frontend coverage inputs, producing both Markdown and JSON output artifacts.
- Updated the existing frontend coverage script to validate the presence of the LCOV coverage file.
- Added tests for coverage computation and parsing of unified diffs for changed lines.
- Enhanced error handling and validation for coverage inputs and baseline references.
This commit is contained in:
GitHub Actions
2026-02-17 13:11:29 +00:00
parent 7c82f5ad0d
commit aefbc5eee8
9 changed files with 1451 additions and 192 deletions

55
.vscode/tasks.json vendored
View File

@@ -125,6 +125,61 @@
"group": "test",
"problemMatcher": []
},
{
"label": "Test: Local Patch Report",
"type": "shell",
"command": "bash scripts/local-patch-report.sh",
"group": "test",
"problemMatcher": []
},
{
"label": "Test: Coverage Inputs for Local Patch Report",
"type": "shell",
"dependsOn": [
"Test: Backend with Coverage",
"Test: Frontend Coverage (Vitest)"
],
"dependsOrder": "sequence",
"command": "echo 'Coverage inputs for local patch report complete'",
"group": "test",
"problemMatcher": []
},
{
"label": "Test: Backend DoD + Local Patch Report",
"type": "shell",
"dependsOn": [
"Test: Backend with Coverage",
"Test: Local Patch Report"
],
"dependsOrder": "sequence",
"command": "echo 'Backend DoD + local patch report complete'",
"group": "test",
"problemMatcher": []
},
{
"label": "Test: Frontend DoD + Local Patch Report",
"type": "shell",
"dependsOn": [
"Test: Frontend Coverage (Vitest)",
"Test: Local Patch Report"
],
"dependsOrder": "sequence",
"command": "echo 'Frontend DoD + local patch report complete'",
"group": "test",
"problemMatcher": []
},
{
"label": "Test: Full DoD Unit + Local Patch Report",
"type": "shell",
"dependsOn": [
"Test: Coverage Inputs for Local Patch Report",
"Test: Local Patch Report"
],
"dependsOrder": "sequence",
"command": "echo 'Full DoD + local patch report complete'",
"group": "test",
"problemMatcher": []
},
{
"label": "Test: E2E Playwright (FireFox)",
"type": "shell",

View File

@@ -18,6 +18,7 @@ help:
@echo " dev - Run both backend and frontend in dev mode (requires tmux)"
@echo " go-check - Verify backend build readiness (runs scripts/check_go_build.sh)"
@echo " gopls-logs - Collect gopls diagnostics (runs scripts/gopls_collect.sh)"
@echo " local-patch-report - Generate local patch coverage report"
@echo ""
@echo "Security targets:"
@echo " security-scan - Quick security scan (govulncheck on Go deps)"
@@ -136,6 +137,9 @@ go-check:
gopls-logs:
./scripts/gopls_collect.sh
local-patch-report:
bash scripts/local-patch-report.sh
# Security scanning targets
security-scan:
@echo "Running security scan (govulncheck)..."

View File

@@ -0,0 +1,269 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/Wikid82/charon/backend/internal/patchreport"
)
// thresholdJSON carries the resolved minimum patch-coverage thresholds
// (percentages) for each scope as emitted in the JSON report.
type thresholdJSON struct {
	Overall  float64 `json:"overall_patch_coverage_min"`
	Backend  float64 `json:"backend_patch_coverage_min"`
	Frontend float64 `json:"frontend_patch_coverage_min"`
}

// thresholdSourcesJSON records where each threshold value came from
// (e.g. "env" or "default", per patchreport.ResolveThreshold).
type thresholdSourcesJSON struct {
	Overall  string `json:"overall"`
	Backend  string `json:"backend"`
	Frontend string `json:"frontend"`
}

// artifactsJSON lists the output files produced by a report run,
// as repo-relative (or absolute, if outside the repo) slash paths.
type artifactsJSON struct {
	Markdown string `json:"markdown"`
	JSON     string `json:"json"`
}

// reportJSON is the top-level JSON document written by the local
// patch-coverage report tool.
type reportJSON struct {
	Baseline         string                    `json:"baseline"`
	GeneratedAt      string                    `json:"generated_at"`
	Mode             string                    `json:"mode"`
	Thresholds       thresholdJSON             `json:"thresholds"`
	ThresholdSources thresholdSourcesJSON      `json:"threshold_sources"`
	Overall          patchreport.ScopeCoverage `json:"overall"`
	Backend          patchreport.ScopeCoverage `json:"backend"`
	Frontend         patchreport.ScopeCoverage `json:"frontend"`
	Warnings         []string                  `json:"warnings,omitempty"`
	Artifacts        artifactsJSON             `json:"artifacts"`
}
// main generates a local patch-coverage report: it diffs the working tree
// against a baseline ref, intersects the changed lines with backend (Go
// profile) and frontend (LCOV) coverage data, and writes JSON + Markdown
// artifacts. It exits non-zero on any input or output error; coverage below
// threshold only produces warnings (mode is always "warn").
func main() {
	repoRootFlag := flag.String("repo-root", ".", "Repository root path")
	baselineFlag := flag.String("baseline", "origin/main...HEAD", "Git diff baseline")
	backendCoverageFlag := flag.String("backend-coverage", "backend/coverage.txt", "Backend Go coverage profile")
	frontendCoverageFlag := flag.String("frontend-coverage", "frontend/coverage/lcov.info", "Frontend LCOV coverage report")
	jsonOutFlag := flag.String("json-out", "test-results/local-patch-report.json", "Path to JSON output report")
	mdOutFlag := flag.String("md-out", "test-results/local-patch-report.md", "Path to markdown output report")
	flag.Parse()
	repoRoot, err := filepath.Abs(*repoRootFlag)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error resolving repo root: %v\n", err)
		os.Exit(1)
	}
	// Resolve all configured paths relative to the repo root (absolute paths
	// pass through unchanged).
	backendCoveragePath := resolvePath(repoRoot, *backendCoverageFlag)
	frontendCoveragePath := resolvePath(repoRoot, *frontendCoverageFlag)
	jsonOutPath := resolvePath(repoRoot, *jsonOutFlag)
	mdOutPath := resolvePath(repoRoot, *mdOutFlag)
	// Fail fast if either coverage input is missing: the report is
	// meaningless without both.
	if err := assertFileExists(backendCoveragePath, "backend coverage file"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := assertFileExists(frontendCoveragePath, "frontend coverage file"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Collect the set of changed (added) lines per scope from a
	// zero-context git diff against the baseline.
	diffContent, err := gitDiff(repoRoot, *baselineFlag)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error generating git diff: %v\n", err)
		os.Exit(1)
	}
	backendChanged, frontendChanged, err := patchreport.ParseUnifiedDiffChangedLines(diffContent)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing changed lines from diff: %v\n", err)
		os.Exit(1)
	}
	backendCoverage, err := patchreport.ParseGoCoverageProfile(backendCoveragePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing backend coverage: %v\n", err)
		os.Exit(1)
	}
	frontendCoverage, err := patchreport.ParseLCOVProfile(frontendCoveragePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing frontend coverage: %v\n", err)
		os.Exit(1)
	}
	// Thresholds come from env vars when valid, else these defaults
	// (90% overall, 85% per scope).
	overallThreshold := patchreport.ResolveThreshold("CHARON_OVERALL_PATCH_COVERAGE_MIN", 90, nil)
	backendThreshold := patchreport.ResolveThreshold("CHARON_BACKEND_PATCH_COVERAGE_MIN", 85, nil)
	frontendThreshold := patchreport.ResolveThreshold("CHARON_FRONTEND_PATCH_COVERAGE_MIN", 85, nil)
	backendScope := patchreport.ComputeScopeCoverage(backendChanged, backendCoverage)
	frontendScope := patchreport.ComputeScopeCoverage(frontendChanged, frontendCoverage)
	overallScope := patchreport.MergeScopeCoverage(backendScope, frontendScope)
	backendScope = patchreport.ApplyStatus(backendScope, backendThreshold.Value)
	frontendScope = patchreport.ApplyStatus(frontendScope, frontendThreshold.Value)
	overallScope = patchreport.ApplyStatus(overallScope, overallThreshold.Value)
	// Start with threshold-resolution warnings (invalid env values), then
	// append below-threshold warnings per scope.
	warnings := patchreport.SortedWarnings([]string{
		overallThreshold.Warning,
		backendThreshold.Warning,
		frontendThreshold.Warning,
	})
	if overallScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Overall patch coverage %.1f%% is below threshold %.1f%%", overallScope.PatchCoveragePct, overallThreshold.Value))
	}
	if backendScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Backend patch coverage %.1f%% is below threshold %.1f%%", backendScope.PatchCoveragePct, backendThreshold.Value))
	}
	if frontendScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Frontend patch coverage %.1f%% is below threshold %.1f%%", frontendScope.PatchCoveragePct, frontendThreshold.Value))
	}
	report := reportJSON{
		Baseline:    *baselineFlag,
		GeneratedAt: time.Now().UTC().Format(time.RFC3339),
		Mode:        "warn",
		Thresholds: thresholdJSON{
			Overall:  overallThreshold.Value,
			Backend:  backendThreshold.Value,
			Frontend: frontendThreshold.Value,
		},
		ThresholdSources: thresholdSourcesJSON{
			Overall:  overallThreshold.Source,
			Backend:  backendThreshold.Source,
			Frontend: frontendThreshold.Source,
		},
		Overall:  overallScope,
		Backend:  backendScope,
		Frontend: frontendScope,
		Warnings: warnings,
		Artifacts: artifactsJSON{
			Markdown: relOrAbs(repoRoot, mdOutPath),
			JSON:     relOrAbs(repoRoot, jsonOutPath),
		},
	}
	// Ensure both output directories exist before writing artifacts.
	if err := os.MkdirAll(filepath.Dir(jsonOutPath), 0o750); err != nil {
		fmt.Fprintf(os.Stderr, "error creating json output directory: %v\n", err)
		os.Exit(1)
	}
	if err := os.MkdirAll(filepath.Dir(mdOutPath), 0o750); err != nil {
		fmt.Fprintf(os.Stderr, "error creating markdown output directory: %v\n", err)
		os.Exit(1)
	}
	if err := writeJSON(jsonOutPath, report); err != nil {
		fmt.Fprintf(os.Stderr, "error writing json report: %v\n", err)
		os.Exit(1)
	}
	if err := writeMarkdown(mdOutPath, report, relOrAbs(repoRoot, backendCoveragePath), relOrAbs(repoRoot, frontendCoveragePath)); err != nil {
		fmt.Fprintf(os.Stderr, "error writing markdown report: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Local patch report generated (mode=%s)\n", report.Mode)
	fmt.Printf("JSON: %s\n", relOrAbs(repoRoot, jsonOutPath))
	fmt.Printf("Markdown: %s\n", relOrAbs(repoRoot, mdOutPath))
	// Warnings are informational only; exit code stays zero.
	for _, warning := range warnings {
		fmt.Printf("WARN: %s\n", warning)
	}
}
// resolvePath interprets configured relative to repoRoot; an already-absolute
// path is returned unchanged.
func resolvePath(repoRoot, configured string) string {
	if !filepath.IsAbs(configured) {
		return filepath.Join(repoRoot, configured)
	}
	return configured
}
// relOrAbs renders path relative to repoRoot when a relative form exists,
// otherwise falls back to the path itself; the result always uses forward
// slashes for portability in report output.
func relOrAbs(repoRoot, path string) string {
	if rel, err := filepath.Rel(repoRoot, path); err == nil {
		return filepath.ToSlash(rel)
	}
	return filepath.ToSlash(path)
}
// assertFileExists verifies that path names an existing non-directory entry;
// label is embedded in the error message to identify which input is at fault.
func assertFileExists(path, label string) error {
	info, statErr := os.Stat(path)
	switch {
	case statErr != nil:
		return fmt.Errorf("missing %s at %s: %w", label, path, statErr)
	case info.IsDir():
		return fmt.Errorf("expected %s to be a file but found directory: %s", label, path)
	}
	return nil
}
// gitDiff returns the zero-context unified diff of the repository at repoRoot
// against the given baseline (e.g. "origin/main...HEAD").
//
// stdout is captured separately from stderr: the previous CombinedOutput-based
// implementation would, on success, interleave git's stderr warnings (e.g.
// "warning: refname ... is ambiguous") into the diff text handed to the
// parser. On failure, stderr is recovered from exec.ExitError for diagnostics.
func gitDiff(repoRoot, baseline string) (string, error) {
	cmd := exec.Command("git", "-C", repoRoot, "diff", "--unified=0", baseline)
	output, err := cmd.Output()
	if err != nil {
		detail := ""
		if exitErr, ok := err.(*exec.ExitError); ok {
			detail = strings.TrimSpace(string(exitErr.Stderr))
		}
		return "", fmt.Errorf("git diff %s failed: %w (%s)", baseline, err, detail)
	}
	return string(output), nil
}
// writeJSON serializes the report as indented JSON, appends a trailing
// newline, and writes it to path with owner-only permissions.
func writeJSON(path string, report reportJSON) error {
	encoded, err := json.MarshalIndent(report, "", " ")
	if err != nil {
		return fmt.Errorf("marshal report json: %w", err)
	}
	payload := append(encoded, '\n')
	if writeErr := os.WriteFile(path, payload, 0o600); writeErr != nil {
		return fmt.Errorf("write report json file: %w", writeErr)
	}
	return nil
}
// writeMarkdown renders the human-readable report (metadata, inputs, resolved
// thresholds, per-scope coverage summary, warnings, artifact paths) and writes
// it to path with owner-only permissions.
func writeMarkdown(path string, report reportJSON, backendCoveragePath, frontendCoveragePath string) error {
	var b strings.Builder
	b.WriteString("# Local Patch Coverage Report\n\n")
	b.WriteString("## Metadata\n\n")
	fmt.Fprintf(&b, "- Generated: %s\n", report.GeneratedAt)
	fmt.Fprintf(&b, "- Baseline: `%s`\n", report.Baseline)
	fmt.Fprintf(&b, "- Mode: `%s`\n\n", report.Mode)
	b.WriteString("## Inputs\n\n")
	fmt.Fprintf(&b, "- Backend coverage: `%s`\n", backendCoveragePath)
	fmt.Fprintf(&b, "- Frontend coverage: `%s`\n\n", frontendCoveragePath)
	b.WriteString("## Resolved Thresholds\n\n")
	b.WriteString("| Scope | Minimum (%) | Source |\n")
	b.WriteString("|---|---:|---|\n")
	fmt.Fprintf(&b, "| Overall | %.1f | %s |\n", report.Thresholds.Overall, report.ThresholdSources.Overall)
	fmt.Fprintf(&b, "| Backend | %.1f | %s |\n", report.Thresholds.Backend, report.ThresholdSources.Backend)
	fmt.Fprintf(&b, "| Frontend | %.1f | %s |\n\n", report.Thresholds.Frontend, report.ThresholdSources.Frontend)
	b.WriteString("## Coverage Summary\n\n")
	b.WriteString("| Scope | Changed Lines | Covered Lines | Patch Coverage (%) | Status |\n")
	b.WriteString("|---|---:|---:|---:|---|\n")
	b.WriteString(scopeRow("Overall", report.Overall))
	b.WriteString(scopeRow("Backend", report.Backend))
	b.WriteString(scopeRow("Frontend", report.Frontend))
	b.WriteString("\n")
	// The warnings section is omitted entirely when there is nothing to warn about.
	if len(report.Warnings) > 0 {
		b.WriteString("## Warnings\n\n")
		for _, warning := range report.Warnings {
			fmt.Fprintf(&b, "- %s\n", warning)
		}
		b.WriteString("\n")
	}
	b.WriteString("## Artifacts\n\n")
	fmt.Fprintf(&b, "- Markdown: `%s`\n", report.Artifacts.Markdown)
	fmt.Fprintf(&b, "- JSON: `%s`\n", report.Artifacts.JSON)
	if err := os.WriteFile(path, []byte(b.String()), 0o600); err != nil {
		return fmt.Errorf("write markdown file: %w", err)
	}
	return nil
}
// scopeRow renders a single markdown table row for the coverage summary.
func scopeRow(name string, scope patchreport.ScopeCoverage) string {
	return fmt.Sprintf(
		"| %s | %d | %d | %.1f | %s |\n",
		name, scope.ChangedLines, scope.CoveredLines, scope.PatchCoveragePct, scope.Status,
	)
}

View File

@@ -10,16 +10,18 @@ import (
)
func TestLoad(t *testing.T) {
// Save original env vars
originalEnv := os.Getenv("CPM_ENV")
defer func() { _ = os.Setenv("CPM_ENV", originalEnv) }()
// Explicitly isolate CHARON_* to validate CPM_* fallback behavior
t.Setenv("CHARON_ENV", "")
t.Setenv("CHARON_DB_PATH", "")
t.Setenv("CHARON_CADDY_CONFIG_DIR", "")
t.Setenv("CHARON_IMPORT_DIR", "")
// Set test env vars
_ = os.Setenv("CPM_ENV", "test")
t.Setenv("CPM_ENV", "test")
tempDir := t.TempDir()
_ = os.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "test.db"))
_ = os.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
_ = os.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports"))
t.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "test.db"))
t.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports"))
cfg, err := Load()
require.NoError(t, err)
@@ -33,13 +35,18 @@ func TestLoad(t *testing.T) {
func TestLoad_Defaults(t *testing.T) {
// Clear env vars to test defaults
_ = os.Unsetenv("CPM_ENV")
_ = os.Unsetenv("CPM_HTTP_PORT")
t.Setenv("CPM_ENV", "")
t.Setenv("CPM_HTTP_PORT", "")
t.Setenv("CHARON_ENV", "")
t.Setenv("CHARON_HTTP_PORT", "")
t.Setenv("CHARON_DB_PATH", "")
t.Setenv("CHARON_CADDY_CONFIG_DIR", "")
t.Setenv("CHARON_IMPORT_DIR", "")
// We need to set paths to a temp dir to avoid creating real dirs in test
tempDir := t.TempDir()
_ = os.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "default.db"))
_ = os.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy_default"))
_ = os.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports_default"))
t.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "default.db"))
t.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy_default"))
t.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports_default"))
cfg, err := Load()
require.NoError(t, err)
@@ -53,8 +60,8 @@ func TestLoad_CharonPrefersOverCPM(t *testing.T) {
tempDir := t.TempDir()
charonDB := filepath.Join(tempDir, "charon.db")
cpmDB := filepath.Join(tempDir, "cpm.db")
_ = os.Setenv("CHARON_DB_PATH", charonDB)
_ = os.Setenv("CPM_DB_PATH", cpmDB)
t.Setenv("CHARON_DB_PATH", charonDB)
t.Setenv("CPM_DB_PATH", cpmDB)
cfg, err := Load()
require.NoError(t, err)
@@ -68,22 +75,32 @@ func TestLoad_Error(t *testing.T) {
require.NoError(t, err)
_ = f.Close()
// Ensure CHARON_* precedence cannot bypass this test's CPM_* setup under shuffled runs
t.Setenv("CHARON_DB_PATH", "")
t.Setenv("CHARON_CADDY_CONFIG_DIR", "")
t.Setenv("CHARON_IMPORT_DIR", "")
// Case 1: CaddyConfigDir is a file
_ = os.Setenv("CPM_CADDY_CONFIG_DIR", filePath)
t.Setenv("CPM_CADDY_CONFIG_DIR", filePath)
// Set other paths to valid locations to isolate the error
_ = os.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "db", "test.db"))
_ = os.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports"))
t.Setenv("CPM_DB_PATH", filepath.Join(tempDir, "db", "test.db"))
t.Setenv("CPM_IMPORT_DIR", filepath.Join(tempDir, "imports"))
t.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "db", "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filePath)
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
_, err = Load()
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), "ensure caddy config directory")
// Case 2: ImportDir is a file
_ = os.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
_ = os.Setenv("CPM_IMPORT_DIR", filePath)
t.Setenv("CPM_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CPM_IMPORT_DIR", filePath)
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filePath)
_, err = Load()
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), "ensure import directory")
}
@@ -93,44 +110,36 @@ func TestGetEnvAny(t *testing.T) {
assert.Equal(t, "fallback_value", result)
// Test with first key set
_ = os.Setenv("TEST_KEY1", "value1")
defer func() { _ = os.Unsetenv("TEST_KEY1") }()
t.Setenv("TEST_KEY1", "value1")
result = getEnvAny("fallback", "TEST_KEY1", "TEST_KEY2")
assert.Equal(t, "value1", result)
// Test with second key set (first takes precedence)
_ = os.Setenv("TEST_KEY2", "value2")
defer func() { _ = os.Unsetenv("TEST_KEY2") }()
t.Setenv("TEST_KEY2", "value2")
result = getEnvAny("fallback", "TEST_KEY1", "TEST_KEY2")
assert.Equal(t, "value1", result)
// Test with only second key set
_ = os.Unsetenv("TEST_KEY1")
t.Setenv("TEST_KEY1", "")
result = getEnvAny("fallback", "TEST_KEY1", "TEST_KEY2")
assert.Equal(t, "value2", result)
// Test with empty string value (should still be considered set)
_ = os.Setenv("TEST_KEY3", "")
defer func() { _ = os.Unsetenv("TEST_KEY3") }()
t.Setenv("TEST_KEY3", "")
result = getEnvAny("fallback", "TEST_KEY3")
assert.Equal(t, "fallback", result) // Empty strings are treated as not set
}
func TestLoad_SecurityConfig(t *testing.T) {
tempDir := t.TempDir()
_ = os.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
_ = os.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
_ = os.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
t.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
// Test security settings
_ = os.Setenv("CERBERUS_SECURITY_CROWDSEC_MODE", "live")
_ = os.Setenv("CERBERUS_SECURITY_WAF_MODE", "enabled")
_ = os.Setenv("CERBERUS_SECURITY_CERBERUS_ENABLED", "true")
defer func() {
_ = os.Unsetenv("CERBERUS_SECURITY_CROWDSEC_MODE")
_ = os.Unsetenv("CERBERUS_SECURITY_WAF_MODE")
_ = os.Unsetenv("CERBERUS_SECURITY_CERBERUS_ENABLED")
}()
t.Setenv("CERBERUS_SECURITY_CROWDSEC_MODE", "live")
t.Setenv("CERBERUS_SECURITY_WAF_MODE", "enabled")
t.Setenv("CERBERUS_SECURITY_CERBERUS_ENABLED", "true")
cfg, err := Load()
require.NoError(t, err)
@@ -150,14 +159,9 @@ func TestLoad_DatabasePathError(t *testing.T) {
_ = f.Close()
// Try to use a path that requires creating a dir inside the blocking file
_ = os.Setenv("CHARON_DB_PATH", filepath.Join(blockingFile, "data", "test.db"))
_ = os.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
_ = os.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
defer func() {
_ = os.Unsetenv("CHARON_DB_PATH")
_ = os.Unsetenv("CHARON_CADDY_CONFIG_DIR")
_ = os.Unsetenv("CHARON_IMPORT_DIR")
}()
t.Setenv("CHARON_DB_PATH", filepath.Join(blockingFile, "data", "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
_, err = Load()
assert.Error(t, err)
@@ -166,20 +170,19 @@ func TestLoad_DatabasePathError(t *testing.T) {
func TestLoad_ACMEStaging(t *testing.T) {
tempDir := t.TempDir()
_ = os.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
_ = os.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
_ = os.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
t.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
// Test ACME staging enabled
_ = os.Setenv("CHARON_ACME_STAGING", "true")
defer func() { _ = os.Unsetenv("CHARON_ACME_STAGING") }()
t.Setenv("CHARON_ACME_STAGING", "true")
cfg, err := Load()
require.NoError(t, err)
assert.True(t, cfg.ACMEStaging)
// Test ACME staging disabled
require.NoError(t, os.Setenv("CHARON_ACME_STAGING", "false"))
t.Setenv("CHARON_ACME_STAGING", "false")
cfg, err = Load()
require.NoError(t, err)
assert.False(t, cfg.ACMEStaging)
@@ -187,20 +190,19 @@ func TestLoad_ACMEStaging(t *testing.T) {
func TestLoad_DebugMode(t *testing.T) {
tempDir := t.TempDir()
require.NoError(t, os.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db")))
require.NoError(t, os.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy")))
require.NoError(t, os.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports")))
t.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
// Test debug mode enabled
require.NoError(t, os.Setenv("CHARON_DEBUG", "true"))
defer func() { require.NoError(t, os.Unsetenv("CHARON_DEBUG")) }()
t.Setenv("CHARON_DEBUG", "true")
cfg, err := Load()
require.NoError(t, err)
assert.True(t, cfg.Debug)
// Test debug mode disabled
require.NoError(t, os.Setenv("CHARON_DEBUG", "false"))
t.Setenv("CHARON_DEBUG", "false")
cfg, err = Load()
require.NoError(t, err)
assert.False(t, cfg.Debug)
@@ -208,9 +210,9 @@ func TestLoad_DebugMode(t *testing.T) {
func TestLoad_EmergencyConfig(t *testing.T) {
tempDir := t.TempDir()
require.NoError(t, os.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db")))
require.NoError(t, os.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy")))
require.NoError(t, os.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports")))
t.Setenv("CHARON_DB_PATH", filepath.Join(tempDir, "test.db"))
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tempDir, "caddy"))
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tempDir, "imports"))
// Test emergency config defaults
cfg, err := Load()
@@ -221,16 +223,10 @@ func TestLoad_EmergencyConfig(t *testing.T) {
assert.Equal(t, "", cfg.Emergency.BasicAuthPassword, "Basic auth password should be empty by default")
// Test emergency config with custom values
_ = os.Setenv("CHARON_EMERGENCY_SERVER_ENABLED", "true")
_ = os.Setenv("CHARON_EMERGENCY_BIND", "0.0.0.0:2020")
_ = os.Setenv("CHARON_EMERGENCY_USERNAME", "admin")
_ = os.Setenv("CHARON_EMERGENCY_PASSWORD", "testpass")
defer func() {
_ = os.Unsetenv("CHARON_EMERGENCY_SERVER_ENABLED")
_ = os.Unsetenv("CHARON_EMERGENCY_BIND")
_ = os.Unsetenv("CHARON_EMERGENCY_USERNAME")
_ = os.Unsetenv("CHARON_EMERGENCY_PASSWORD")
}()
t.Setenv("CHARON_EMERGENCY_SERVER_ENABLED", "true")
t.Setenv("CHARON_EMERGENCY_BIND", "0.0.0.0:2020")
t.Setenv("CHARON_EMERGENCY_USERNAME", "admin")
t.Setenv("CHARON_EMERGENCY_PASSWORD", "testpass")
cfg, err = Load()
require.NoError(t, err)

View File

@@ -0,0 +1,482 @@
package patchreport
import (
	"bufio"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
)
// LineSet is a set of 1-based line numbers within a single file.
type LineSet map[int]struct{}

// FileLineSet maps repo-relative slash-separated file paths to line sets.
type FileLineSet map[string]LineSet

// CoverageData holds, per file, the lines known to be executable and the
// subset of those observed as covered.
type CoverageData struct {
	Executable FileLineSet
	Covered    FileLineSet
}

// ScopeCoverage summarizes patch coverage for one scope (backend, frontend,
// or the merged overall view).
type ScopeCoverage struct {
	ChangedLines     int     `json:"changed_lines"`
	CoveredLines     int     `json:"covered_lines"`
	PatchCoveragePct float64 `json:"patch_coverage_pct"`
	Status           string  `json:"status"`
}

// ThresholdResolution is the outcome of resolving a coverage threshold:
// the value, where it came from ("env" or "default"), and an optional
// warning when an env value was rejected.
type ThresholdResolution struct {
	Value   float64
	Source  string
	Warning string
}

// hunkPattern matches a unified-diff hunk header ("@@ -a,b +c,d @@") and
// captures the new-file start line (c).
var hunkPattern = regexp.MustCompile(`^@@ -\d+(?:,\d+)? \+(\d+)(?:,\d+)? @@`)

// maxScannerTokenSize bounds the longest single line the scanners accept
// (2 MiB), so minified/generated source lines do not abort scanning.
const maxScannerTokenSize = 2 * 1024 * 1024
// newScannerWithLargeBuffer returns a line scanner over in-memory diff
// content that tolerates very long lines (up to maxScannerTokenSize).
func newScannerWithLargeBuffer(input *strings.Reader) *bufio.Scanner {
	return newLargeScanner(input)
}

// newFileScannerWithLargeBuffer is the file-backed counterpart of
// newScannerWithLargeBuffer.
func newFileScannerWithLargeBuffer(file *os.File) *bufio.Scanner {
	return newLargeScanner(file)
}

// newLargeScanner configures a bufio.Scanner with an enlarged token limit;
// both exported-ish constructors above were previously duplicate bodies and
// now share this single implementation.
func newLargeScanner(r io.Reader) *bufio.Scanner {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, 0, 64*1024), maxScannerTokenSize)
	return scanner
}
// ResolveThreshold resolves a coverage threshold from the environment
// variable envName, falling back to defaultValue when the variable is unset
// or invalid. lookup may be nil, in which case os.LookupEnv is used (tests
// pass a stub).
//
// Fix: strconv.ParseFloat accepts "NaN", and NaN compares false against both
// range bounds, so the previous version returned a NaN threshold with
// Source "env" — which then made every "pct < threshold" comparison false
// downstream. NaN is now rejected like any other invalid value.
func ResolveThreshold(envName string, defaultValue float64, lookup func(string) (string, bool)) ThresholdResolution {
	if lookup == nil {
		lookup = os.LookupEnv
	}
	raw, ok := lookup(envName)
	if !ok {
		return ThresholdResolution{Value: defaultValue, Source: "default"}
	}
	raw = strings.TrimSpace(raw)
	value, err := strconv.ParseFloat(raw, 64)
	// Reject unparsable values, NaN, and anything outside the 0-100
	// percentage range; keep the default and surface a warning.
	if err != nil || math.IsNaN(value) || value < 0 || value > 100 {
		return ThresholdResolution{
			Value:   defaultValue,
			Source:  "default",
			Warning: fmt.Sprintf("Ignoring invalid %s=%q; using default %.1f", envName, raw, defaultValue),
		}
	}
	return ThresholdResolution{Value: value, Source: "env"}
}
// ParseUnifiedDiffChangedLines scans unified-diff text (expected to come from
// `git diff --unified=0`) and collects the new-file line numbers of added
// lines, split into two sets: files under backend/ and files under frontend/.
// Files outside those two prefixes are ignored. Returns
// (backendChanged, frontendChanged, err).
func ParseUnifiedDiffChangedLines(diffContent string) (FileLineSet, FileLineSet, error) {
	backendChanged := make(FileLineSet)
	frontendChanged := make(FileLineSet)
	var currentFile string
	currentScope := ""
	currentNewLine := 0
	inHunk := false
	scanner := newScannerWithLargeBuffer(strings.NewReader(diffContent))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "+++") {
			// "+++ b/<path>" names the post-image file; reset per-file state.
			currentFile = ""
			currentScope = ""
			inHunk = false
			newFile := strings.TrimSpace(strings.TrimPrefix(line, "+++"))
			if newFile == "/dev/null" {
				// File deletion: no new-side lines to track.
				continue
			}
			newFile = strings.TrimPrefix(newFile, "b/")
			newFile = normalizeRepoPath(newFile)
			if strings.HasPrefix(newFile, "backend/") {
				currentFile = newFile
				currentScope = "backend"
			} else if strings.HasPrefix(newFile, "frontend/") {
				currentFile = newFile
				currentScope = "frontend"
			}
			// Any other path leaves currentFile/currentScope empty, so the
			// hunk bodies below are skipped for it.
			continue
		}
		if matches := hunkPattern.FindStringSubmatch(line); matches != nil {
			// Hunk header "@@ -a,b +c,d @@": c is the first line number of
			// this hunk in the new file.
			startLine, err := strconv.Atoi(matches[1])
			if err != nil {
				return nil, nil, fmt.Errorf("parse hunk start line: %w", err)
			}
			currentNewLine = startLine
			inHunk = true
			continue
		}
		if !inHunk || currentFile == "" || currentScope == "" || line == "" {
			continue
		}
		switch line[0] {
		case '+':
			if strings.HasPrefix(line, "+++") {
				continue
			}
			// Added line: record it under the active scope, then advance
			// the new-file line counter.
			switch currentScope {
			case "backend":
				addLine(backendChanged, currentFile, currentNewLine)
			case "frontend":
				addLine(frontendChanged, currentFile, currentNewLine)
			}
			currentNewLine++
		case '-':
			// Removed line: exists only in the old file; the new-file
			// counter does not advance.
		case ' ':
			// Context line (present on both sides): advance the counter.
			// With --unified=0 these should not normally appear.
			currentNewLine++
		case '\\':
			// "\ No newline at end of file" marker: not a content line.
		default:
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, nil, fmt.Errorf("scan diff content: %w", err)
	}
	return backendChanged, frontendChanged, nil
}
// ParseGoCoverageProfile reads a Go coverage profile (as written by
// `go test -coverprofile`) and returns per-file executable and covered line
// sets. Profile lines have the form
// "path/file.go:startLine.startCol,endLine.endCol numStmt count"; a block
// with count > 0 marks all of its lines as covered. Malformed lines are
// skipped rather than treated as fatal. The named err return lets the
// deferred Close report its failure when no earlier error occurred.
func ParseGoCoverageProfile(profilePath string) (data CoverageData, err error) {
	validatedPath, err := validateReadablePath(profilePath)
	if err != nil {
		return CoverageData{}, fmt.Errorf("validate go coverage profile path: %w", err)
	}
	// #nosec G304 -- validatedPath is cleaned and resolved to an absolute path by validateReadablePath.
	file, err := os.Open(validatedPath)
	if err != nil {
		return CoverageData{}, fmt.Errorf("open go coverage profile: %w", err)
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil && err == nil {
			err = fmt.Errorf("close go coverage profile: %w", closeErr)
		}
	}()
	data = CoverageData{
		Executable: make(FileLineSet),
		Covered:    make(FileLineSet),
	}
	scanner := newFileScannerWithLargeBuffer(file)
	firstLine := true
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		if firstLine {
			firstLine = false
			// The first line is typically a "mode: set|count|atomic" header.
			if strings.HasPrefix(line, "mode:") {
				continue
			}
		}
		fields := strings.Fields(line)
		if len(fields) != 3 {
			continue
		}
		// fields[2] is the execution count for the block.
		count, err := strconv.Atoi(fields[2])
		if err != nil {
			continue
		}
		filePart, startLine, endLine, err := parseCoverageRange(fields[0])
		if err != nil {
			continue
		}
		// Map module-style paths onto repo-relative backend/ paths so they
		// can be matched against diff paths.
		normalizedFile := normalizeGoCoveragePath(filePart)
		if normalizedFile == "" {
			continue
		}
		for lineNo := startLine; lineNo <= endLine; lineNo++ {
			addLine(data.Executable, normalizedFile, lineNo)
			if count > 0 {
				addLine(data.Covered, normalizedFile, lineNo)
			}
		}
	}
	if scanErr := scanner.Err(); scanErr != nil {
		return CoverageData{}, fmt.Errorf("scan go coverage profile: %w", scanErr)
	}
	return data, nil
}
// ParseLCOVProfile reads an LCOV tracefile and returns per-file executable
// and covered line sets. Only three record types are consumed: "SF:<path>"
// starts a file section, "DA:<line>,<hits>[,...]" reports line execution
// data, and "end_of_record" closes the section. Each source file is indexed
// under several normalized path aliases (see
// normalizeFrontendCoveragePaths) so diff paths can match regardless of how
// the coverage tool rooted its paths. Malformed records are skipped. The
// named err return lets the deferred Close report its failure when no
// earlier error occurred.
func ParseLCOVProfile(lcovPath string) (data CoverageData, err error) {
	validatedPath, err := validateReadablePath(lcovPath)
	if err != nil {
		return CoverageData{}, fmt.Errorf("validate lcov profile path: %w", err)
	}
	// #nosec G304 -- validatedPath is cleaned and resolved to an absolute path by validateReadablePath.
	file, err := os.Open(validatedPath)
	if err != nil {
		return CoverageData{}, fmt.Errorf("open lcov profile: %w", err)
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil && err == nil {
			err = fmt.Errorf("close lcov profile: %w", closeErr)
		}
	}()
	data = CoverageData{
		Executable: make(FileLineSet),
		Covered:    make(FileLineSet),
	}
	// currentFiles holds the alias paths for the SF: section being read.
	currentFiles := make([]string, 0, 2)
	scanner := newFileScannerWithLargeBuffer(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case strings.HasPrefix(line, "SF:"):
			sourceFile := strings.TrimSpace(strings.TrimPrefix(line, "SF:"))
			currentFiles = normalizeFrontendCoveragePaths(sourceFile)
		case strings.HasPrefix(line, "DA:"):
			// DA: outside an SF: section has nowhere to attach.
			if len(currentFiles) == 0 {
				continue
			}
			parts := strings.Split(strings.TrimPrefix(line, "DA:"), ",")
			if len(parts) < 2 {
				continue
			}
			lineNo, err := strconv.Atoi(strings.TrimSpace(parts[0]))
			if err != nil {
				continue
			}
			hits, err := strconv.Atoi(strings.TrimSpace(parts[1]))
			if err != nil {
				continue
			}
			// Record the line under every alias of the current file.
			for _, filePath := range currentFiles {
				addLine(data.Executable, filePath, lineNo)
				if hits > 0 {
					addLine(data.Covered, filePath, lineNo)
				}
			}
		case line == "end_of_record":
			currentFiles = currentFiles[:0]
		}
	}
	if scanErr := scanner.Err(); scanErr != nil {
		return CoverageData{}, fmt.Errorf("scan lcov profile: %w", scanErr)
	}
	return data, nil
}
// ComputeScopeCoverage intersects the changed lines of one scope with its
// coverage data: a changed line counts only when the coverage data marks it
// executable, and it counts as covered when the covered set contains it.
//
// NOTE(review): changed lines in files absent from the coverage data are
// skipped entirely (counted neither as changed nor as uncovered). With zero
// countable lines the percentage defaults to 100. Status is left for
// ApplyStatus to fill in.
func ComputeScopeCoverage(changedLines FileLineSet, coverage CoverageData) ScopeCoverage {
	total, hit := 0, 0
	for file, lines := range changedLines {
		execLines, tracked := coverage.Executable[file]
		if !tracked {
			continue
		}
		coveredSet := coverage.Covered[file]
		for lineNo := range lines {
			if _, ok := execLines[lineNo]; !ok {
				continue
			}
			total++
			if _, ok := coveredSet[lineNo]; ok {
				hit++
			}
		}
	}
	pct := 100.0
	if total > 0 {
		pct = roundToOneDecimal(float64(hit) * 100 / float64(total))
	}
	return ScopeCoverage{
		ChangedLines:     total,
		CoveredLines:     hit,
		PatchCoveragePct: pct,
	}
}
// MergeScopeCoverage combines several scope summaries by summing their line
// counts and recomputing the percentage from the totals (100 when there are
// no countable lines). Status is left empty for ApplyStatus.
func MergeScopeCoverage(scopes ...ScopeCoverage) ScopeCoverage {
	var merged ScopeCoverage
	for _, scope := range scopes {
		merged.ChangedLines += scope.ChangedLines
		merged.CoveredLines += scope.CoveredLines
	}
	merged.PatchCoveragePct = 100.0
	if merged.ChangedLines > 0 {
		merged.PatchCoveragePct = roundToOneDecimal(float64(merged.CoveredLines) * 100 / float64(merged.ChangedLines))
	}
	return merged
}
// ApplyStatus stamps the scope "pass" or "warn" depending on whether its
// percentage meets minThreshold, and returns the updated copy.
func ApplyStatus(scope ScopeCoverage, minThreshold float64) ScopeCoverage {
	status := "pass"
	if scope.PatchCoveragePct < minThreshold {
		status = "warn"
	}
	scope.Status = status
	return scope
}
// SortedWarnings drops blank and whitespace-only entries and returns the
// remaining warnings in ascending lexical order.
func SortedWarnings(warnings []string) []string {
	kept := make([]string, 0, len(warnings))
	for _, warning := range warnings {
		if strings.TrimSpace(warning) == "" {
			continue
		}
		kept = append(kept, warning)
	}
	sort.Strings(kept)
	return kept
}
// parseCoverageRange parses the location part of a Go coverage profile line,
// e.g. "backend/internal/foo.go:10.2,12.40", returning the file path and the
// inclusive 1-based start and end line numbers. Column components after the
// '.' are ignored.
//
// Fix: removed the dead `len(startParts) == 0 || len(endParts) == 0` branch —
// strings.SplitN never returns an empty slice, so that condition could not
// fire (an empty coordinate already fails in strconv.Atoi below).
func parseCoverageRange(rangePart string) (string, int, int, error) {
	pathAndRange := strings.SplitN(rangePart, ":", 2)
	if len(pathAndRange) != 2 {
		return "", 0, 0, fmt.Errorf("invalid range format")
	}
	filePart := strings.TrimSpace(pathAndRange[0])
	rangeSpec := strings.TrimSpace(pathAndRange[1])
	coords := strings.SplitN(rangeSpec, ",", 2)
	if len(coords) != 2 {
		return "", 0, 0, fmt.Errorf("invalid coordinate format")
	}
	// Each coordinate is "line.column"; only the line part matters here.
	startLine, err := strconv.Atoi(strings.SplitN(coords[0], ".", 2)[0])
	if err != nil {
		return "", 0, 0, fmt.Errorf("parse start line: %w", err)
	}
	endLine, err := strconv.Atoi(strings.SplitN(coords[1], ".", 2)[0])
	if err != nil {
		return "", 0, 0, fmt.Errorf("parse end line: %w", err)
	}
	if startLine <= 0 || endLine <= 0 || endLine < startLine {
		return "", 0, 0, fmt.Errorf("invalid line range")
	}
	return filePart, startLine, endLine, nil
}
// normalizeRepoPath trims, cleans, and slash-normalizes a repo-relative path.
//
// Fix: the previous version returned "." for empty (or ".") input because
// filepath.Clean("") == ".", which defeated every caller's `== ""` invalid-
// path check — e.g. an empty LCOV "SF:" record produced the bogus keys "."
// and "frontend". Empty and "." now normalize to "". The old trailing
// TrimPrefix("./") was dead code (Clean never leaves a "./" prefix) and has
// been dropped.
func normalizeRepoPath(input string) string {
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return ""
	}
	cleaned := filepath.ToSlash(filepath.Clean(trimmed))
	if cleaned == "." {
		return ""
	}
	return cleaned
}
// normalizeGoCoveragePath maps a Go cover-profile path (often a
// module-qualified import path) onto its repo-relative form rooted at
// "backend/". Paths that already match are returned unchanged; unknown
// shapes pass through untouched.
func normalizeGoCoveragePath(input string) string {
	cleaned := normalizeRepoPath(input)
	if cleaned == "" {
		return ""
	}
	if strings.HasPrefix(cleaned, "backend/") {
		return cleaned
	}
	// Module-qualified paths embed "/backend/": keep from "backend/" onward.
	if idx := strings.Index(cleaned, "/backend/"); idx >= 0 {
		return cleaned[idx+1:]
	}
	// Paths relative to the backend module root get the prefix re-attached.
	for _, prefix := range []string{"cmd/", "internal/", "pkg/", "api/", "integration/", "tools/"} {
		if strings.HasPrefix(cleaned, prefix) {
			return "backend/" + cleaned
		}
	}
	return cleaned
}
// normalizeFrontendCoveragePaths expands an LCOV source path into the set
// of candidate repo-relative spellings (deduplicated, insertion-ordered)
// under which frontend changed lines may be recorded.
func normalizeFrontendCoveragePaths(input string) []string {
	cleaned := normalizeRepoPath(input)
	if cleaned == "" {
		return nil
	}
	candidates := make([]string, 0, 3)
	seen := make(map[string]struct{}, 3)
	push := func(candidate string) {
		candidate = normalizeRepoPath(candidate)
		if candidate == "" {
			return
		}
		if _, dup := seen[candidate]; dup {
			return
		}
		seen[candidate] = struct{}{}
		candidates = append(candidates, candidate)
	}
	push(cleaned)
	switch idx := strings.Index(cleaned, "/frontend/"); {
	case idx >= 0:
		// Path contains the frontend root somewhere inside: add both the
		// repo-relative and frontend-relative spellings.
		repoRelative := cleaned[idx+1:]
		push(repoRelative)
		push(strings.TrimPrefix(repoRelative, "frontend/"))
	case strings.HasPrefix(cleaned, "frontend/"):
		push(strings.TrimPrefix(cleaned, "frontend/"))
	default:
		push("frontend/" + cleaned)
	}
	return candidates
}
// addLine records lineNo as changed for filePath, lazily allocating the
// per-file line set. Non-positive line numbers and empty paths are ignored.
func addLine(set FileLineSet, filePath string, lineNo int) {
	if filePath == "" || lineNo <= 0 {
		return
	}
	lines, ok := set[filePath]
	if !ok {
		lines = make(LineSet)
		set[filePath] = lines
	}
	lines[lineNo] = struct{}{}
}
// roundToOneDecimal rounds value to one decimal place, half away from
// zero. The previous int(value*10+0.5) form mis-rounded negative inputs
// (e.g. -1.25 became -1.2 instead of -1.3); handling the sign explicitly
// fixes that without requiring a new math import. Coverage percentages
// in [0, 100] behave exactly as before.
func roundToOneDecimal(value float64) float64 {
	scaled := value * 10
	if scaled < 0 {
		return float64(int(scaled-0.5)) / 10
	}
	return float64(int(scaled+0.5)) / 10
}
func validateReadablePath(rawPath string) (string, error) {
trimmedPath := strings.TrimSpace(rawPath)
if trimmedPath == "" {
return "", fmt.Errorf("path is empty")
}
cleanedPath := filepath.Clean(trimmedPath)
absolutePath, err := filepath.Abs(cleanedPath)
if err != nil {
return "", fmt.Errorf("resolve absolute path: %w", err)
}
return absolutePath, nil
}

View File

@@ -0,0 +1,295 @@
package patchreport
import (
"os"
"path/filepath"
"strings"
"testing"
)
// TestResolveThreshold verifies env-var-first threshold resolution: a valid
// env value wins over the default, while absent, non-numeric, and
// out-of-range values fall back to the default (with a warning emitted for
// the invalid cases).
func TestResolveThreshold(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name         string
		envValue     string
		envSet       bool
		defaultValue float64
		wantValue    float64
		wantSource   string
		wantWarning  bool
	}{
		{
			name:         "uses default when env is absent",
			envSet:       false,
			defaultValue: 90,
			wantValue:    90,
			wantSource:   "default",
			wantWarning:  false,
		},
		{
			name:         "uses env value when valid",
			envSet:       true,
			envValue:     "87.5",
			defaultValue: 85,
			wantValue:    87.5,
			wantSource:   "env",
			wantWarning:  false,
		},
		{
			name:         "falls back when env is invalid",
			envSet:       true,
			envValue:     "invalid",
			defaultValue: 85,
			wantValue:    85,
			wantSource:   "default",
			wantWarning:  true,
		},
		{
			name:         "falls back when env is out of range",
			envSet:       true,
			envValue:     "101",
			defaultValue: 85,
			wantValue:    85,
			wantSource:   "default",
			wantWarning:  true,
		},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for parallel subtests (pre-Go 1.22 semantics)
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			// Stub env lookup; the resolver is expected to query only "TARGET".
			lookup := func(name string) (string, bool) {
				if name != "TARGET" {
					t.Fatalf("unexpected env lookup key: %s", name)
				}
				if !tt.envSet {
					return "", false
				}
				return tt.envValue, true
			}
			resolved := ResolveThreshold("TARGET", tt.defaultValue, lookup)
			if resolved.Value != tt.wantValue {
				t.Fatalf("value mismatch: got %.1f want %.1f", resolved.Value, tt.wantValue)
			}
			if resolved.Source != tt.wantSource {
				t.Fatalf("source mismatch: got %s want %s", resolved.Source, tt.wantSource)
			}
			// A non-empty Warning field signals that fallback occurred.
			hasWarning := resolved.Warning != ""
			if hasWarning != tt.wantWarning {
				t.Fatalf("warning mismatch: got %v want %v (warning=%q)", hasWarning, tt.wantWarning, resolved.Warning)
			}
		})
	}
}
// TestParseUnifiedDiffChangedLines feeds a two-file unified diff to the
// parser and checks that added lines are attributed to the correct file
// and post-image line numbers, split into backend and frontend sets.
// The raw-string fixture is intentionally left untouched: its exact
// prefixes (+/-/space) are what the parser keys on.
func TestParseUnifiedDiffChangedLines(t *testing.T) {
	t.Parallel()
	diff := `diff --git a/backend/internal/app.go b/backend/internal/app.go
index 1111111..2222222 100644
--- a/backend/internal/app.go
+++ b/backend/internal/app.go
@@ -10,2 +10,3 @@ func example() {
line10
-line11
+line11 changed
+line12 new
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 3333333..4444444 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -20,0 +21,2 @@ export default function App() {
+new frontend line
+another frontend line
`
	backendChanged, frontendChanged, err := ParseUnifiedDiffChangedLines(diff)
	if err != nil {
		t.Fatalf("ParseUnifiedDiffChangedLines returned error: %v", err)
	}
	// "+line11 changed" and "+line12 new" land on post-image lines 11-12.
	assertHasLines(t, backendChanged, "backend/internal/app.go", []int{11, 12})
	// The frontend hunk appends two lines starting at post-image line 21.
	assertHasLines(t, frontendChanged, "frontend/src/App.tsx", []int{21, 22})
}
// TestBackendChangedLineCoverageComputation checks that changed lines are
// intersected with a parsed Go cover profile: only instrumented changed
// lines count toward the denominator, and the percentage is
// covered/changed rounded to one decimal.
func TestBackendChangedLineCoverageComputation(t *testing.T) {
	t.Parallel()
	profilePath := filepath.Join(t.TempDir(), "coverage.txt")
	profile := `mode: atomic
github.com/Wikid82/charon/backend/internal/service.go:10.1,10.20 1 1
github.com/Wikid82/charon/backend/internal/service.go:11.1,11.20 1 0
github.com/Wikid82/charon/backend/internal/service.go:12.1,12.20 1 1
`
	if err := os.WriteFile(profilePath, []byte(profile), 0o600); err != nil {
		t.Fatalf("failed to write temp coverage file: %v", err)
	}
	coverage, err := ParseGoCoverageProfile(profilePath)
	if err != nil {
		t.Fatalf("ParseGoCoverageProfile returned error: %v", err)
	}
	// Line 15 is changed but not instrumented, so it must not be counted.
	changed := FileLineSet{
		"backend/internal/service.go": {10: {}, 11: {}, 15: {}},
	}
	scope := ComputeScopeCoverage(changed, coverage)
	switch {
	case scope.ChangedLines != 2:
		t.Fatalf("changed lines mismatch: got %d want 2", scope.ChangedLines)
	case scope.CoveredLines != 1:
		t.Fatalf("covered lines mismatch: got %d want 1", scope.CoveredLines)
	case scope.PatchCoveragePct != 50.0:
		t.Fatalf("coverage pct mismatch: got %.1f want 50.0", scope.PatchCoveragePct)
	}
}
// TestFrontendChangedLineCoverageComputationFromLCOV exercises the LCOV
// path end to end: DA records are correlated with changed lines, the
// percentage is computed over instrumented changed lines only, and a
// result below the threshold is marked "warn" by ApplyStatus.
func TestFrontendChangedLineCoverageComputationFromLCOV(t *testing.T) {
	t.Parallel()
	lcovPath := filepath.Join(t.TempDir(), "lcov.info")
	lcov := `TN:
SF:frontend/src/App.tsx
DA:10,1
DA:11,0
DA:12,1
end_of_record
`
	if err := os.WriteFile(lcovPath, []byte(lcov), 0o600); err != nil {
		t.Fatalf("failed to write temp lcov file: %v", err)
	}
	coverage, err := ParseLCOVProfile(lcovPath)
	if err != nil {
		t.Fatalf("ParseLCOVProfile returned error: %v", err)
	}
	// Line 13 is changed but has no DA record, so it is excluded.
	changed := FileLineSet{
		"frontend/src/App.tsx": {10: {}, 11: {}, 13: {}},
	}
	scope := ComputeScopeCoverage(changed, coverage)
	switch {
	case scope.ChangedLines != 2:
		t.Fatalf("changed lines mismatch: got %d want 2", scope.ChangedLines)
	case scope.CoveredLines != 1:
		t.Fatalf("covered lines mismatch: got %d want 1", scope.CoveredLines)
	case scope.PatchCoveragePct != 50.0:
		t.Fatalf("coverage pct mismatch: got %.1f want 50.0", scope.PatchCoveragePct)
	}
	// 50% is below the 85% threshold, so the scope must be flagged.
	status := ApplyStatus(scope, 85)
	if status.Status != "warn" {
		t.Fatalf("status mismatch: got %s want warn", status.Status)
	}
}
// TestParseUnifiedDiffChangedLines_AllowsLongLines ensures the diff
// parser tolerates an added line well beyond bufio.Scanner's default
// 64 KiB token limit (128 KiB is used here).
func TestParseUnifiedDiffChangedLines_AllowsLongLines(t *testing.T) {
	t.Parallel()
	oversized := strings.Repeat("x", 128*1024)
	diffLines := []string{
		"diff --git a/backend/internal/app.go b/backend/internal/app.go",
		"index 1111111..2222222 100644",
		"--- a/backend/internal/app.go",
		"+++ b/backend/internal/app.go",
		"@@ -1,1 +1,2 @@",
		" line1",
		"+" + oversized,
	}
	backendChanged, _, err := ParseUnifiedDiffChangedLines(strings.Join(diffLines, "\n"))
	if err != nil {
		t.Fatalf("ParseUnifiedDiffChangedLines returned error for long line: %v", err)
	}
	// The oversized addition lands on post-image line 2.
	assertHasLines(t, backendChanged, "backend/internal/app.go", []int{2})
}
// TestParseGoCoverageProfile_AllowsLongLines verifies the Go profile
// parser accepts a single profile line far beyond the default scanner
// token size (a 128 KiB path segment).
func TestParseGoCoverageProfile_AllowsLongLines(t *testing.T) {
	t.Parallel()
	profilePath := filepath.Join(t.TempDir(), "coverage.txt")
	oversizedSegment := strings.Repeat("a", 128*1024)
	profile := "mode: atomic\n" +
		"github.com/Wikid82/charon/backend/internal/" + oversizedSegment + ".go:10.1,10.20 1 1\n"
	if err := os.WriteFile(profilePath, []byte(profile), 0o600); err != nil {
		t.Fatalf("failed to write temp coverage file: %v", err)
	}
	if _, err := ParseGoCoverageProfile(profilePath); err != nil {
		t.Fatalf("ParseGoCoverageProfile returned error for long line: %v", err)
	}
}
// TestParseLCOVProfile_AllowsLongLines verifies the LCOV parser accepts
// an SF record whose path exceeds the default scanner token size
// (128 KiB here).
func TestParseLCOVProfile_AllowsLongLines(t *testing.T) {
	t.Parallel()
	lcovPath := filepath.Join(t.TempDir(), "lcov.info")
	oversizedPath := strings.Repeat("a", 128*1024)
	records := []string{
		"TN:",
		"SF:frontend/src/" + oversizedPath + ".tsx",
		"DA:10,1",
		"end_of_record",
	}
	if err := os.WriteFile(lcovPath, []byte(strings.Join(records, "\n")), 0o600); err != nil {
		t.Fatalf("failed to write temp lcov file: %v", err)
	}
	if _, err := ParseLCOVProfile(lcovPath); err != nil {
		t.Fatalf("ParseLCOVProfile returned error for long line: %v", err)
	}
}
// assertHasLines fails the test unless every expected line number is
// recorded for file in the changed-line set.
func assertHasLines(t *testing.T, changed FileLineSet, file string, expected []int) {
	t.Helper()
	lines, ok := changed[file]
	if !ok {
		t.Fatalf("file %s not found in changed lines", file)
	}
	for _, want := range expected {
		if _, found := lines[want]; found {
			continue
		}
		t.Fatalf("expected line %d in file %s", want, file)
	}
}
// TestValidateReadablePath covers the two contract points of
// validateReadablePath: blank input is rejected, and valid input is
// resolved to an absolute path.
func TestValidateReadablePath(t *testing.T) {
	t.Parallel()
	t.Run("returns error for empty path", func(t *testing.T) {
		t.Parallel()
		if _, err := validateReadablePath(" "); err == nil {
			t.Fatal("expected error for empty path")
		}
	})
	t.Run("returns absolute cleaned path", func(t *testing.T) {
		t.Parallel()
		resolved, err := validateReadablePath("./backend/../backend/internal")
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
		if !filepath.IsAbs(resolved) {
			t.Fatalf("expected absolute path, got %q", resolved)
		}
	})
}

View File

@@ -1,145 +1,245 @@
# CI Encryption-Key Investigation and Remediation Plan
## Local Pre-CI Patch Report (Single Scope)
## Context
- Date: 2026-02-17
- Scope: CI failures where backend jobs report encryption key not picked up.
- In-scope files:
- `.github/workflows/quality-checks.yml`
- `.github/workflows/codecov-upload.yml`
- `scripts/go-test-coverage.sh`
- `backend/internal/crypto/rotation_service.go`
- `backend/internal/services/dns_provider_service.go`
- `backend/internal/services/credential_service.go`
Date: 2026-02-17
Scope: Add a local pre-CI patch report to Definition of Done (DoD) unit-testing flow for both backend and frontend.
## Problem Statement
CI backend tests can fail late and ambiguously when `CHARON_ENCRYPTION_KEY` is missing or malformed. The root causes are context-dependent secret availability, missing preflight validation, and drift between workflow intent and implementation.
## 1) Objective
## Research Findings
Add one executable local workflow that computes patch coverage from current branch changes and publishes a consolidated report before CI runs.
### Workflow Surface and Risks
| Workflow | Job | Key-sensitive step | Current key source | Main risk |
|---|---|---|---|---|
| `.github/workflows/quality-checks.yml` | `backend-quality` | `Run Go tests`, `Run Perf Asserts` | `${{ secrets.CHARON_ENCRYPTION_KEY_TEST }}` | Empty/malformed input not preflighted |
| `.github/workflows/codecov-upload.yml` | `backend-codecov` | `Run Go tests with coverage` | `${{ secrets.CHARON_ENCRYPTION_KEY_TEST }}` | Same key-risk as above |
The report must consume backend and frontend coverage inputs, use `origin/main...HEAD` as the patch baseline, and produce human-readable and machine-readable artifacts in `test-results/`.
### Backend Failure Surface
- `backend/internal/crypto/rotation_service.go`
- `NewRotationService(db *gorm.DB)` hard-fails if `CHARON_ENCRYPTION_KEY` is empty.
- `backend/internal/services/dns_provider_service.go`
- `NewDNSProviderService(...)` depends on `NewRotationService(...)` and can degrade to warning-based behavior when key input is bad.
- `backend/internal/services/credential_service.go`
- `NewCredentialService(...)` has the same dependency pattern.
## 2) In Scope / Out of Scope
### Script Failure Mode
- `scripts/go-test-coverage.sh` currently uses `set -euo pipefail` but does not pre-validate key shape before `go test`.
- Empty secret expressions become late runtime failures instead of deterministic preflight failures.
### In Scope
## Supervisor-Required Constraints (Preserved)
1. `pull_request_target` SHALL NOT be used for secret-bearing backend test execution on untrusted code (fork PRs and Dependabot PRs).
2. Same-repo `pull_request` and `workflow_dispatch` SHALL require `CHARON_ENCRYPTION_KEY_TEST`; missing secret SHALL fail fast (no fallback).
3. Fork PRs and Dependabot PRs SHALL use workflow-only ephemeral key fallback for backend test execution.
4. Key material SHALL NEVER be logged.
5. Resolved key SHALL be masked before any potential output path.
6. `GITHUB_ENV` propagation SHALL use safe delimiter write pattern.
7. Workflow layer SHALL own key resolution/fallback.
8. Script layer SHALL only validate and fail fast; it SHALL NOT generate fallback keys.
9. Anti-drift guard SHALL be added so trigger comments and trigger blocks remain aligned.
10. Known drift SHALL be corrected: comment in `quality-checks.yml` about `codecov-upload.yml` trigger behavior must match actual triggers.
- Local patch report generation.
- Backend + frontend DoD unit-testing integration.
- VS Code task wiring for repeatable local execution.
- Non-blocking warning policy for initial rollout.
## EARS Requirements
### Out of Scope
### Ubiquitous
- THE SYSTEM SHALL fail fast with explicit diagnostics when encryption-key input is required and unavailable or malformed.
- THE SYSTEM SHALL prevent secret-value exposure in logs, summaries, and artifacts.
- CI gate changes.
- Encryption-key or unrelated reliability/security remediation.
- Historical Codecov placeholder gates and unrelated patch-closure matrices.
### Event-driven
- WHEN workflow context is trusted (same-repo `pull_request` or `workflow_dispatch`), THE SYSTEM SHALL require `secrets.CHARON_ENCRYPTION_KEY_TEST`.
- WHEN workflow context is untrusted (fork PR or Dependabot PR), THE SYSTEM SHALL generate ephemeral key material in workflow preflight only.
- WHEN workflow context is untrusted, THE SYSTEM SHALL NOT use `pull_request_target` for secret-bearing backend tests.
## 3) Required Inputs and Baseline
### Unwanted behavior
- IF `CHARON_ENCRYPTION_KEY` is empty, non-base64, or decoded length is not 32 bytes, THEN THE SYSTEM SHALL stop before running tests.
- IF trigger comments diverge from workflow triggers, THEN THE SYSTEM SHALL fail anti-drift validation.
### Coverage Inputs
## Technical Design
- Backend coverage profile: `backend/coverage.txt`
- Frontend coverage profile: `frontend/coverage/lcov.info`
### Workflow Contract
Both backend jobs (`backend-quality`, `backend-codecov`) implement the same preflight sequence:
1. `Resolve encryption key for backend tests`
2. `Fail fast when required encryption secret is missing`
3. `Validate encryption key format`
### Diff Baseline
### Preflight Resolution Algorithm
1. Detect fork PR context via `github.event.pull_request.head.repo.fork`.
2. Detect Dependabot PR context (actor/repo metadata check).
3. Trusted context: require `secrets.CHARON_ENCRYPTION_KEY_TEST`; fail immediately if empty.
4. Untrusted context: generate ephemeral key (`openssl rand -base64 32`) in workflow only.
5. Mask resolved key via `::add-mask::`.
6. Export via delimiter-based `GITHUB_ENV` write:
- `CHARON_ENCRYPTION_KEY<<EOF`
- `<value>`
- `EOF`
- Git diff range: `origin/main...HEAD`
### Script Validation Contract
`scripts/go-test-coverage.sh` adds strict preflight validation:
- Present and non-empty.
- Base64 decodable.
- Decoded length exactly 32 bytes.
### Preconditions
Script constraints:
- SHALL NOT generate keys.
- SHALL NOT select key source.
- SHALL only validate and fail fast with deterministic error messages.
- `origin/main` is fetchable locally.
- Backend and frontend coverage artifacts exist before report generation.
### Error Handling Matrix
| Condition | Detection layer | Outcome |
|---|---|---|
| Trusted context + missing secret | Workflow preflight | Immediate failure with explicit message |
| Untrusted context + no secret access | Workflow preflight | Ephemeral key path (masked) |
| Malformed key | Script preflight | Immediate failure before `go test` |
| Trigger/comment drift | Workflow consistency guard | CI failure until synchronized |
## 4) Required Output Artifacts
## Implementation Plan
- Markdown report: `test-results/local-patch-report.md`
- JSON report: `test-results/local-patch-report.json`
### Phase 1: Workflow Hardening
- Update `.github/workflows/quality-checks.yml` and `.github/workflows/codecov-upload.yml` with identical key-resolution and key-validation steps.
- Enforce trusted-context fail-fast and untrusted-context fallback boundaries.
- Add explicit prohibition notes and controls preventing `pull_request_target` migration for secret-bearing tests.
Both artifacts are mandatory per run. Missing either artifact is a failed local report run.
### Phase 2: Script Preflight Hardening
- Update `scripts/go-test-coverage.sh` to validate key presence/format/length before tests.
- Preserve existing coverage behavior; only harden pre-test guard path.
## 5) Initial Policy (Rollout)
### Phase 3: Anti-Drift Enforcement
- Define one canonical backend-key-bootstrap contract path.
- Add consistency check that enforces trigger/comment parity between `quality-checks.yml` and `codecov-upload.yml`.
- Fix known push-only comment mismatch in `quality-checks.yml`.
### Initial Policy (Non-Blocking)
## Validation Plan
Run these scenarios:
1. Same-repo PR with valid secret.
2. Same-repo PR with missing secret (must fail fast).
3. Same-repo PR with malformed secret (must fail fast before tests).
4. Fork PR with no secret access (must use ephemeral fallback).
5. Dependabot PR with no secret access (must use ephemeral fallback, no `pull_request_target`).
6. `workflow_dispatch` with valid secret.
- Local patch report does not fail DoD on low patch coverage during initial rollout.
- Local runner emits warnings (stdout + markdown/json status fields) when thresholds are not met.
- DoD requires the report to run and artifacts to exist, even in warning mode.
Expected results:
- No late ambiguous key-init failures.
- No secret material logged.
- Deterministic and attributable failure messages.
- Trigger docs and trigger config remain synchronized.
### Threshold Defaults and Source Precedence
## Acceptance Criteria
- Backend jobs in `quality-checks.yml` and `codecov-upload.yml` no longer fail ambiguously on encryption-key pickup.
- Trusted contexts fail fast if `CHARON_ENCRYPTION_KEY_TEST` is missing.
- Untrusted contexts use workflow-only ephemeral fallback.
- `scripts/go-test-coverage.sh` enforces deterministic key preflight checks.
- `pull_request_target` is explicitly prohibited for secret-bearing backend tests on untrusted code.
- Never-log-key-material and safe `GITHUB_ENV` propagation are implemented.
- Workflow/script responsibility boundary is enforced.
- Anti-drift guard is present and known trigger-comment mismatch is resolved.
- Coverage thresholds are resolved with this precedence:
1. Environment variables (highest precedence)
2. Built-in defaults (fallback)
- Threshold environment variables:
- `CHARON_OVERALL_PATCH_COVERAGE_MIN`
- `CHARON_BACKEND_PATCH_COVERAGE_MIN`
- `CHARON_FRONTEND_PATCH_COVERAGE_MIN`
- Built-in defaults for this rollout:
- Overall patch coverage minimum: `90`
- Backend patch coverage minimum: `85`
- Frontend patch coverage minimum: `85`
- Parsing/validation:
- Values must be numeric percentages in `[0, 100]`.
- Invalid env values are ignored with a warning, and the corresponding default is used.
## Handoff to Supervisor
- This document is intentionally single-scope and restricted to CI encryption-key investigation/remediation.
- Legacy multi-topic coverage planning content has been removed from this file to maintain coherence.
### Future Policy (Optional Hard Gate)
- Optional future switch to hard gate (non-zero exit on threshold breach).
- Gate behavior is controlled by a dedicated flag/env (to be added during implementation).
- Hard-gate enablement is explicitly deferred and not part of this rollout.
## 6) Technical Specification
### 6.1 Script
Implement a new local report script:
- Path: `scripts/local-patch-report.sh`
- Responsibilities:
1. Validate required inputs exist (`backend/coverage.txt`, `frontend/coverage/lcov.info`).
2. Resolve patch files/lines from `origin/main...HEAD`.
3. Correlate changed lines with backend/frontend coverage data.
4. Compute patch summary by component and overall.
5. Resolve thresholds using env-var-first precedence, then defaults (`90/85/85`).
6. Evaluate statuses against resolved thresholds:
- `overall.status=pass` when `overall.patch_coverage_pct >= overall_threshold`, else `warn`.
- `backend.status=pass` when `backend.patch_coverage_pct >= backend_threshold`, else `warn`.
- `frontend.status=pass` when `frontend.patch_coverage_pct >= frontend_threshold`, else `warn`.
7. Emit warning status when any scope is below its resolved threshold.
8. Write required outputs:
- `test-results/local-patch-report.md`
- `test-results/local-patch-report.json`
### 6.2 Report Contract
Minimum JSON fields:
- `baseline`: `origin/main...HEAD`
- `generated_at`
- `mode`: `warn` (initial rollout)
- `thresholds`:
- `overall_patch_coverage_min`
- `backend_patch_coverage_min`
- `frontend_patch_coverage_min`
- `threshold_sources`:
- `overall` (`env` | `default`)
- `backend` (`env` | `default`)
- `frontend` (`env` | `default`)
- `overall`:
- `changed_lines`
- `covered_lines`
- `patch_coverage_pct`
- `status` (`pass` | `warn`)
- `backend` and `frontend` objects with same coverage counters and status
- `artifacts` with emitted file paths
Minimum Markdown sections:
- Run metadata (timestamp, baseline)
- Input paths used
- Resolved thresholds and their sources (env/default)
- Coverage summary table (overall/backend/frontend)
- Warning section (if any)
- Artifact paths
### 6.3 Task Wiring
Add VS Code task entries in `.vscode/tasks.json`:
1. `Test: Local Patch Report`
- Runs report generation script only.
2. `Test: Backend DoD + Local Patch Report`
- Runs backend unit test coverage flow, then local patch report.
3. `Test: Frontend DoD + Local Patch Report`
- Runs frontend unit test coverage flow, then local patch report.
4. `Test: Full DoD Unit + Local Patch Report`
- Runs backend + frontend unit coverage flows, then local patch report.
Task behavior:
- Reuse existing coverage scripts/tasks where available.
- Keep command order deterministic: coverage generation first, patch report second.
## 7) Implementation Tasks
### Phase 1 — Script Foundation
- [ ] Create `scripts/local-patch-report.sh`.
- [ ] Add input validation + clear error messages.
- [ ] Add diff parsing for `origin/main...HEAD`.
### Phase 2 — Coverage Correlation
- [ ] Parse backend `coverage.txt` and map covered lines.
- [ ] Parse frontend `coverage/lcov.info` and map covered lines.
- [ ] Compute per-scope and overall patch coverage counters.
### Phase 3 — Artifact Emission
- [ ] Generate `test-results/local-patch-report.json` with required schema.
- [ ] Generate `test-results/local-patch-report.md` with summary + warnings.
- [ ] Ensure `test-results/` creation if missing.
### Phase 4 — Task Wiring
- [ ] Add `Test: Local Patch Report` to `.vscode/tasks.json`.
- [ ] Add backend/frontend/full DoD task variants with report execution.
- [ ] Verify tasks run successfully from workspace root.
### Phase 5 — Documentation Alignment
- [ ] Update DoD references in applicable docs/instructions only where this local report is now required.
- [ ] Remove stale references to unrelated placeholder gates in active plan context.
## 8) Validation Commands
Run from repository root unless noted.
1. Generate backend coverage input:
```bash
cd backend && go test ./... -coverprofile=coverage.txt
```
2. Generate frontend coverage input:
```bash
cd frontend && npm run test:coverage
```
3. Generate local patch report directly:
```bash
./scripts/local-patch-report.sh
```
4. Generate local patch report via task:
```bash
# VS Code task: Test: Local Patch Report
```
5. Validate artifacts exist:
```bash
test -f test-results/local-patch-report.md
test -f test-results/local-patch-report.json
```
6. Validate baseline recorded in JSON:
```bash
jq -r '.baseline' test-results/local-patch-report.json
# expected: origin/main...HEAD
```
## 9) Acceptance Criteria
- [ ] Plan remains single-scope: local pre-CI patch report for DoD unit testing only.
- [ ] Inputs are explicit and used:
- [ ] `backend/coverage.txt`
- [ ] `frontend/coverage/lcov.info`
- [ ] `origin/main...HEAD`
- [ ] Outputs are generated on every successful run:
- [ ] `test-results/local-patch-report.md`
- [ ] `test-results/local-patch-report.json`
- [ ] Initial policy is non-blocking warning mode.
- [ ] Default thresholds are explicit:
- [ ] Overall patch coverage: `90`
- [ ] Backend patch coverage: `85`
- [ ] Frontend patch coverage: `85`
- [ ] Threshold source precedence is explicit: env vars first, then defaults.
- [ ] Future hard-gate mode is documented as optional and deferred.
- [ ] Concrete script + task wiring tasks are present and executable.
- [ ] Validation commands are present and reproducible.
- [ ] Stale unrelated placeholder gates are removed from this active spec.

View File

@@ -28,12 +28,18 @@ mkdir -p coverage/.tmp
npm run test:coverage -- --run
SUMMARY_FILE="coverage/coverage-summary.json"
LCOV_FILE="coverage/lcov.info"
# Fail fast when the coverage summary JSON was not produced by the test run.
if [ ! -f "$SUMMARY_FILE" ]; then
  echo "Error: Coverage summary file not found at $SUMMARY_FILE"
  exit 1
fi
# The LCOV profile is consumed by the local patch report; require it too.
if [ ! -f "$LCOV_FILE" ]; then
  echo "Error: LCOV coverage file not found at $LCOV_FILE"
  exit 1
fi
# Extract coverage metrics and validate
LINES_PERCENT=$(python3 - <<'PY'
import json

52
scripts/local-patch-report.sh Executable file
View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Local pre-CI patch report: validates tool and coverage-input availability,
# checks the diff baseline is resolvable, then delegates report generation
# to the Go helper under backend/cmd/localpatchreport.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BASELINE="${CHARON_PATCH_BASELINE:-origin/main...HEAD}"
BACKEND_COVERAGE_FILE="$ROOT_DIR/backend/coverage.txt"
FRONTEND_COVERAGE_FILE="$ROOT_DIR/frontend/coverage/lcov.info"
JSON_OUT="$ROOT_DIR/test-results/local-patch-report.json"
MD_OUT="$ROOT_DIR/test-results/local-patch-report.md"

# die prints a message to stderr and aborts with exit status 1.
die() {
  echo "$1" >&2
  exit 1
}

command -v git >/dev/null 2>&1 || die "Error: git is required to generate local patch report."
command -v go >/dev/null 2>&1 || die "Error: go is required to generate local patch report."
[[ -f "$BACKEND_COVERAGE_FILE" ]] || die "Error: backend coverage input missing at $BACKEND_COVERAGE_FILE"
[[ -f "$FRONTEND_COVERAGE_FILE" ]] || die "Error: frontend coverage input missing at $FRONTEND_COVERAGE_FILE"

# For a "base...head" range, the base ref is everything before "...";
# a plain ref baseline is verified as-is.
BASE_REF="$BASELINE"
if [[ "$BASELINE" == *"..."* ]]; then
  BASE_REF="${BASELINE%%...*}"
fi
if [[ -n "$BASE_REF" ]] && ! git -C "$ROOT_DIR" rev-parse --verify --quiet "${BASE_REF}^{commit}" >/dev/null; then
  die "Error: baseline base ref '$BASE_REF' is not available locally. Set CHARON_PATCH_BASELINE to a valid range and retry."
fi

mkdir -p "$ROOT_DIR/test-results"

# Run from backend/ so its Go module resolves cmd/localpatchreport.
(
  cd "$ROOT_DIR/backend"
  go run ./cmd/localpatchreport \
    --repo-root "$ROOT_DIR" \
    --baseline "$BASELINE" \
    --backend-coverage "$BACKEND_COVERAGE_FILE" \
    --frontend-coverage "$FRONTEND_COVERAGE_FILE" \
    --json-out "$JSON_OUT" \
    --md-out "$MD_OUT"
)