chore: git cache cleanup

This commit is contained in:
GitHub Actions
2026-03-04 18:34:49 +00:00
parent c32cce2a88
commit 27c252600a
2001 changed files with 683185 additions and 0 deletions

304
backend/cmd/api/main.go Normal file
View File

@@ -0,0 +1,304 @@
// Package main is the entry point for the Charon backend API.
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/Wikid82/charon/backend/internal/api/handlers"
"github.com/Wikid82/charon/backend/internal/api/middleware"
"github.com/Wikid82/charon/backend/internal/api/routes"
"github.com/Wikid82/charon/backend/internal/caddy"
"github.com/Wikid82/charon/backend/internal/cerberus"
"github.com/Wikid82/charon/backend/internal/config"
"github.com/Wikid82/charon/backend/internal/database"
"github.com/Wikid82/charon/backend/internal/logger"
"github.com/Wikid82/charon/backend/internal/models"
"github.com/Wikid82/charon/backend/internal/server"
"github.com/Wikid82/charon/backend/internal/services"
"github.com/Wikid82/charon/backend/internal/version"
_ "github.com/Wikid82/charon/backend/pkg/dnsprovider/builtin" // Register built-in DNS providers
"github.com/gin-gonic/gin"
"gopkg.in/natefinch/lumberjack.v2"
)
// parsePluginSignatures reads the CHARON_PLUGIN_SIGNATURES environment variable
// and returns the parsed signature allowlist for plugin verification.
//
// Modes:
//   - nil return (permissive): env var unset/empty — all plugins allowed
//   - empty map (strict): env var set to "{}" — no external plugins allowed
//   - populated map: only plugins with matching signatures are allowed
func parsePluginSignatures() map[string]string {
	raw := os.Getenv("CHARON_PLUGIN_SIGNATURES")
	if raw == "" {
		logger.Log().Info("Plugin signature verification: PERMISSIVE mode (CHARON_PLUGIN_SIGNATURES not set)")
		return nil
	}

	var allowlist map[string]string
	if err := json.Unmarshal([]byte(raw), &allowlist); err != nil {
		logger.Log().WithError(err).Error("Failed to parse CHARON_PLUGIN_SIGNATURES JSON — falling back to permissive mode")
		return nil
	}

	// Any entry without a sha256: digest invalidates the whole allowlist.
	for pluginName, digest := range allowlist {
		if !strings.HasPrefix(digest, "sha256:") {
			logger.Log().Errorf("Invalid signature for plugin %q: must have sha256: prefix — falling back to permissive mode", pluginName)
			return nil
		}
	}

	switch {
	case len(allowlist) == 0:
		logger.Log().Info("Plugin signature verification: STRICT mode (empty allowlist — no external plugins permitted)")
	default:
		logger.Log().Infof("Plugin signature verification: STRICT mode (%d plugin(s) in allowlist)", len(allowlist))
	}
	return allowlist
}
// main is the backend entry point. It wires up logging, handles one-shot CLI
// subcommands (migrate, reset-password), then boots the HTTP API, emergency
// server, CrowdSec reconciliation, and the plugin system, and finally blocks
// until SIGINT/SIGTERM triggers a graceful shutdown.
func main() {
	// Setup logging with rotation. Prefer the container path; fall back to a
	// relative directory for local development.
	logDir := "/app/data/logs"
	// #nosec G301 -- Log directory with standard permissions
	if err := os.MkdirAll(logDir, 0o755); err != nil {
		// Fallback to local directory if /app/data fails (e.g. local dev)
		logDir = "data/logs"
		// #nosec G301 -- Fallback log directory with standard permissions
		_ = os.MkdirAll(logDir, 0o755)
	}
	logFile := filepath.Join(logDir, "charon.log")
	// lumberjack rotates the file by size and prunes/compresses old backups.
	rotator := &lumberjack.Logger{
		Filename:   logFile,
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     28, // days
		Compress:   true,
	}
	// Ensure legacy cpmp.log exists as symlink for compatibility (cpmp is a legacy name for Charon)
	legacyLog := filepath.Join(logDir, "cpmp.log")
	if _, err := os.Lstat(legacyLog); os.IsNotExist(err) {
		_ = os.Symlink(logFile, legacyLog) // ignore errors
	}
	// Log to both stdout and the rotating file; gin shares the same writer.
	mw := io.MultiWriter(os.Stdout, rotator)
	log.SetOutput(mw)
	gin.DefaultWriter = mw
	// Initialize a basic logger so CLI and early code can log.
	logger.Init(false, mw)
	// Handle CLI commands. Each subcommand runs to completion and returns
	// without starting the HTTP server.
	if len(os.Args) > 1 {
		switch os.Args[1] {
		case "migrate":
			// One-shot migration of every model; used by deploy tooling.
			cfg, err := config.Load()
			if err != nil {
				log.Fatalf("load config: %v", err)
			}
			db, err := database.Connect(cfg.DatabasePath)
			if err != nil {
				log.Fatalf("connect database: %v", err)
			}
			logger.Log().Info("Running database migrations for all models...")
			if err := db.AutoMigrate(
				// Core models
				&models.ProxyHost{},
				&models.Location{},
				&models.CaddyConfig{},
				&models.RemoteServer{},
				&models.SSLCertificate{},
				&models.AccessList{},
				&models.SecurityHeaderProfile{},
				&models.User{},
				&models.Setting{},
				&models.ImportSession{},
				&models.Notification{},
				&models.NotificationProvider{},
				&models.NotificationTemplate{},
				&models.NotificationConfig{},
				&models.UptimeMonitor{},
				&models.UptimeHeartbeat{},
				&models.UptimeHost{},
				&models.UptimeNotificationEvent{},
				&models.Domain{},
				&models.UserPermittedHost{},
				// Security models
				&models.SecurityConfig{},
				&models.SecurityDecision{},
				&models.SecurityAudit{},
				&models.SecurityRuleSet{},
				&models.CrowdsecPresetEvent{},
				&models.CrowdsecConsoleEnrollment{},
				&models.EmergencyToken{}, // Phase 2: Database-backed emergency tokens
				// DNS Provider models (Issue #21)
				&models.DNSProvider{},
				&models.DNSProviderCredential{},
				// Plugin model (Phase 5)
				&models.Plugin{},
			); err != nil {
				log.Fatalf("migration failed: %v", err)
			}
			logger.Log().Info("Migration completed successfully")
			return
		case "reset-password":
			// Admin recovery path: set a new password and clear lockout state.
			if len(os.Args) != 4 {
				log.Fatalf("Usage: %s reset-password <email> <new-password>", os.Args[0])
			}
			email := os.Args[2]
			newPassword := os.Args[3]
			cfg, err := config.Load()
			if err != nil {
				log.Fatalf("load config: %v", err)
			}
			db, err := database.Connect(cfg.DatabasePath)
			if err != nil {
				log.Fatalf("connect database: %v", err)
			}
			var user models.User
			if err := db.Where("email = ?", email).First(&user).Error; err != nil {
				log.Fatalf("user not found: %v", err)
			}
			if err := user.SetPassword(newPassword); err != nil {
				log.Fatalf("failed to hash password: %v", err)
			}
			// Unlock account if locked
			user.LockedUntil = nil
			user.FailedLoginAttempts = 0
			if err := db.Save(&user).Error; err != nil {
				log.Fatalf("failed to save user: %v", err)
			}
			logger.Log().Infof("Password updated successfully for user %s", email)
			return
		}
	}
	logger.Log().Infof("starting %s backend on version %s", version.Name, version.Full())
	cfg, err := config.Load()
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	db, err := database.Connect(cfg.DatabasePath)
	if err != nil {
		log.Fatalf("connect database: %v", err)
	}
	// Note: All database migrations are centralized in routes.Register()
	// This ensures migrations run exactly once and in the correct order.
	// DO NOT add AutoMigrate calls here - they cause "duplicate column" errors.
	// Reconcile CrowdSec state after migrations, before HTTP server starts
	// This ensures CrowdSec is running if user preference was to have it enabled
	crowdsecBinPath := os.Getenv("CHARON_CROWDSEC_BIN")
	if crowdsecBinPath == "" {
		crowdsecBinPath = "/usr/local/bin/crowdsec"
	}
	crowdsecDataDir := os.Getenv("CHARON_CROWDSEC_DATA")
	if crowdsecDataDir == "" {
		crowdsecDataDir = "/app/data/crowdsec"
	}
	crowdsecExec := handlers.NewDefaultCrowdsecExecutor()
	services.ReconcileCrowdSecOnStartup(db, crowdsecExec, crowdsecBinPath, crowdsecDataDir, nil)
	// Initialize plugin loader and load external DNS provider plugins (Phase 5)
	logger.Log().Info("Initializing DNS provider plugin system...")
	pluginDir := os.Getenv("CHARON_PLUGINS_DIR")
	if pluginDir == "" {
		pluginDir = "/app/plugins"
	}
	pluginLoader := services.NewPluginLoaderService(db, pluginDir, parsePluginSignatures())
	if err := pluginLoader.LoadAllPlugins(); err != nil {
		// Plugin load failures are non-fatal; the API starts without them.
		logger.Log().WithError(err).Warn("Failed to load external DNS provider plugins")
	}
	logger.Log().Info("Plugin system initialized")
	router := server.NewRouter(cfg.FrontendDir)
	// Initialize structured logger with same writer as stdlib log so both capture logs
	logger.Init(cfg.Debug, mw)
	// Request ID middleware must run before recovery so the recover logs include the request id
	router.Use(middleware.RequestID())
	// Log requests with request-scoped logger
	router.Use(middleware.RequestLogger())
	// Attach a recovery middleware that logs stack traces when debug is enabled
	router.Use(middleware.Recovery(cfg.Debug))
	// Shared Caddy manager and Cerberus instance for API + emergency server
	caddyClient := caddy.NewClient(cfg.CaddyAdminAPI)
	caddyManager := caddy.NewManager(caddyClient, db, cfg.CaddyConfigDir, cfg.FrontendDir, cfg.ACMEStaging, cfg.Security)
	cerb := cerberus.New(cfg.Security, db)
	// Pass config to routes for auth service and certificate service
	if err := routes.RegisterWithDeps(router, db, cfg, caddyManager, cerb); err != nil {
		log.Fatalf("register routes: %v", err)
	}
	// Register import handler with config dependencies
	routes.RegisterImportHandler(router, db, cfg, cfg.CaddyBinary, cfg.ImportDir, cfg.ImportCaddyfile)
	// Check for mounted Caddyfile on startup
	if err := handlers.CheckMountedImport(db, cfg.ImportCaddyfile, cfg.CaddyBinary, cfg.ImportDir); err != nil {
		logger.Log().WithError(err).Warn("WARNING: failed to process mounted Caddyfile")
	}
	// Initialize emergency server (Tier 2 break glass)
	emergencyServer := server.NewEmergencyServerWithDeps(db, cfg.Emergency, caddyManager, cerb)
	if err := emergencyServer.Start(); err != nil {
		logger.Log().WithError(err).Fatal("Failed to start emergency server")
	}
	// Setup graceful shutdown
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	// Start main HTTP server in goroutine
	go func() {
		addr := fmt.Sprintf(":%s", cfg.HTTPPort)
		logger.Log().Infof("starting %s backend on %s", version.Name, addr)
		if err := router.Run(addr); err != nil {
			logger.Log().WithError(err).Fatal("server error")
		}
	}()
	// Wait for interrupt signal
	sig := <-quit
	logger.Log().Infof("Received signal %v, initiating graceful shutdown...", sig)
	// Graceful shutdown with timeout
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Stop emergency server
	// NOTE(review): only the emergency server is stopped here; router.Run has
	// no Shutdown hook, so in-flight API requests are not drained before exit
	// — confirm this is intended.
	if err := emergencyServer.Stop(ctx); err != nil {
		logger.Log().WithError(err).Error("Emergency server shutdown error")
	}
	logger.Log().Info("Server shutdown complete")
}

View File

@@ -0,0 +1,54 @@
package main
import "testing"
// TestParsePluginSignatures exercises every mode of the signature allowlist
// parser: permissive fallbacks (unset env, bad JSON, bad digest prefix) and
// strict mode (empty and populated allowlists).
func TestParsePluginSignatures(t *testing.T) {
	t.Run("unset env returns nil", func(t *testing.T) {
		t.Setenv("CHARON_PLUGIN_SIGNATURES", "")
		if got := parsePluginSignatures(); got != nil {
			t.Fatalf("expected nil signatures when env is unset, got: %#v", got)
		}
	})
	t.Run("invalid json returns nil", func(t *testing.T) {
		t.Setenv("CHARON_PLUGIN_SIGNATURES", "{invalid}")
		if got := parsePluginSignatures(); got != nil {
			t.Fatalf("expected nil signatures for invalid json, got: %#v", got)
		}
	})
	t.Run("invalid prefix returns nil", func(t *testing.T) {
		t.Setenv("CHARON_PLUGIN_SIGNATURES", `{"plugin.so":"md5:deadbeef"}`)
		if got := parsePluginSignatures(); got != nil {
			t.Fatalf("expected nil signatures for invalid prefix, got: %#v", got)
		}
	})
	t.Run("empty allowlist returns empty map", func(t *testing.T) {
		t.Setenv("CHARON_PLUGIN_SIGNATURES", `{}`)
		got := parsePluginSignatures()
		if got == nil {
			t.Fatal("expected non-nil empty map for strict empty allowlist")
		}
		if len(got) != 0 {
			t.Fatalf("expected empty map, got: %#v", got)
		}
	})
	t.Run("valid allowlist returns parsed map", func(t *testing.T) {
		t.Setenv("CHARON_PLUGIN_SIGNATURES", `{"plugin-a.so":"sha256:abc123","plugin-b.so":"sha256:def456"}`)
		got := parsePluginSignatures()
		if got == nil {
			t.Fatal("expected parsed signatures map, got nil")
		}
		if sig := got["plugin-a.so"]; sig != "sha256:abc123" {
			t.Fatalf("unexpected plugin-a signature: %q", sig)
		}
		if sig := got["plugin-b.so"]; sig != "sha256:def456" {
			t.Fatalf("unexpected plugin-b signature: %q", sig)
		}
	})
}

View File

@@ -0,0 +1,404 @@
package main
import (
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"syscall"
"testing"
"time"
"github.com/Wikid82/charon/backend/internal/database"
"github.com/Wikid82/charon/backend/internal/models"
)
// TestResetPasswordCommand_Succeeds verifies the "reset-password" CLI
// subcommand end-to-end using the subprocess re-exec pattern: the test binary
// re-runs itself with CHARON_TEST_RUN_MAIN=1, and that child invocation calls
// main() with emulated CLI arguments.
func TestResetPasswordCommand_Succeeds(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN") == "1" {
		// Child process: emulate CLI args and run main().
		email := os.Getenv("CHARON_TEST_EMAIL")
		newPassword := os.Getenv("CHARON_TEST_NEW_PASSWORD")
		os.Args = []string{"charon", "reset-password", email, newPassword}
		main()
		return
	}
	// Parent process: seed a database with one user, then spawn the child.
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	// #nosec G301 -- Test fixture directory with standard permissions
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate: %v", err)
	}
	email := "user@example.com"
	user := models.User{UUID: "u-1", Email: email, Name: "User", Role: models.RoleAdmin, Enabled: true}
	user.PasswordHash = "$2a$10$example_hashed_password"
	if err = db.Create(&user).Error; err != nil {
		t.Fatalf("seed user: %v", err)
	}
	// Re-exec this same test in a child process; -test.run scopes it to this test.
	cmd := exec.Command(os.Args[0], "-test.run=TestResetPasswordCommand_Succeeds") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN=1",
		"CHARON_TEST_EMAIL="+email,
		"CHARON_TEST_NEW_PASSWORD=new-password",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
	)
	// The command should exit 0; output is included on failure for debugging.
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected exit 0; err=%v; output=%s", err, string(out))
	}
}
// TestMigrateCommand_Succeeds verifies the "migrate" CLI subcommand creates
// the security tables on a database that only has the User table, using the
// same subprocess re-exec pattern as TestResetPasswordCommand_Succeeds.
func TestMigrateCommand_Succeeds(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN") == "1" {
		// Child process: emulate CLI args and run main().
		os.Args = []string{"charon", "migrate"}
		main()
		return
	}
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	// #nosec G301 -- Test fixture directory with standard permissions
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	// Create database without security tables
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	// Only migrate User table to simulate old database
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate user: %v", err)
	}
	// Verify security tables don't exist
	if db.Migrator().HasTable(&models.SecurityConfig{}) {
		t.Fatal("SecurityConfig table should not exist yet")
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestMigrateCommand_Succeeds") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN=1",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected exit 0; err=%v; output=%s", err, string(out))
	}
	// Reconnect and verify security tables were created
	db2, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("reconnect db: %v", err)
	}
	securityModels := []any{
		&models.SecurityConfig{},
		&models.SecurityDecision{},
		&models.SecurityAudit{},
		&models.SecurityRuleSet{},
		&models.CrowdsecPresetEvent{},
		&models.CrowdsecConsoleEnrollment{},
	}
	for _, model := range securityModels {
		if !db2.Migrator().HasTable(model) {
			t.Errorf("Table for %T was not created by migrate command", model)
		}
	}
}
// TestStartupVerification_MissingTables simulates the startup verification
// logic in-process: detect security tables missing from an old database,
// then auto-migrate them and verify they exist.
func TestStartupVerification_MissingTables(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	// Create database without security tables
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	// Only migrate User table to simulate old database
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate user: %v", err)
	}
	// Verify security tables don't exist
	if db.Migrator().HasTable(&models.SecurityConfig{}) {
		t.Fatal("SecurityConfig table should not exist yet")
	}
	// Close and reopen to simulate startup scenario
	sqlDB, _ := db.DB()
	_ = sqlDB.Close()
	db, err = database.Connect(dbPath)
	if err != nil {
		t.Fatalf("reconnect db: %v", err)
	}
	// Simulate startup verification logic from main.go
	securityModels := []any{
		&models.SecurityConfig{},
		&models.SecurityDecision{},
		&models.SecurityAudit{},
		&models.SecurityRuleSet{},
		&models.CrowdsecPresetEvent{},
		&models.CrowdsecConsoleEnrollment{},
	}
	missingTables := false
	for _, model := range securityModels {
		if !db.Migrator().HasTable(model) {
			missingTables = true
			t.Logf("Missing table for model %T", model)
		}
	}
	if !missingTables {
		t.Fatal("Expected to find missing tables but all were present")
	}
	// Run auto-migration (simulating startup verification logic)
	if err := db.AutoMigrate(securityModels...); err != nil {
		t.Fatalf("failed to migrate security tables: %v", err)
	}
	// Verify all tables now exist
	for _, model := range securityModels {
		if !db.Migrator().HasTable(model) {
			t.Errorf("Table for %T was not created by auto-migration", model)
		}
	}
}
// TestMain_MigrateCommand_InProcess runs the "migrate" subcommand by calling
// main() directly in this process (no re-exec), swapping os.Args and
// restoring them on cleanup.
func TestMain_MigrateCommand_InProcess(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	// Seed an "old" database containing only the User table.
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate user: %v", err)
	}
	// Swap os.Args for the duration of the test; t.Setenv auto-restores env.
	originalArgs := os.Args
	t.Cleanup(func() { os.Args = originalArgs })
	t.Setenv("CHARON_DB_PATH", dbPath)
	t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
	t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
	os.Args = []string{"charon", "migrate"}
	main()
	// Reconnect and confirm the migrate subcommand created the security tables.
	db2, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("reconnect db: %v", err)
	}
	securityModels := []any{
		&models.SecurityConfig{},
		&models.SecurityDecision{},
		&models.SecurityAudit{},
		&models.SecurityRuleSet{},
		&models.CrowdsecPresetEvent{},
		&models.CrowdsecConsoleEnrollment{},
	}
	for _, model := range securityModels {
		if !db2.Migrator().HasTable(model) {
			t.Errorf("Table for %T was not created by migrate command", model)
		}
	}
}
// TestMain_ResetPasswordCommand_InProcess runs the "reset-password"
// subcommand by calling main() directly and verifies the password hash was
// replaced and the failed-login counter reset.
func TestMain_ResetPasswordCommand_InProcess(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate: %v", err)
	}
	// Seed a user with a known hash and non-zero failed-login count so both
	// effects of the command are observable.
	email := "user@example.com"
	user := models.User{UUID: "u-1", Email: email, Name: "User", Role: models.RoleAdmin, Enabled: true}
	user.PasswordHash = "$2a$10$example_hashed_password"
	user.FailedLoginAttempts = 3
	if err = db.Create(&user).Error; err != nil {
		t.Fatalf("seed user: %v", err)
	}
	originalArgs := os.Args
	t.Cleanup(func() { os.Args = originalArgs })
	t.Setenv("CHARON_DB_PATH", dbPath)
	t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
	t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
	os.Args = []string{"charon", "reset-password", email, "new-password"}
	main()
	var updated models.User
	if err := db.Where("email = ?", email).First(&updated).Error; err != nil {
		t.Fatalf("fetch updated user: %v", err)
	}
	if updated.PasswordHash == "$2a$10$example_hashed_password" {
		t.Fatal("expected password hash to be updated")
	}
	if updated.FailedLoginAttempts != 0 {
		t.Fatalf("expected failed login attempts reset to 0, got %d", updated.FailedLoginAttempts)
	}
}
// TestMain_DefaultStartupGracefulShutdown_Subprocess starts the full server
// in a re-exec'd child process, waits for its HTTP port to accept
// connections, sends SIGTERM, and asserts the child exits 0 (i.e. graceful
// shutdown completes).
func TestMain_DefaultStartupGracefulShutdown_Subprocess(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN_SERVER") == "1" {
		// Child process: run main() and self-terminate once the server is up.
		os.Args = []string{"charon"}
		signalPort := os.Getenv("CHARON_TEST_SIGNAL_PORT")
		go func() {
			if signalPort != "" {
				_ = waitForTCPReady("127.0.0.1:"+signalPort, 10*time.Second)
			}
			process, err := os.FindProcess(os.Getpid())
			if err == nil {
				_ = process.Signal(syscall.SIGTERM)
			}
		}()
		main()
		return
	}
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	// Pick a free port so parallel test runs don't collide.
	httpPort, err := findFreeTCPPort()
	if err != nil {
		t.Fatalf("find free http port: %v", err)
	}
	err = os.MkdirAll(filepath.Dir(dbPath), 0o750)
	if err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestMain_DefaultStartupGracefulShutdown_Subprocess") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	// Point every configurable path into the temp dir and disable the
	// emergency server so the child only binds the main HTTP port.
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN_SERVER=1",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_HTTP_PORT="+httpPort,
		"CHARON_TEST_SIGNAL_PORT="+httpPort,
		"CHARON_EMERGENCY_SERVER_ENABLED=false",
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
		"CHARON_IMPORT_CADDYFILE="+filepath.Join(tmp, "imports", "does-not-exist", "Caddyfile"),
		"CHARON_FRONTEND_DIR="+filepath.Join(tmp, "frontend", "dist"),
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected startup/shutdown to exit 0; err=%v; output=%s", err, string(out))
	}
}
// TestMain_DefaultStartupGracefulShutdown_InProcess runs main() in this
// process with a background goroutine that SIGTERMs us once the HTTP port is
// accepting connections; main() returning (rather than hanging) is the pass
// condition.
func TestMain_DefaultStartupGracefulShutdown_InProcess(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	httpPort, err := findFreeTCPPort()
	if err != nil {
		t.Fatalf("find free http port: %v", err)
	}
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}
	originalArgs := os.Args
	t.Cleanup(func() { os.Args = originalArgs })
	t.Setenv("CHARON_DB_PATH", dbPath)
	t.Setenv("CHARON_HTTP_PORT", httpPort)
	t.Setenv("CHARON_EMERGENCY_SERVER_ENABLED", "false")
	t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
	t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
	t.Setenv("CHARON_IMPORT_CADDYFILE", filepath.Join(tmp, "imports", "does-not-exist", "Caddyfile"))
	t.Setenv("CHARON_FRONTEND_DIR", filepath.Join(tmp, "frontend", "dist"))
	os.Args = []string{"charon"}
	// Deliver SIGTERM to ourselves once the server is reachable so main()'s
	// signal handler runs the graceful-shutdown path and returns.
	go func() {
		_ = waitForTCPReady("127.0.0.1:"+httpPort, 10*time.Second)
		process, err := os.FindProcess(os.Getpid())
		if err == nil {
			_ = process.Signal(syscall.SIGTERM)
		}
	}()
	main()
}
// findFreeTCPPort asks the kernel for an ephemeral loopback port by binding
// to port 0, then releases the listener and returns the chosen port number
// as a decimal string.
func findFreeTCPPort() (string, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", fmt.Errorf("listen free port: %w", err)
	}
	defer func() { _ = ln.Close() }()

	tcpAddr, ok := ln.Addr().(*net.TCPAddr)
	if !ok {
		return "", fmt.Errorf("unexpected listener addr type: %T", ln.Addr())
	}
	return fmt.Sprintf("%d", tcpAddr.Port), nil
}
func waitForTCPReady(address string, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
conn, err := net.DialTimeout("tcp", address, 100*time.Millisecond)
if err == nil {
_ = conn.Close()
return nil
}
time.Sleep(25 * time.Millisecond)
}
return fmt.Errorf("timed out waiting for TCP readiness at %s", address)
}

View File

@@ -0,0 +1,290 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/Wikid82/charon/backend/internal/patchreport"
)
// thresholdJSON carries the resolved minimum patch-coverage percentages for
// each scope of the report.
type thresholdJSON struct {
	Overall  float64 `json:"overall_patch_coverage_min"`
	Backend  float64 `json:"backend_patch_coverage_min"`
	Frontend float64 `json:"frontend_patch_coverage_min"`
}

// thresholdSourcesJSON records the provenance string for each threshold, as
// reported by patchreport.ResolveThreshold.
type thresholdSourcesJSON struct {
	Overall  string `json:"overall"`
	Backend  string `json:"backend"`
	Frontend string `json:"frontend"`
}

// artifactsJSON lists the report files written by this run (repo-relative
// when possible).
type artifactsJSON struct {
	Markdown string `json:"markdown"`
	JSON     string `json:"json"`
}

// reportJSON is the top-level schema of the JSON patch-coverage report
// emitted by this tool.
type reportJSON struct {
	Baseline             string                           `json:"baseline"`
	GeneratedAt          string                           `json:"generated_at"`
	Mode                 string                           `json:"mode"`
	Thresholds           thresholdJSON                    `json:"thresholds"`
	ThresholdSources     thresholdSourcesJSON             `json:"threshold_sources"`
	Overall              patchreport.ScopeCoverage        `json:"overall"`
	Backend              patchreport.ScopeCoverage        `json:"backend"`
	Frontend             patchreport.ScopeCoverage        `json:"frontend"`
	FilesNeedingCoverage []patchreport.FileCoverageDetail `json:"files_needing_coverage,omitempty"`
	Warnings             []string                         `json:"warnings,omitempty"`
	Artifacts            artifactsJSON                    `json:"artifacts"`
}
// main generates a local patch-coverage report: it diffs the working tree
// against a git baseline, intersects the changed lines with backend (Go
// profile) and frontend (LCOV) coverage data, applies warn-only thresholds,
// and writes JSON + markdown artifacts.
func main() {
	repoRootFlag := flag.String("repo-root", ".", "Repository root path")
	baselineFlag := flag.String("baseline", "origin/development...HEAD", "Git diff baseline")
	backendCoverageFlag := flag.String("backend-coverage", "backend/coverage.txt", "Backend Go coverage profile")
	frontendCoverageFlag := flag.String("frontend-coverage", "frontend/coverage/lcov.info", "Frontend LCOV coverage report")
	jsonOutFlag := flag.String("json-out", "test-results/local-patch-report.json", "Path to JSON output report")
	mdOutFlag := flag.String("md-out", "test-results/local-patch-report.md", "Path to markdown output report")
	flag.Parse()
	repoRoot, err := filepath.Abs(*repoRootFlag)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error resolving repo root: %v\n", err)
		os.Exit(1)
	}
	// Resolve all configured paths relative to the repo root.
	backendCoveragePath := resolvePath(repoRoot, *backendCoverageFlag)
	frontendCoveragePath := resolvePath(repoRoot, *frontendCoverageFlag)
	jsonOutPath := resolvePath(repoRoot, *jsonOutFlag)
	mdOutPath := resolvePath(repoRoot, *mdOutFlag)
	// Both coverage inputs must exist up front; fail fast otherwise.
	err = assertFileExists(backendCoveragePath, "backend coverage file")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	err = assertFileExists(frontendCoveragePath, "frontend coverage file")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Changed lines come from a zero-context unified diff against the baseline.
	diffContent, err := gitDiff(repoRoot, *baselineFlag)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error generating git diff: %v\n", err)
		os.Exit(1)
	}
	backendChanged, frontendChanged, err := patchreport.ParseUnifiedDiffChangedLines(diffContent)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing changed lines from diff: %v\n", err)
		os.Exit(1)
	}
	backendCoverage, err := patchreport.ParseGoCoverageProfile(backendCoveragePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing backend coverage: %v\n", err)
		os.Exit(1)
	}
	frontendCoverage, err := patchreport.ParseLCOVProfile(frontendCoveragePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error parsing frontend coverage: %v\n", err)
		os.Exit(1)
	}
	// Thresholds are env-overridable with built-in defaults (90/85/85).
	overallThreshold := patchreport.ResolveThreshold("CHARON_OVERALL_PATCH_COVERAGE_MIN", 90, nil)
	backendThreshold := patchreport.ResolveThreshold("CHARON_BACKEND_PATCH_COVERAGE_MIN", 85, nil)
	frontendThreshold := patchreport.ResolveThreshold("CHARON_FRONTEND_PATCH_COVERAGE_MIN", 85, nil)
	backendScope := patchreport.ComputeScopeCoverage(backendChanged, backendCoverage)
	frontendScope := patchreport.ComputeScopeCoverage(frontendChanged, frontendCoverage)
	overallScope := patchreport.MergeScopeCoverage(backendScope, frontendScope)
	backendFilesNeedingCoverage := patchreport.ComputeFilesNeedingCoverage(backendChanged, backendCoverage, backendThreshold.Value)
	frontendFilesNeedingCoverage := patchreport.ComputeFilesNeedingCoverage(frontendChanged, frontendCoverage, frontendThreshold.Value)
	filesNeedingCoverage := patchreport.MergeFileCoverageDetails(backendFilesNeedingCoverage, frontendFilesNeedingCoverage)
	backendScope = patchreport.ApplyStatus(backendScope, backendThreshold.Value)
	frontendScope = patchreport.ApplyStatus(frontendScope, frontendThreshold.Value)
	overallScope = patchreport.ApplyStatus(overallScope, overallThreshold.Value)
	// Collect threshold-resolution warnings plus one warning per scope that
	// fell below its threshold; warn-only, never a non-zero exit.
	warnings := patchreport.SortedWarnings([]string{
		overallThreshold.Warning,
		backendThreshold.Warning,
		frontendThreshold.Warning,
	})
	if overallScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Overall patch coverage %.1f%% is below threshold %.1f%%", overallScope.PatchCoveragePct, overallThreshold.Value))
	}
	if backendScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Backend patch coverage %.1f%% is below threshold %.1f%%", backendScope.PatchCoveragePct, backendThreshold.Value))
	}
	if frontendScope.Status == "warn" {
		warnings = append(warnings, fmt.Sprintf("Frontend patch coverage %.1f%% is below threshold %.1f%%", frontendScope.PatchCoveragePct, frontendThreshold.Value))
	}
	report := reportJSON{
		Baseline:    *baselineFlag,
		GeneratedAt: time.Now().UTC().Format(time.RFC3339),
		Mode:        "warn",
		Thresholds: thresholdJSON{
			Overall:  overallThreshold.Value,
			Backend:  backendThreshold.Value,
			Frontend: frontendThreshold.Value,
		},
		ThresholdSources: thresholdSourcesJSON{
			Overall:  overallThreshold.Source,
			Backend:  backendThreshold.Source,
			Frontend: frontendThreshold.Source,
		},
		Overall:              overallScope,
		Backend:              backendScope,
		Frontend:             frontendScope,
		FilesNeedingCoverage: filesNeedingCoverage,
		Warnings:             warnings,
		Artifacts: artifactsJSON{
			Markdown: relOrAbs(repoRoot, mdOutPath),
			JSON:     relOrAbs(repoRoot, jsonOutPath),
		},
	}
	if err := os.MkdirAll(filepath.Dir(jsonOutPath), 0o750); err != nil {
		fmt.Fprintf(os.Stderr, "error creating json output directory: %v\n", err)
		os.Exit(1)
	}
	if err := os.MkdirAll(filepath.Dir(mdOutPath), 0o750); err != nil {
		fmt.Fprintf(os.Stderr, "error creating markdown output directory: %v\n", err)
		os.Exit(1)
	}
	if err := writeJSON(jsonOutPath, report); err != nil {
		fmt.Fprintf(os.Stderr, "error writing json report: %v\n", err)
		os.Exit(1)
	}
	if err := writeMarkdown(mdOutPath, report, relOrAbs(repoRoot, backendCoveragePath), relOrAbs(repoRoot, frontendCoveragePath)); err != nil {
		fmt.Fprintf(os.Stderr, "error writing markdown report: %v\n", err)
		os.Exit(1)
	}
	// Summarize to stdout; warnings are informational only.
	fmt.Printf("Local patch report generated (mode=%s)\n", report.Mode)
	fmt.Printf("JSON: %s\n", relOrAbs(repoRoot, jsonOutPath))
	fmt.Printf("Markdown: %s\n", relOrAbs(repoRoot, mdOutPath))
	for _, warning := range warnings {
		fmt.Printf("WARN: %s\n", warning)
	}
}
func resolvePath(repoRoot, configured string) string {
if filepath.IsAbs(configured) {
return configured
}
return filepath.Join(repoRoot, configured)
}
// relOrAbs renders path relative to repoRoot when possible, falling back to
// the path as given; the result always uses forward slashes.
func relOrAbs(repoRoot, path string) string {
	if rel, err := filepath.Rel(repoRoot, path); err == nil {
		return filepath.ToSlash(rel)
	}
	return filepath.ToSlash(path)
}
func assertFileExists(path, label string) error {
info, err := os.Stat(path)
if err != nil {
return fmt.Errorf("missing %s at %s: %w", label, path, err)
}
if info.IsDir() {
return fmt.Errorf("expected %s to be a file but found directory: %s", label, path)
}
return nil
}
// gitDiff runs a zero-context unified diff of repoRoot against the given
// baseline and returns the raw diff text. On failure the error includes the
// trimmed command output for diagnosis.
func gitDiff(repoRoot, baseline string) (string, error) {
	out, err := exec.Command("git", "-C", repoRoot, "diff", "--unified=0", baseline).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("git diff %s failed: %w (%s)", baseline, err, strings.TrimSpace(string(out)))
	}
	return string(out), nil
}
// writeJSON marshals the report with single-space indentation, appends a
// trailing newline, and writes it to path with owner-only permissions.
func writeJSON(path string, report reportJSON) error {
	data, err := json.MarshalIndent(report, "", " ")
	if err != nil {
		return fmt.Errorf("marshal report json: %w", err)
	}
	data = append(data, '\n')
	if writeErr := os.WriteFile(path, data, 0o600); writeErr != nil {
		return fmt.Errorf("write report json file: %w", writeErr)
	}
	return nil
}
// writeMarkdown renders the patch-coverage report as a markdown document at
// path. backendCoveragePath and frontendCoveragePath are display-only labels
// for the "Inputs" section; all coverage data comes from report.
func writeMarkdown(path string, report reportJSON, backendCoveragePath, frontendCoveragePath string) error {
	var doc strings.Builder

	doc.WriteString("# Local Patch Coverage Report\n\n")

	doc.WriteString("## Metadata\n\n")
	fmt.Fprintf(&doc, "- Generated: %s\n", report.GeneratedAt)
	fmt.Fprintf(&doc, "- Baseline: `%s`\n", report.Baseline)
	fmt.Fprintf(&doc, "- Mode: `%s`\n\n", report.Mode)

	doc.WriteString("## Inputs\n\n")
	fmt.Fprintf(&doc, "- Backend coverage: `%s`\n", backendCoveragePath)
	fmt.Fprintf(&doc, "- Frontend coverage: `%s`\n\n", frontendCoveragePath)

	doc.WriteString("## Resolved Thresholds\n\n")
	doc.WriteString("| Scope | Minimum (%) | Source |\n")
	doc.WriteString("|---|---:|---|\n")
	fmt.Fprintf(&doc, "| Overall | %.1f | %s |\n", report.Thresholds.Overall, report.ThresholdSources.Overall)
	fmt.Fprintf(&doc, "| Backend | %.1f | %s |\n", report.Thresholds.Backend, report.ThresholdSources.Backend)
	fmt.Fprintf(&doc, "| Frontend | %.1f | %s |\n\n", report.Thresholds.Frontend, report.ThresholdSources.Frontend)

	doc.WriteString("## Coverage Summary\n\n")
	doc.WriteString("| Scope | Changed Lines | Covered Lines | Patch Coverage (%) | Status |\n")
	doc.WriteString("|---|---:|---:|---:|---|\n")
	doc.WriteString(scopeRow("Overall", report.Overall))
	doc.WriteString(scopeRow("Backend", report.Backend))
	doc.WriteString(scopeRow("Frontend", report.Frontend))
	doc.WriteString("\n")

	// The per-file table is only emitted when at least one changed file still
	// needs coverage, so clean runs produce a shorter report.
	if len(report.FilesNeedingCoverage) > 0 {
		doc.WriteString("## Files Needing Coverage\n\n")
		doc.WriteString("| Path | Patch Coverage (%) | Uncovered Changed Lines | Uncovered Changed Line Ranges |\n")
		doc.WriteString("|---|---:|---:|---|\n")
		for _, fc := range report.FilesNeedingCoverage {
			rangeCell := "-"
			if len(fc.UncoveredChangedLineRange) > 0 {
				rangeCell = strings.Join(fc.UncoveredChangedLineRange, ", ")
			}
			fmt.Fprintf(&doc, "| `%s` | %.1f | %d | %s |\n", fc.Path, fc.PatchCoveragePct, fc.UncoveredChangedLines, rangeCell)
		}
		doc.WriteString("\n")
	}

	if len(report.Warnings) > 0 {
		doc.WriteString("## Warnings\n\n")
		for _, warning := range report.Warnings {
			fmt.Fprintf(&doc, "- %s\n", warning)
		}
		doc.WriteString("\n")
	}

	doc.WriteString("## Artifacts\n\n")
	fmt.Fprintf(&doc, "- Markdown: `%s`\n", report.Artifacts.Markdown)
	fmt.Fprintf(&doc, "- JSON: `%s`\n", report.Artifacts.JSON)

	// 0o600 keeps locally generated reports owner-readable only.
	if err := os.WriteFile(path, []byte(doc.String()), 0o600); err != nil {
		return fmt.Errorf("write markdown file: %w", err)
	}
	return nil
}
// scopeRow formats one markdown table row for a coverage scope
// (Overall/Backend/Frontend), including the trailing newline.
func scopeRow(name string, scope patchreport.ScopeCoverage) string {
	return fmt.Sprintf(
		"| %s | %d | %d | %.1f | %s |\n",
		name,
		scope.ChangedLines,
		scope.CoveredLines,
		scope.PatchCoveragePct,
		scope.Status,
	)
}

File diff suppressed because it is too large Load Diff

295
backend/cmd/seed/main.go Normal file
View File

@@ -0,0 +1,295 @@
package main
import (
"io"
"log"
"os"
"time"
"github.com/Wikid82/charon/backend/internal/logger"
"github.com/Wikid82/charon/backend/internal/util"
"github.com/glebarez/sqlite"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"gorm.io/gorm"
gormlogger "gorm.io/gorm/logger"
"github.com/Wikid82/charon/backend/internal/models"
)
// logSeedResult reports the outcome of a single seed upsert. On a DB error it
// logs errorMessage with the error attached; when the operation inserted rows
// it invokes logCreated; otherwise the row already existed and existsMessage
// is logged at info level.
func logSeedResult(entry *logrus.Entry, result *gorm.DB, errorMessage string, logCreated func(), existsMessage string) {
	if result.Error != nil {
		entry.WithError(result.Error).Error(errorMessage)
		return
	}
	if result.RowsAffected == 0 {
		entry.Info(existsMessage)
		return
	}
	logCreated()
}
// main seeds the development database at ./data/charon.db with sample remote
// servers, proxy hosts, settings, and a default admin user. Seeding is
// idempotent: each record is looked up by a natural key (host+port, domain
// names, setting key, email) and only created when missing.
//
// Environment variables:
//   - CHARON_DEFAULT_ADMIN_EMAIL: admin email (defaults to "admin@localhost")
//   - CHARON_DEFAULT_ADMIN_PASSWORD: when set, the admin password is hashed
//     via User.SetPassword; otherwise a non-loginable placeholder hash is kept
//   - CHARON_FORCE_DEFAULT_ADMIN: "1" updates an already-existing admin user
//     in place (metadata, and password when one is provided)
func main() {
	// Connect to database
	// Initialize simple logger to stdout
	mw := io.MultiWriter(os.Stdout)
	logger.Init(false, mw)
	// Configure GORM logger to ignore "record not found" errors
	// These are expected during seed operations when checking if records exist
	gormLog := gormlogger.New(
		log.New(os.Stdout, "\r\n", log.LstdFlags),
		gormlogger.Config{
			SlowThreshold:             200 * time.Millisecond,
			LogLevel:                  gormlogger.Warn,
			IgnoreRecordNotFoundError: true,
			Colorful:                  false,
		},
	)
	// NOTE(review): path is relative to the working directory — the seeder must
	// be run from a directory containing a writable ./data folder.
	db, err := gorm.Open(sqlite.Open("./data/charon.db"), &gorm.Config{
		Logger: gormLog,
	})
	if err != nil {
		logger.Log().WithError(err).Fatal("Failed to connect to database")
	}
	// Auto migrate: create/update the schema for every model the seeder touches.
	if err := db.AutoMigrate(
		&models.User{},
		&models.ProxyHost{},
		&models.CaddyConfig{},
		&models.RemoteServer{},
		&models.SSLCertificate{},
		&models.AccessList{},
		&models.Setting{},
		&models.ImportSession{},
	); err != nil {
		logger.Log().WithError(err).Fatal("Failed to migrate database")
	}
	logger.Log().Info("✓ Database migrated successfully")
	// Seed Remote Servers (sample upstreams covering docker/generic/vm providers).
	remoteServers := []models.RemoteServer{
		{
			UUID:        uuid.NewString(),
			Name:        "Local Docker Registry",
			Provider:    "docker",
			Host:        "localhost",
			Port:        5000,
			Scheme:      "http",
			Description: "Local Docker container registry",
			Enabled:     true,
			Reachable:   false,
		},
		{
			UUID:        uuid.NewString(),
			Name:        "Development API Server",
			Provider:    "generic",
			Host:        "192.168.1.100",
			Port:        8080,
			Scheme:      "http",
			Description: "Main development API backend",
			Enabled:     true,
			Reachable:   false,
		},
		{
			UUID:        uuid.NewString(),
			Name:        "Staging Web App",
			Provider:    "vm",
			Host:        "staging.internal",
			Port:        3000,
			Scheme:      "http",
			Description: "Staging environment web application",
			Enabled:     true,
			Reachable:   false,
		},
		{
			UUID:        uuid.NewString(),
			Name:        "Database Admin",
			Provider:    "docker",
			Host:        "localhost",
			Port:        8081,
			Scheme:      "http",
			Description: "PhpMyAdmin or similar DB management tool",
			Enabled:     false,
			Reachable:   false,
		},
	}
	for _, server := range remoteServers {
		// Natural key: host+port. FirstOrCreate only inserts when no match exists.
		result := db.Where("host = ? AND port = ?", server.Host, server.Port).FirstOrCreate(&server)
		logEntry := logger.Log().WithField("server", server.Name)
		logSeedResult(
			logEntry,
			result,
			"Failed to seed remote server",
			func() {
				logEntry.Infof("✓ Created remote server: %s (%s:%d)", server.Name, server.Host, server.Port)
			},
			"Remote server already exists",
		)
	}
	// Seed Proxy Hosts (sample vhosts forwarding to the remote servers above).
	proxyHosts := []models.ProxyHost{
		{
			UUID:             uuid.NewString(),
			Name:             "Development App",
			DomainNames:      "app.local.dev",
			ForwardScheme:    "http",
			ForwardHost:      "localhost",
			ForwardPort:      3000,
			SSLForced:        false,
			WebsocketSupport: true,
			HSTSEnabled:      false,
			BlockExploits:    true,
			Enabled:          true,
		},
		{
			UUID:             uuid.NewString(),
			Name:             "API Server",
			DomainNames:      "api.local.dev",
			ForwardScheme:    "http",
			ForwardHost:      "192.168.1.100",
			ForwardPort:      8080,
			SSLForced:        false,
			WebsocketSupport: false,
			HSTSEnabled:      false,
			BlockExploits:    true,
			Enabled:          true,
		},
		{
			UUID:             uuid.NewString(),
			Name:             "Docker Registry",
			DomainNames:      "docker.local.dev",
			ForwardScheme:    "http",
			ForwardHost:      "localhost",
			ForwardPort:      5000,
			SSLForced:        false,
			WebsocketSupport: false,
			HSTSEnabled:      false,
			BlockExploits:    true,
			Enabled:          false,
		},
	}
	for _, host := range proxyHosts {
		// Natural key: the domain-names string.
		result := db.Where("domain_names = ?", host.DomainNames).FirstOrCreate(&host)
		// Sanitize before logging: domain names are treated as untrusted input.
		logEntry := logger.Log().WithField("host", util.SanitizeForLog(host.DomainNames))
		logSeedResult(
			logEntry,
			result,
			"Failed to seed proxy host",
			func() {
				logEntry.Infof("✓ Created proxy host: %s -> %s://%s:%d", host.DomainNames, host.ForwardScheme, host.ForwardHost, host.ForwardPort)
			},
			"Proxy host already exists",
		)
	}
	// Seed Settings (application defaults, keyed by setting name).
	settings := []models.Setting{
		{
			Key:      "app_name",
			Value:    "Charon",
			Type:     "string",
			Category: "general",
		},
		{
			Key:      "default_scheme",
			Value:    "http",
			Type:     "string",
			Category: "general",
		},
		{
			Key:      "enable_ssl_by_default",
			Value:    "false",
			Type:     "bool",
			Category: "security",
		},
	}
	for _, setting := range settings {
		result := db.Where("key = ?", setting.Key).FirstOrCreate(&setting)
		logEntry := logger.Log().WithField("setting", setting.Key)
		logSeedResult(
			logEntry,
			result,
			"Failed to seed setting",
			func() {
				logEntry.Infof("✓ Created setting: %s = %s", setting.Key, setting.Value)
			},
			"Setting already exists",
		)
	}
	// Seed default admin user (for future authentication)
	defaultAdminEmail := os.Getenv("CHARON_DEFAULT_ADMIN_EMAIL")
	if defaultAdminEmail == "" {
		defaultAdminEmail = "admin@localhost"
	}
	defaultAdminPassword := os.Getenv("CHARON_DEFAULT_ADMIN_PASSWORD")
	// If a default password is not specified, leave the hashed placeholder (non-loginable)
	forceAdmin := os.Getenv("CHARON_FORCE_DEFAULT_ADMIN") == "1"
	user := models.User{
		UUID:    uuid.NewString(),
		Email:   defaultAdminEmail,
		Name:    "Administrator",
		Role:    "admin",
		Enabled: true,
	}
	// If a default password provided, use SetPassword to generate a proper bcrypt hash
	if defaultAdminPassword != "" {
		// NOTE(review): on hash failure this only logs and continues, leaving an
		// empty password hash on a newly created user — confirm this is intended.
		if err := user.SetPassword(defaultAdminPassword); err != nil {
			logger.Log().WithError(err).Error("Failed to hash default admin password")
		}
	} else {
		// Keep previous behavior: using example hashed password (not valid)
		user.PasswordHash = "$2a$10$example_hashed_password"
	}
	var existing models.User
	// Find by email first - use Take instead of First to avoid GORM's "record not found" log
	result := db.Where("email = ?", user.Email).Take(&existing)
	if result.Error != nil {
		if result.Error == gorm.ErrRecordNotFound {
			// Not found -> create new user
			createResult := db.Create(&user)
			if createResult.Error != nil {
				logger.Log().WithError(createResult.Error).Error("Failed to seed user")
			} else if createResult.RowsAffected > 0 {
				logger.Log().WithField("user", user.Email).Infof("✓ Created default user: %s", user.Email)
			}
		} else {
			// Unexpected error
			logger.Log().WithError(result.Error).Error("Failed to query for existing user")
		}
	} else {
		// Found existing user - optionally update if forced
		if forceAdmin {
			// Refresh metadata in place; email assignment is effectively a no-op
			// since the row was looked up by that same email.
			existing.Email = user.Email
			existing.Name = user.Name
			existing.Role = user.Role
			existing.Enabled = user.Enabled
			if defaultAdminPassword != "" {
				if err := existing.SetPassword(defaultAdminPassword); err == nil {
					db.Save(&existing)
					logger.Log().WithField("user", existing.Email).Infof("✓ Updated existing admin user password for: %s", existing.Email)
				} else {
					logger.Log().WithError(err).Error("Failed to update existing admin password")
				}
			} else {
				// Forced but no password: persist the metadata changes only.
				db.Save(&existing)
				logger.Log().WithField("user", existing.Email).Info("User already exists")
			}
		} else {
			logger.Log().WithField("user", existing.Email).Info("User already exists")
		}
	}
	logger.Log().Info("\n✓ Database seeding completed successfully!")
	logger.Log().Info("  You can now start the application and see sample data.")
}

View File

@@ -0,0 +1,38 @@
//go:build ignore
// +build ignore
package main
import (
"os"
"path/filepath"
"testing"
)
// TestSeedMain_CreatesDatabaseFile runs the seeder in a throwaway working
// directory and verifies that a non-empty SQLite database file is produced.
func TestSeedMain_CreatesDatabaseFile(t *testing.T) {
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	// Restore the original working directory when the test finishes.
	t.Cleanup(func() { _ = os.Chdir(origDir) })
	if err := os.Chdir(t.TempDir()); err != nil {
		t.Fatalf("chdir: %v", err)
	}
	// The seeder opens ./data/charon.db relative to the working directory.
	if err := os.MkdirAll("data", 0o755); err != nil {
		t.Fatalf("mkdir data: %v", err)
	}
	main()
	dbPath := filepath.Join("data", "charon.db")
	info, statErr := os.Stat(dbPath)
	if statErr != nil {
		t.Fatalf("expected db file to exist at %s: %v", dbPath, statErr)
	}
	if info.Size() == 0 {
		t.Fatalf("expected db file to be non-empty")
	}
}

View File

@@ -0,0 +1,201 @@
package main
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/Wikid82/charon/backend/internal/models"
"github.com/sirupsen/logrus"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// TestSeedMain_Smoke runs the seeder end-to-end in a temporary working
// directory and checks that the SQLite database file is created.
func TestSeedMain_Smoke(t *testing.T) {
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(origDir) })
	if err := os.Chdir(t.TempDir()); err != nil {
		t.Fatalf("chdir: %v", err)
	}
	// #nosec G301 -- test-only data directory; 0o750 is acceptable here
	if err := os.MkdirAll("data", 0o750); err != nil {
		t.Fatalf("mkdir data: %v", err)
	}
	main()
	if _, statErr := os.Stat(filepath.Join("data", "charon.db")); statErr != nil {
		t.Fatalf("expected db file to exist: %v", statErr)
	}
}
// TestSeedMain_ForceAdminUpdatesExistingUserPassword verifies that with
// CHARON_FORCE_DEFAULT_ADMIN=1 and a default password set, the seeder rehashes
// the existing admin user's password and promotes/enables the account.
func TestSeedMain_ForceAdminUpdatesExistingUserPassword(t *testing.T) {
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() {
		_ = os.Chdir(origDir)
	})
	if chdirErr := os.Chdir(t.TempDir()); chdirErr != nil {
		t.Fatalf("chdir: %v", chdirErr)
	}
	if mkErr := os.MkdirAll("data", 0o750); mkErr != nil {
		t.Fatalf("mkdir data: %v", mkErr)
	}
	// Pre-create the database with one stale admin row the seeder should update.
	db, err := gorm.Open(sqlite.Open(filepath.Join("data", "charon.db")), &gorm.Config{})
	if err != nil {
		t.Fatalf("open db: %v", err)
	}
	if err := db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate: %v", err)
	}
	preexisting := models.User{
		UUID:         "existing-user",
		Email:        "admin@localhost",
		Name:         "Old Name",
		Role:         models.RolePassthrough,
		Enabled:      false,
		PasswordHash: "$2a$10$example_hashed_password",
	}
	if err := db.Create(&preexisting).Error; err != nil {
		t.Fatalf("create seeded user: %v", err)
	}
	t.Setenv("CHARON_FORCE_DEFAULT_ADMIN", "1")
	t.Setenv("CHARON_DEFAULT_ADMIN_PASSWORD", "new-password")
	main()
	var got models.User
	if err := db.Where("email = ?", "admin@localhost").First(&got).Error; err != nil {
		t.Fatalf("fetch updated user: %v", err)
	}
	if got.PasswordHash == "$2a$10$example_hashed_password" {
		t.Fatal("expected password hash to be updated for forced admin")
	}
	if got.Role != "admin" {
		t.Fatalf("expected role admin, got %q", got.Role)
	}
	if !got.Enabled {
		t.Fatal("expected forced admin to be enabled")
	}
}
// TestSeedMain_ForceAdminWithoutPasswordUpdatesMetadata verifies that a forced
// admin update with no default password refreshes role/enabled metadata while
// leaving the stored password hash untouched.
func TestSeedMain_ForceAdminWithoutPasswordUpdatesMetadata(t *testing.T) {
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() {
		_ = os.Chdir(origDir)
	})
	if chdirErr := os.Chdir(t.TempDir()); chdirErr != nil {
		t.Fatalf("chdir: %v", chdirErr)
	}
	if mkErr := os.MkdirAll("data", 0o750); mkErr != nil {
		t.Fatalf("mkdir data: %v", mkErr)
	}
	// Pre-create a stale admin row with a known placeholder hash.
	db, err := gorm.Open(sqlite.Open(filepath.Join("data", "charon.db")), &gorm.Config{})
	if err != nil {
		t.Fatalf("open db: %v", err)
	}
	if err := db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate: %v", err)
	}
	preexisting := models.User{
		UUID:         "existing-user-no-pass",
		Email:        "admin@localhost",
		Name:         "Old Name",
		Role:         models.RolePassthrough,
		Enabled:      false,
		PasswordHash: "$2a$10$example_hashed_password",
	}
	if err := db.Create(&preexisting).Error; err != nil {
		t.Fatalf("create seeded user: %v", err)
	}
	t.Setenv("CHARON_FORCE_DEFAULT_ADMIN", "1")
	t.Setenv("CHARON_DEFAULT_ADMIN_PASSWORD", "")
	main()
	var got models.User
	if err := db.Where("email = ?", "admin@localhost").First(&got).Error; err != nil {
		t.Fatalf("fetch updated user: %v", err)
	}
	if got.Role != "admin" {
		t.Fatalf("expected role admin, got %q", got.Role)
	}
	if !got.Enabled {
		t.Fatal("expected forced admin to be enabled")
	}
	if got.PasswordHash != "$2a$10$example_hashed_password" {
		t.Fatal("expected password hash to remain unchanged when no password is provided")
	}
}
// TestLogSeedResult_Branches exercises the three outcomes of logSeedResult:
// a DB error, a successful insert, and a pre-existing row. Only the insert
// outcome may invoke the created callback.
func TestLogSeedResult_Branches(t *testing.T) {
	entry := logrus.New().WithField("component", "seed-test")
	// run invokes logSeedResult in a subtest and hands the callback flag to check.
	run := func(name string, result *gorm.DB, check func(t *testing.T, createdCalled bool)) {
		t.Run(name, func(t *testing.T) {
			createdCalled := false
			logSeedResult(entry, result, "error", func() {
				createdCalled = true
			}, "exists")
			check(t, createdCalled)
		})
	}
	run("error branch", &gorm.DB{Error: errors.New("insert failed")}, func(t *testing.T, createdCalled bool) {
		if createdCalled {
			t.Fatal("created callback should not be called on error")
		}
	})
	run("created branch", &gorm.DB{RowsAffected: 1}, func(t *testing.T, createdCalled bool) {
		if !createdCalled {
			t.Fatal("created callback should be called when rows are affected")
		}
	})
	run("exists branch", &gorm.DB{RowsAffected: 0}, func(t *testing.T, createdCalled bool) {
		if createdCalled {
			t.Fatal("created callback should not be called when rows are not affected")
		}
	})
}