chore: git cache cleanup
This commit is contained in:
304
backend/cmd/api/main.go
Normal file
304
backend/cmd/api/main.go
Normal file
@@ -0,0 +1,304 @@
|
||||
// Package main is the entry point for the Charon backend API.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Wikid82/charon/backend/internal/api/handlers"
|
||||
"github.com/Wikid82/charon/backend/internal/api/middleware"
|
||||
"github.com/Wikid82/charon/backend/internal/api/routes"
|
||||
"github.com/Wikid82/charon/backend/internal/caddy"
|
||||
"github.com/Wikid82/charon/backend/internal/cerberus"
|
||||
"github.com/Wikid82/charon/backend/internal/config"
|
||||
"github.com/Wikid82/charon/backend/internal/database"
|
||||
"github.com/Wikid82/charon/backend/internal/logger"
|
||||
"github.com/Wikid82/charon/backend/internal/models"
|
||||
"github.com/Wikid82/charon/backend/internal/server"
|
||||
"github.com/Wikid82/charon/backend/internal/services"
|
||||
"github.com/Wikid82/charon/backend/internal/version"
|
||||
_ "github.com/Wikid82/charon/backend/pkg/dnsprovider/builtin" // Register built-in DNS providers
|
||||
"github.com/gin-gonic/gin"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// parsePluginSignatures reads the CHARON_PLUGIN_SIGNATURES environment variable
|
||||
// and returns the parsed signature allowlist for plugin verification.
|
||||
//
|
||||
// Modes:
|
||||
// - nil return (permissive): Env var unset/empty — all plugins allowed
|
||||
// - empty map (strict): Env var set to "{}" — no external plugins allowed
|
||||
// - populated map: Only plugins with matching signatures are allowed
|
||||
func parsePluginSignatures() map[string]string {
|
||||
envVal := os.Getenv("CHARON_PLUGIN_SIGNATURES")
|
||||
if envVal == "" {
|
||||
logger.Log().Info("Plugin signature verification: PERMISSIVE mode (CHARON_PLUGIN_SIGNATURES not set)")
|
||||
return nil
|
||||
}
|
||||
|
||||
var signatures map[string]string
|
||||
if err := json.Unmarshal([]byte(envVal), &signatures); err != nil {
|
||||
logger.Log().WithError(err).Error("Failed to parse CHARON_PLUGIN_SIGNATURES JSON — falling back to permissive mode")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate all signatures have sha256: prefix
|
||||
for name, sig := range signatures {
|
||||
if !strings.HasPrefix(sig, "sha256:") {
|
||||
logger.Log().Errorf("Invalid signature for plugin %q: must have sha256: prefix — falling back to permissive mode", name)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(signatures) == 0 {
|
||||
logger.Log().Info("Plugin signature verification: STRICT mode (empty allowlist — no external plugins permitted)")
|
||||
} else {
|
||||
logger.Log().Infof("Plugin signature verification: STRICT mode (%d plugin(s) in allowlist)", len(signatures))
|
||||
}
|
||||
|
||||
return signatures
|
||||
}
|
||||
|
||||
// main wires up rotating file + stdout logging, services the one-shot CLI
// subcommands ("migrate", "reset-password"), and otherwise boots the full
// backend: CrowdSec reconciliation, the DNS plugin loader, the Gin HTTP API,
// and the emergency server — then blocks until SIGINT/SIGTERM and performs a
// timed shutdown of the emergency server.
func main() {
	// Setup logging with rotation
	logDir := "/app/data/logs"
	// #nosec G301 -- Log directory with standard permissions
	if err := os.MkdirAll(logDir, 0o755); err != nil {
		// Fallback to local directory if /app/data fails (e.g. local dev)
		logDir = "data/logs"
		// #nosec G301 -- Fallback log directory with standard permissions
		_ = os.MkdirAll(logDir, 0o755)
	}

	// lumberjack handles size-based rotation: 10 MB files, 3 backups,
	// 28-day retention, gzip-compressed rotated files.
	logFile := filepath.Join(logDir, "charon.log")
	rotator := &lumberjack.Logger{
		Filename:   logFile,
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     28, // days
		Compress:   true,
	}

	// Ensure legacy cpmp.log exists as symlink for compatibility (cpmp is a legacy name for Charon)
	legacyLog := filepath.Join(logDir, "cpmp.log")
	if _, err := os.Lstat(legacyLog); os.IsNotExist(err) {
		_ = os.Symlink(logFile, legacyLog) // ignore errors
	}

	// Log to both stdout and file
	mw := io.MultiWriter(os.Stdout, rotator)
	log.SetOutput(mw)
	gin.DefaultWriter = mw
	// Initialize a basic logger so CLI and early code can log.
	// (Re-initialized later with cfg.Debug once config is loaded.)
	logger.Init(false, mw)

	// Handle CLI commands. Each subcommand runs to completion and returns,
	// skipping the HTTP server entirely.
	if len(os.Args) > 1 {
		switch os.Args[1] {
		case "migrate":
			cfg, err := config.Load()
			if err != nil {
				log.Fatalf("load config: %v", err)
			}

			db, err := database.Connect(cfg.DatabasePath)
			if err != nil {
				log.Fatalf("connect database: %v", err)
			}

			logger.Log().Info("Running database migrations for all models...")
			if err := db.AutoMigrate(
				// Core models
				&models.ProxyHost{},
				&models.Location{},
				&models.CaddyConfig{},
				&models.RemoteServer{},
				&models.SSLCertificate{},
				&models.AccessList{},
				&models.SecurityHeaderProfile{},
				&models.User{},
				&models.Setting{},
				&models.ImportSession{},
				&models.Notification{},
				&models.NotificationProvider{},
				&models.NotificationTemplate{},
				&models.NotificationConfig{},
				&models.UptimeMonitor{},
				&models.UptimeHeartbeat{},
				&models.UptimeHost{},
				&models.UptimeNotificationEvent{},
				&models.Domain{},
				&models.UserPermittedHost{},
				// Security models
				&models.SecurityConfig{},
				&models.SecurityDecision{},
				&models.SecurityAudit{},
				&models.SecurityRuleSet{},
				&models.CrowdsecPresetEvent{},
				&models.CrowdsecConsoleEnrollment{},
				&models.EmergencyToken{}, // Phase 2: Database-backed emergency tokens
				// DNS Provider models (Issue #21)
				&models.DNSProvider{},
				&models.DNSProviderCredential{},
				// Plugin model (Phase 5)
				&models.Plugin{},
			); err != nil {
				log.Fatalf("migration failed: %v", err)
			}

			logger.Log().Info("Migration completed successfully")
			return

		case "reset-password":
			if len(os.Args) != 4 {
				log.Fatalf("Usage: %s reset-password <email> <new-password>", os.Args[0])
			}
			email := os.Args[2]
			newPassword := os.Args[3]

			cfg, err := config.Load()
			if err != nil {
				log.Fatalf("load config: %v", err)
			}

			db, err := database.Connect(cfg.DatabasePath)
			if err != nil {
				log.Fatalf("connect database: %v", err)
			}

			var user models.User
			if err := db.Where("email = ?", email).First(&user).Error; err != nil {
				log.Fatalf("user not found: %v", err)
			}

			if err := user.SetPassword(newPassword); err != nil {
				log.Fatalf("failed to hash password: %v", err)
			}

			// Unlock account if locked
			user.LockedUntil = nil
			user.FailedLoginAttempts = 0

			if err := db.Save(&user).Error; err != nil {
				log.Fatalf("failed to save user: %v", err)
			}

			logger.Log().Infof("Password updated successfully for user %s", email)
			return
		}
	}

	logger.Log().Infof("starting %s backend on version %s", version.Name, version.Full())

	cfg, err := config.Load()
	if err != nil {
		log.Fatalf("load config: %v", err)
	}

	db, err := database.Connect(cfg.DatabasePath)
	if err != nil {
		log.Fatalf("connect database: %v", err)
	}

	// Note: All database migrations are centralized in routes.Register()
	// This ensures migrations run exactly once and in the correct order.
	// DO NOT add AutoMigrate calls here - they cause "duplicate column" errors.

	// Reconcile CrowdSec state after migrations, before HTTP server starts
	// This ensures CrowdSec is running if user preference was to have it enabled
	crowdsecBinPath := os.Getenv("CHARON_CROWDSEC_BIN")
	if crowdsecBinPath == "" {
		crowdsecBinPath = "/usr/local/bin/crowdsec"
	}
	crowdsecDataDir := os.Getenv("CHARON_CROWDSEC_DATA")
	if crowdsecDataDir == "" {
		crowdsecDataDir = "/app/data/crowdsec"
	}

	crowdsecExec := handlers.NewDefaultCrowdsecExecutor()
	services.ReconcileCrowdSecOnStartup(db, crowdsecExec, crowdsecBinPath, crowdsecDataDir, nil)

	// Initialize plugin loader and load external DNS provider plugins (Phase 5)
	// Plugin load failures are non-fatal: the server starts without them.
	logger.Log().Info("Initializing DNS provider plugin system...")
	pluginDir := os.Getenv("CHARON_PLUGINS_DIR")
	if pluginDir == "" {
		pluginDir = "/app/plugins"
	}
	pluginLoader := services.NewPluginLoaderService(db, pluginDir, parsePluginSignatures())
	if err := pluginLoader.LoadAllPlugins(); err != nil {
		logger.Log().WithError(err).Warn("Failed to load external DNS provider plugins")
	}
	logger.Log().Info("Plugin system initialized")

	router := server.NewRouter(cfg.FrontendDir)
	// Initialize structured logger with same writer as stdlib log so both capture logs
	logger.Init(cfg.Debug, mw)
	// Request ID middleware must run before recovery so the recover logs include the request id
	router.Use(middleware.RequestID())
	// Log requests with request-scoped logger
	router.Use(middleware.RequestLogger())
	// Attach a recovery middleware that logs stack traces when debug is enabled
	router.Use(middleware.Recovery(cfg.Debug))

	// Shared Caddy manager and Cerberus instance for API + emergency server
	caddyClient := caddy.NewClient(cfg.CaddyAdminAPI)
	caddyManager := caddy.NewManager(caddyClient, db, cfg.CaddyConfigDir, cfg.FrontendDir, cfg.ACMEStaging, cfg.Security)
	cerb := cerberus.New(cfg.Security, db)

	// Pass config to routes for auth service and certificate service
	if err := routes.RegisterWithDeps(router, db, cfg, caddyManager, cerb); err != nil {
		log.Fatalf("register routes: %v", err)
	}

	// Register import handler with config dependencies
	routes.RegisterImportHandler(router, db, cfg, cfg.CaddyBinary, cfg.ImportDir, cfg.ImportCaddyfile)

	// Check for mounted Caddyfile on startup
	if err := handlers.CheckMountedImport(db, cfg.ImportCaddyfile, cfg.CaddyBinary, cfg.ImportDir); err != nil {
		logger.Log().WithError(err).Warn("WARNING: failed to process mounted Caddyfile")
	}

	// Initialize emergency server (Tier 2 break glass)
	emergencyServer := server.NewEmergencyServerWithDeps(db, cfg.Emergency, caddyManager, cerb)
	if err := emergencyServer.Start(); err != nil {
		logger.Log().WithError(err).Fatal("Failed to start emergency server")
	}

	// Setup graceful shutdown
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)

	// Start main HTTP server in goroutine
	// NOTE(review): router.Run has no graceful-shutdown hook; on SIGTERM only
	// the emergency server is stopped below and process exit terminates this
	// goroutine — consider http.Server + Shutdown if in-flight requests matter.
	go func() {
		addr := fmt.Sprintf(":%s", cfg.HTTPPort)
		logger.Log().Infof("starting %s backend on %s", version.Name, addr)

		if err := router.Run(addr); err != nil {
			logger.Log().WithError(err).Fatal("server error")
		}
	}()

	// Wait for interrupt signal
	sig := <-quit
	logger.Log().Infof("Received signal %v, initiating graceful shutdown...", sig)

	// Graceful shutdown with timeout
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Stop emergency server
	if err := emergencyServer.Stop(ctx); err != nil {
		logger.Log().WithError(err).Error("Emergency server shutdown error")
	}

	logger.Log().Info("Server shutdown complete")
}
|
||||
54
backend/cmd/api/main_parse_plugin_signatures_test.go
Normal file
54
backend/cmd/api/main_parse_plugin_signatures_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParsePluginSignatures(t *testing.T) {
|
||||
t.Run("unset env returns nil", func(t *testing.T) {
|
||||
t.Setenv("CHARON_PLUGIN_SIGNATURES", "")
|
||||
signatures := parsePluginSignatures()
|
||||
if signatures != nil {
|
||||
t.Fatalf("expected nil signatures when env is unset, got: %#v", signatures)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid json returns nil", func(t *testing.T) {
|
||||
t.Setenv("CHARON_PLUGIN_SIGNATURES", "{invalid}")
|
||||
signatures := parsePluginSignatures()
|
||||
if signatures != nil {
|
||||
t.Fatalf("expected nil signatures for invalid json, got: %#v", signatures)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid prefix returns nil", func(t *testing.T) {
|
||||
t.Setenv("CHARON_PLUGIN_SIGNATURES", `{"plugin.so":"md5:deadbeef"}`)
|
||||
signatures := parsePluginSignatures()
|
||||
if signatures != nil {
|
||||
t.Fatalf("expected nil signatures for invalid prefix, got: %#v", signatures)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty allowlist returns empty map", func(t *testing.T) {
|
||||
t.Setenv("CHARON_PLUGIN_SIGNATURES", `{}`)
|
||||
signatures := parsePluginSignatures()
|
||||
if signatures == nil {
|
||||
t.Fatal("expected non-nil empty map for strict empty allowlist")
|
||||
}
|
||||
if len(signatures) != 0 {
|
||||
t.Fatalf("expected empty map, got: %#v", signatures)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("valid allowlist returns parsed map", func(t *testing.T) {
|
||||
t.Setenv("CHARON_PLUGIN_SIGNATURES", `{"plugin-a.so":"sha256:abc123","plugin-b.so":"sha256:def456"}`)
|
||||
signatures := parsePluginSignatures()
|
||||
if signatures == nil {
|
||||
t.Fatal("expected parsed signatures map, got nil")
|
||||
}
|
||||
if got := signatures["plugin-a.so"]; got != "sha256:abc123" {
|
||||
t.Fatalf("unexpected plugin-a signature: %q", got)
|
||||
}
|
||||
if got := signatures["plugin-b.so"]; got != "sha256:def456" {
|
||||
t.Fatalf("unexpected plugin-b signature: %q", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
404
backend/cmd/api/main_test.go
Normal file
404
backend/cmd/api/main_test.go
Normal file
@@ -0,0 +1,404 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Wikid82/charon/backend/internal/database"
|
||||
"github.com/Wikid82/charon/backend/internal/models"
|
||||
)
|
||||
|
||||
// TestResetPasswordCommand_Succeeds verifies the "reset-password" CLI path by
// re-executing the test binary as a subprocess: the parent seeds a user into a
// temp database, then runs this same test with CHARON_TEST_RUN_MAIN=1 so the
// child invokes main() with CLI-style os.Args and must exit 0.
func TestResetPasswordCommand_Succeeds(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN") == "1" {
		// Child process: emulate CLI args and run main().
		email := os.Getenv("CHARON_TEST_EMAIL")
		newPassword := os.Getenv("CHARON_TEST_NEW_PASSWORD")
		os.Args = []string{"charon", "reset-password", email, newPassword}
		main()
		return
	}

	// Parent process: prepare an isolated database under a temp dir.
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	// #nosec G301 -- Test fixture directory with standard permissions
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}

	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate: %v", err)
	}

	// Seed the target user; the hash is a placeholder since the command
	// is expected to overwrite it.
	email := "user@example.com"
	user := models.User{UUID: "u-1", Email: email, Name: "User", Role: models.RoleAdmin, Enabled: true}
	user.PasswordHash = "$2a$10$example_hashed_password"
	if err = db.Create(&user).Error; err != nil {
		t.Fatalf("seed user: %v", err)
	}

	// Re-exec this test in a child process with the trigger env set;
	// config env vars point everything at the temp dir.
	cmd := exec.Command(os.Args[0], "-test.run=TestResetPasswordCommand_Succeeds") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN=1",
		"CHARON_TEST_EMAIL="+email,
		"CHARON_TEST_NEW_PASSWORD=new-password",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
	)

	// Success criterion: the child (running main()) exits 0.
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected exit 0; err=%v; output=%s", err, string(out))
	}
}
|
||||
|
||||
// TestMigrateCommand_Succeeds verifies the "migrate" CLI path via the
// subprocess re-exec pattern: the parent creates a database containing only
// the users table, runs this test again with CHARON_TEST_RUN_MAIN=1 so the
// child executes main() as `charon migrate`, then reconnects and checks that
// the security tables now exist.
func TestMigrateCommand_Succeeds(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN") == "1" {
		// Child process: emulate CLI args and run main().
		os.Args = []string{"charon", "migrate"}
		main()
		return
	}

	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	// #nosec G301 -- Test fixture directory with standard permissions
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o755); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}

	// Create database without security tables
	db, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("connect db: %v", err)
	}
	// Only migrate User table to simulate old database
	if err = db.AutoMigrate(&models.User{}); err != nil {
		t.Fatalf("automigrate user: %v", err)
	}

	// Verify security tables don't exist
	if db.Migrator().HasTable(&models.SecurityConfig{}) {
		t.Fatal("SecurityConfig table should not exist yet")
	}

	// Re-exec this test as the child; env vars point the child's config
	// at the temp dir so the migration runs against our seeded database.
	cmd := exec.Command(os.Args[0], "-test.run=TestMigrateCommand_Succeeds") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN=1",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
	)

	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected exit 0; err=%v; output=%s", err, string(out))
	}

	// Reconnect and verify security tables were created
	db2, err := database.Connect(dbPath)
	if err != nil {
		t.Fatalf("reconnect db: %v", err)
	}

	securityModels := []any{
		&models.SecurityConfig{},
		&models.SecurityDecision{},
		&models.SecurityAudit{},
		&models.SecurityRuleSet{},
		&models.CrowdsecPresetEvent{},
		&models.CrowdsecConsoleEnrollment{},
	}

	for _, model := range securityModels {
		if !db2.Migrator().HasTable(model) {
			t.Errorf("Table for %T was not created by migrate command", model)
		}
	}
}
|
||||
|
||||
func TestStartupVerification_MissingTables(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
dbPath := filepath.Join(tmp, "data", "test.db")
|
||||
if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
|
||||
t.Fatalf("mkdir db dir: %v", err)
|
||||
}
|
||||
|
||||
// Create database without security tables
|
||||
db, err := database.Connect(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("connect db: %v", err)
|
||||
}
|
||||
// Only migrate User table to simulate old database
|
||||
if err = db.AutoMigrate(&models.User{}); err != nil {
|
||||
t.Fatalf("automigrate user: %v", err)
|
||||
}
|
||||
|
||||
// Verify security tables don't exist
|
||||
if db.Migrator().HasTable(&models.SecurityConfig{}) {
|
||||
t.Fatal("SecurityConfig table should not exist yet")
|
||||
}
|
||||
|
||||
// Close and reopen to simulate startup scenario
|
||||
sqlDB, _ := db.DB()
|
||||
_ = sqlDB.Close()
|
||||
|
||||
db, err = database.Connect(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("reconnect db: %v", err)
|
||||
}
|
||||
|
||||
// Simulate startup verification logic from main.go
|
||||
securityModels := []any{
|
||||
&models.SecurityConfig{},
|
||||
&models.SecurityDecision{},
|
||||
&models.SecurityAudit{},
|
||||
&models.SecurityRuleSet{},
|
||||
&models.CrowdsecPresetEvent{},
|
||||
&models.CrowdsecConsoleEnrollment{},
|
||||
}
|
||||
|
||||
missingTables := false
|
||||
for _, model := range securityModels {
|
||||
if !db.Migrator().HasTable(model) {
|
||||
missingTables = true
|
||||
t.Logf("Missing table for model %T", model)
|
||||
}
|
||||
}
|
||||
|
||||
if !missingTables {
|
||||
t.Fatal("Expected to find missing tables but all were present")
|
||||
}
|
||||
|
||||
// Run auto-migration (simulating startup verification logic)
|
||||
if err := db.AutoMigrate(securityModels...); err != nil {
|
||||
t.Fatalf("failed to migrate security tables: %v", err)
|
||||
}
|
||||
|
||||
// Verify all tables now exist
|
||||
for _, model := range securityModels {
|
||||
if !db.Migrator().HasTable(model) {
|
||||
t.Errorf("Table for %T was not created by auto-migration", model)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMain_MigrateCommand_InProcess(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
dbPath := filepath.Join(tmp, "data", "test.db")
|
||||
if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
|
||||
t.Fatalf("mkdir db dir: %v", err)
|
||||
}
|
||||
|
||||
db, err := database.Connect(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("connect db: %v", err)
|
||||
}
|
||||
if err = db.AutoMigrate(&models.User{}); err != nil {
|
||||
t.Fatalf("automigrate user: %v", err)
|
||||
}
|
||||
|
||||
originalArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = originalArgs })
|
||||
|
||||
t.Setenv("CHARON_DB_PATH", dbPath)
|
||||
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
|
||||
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
|
||||
os.Args = []string{"charon", "migrate"}
|
||||
|
||||
main()
|
||||
|
||||
db2, err := database.Connect(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("reconnect db: %v", err)
|
||||
}
|
||||
|
||||
securityModels := []any{
|
||||
&models.SecurityConfig{},
|
||||
&models.SecurityDecision{},
|
||||
&models.SecurityAudit{},
|
||||
&models.SecurityRuleSet{},
|
||||
&models.CrowdsecPresetEvent{},
|
||||
&models.CrowdsecConsoleEnrollment{},
|
||||
}
|
||||
|
||||
for _, model := range securityModels {
|
||||
if !db2.Migrator().HasTable(model) {
|
||||
t.Errorf("Table for %T was not created by migrate command", model)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMain_ResetPasswordCommand_InProcess(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
dbPath := filepath.Join(tmp, "data", "test.db")
|
||||
if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
|
||||
t.Fatalf("mkdir db dir: %v", err)
|
||||
}
|
||||
|
||||
db, err := database.Connect(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("connect db: %v", err)
|
||||
}
|
||||
if err = db.AutoMigrate(&models.User{}); err != nil {
|
||||
t.Fatalf("automigrate: %v", err)
|
||||
}
|
||||
|
||||
email := "user@example.com"
|
||||
user := models.User{UUID: "u-1", Email: email, Name: "User", Role: models.RoleAdmin, Enabled: true}
|
||||
user.PasswordHash = "$2a$10$example_hashed_password"
|
||||
user.FailedLoginAttempts = 3
|
||||
if err = db.Create(&user).Error; err != nil {
|
||||
t.Fatalf("seed user: %v", err)
|
||||
}
|
||||
|
||||
originalArgs := os.Args
|
||||
t.Cleanup(func() { os.Args = originalArgs })
|
||||
|
||||
t.Setenv("CHARON_DB_PATH", dbPath)
|
||||
t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
|
||||
t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
|
||||
os.Args = []string{"charon", "reset-password", email, "new-password"}
|
||||
|
||||
main()
|
||||
|
||||
var updated models.User
|
||||
if err := db.Where("email = ?", email).First(&updated).Error; err != nil {
|
||||
t.Fatalf("fetch updated user: %v", err)
|
||||
}
|
||||
if updated.PasswordHash == "$2a$10$example_hashed_password" {
|
||||
t.Fatal("expected password hash to be updated")
|
||||
}
|
||||
if updated.FailedLoginAttempts != 0 {
|
||||
t.Fatalf("expected failed login attempts reset to 0, got %d", updated.FailedLoginAttempts)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMain_DefaultStartupGracefulShutdown_Subprocess boots the full server in
// a child process (re-exec pattern), waits for the HTTP port to accept
// connections, sends SIGTERM, and requires a clean exit — proving the
// default-startup + graceful-shutdown path end to end.
func TestMain_DefaultStartupGracefulShutdown_Subprocess(t *testing.T) {
	if os.Getenv("CHARON_TEST_RUN_MAIN_SERVER") == "1" {
		// Child process: run main() with no CLI args (server mode).
		os.Args = []string{"charon"}
		signalPort := os.Getenv("CHARON_TEST_SIGNAL_PORT")

		// Background goroutine: once the server's port is reachable,
		// signal our own process with SIGTERM to trigger shutdown.
		go func() {
			if signalPort != "" {
				_ = waitForTCPReady("127.0.0.1:"+signalPort, 10*time.Second)
			}
			process, err := os.FindProcess(os.Getpid())
			if err == nil {
				_ = process.Signal(syscall.SIGTERM)
			}
		}()

		main()
		return
	}

	// Parent process: temp dir, free port, and env pointing the child's
	// config at disposable paths; emergency server disabled.
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	httpPort, err := findFreeTCPPort()
	if err != nil {
		t.Fatalf("find free http port: %v", err)
	}
	err = os.MkdirAll(filepath.Dir(dbPath), 0o750)
	if err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}

	cmd := exec.Command(os.Args[0], "-test.run=TestMain_DefaultStartupGracefulShutdown_Subprocess") //nolint:gosec // G204: Test subprocess pattern using os.Args[0] is safe
	cmd.Dir = tmp
	cmd.Env = append(os.Environ(),
		"CHARON_TEST_RUN_MAIN_SERVER=1",
		"CHARON_DB_PATH="+dbPath,
		"CHARON_HTTP_PORT="+httpPort,
		"CHARON_TEST_SIGNAL_PORT="+httpPort,
		"CHARON_EMERGENCY_SERVER_ENABLED=false",
		"CHARON_CADDY_CONFIG_DIR="+filepath.Join(tmp, "caddy"),
		"CHARON_IMPORT_DIR="+filepath.Join(tmp, "imports"),
		"CHARON_IMPORT_CADDYFILE="+filepath.Join(tmp, "imports", "does-not-exist", "Caddyfile"),
		"CHARON_FRONTEND_DIR="+filepath.Join(tmp, "frontend", "dist"),
	)

	// Success criterion: clean exit after SIGTERM-triggered shutdown.
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("expected startup/shutdown to exit 0; err=%v; output=%s", err, string(out))
	}
}
|
||||
|
||||
// TestMain_DefaultStartupGracefulShutdown_InProcess boots the full server by
// calling main() directly in this process, with a helper goroutine that sends
// SIGTERM to the test process once the HTTP port is reachable. The test
// passes if main() returns (i.e. shutdown completes) without panicking.
func TestMain_DefaultStartupGracefulShutdown_InProcess(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "test.db")
	httpPort, err := findFreeTCPPort()
	if err != nil {
		t.Fatalf("find free http port: %v", err)
	}
	if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
		t.Fatalf("mkdir db dir: %v", err)
	}

	// Restore os.Args after the in-process main() call.
	originalArgs := os.Args
	t.Cleanup(func() { os.Args = originalArgs })

	// Point all config paths at the temp dir; emergency server disabled.
	t.Setenv("CHARON_DB_PATH", dbPath)
	t.Setenv("CHARON_HTTP_PORT", httpPort)
	t.Setenv("CHARON_EMERGENCY_SERVER_ENABLED", "false")
	t.Setenv("CHARON_CADDY_CONFIG_DIR", filepath.Join(tmp, "caddy"))
	t.Setenv("CHARON_IMPORT_DIR", filepath.Join(tmp, "imports"))
	t.Setenv("CHARON_IMPORT_CADDYFILE", filepath.Join(tmp, "imports", "does-not-exist", "Caddyfile"))
	t.Setenv("CHARON_FRONTEND_DIR", filepath.Join(tmp, "frontend", "dist"))
	os.Args = []string{"charon"}

	// Once the server accepts TCP connections, SIGTERM ourselves so
	// main()'s signal handler runs the graceful shutdown and returns.
	go func() {
		_ = waitForTCPReady("127.0.0.1:"+httpPort, 10*time.Second)
		process, err := os.FindProcess(os.Getpid())
		if err == nil {
			_ = process.Signal(syscall.SIGTERM)
		}
	}()

	main()
}
|
||||
|
||||
func findFreeTCPPort() (string, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("listen free port: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = listener.Close()
|
||||
}()
|
||||
|
||||
addr, ok := listener.Addr().(*net.TCPAddr)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected listener addr type: %T", listener.Addr())
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", addr.Port), nil
|
||||
}
|
||||
|
||||
func waitForTCPReady(address string, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 100*time.Millisecond)
|
||||
if err == nil {
|
||||
_ = conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
}
|
||||
|
||||
return fmt.Errorf("timed out waiting for TCP readiness at %s", address)
|
||||
}
|
||||
Reference in New Issue
Block a user