diff --git a/.docker/README.md b/.docker/README.md index ae05f2d0..c92cee89 100644 --- a/.docker/README.md +++ b/.docker/README.md @@ -95,6 +95,11 @@ Configure the application via `docker-compose.yml`: | `CHARON_HTTP_PORT` | `8080` | Port for the Web UI (`CPM_HTTP_PORT` supported for backward compatibility). | | `CHARON_DB_PATH` | `/app/data/charon.db` | Path to the SQLite database (`CPM_DB_PATH` supported for backward compatibility). | | `CHARON_CADDY_ADMIN_API` | `http://localhost:2019` | Internal URL for Caddy API (`CPM_CADDY_ADMIN_API` supported for backward compatibility). | +| `CHARON_CADDY_CONFIG_ROOT` | `/config` | Path to Caddy autosave configuration directory. | +| `CHARON_CADDY_LOG_DIR` | `/var/log/caddy` | Directory for Caddy access logs. | +| `CHARON_CROWDSEC_LOG_DIR` | `/var/log/crowdsec` | Directory for CrowdSec logs. | +| `CHARON_PLUGINS_DIR` | `/app/plugins` | Directory for DNS provider plugins. | +| `CHARON_SINGLE_CONTAINER_MODE` | `true` | Enables permission repair endpoints for single-container deployments. | ## NAS Deployment Guides diff --git a/.github/instructions/markdown.instructions.md b/.github/instructions/markdown.instructions.md index 724815d0..97b6fa8f 100644 --- a/.github/instructions/markdown.instructions.md +++ b/.github/instructions/markdown.instructions.md @@ -37,13 +37,8 @@ Ensure compliance with the following validation requirements: - **Front Matter**: Include the following fields in the YAML front matter: - `post_title`: The title of the post. - - `author1`: The primary author of the post. - - `post_slug`: The URL slug for the post. - - `microsoft_alias`: The Microsoft alias of the author. - - `featured_image`: The URL of the featured image. - `categories`: The categories for the post. These categories must be from the list in /categories.txt. - `tags`: The tags for the post. - - `ai_note`: Indicate if AI was used in the creation of the post. - `summary`: A brief summary of the post. 
Recommend a summary based on the content when possible. - `post_date`: The publication date of the post. diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index c26737b7..ad9e4ec0 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -870,6 +870,11 @@ CMD ["/app/charon"] | `CHARON_ENV` | Environment (production/development) | `production` | No | | `CHARON_ENCRYPTION_KEY` | 32-byte base64 key for credential encryption | Auto-generated | No | | `CHARON_EMERGENCY_TOKEN` | 64-char hex for break-glass access | None | Optional | +| `CHARON_CADDY_CONFIG_ROOT` | Caddy autosave config root | `/config` | No | +| `CHARON_CADDY_LOG_DIR` | Caddy log directory | `/var/log/caddy` | No | +| `CHARON_CROWDSEC_LOG_DIR` | CrowdSec log directory | `/var/log/crowdsec` | No | +| `CHARON_PLUGINS_DIR` | DNS provider plugin directory | `/app/plugins` | No | +| `CHARON_SINGLE_CONTAINER_MODE` | Enables permission repair endpoints | `true` | No | | `CROWDSEC_API_KEY` | CrowdSec cloud API key | None | Optional | | `SMTP_HOST` | SMTP server for notifications | None | Optional | | `SMTP_PORT` | SMTP port | `587` | Optional | diff --git a/backend/go.mod b/backend/go.mod index a177efd4..1121e4a8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,6 +1,6 @@ module github.com/Wikid82/charon/backend -go 1.26.0 +go 1.25.0 require ( github.com/containrrr/shoutrrr v0.8.0 diff --git a/backend/internal/api/handlers/additional_coverage_test.go b/backend/internal/api/handlers/additional_coverage_test.go index 1b18ddcd..a0181092 100644 --- a/backend/internal/api/handlers/additional_coverage_test.go +++ b/backend/internal/api/handlers/additional_coverage_test.go @@ -34,6 +34,7 @@ func TestImportHandler_Commit_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/commit", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -54,6 +55,7 @@ func 
TestImportHandler_Commit_InvalidSessionUUID(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/commit", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -76,6 +78,7 @@ func TestImportHandler_Commit_SessionNotFound(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/commit", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -351,6 +354,7 @@ func TestBackupHandler_List_DBError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) h.List(c) @@ -368,6 +372,7 @@ func TestImportHandler_UploadMulti_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -390,6 +395,7 @@ func TestImportHandler_UploadMulti_MissingCaddyfile(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -413,6 +419,7 @@ func TestImportHandler_UploadMulti_EmptyContent(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -437,6 +444,7 @@ func TestImportHandler_UploadMulti_PathTraversal(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") 
@@ -525,6 +533,7 @@ func TestImportHandler_Upload_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload", bytes.NewBufferString("not json")) c.Request.Header.Set("Content-Type", "application/json") @@ -545,6 +554,7 @@ func TestImportHandler_Upload_EmptyContent(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -583,6 +593,7 @@ func TestBackupHandler_List_ServiceError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("GET", "/backups", http.NoBody) h.List(c) @@ -611,6 +622,7 @@ func TestBackupHandler_Delete_PathTraversal(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "filename", Value: "../../../etc/passwd"}} c.Request = httptest.NewRequest("DELETE", "/backups/../../../etc/passwd", http.NoBody) @@ -659,6 +671,7 @@ func TestBackupHandler_Delete_InternalError2(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "filename", Value: "test.zip"}} c.Request = httptest.NewRequest("DELETE", "/backups/test.zip", http.NoBody) @@ -773,6 +786,7 @@ func TestBackupHandler_Create_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/backups", http.NoBody) h.Create(c) @@ -818,6 +832,7 @@ func TestSettingsHandler_UpdateSetting_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings/test", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", 
"application/json") @@ -893,6 +908,7 @@ func TestImportHandler_UploadMulti_ValidCaddyfile(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -918,6 +934,7 @@ func TestImportHandler_UploadMulti_SubdirFile(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/import/upload-multi", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") diff --git a/backend/internal/api/handlers/backup_handler.go b/backend/internal/api/handlers/backup_handler.go index b7fb8b28..93e107c0 100644 --- a/backend/internal/api/handlers/backup_handler.go +++ b/backend/internal/api/handlers/backup_handler.go @@ -12,11 +12,16 @@ import ( ) type BackupHandler struct { - service *services.BackupService + service *services.BackupService + securityService *services.SecurityService } func NewBackupHandler(service *services.BackupService) *BackupHandler { - return &BackupHandler{service: service} + return NewBackupHandlerWithDeps(service, nil) +} + +func NewBackupHandlerWithDeps(service *services.BackupService, securityService *services.SecurityService) *BackupHandler { + return &BackupHandler{service: service, securityService: securityService} } func (h *BackupHandler) List(c *gin.Context) { @@ -29,9 +34,16 @@ func (h *BackupHandler) List(c *gin.Context) { } func (h *BackupHandler) Create(c *gin.Context) { + if !requireAdmin(c) { + return + } + filename, err := h.service.CreateBackup() if err != nil { middleware.GetRequestLogger(c).WithField("action", "create_backup").WithError(err).Error("Failed to create backup") + if respondPermissionError(c, h.securityService, "backup_create_failed", err, h.service.BackupDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create 
backup: " + err.Error()}) return } @@ -40,12 +52,19 @@ func (h *BackupHandler) Create(c *gin.Context) { } func (h *BackupHandler) Delete(c *gin.Context) { + if !requireAdmin(c) { + return + } + filename := c.Param("filename") if err := h.service.DeleteBackup(filename); err != nil { if os.IsNotExist(err) { c.JSON(http.StatusNotFound, gin.H{"error": "Backup not found"}) return } + if respondPermissionError(c, h.securityService, "backup_delete_failed", err, h.service.BackupDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete backup"}) return } @@ -70,6 +89,10 @@ func (h *BackupHandler) Download(c *gin.Context) { } func (h *BackupHandler) Restore(c *gin.Context) { + if !requireAdmin(c) { + return + } + filename := c.Param("filename") if err := h.service.RestoreBackup(filename); err != nil { // codeql[go/log-injection] Safe: User input sanitized via util.SanitizeForLog() @@ -79,6 +102,9 @@ func (h *BackupHandler) Restore(c *gin.Context) { c.JSON(http.StatusNotFound, gin.H{"error": "Backup not found"}) return } + if respondPermissionError(c, h.securityService, "backup_restore_failed", err, h.service.BackupDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to restore backup: " + err.Error()}) return } diff --git a/backend/internal/api/handlers/backup_handler_sanitize_test.go b/backend/internal/api/handlers/backup_handler_sanitize_test.go index a728eb49..2584811a 100644 --- a/backend/internal/api/handlers/backup_handler_sanitize_test.go +++ b/backend/internal/api/handlers/backup_handler_sanitize_test.go @@ -31,6 +31,8 @@ func TestBackupHandlerSanitizesFilename(t *testing.T) { // Create a gin test context and use it to call handler directly w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + c.Set("role", "admin") + c.Set("userID", uint(1)) // Ensure request-scoped logger is present and writes to our buffer c.Set("logger", logger.WithFields(map[string]any{"test": "1"})) diff --git 
a/backend/internal/api/handlers/backup_handler_test.go b/backend/internal/api/handlers/backup_handler_test.go index 96e066cd..257bf177 100644 --- a/backend/internal/api/handlers/backup_handler_test.go +++ b/backend/internal/api/handlers/backup_handler_test.go @@ -47,6 +47,11 @@ func setupBackupTest(t *testing.T) (*gin.Engine, *services.BackupService, string h := NewBackupHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) api := r.Group("/api/v1") // Manually register routes since we don't have a RegisterRoutes method on the handler yet? // Wait, I didn't check if I added RegisterRoutes to BackupHandler. diff --git a/backend/internal/api/handlers/coverage_quick_test.go b/backend/internal/api/handlers/coverage_quick_test.go index 6ad3b6e0..901a7253 100644 --- a/backend/internal/api/handlers/coverage_quick_test.go +++ b/backend/internal/api/handlers/coverage_quick_test.go @@ -27,6 +27,10 @@ func TestBackupHandlerQuick(t *testing.T) { h := NewBackupHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) // register routes used r.GET("/backups", h.List) r.POST("/backups", h.Create) diff --git a/backend/internal/api/handlers/credential_handler_test.go b/backend/internal/api/handlers/credential_handler_test.go index 31fad4f1..11a2965a 100644 --- a/backend/internal/api/handlers/credential_handler_test.go +++ b/backend/internal/api/handlers/credential_handler_test.go @@ -185,6 +185,9 @@ func TestCredentialHandler_Get(t *testing.T) { created, err := credService.Create(testContext(), provider.ID, createReq) require.NoError(t, err) + // Give SQLite time to release locks + time.Sleep(10 * time.Millisecond) + url := fmt.Sprintf("/api/v1/dns-providers/%d/credentials/%d", provider.ID, created.ID) req, _ := http.NewRequest("GET", url, nil) w := httptest.NewRecorder() diff --git a/backend/internal/api/handlers/encryption_handler.go 
b/backend/internal/api/handlers/encryption_handler.go index e4f20ab4..d145af33 100644
--- a/backend/internal/api/handlers/encryption_handler.go
+++ b/backend/internal/api/handlers/encryption_handler.go
@@ -195,24 +195,6 @@ func (h *EncryptionHandler) Validate(c *gin.Context) {
 	})
 }
 
-// isAdmin checks if the current user has admin privileges.
-// This should ideally use the existing auth middleware context.
-func isAdmin(c *gin.Context) bool {
-	// Check if user is authenticated and is admin
-	// Auth middleware sets "role" context key (not "user_role")
-	userRole, exists := c.Get("role")
-	if !exists {
-		return false
-	}
-
-	role, ok := userRole.(string)
-	if !ok {
-		return false
-	}
-
-	return role == "admin"
-}
-
 // getActorFromGinContext extracts the user ID from Gin context for audit logging.
 func getActorFromGinContext(c *gin.Context) string {
 	// Auth middleware sets "userID" (not "user_id")
diff --git a/backend/internal/api/handlers/handlers_blackbox_test.go b/backend/internal/api/handlers/handlers_blackbox_test.go
index 775039c6..1ecaeacd 100644
--- a/backend/internal/api/handlers/handlers_blackbox_test.go
+++ b/backend/internal/api/handlers/handlers_blackbox_test.go
@@ -41,6 +41,14 @@ func setupImportTestDB(t *testing.T) *gorm.DB {
 	return db
 }
 
+func addAdminMiddleware(router *gin.Engine) {
+	router.Use(func(c *gin.Context) {
+		c.Set("role", "admin")
+		c.Set("userID", uint(1))
+		c.Next()
+	})
+}
+
 func TestImportHandler_GetStatus(t *testing.T) {
 	gin.SetMode(gin.TestMode)
 	db := setupImportTestDB(t)
@@ -48,6 +56,7 @@
 	// Case 1: No active session, no mount
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.DELETE("/import/cancel", handler.Cancel)
 
 	session := models.ImportSession{
@@ -72,6 +82,7 @@ func TestImportHandler_Commit(t *testing.T) {
 	db := setupImportTestDB(t)
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	session := models.ImportSession{
@@ -119,6 +131,7 @@ func TestImportHandler_Upload(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload", handler.Upload)
 
 	payload := map[string]string{
@@ -142,6 +156,7 @@ func TestImportHandler_GetPreview_WithContent(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, "echo", tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.GET("/import/preview", handler.GetPreview)
 
 	// Case: Active session with source file
@@ -176,6 +192,7 @@ func TestImportHandler_Commit_Errors(t *testing.T) {
 	db := setupImportTestDB(t)
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	// Case 1: Invalid JSON
@@ -219,6 +237,7 @@ func TestImportHandler_Cancel_Errors(t *testing.T) {
 	db := setupImportTestDB(t)
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.DELETE("/import/cancel", handler.Cancel)
 
 	// Case 1: Session not found
@@ -270,6 +289,7 @@ func TestImportHandler_Upload_Failure(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload", handler.Upload)
 
 	payload := map[string]string{
@@ -307,6 +327,7 @@ func TestImportHandler_Upload_Conflict(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload", handler.Upload)
 
 	payload := map[string]string{
@@ -343,6 +364,7 @@
func TestImportHandler_GetPreview_BackupContent(t *testing.T) { tmpDir := t.TempDir() handler := handlers.NewImportHandler(db, "echo", tmpDir, "") router := gin.New() + addAdminMiddleware(router) router.GET("/import/preview", handler.GetPreview) // Create backup file @@ -376,6 +398,7 @@ func TestImportHandler_RegisterRoutes(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) api := router.Group("/api/v1") handler.RegisterRoutes(api) @@ -404,6 +427,7 @@ func TestImportHandler_GetPreview_TransientMount(t *testing.T) { handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, mountPath) router := gin.New() + addAdminMiddleware(router) router.GET("/import/preview", handler.GetPreview) w := httptest.NewRecorder() @@ -442,6 +466,7 @@ func TestImportHandler_Commit_TransientUpload(t *testing.T) { handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/upload", handler.Upload) router.POST("/import/commit", handler.Commit) @@ -506,6 +531,7 @@ func TestImportHandler_Commit_TransientMount(t *testing.T) { handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, mountPath) router := gin.New() + addAdminMiddleware(router) router.POST("/import/commit", handler.Commit) // Commit the mount with a random session ID (transient) @@ -547,6 +573,7 @@ func TestImportHandler_Cancel_TransientUpload(t *testing.T) { handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/commit", handler.Commit) router.DELETE("/import/cancel", handler.Cancel) @@ -574,6 +601,7 @@ func TestImportHandler_DetectImports(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/detect-imports", handler.DetectImports) tests := []struct { @@ 
-636,6 +664,7 @@ func TestImportHandler_DetectImports_InvalidJSON(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/detect-imports", handler.DetectImports) // Invalid JSON @@ -658,6 +687,7 @@ func TestImportHandler_UploadMulti(t *testing.T) { handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/upload-multi", handler.UploadMulti) t.Run("single Caddyfile", func(t *testing.T) { @@ -765,6 +795,7 @@ func TestImportHandler_Cancel_MissingSessionUUID(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) router.DELETE("/import/cancel", handler.Cancel) // Missing session_uuid parameter @@ -783,6 +814,7 @@ func TestImportHandler_Cancel_InvalidSessionUUID(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) router.DELETE("/import/cancel", handler.Cancel) // Test "." which becomes empty after filepath.Base processing @@ -801,6 +833,7 @@ func TestImportHandler_Commit_InvalidSessionUUID(t *testing.T) { db := setupImportTestDB(t) handler := handlers.NewImportHandler(db, "echo", "/tmp", "") router := gin.New() + addAdminMiddleware(router) router.POST("/import/commit", handler.Commit) // Test "." 
which becomes empty after filepath.Base processing
@@ -888,8 +921,9 @@ func TestImportHandler_Commit_UpdateFailure(t *testing.T) {
 		},
 	}
 
-	handler := handlers.NewImportHandlerWithService(db, mockSvc, "echo", "/tmp", "")
+	handler := handlers.NewImportHandlerWithService(db, mockSvc, "echo", "/tmp", "", nil)
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	// Request to overwrite existing.com
@@ -953,6 +988,7 @@ func TestImportHandler_Commit_CreateFailure(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	// Don't provide resolution, so it defaults to create (not overwrite)
@@ -994,6 +1030,7 @@ func TestUpload_NormalizationSuccess(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload", handler.Upload)
 
 	// Use single-line Caddyfile format (triggers normalization)
@@ -1039,6 +1076,7 @@ func TestUpload_NormalizationFallback(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload", handler.Upload)
 
 	// Valid Caddyfile that would parse successfully (even if normalization fails)
@@ -1107,6 +1145,7 @@ func TestCommit_OverwriteAction(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	payload := map[string]any{
@@ -1176,6 +1215,7 @@ func TestCommit_RenameAction(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	payload := map[string]any{
@@ -1241,6 +1281,7 @@ func TestGetPreview_WithConflictDetails(t
*testing.T) {
 
 	handler := handlers.NewImportHandler(db, fakeCaddy, tmpDir, mountPath)
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.GET("/import/preview", handler.GetPreview)
 
 	w := httptest.NewRecorder()
@@ -1274,6 +1315,7 @@ func TestSafeJoin_PathTraversalCases(t *testing.T) {
 	tmpDir := t.TempDir()
 	handler := handlers.NewImportHandler(db, "echo", tmpDir, "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/upload-multi", handler.UploadMulti)
 
 	tests := []struct {
@@ -1360,6 +1402,7 @@ func TestCommit_SkipAction(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	payload := map[string]any{
@@ -1411,6 +1454,7 @@ func TestCommit_CustomNames(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", "/tmp", "")
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", handler.Commit)
 
 	payload := map[string]any{
@@ -1460,6 +1504,7 @@ func TestGetStatus_AlreadyCommittedMount(t *testing.T) {
 
 	handler := handlers.NewImportHandler(db, "echo", tmpDir, mountPath)
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.GET("/import/status", handler.GetStatus)
 
 	w := httptest.NewRecorder()
@@ -1493,8 +1538,9 @@ func TestImportHandler_Commit_SessionSaveWarning(t *testing.T) {
 		createFunc: func(h *models.ProxyHost) error { h.ID = 1; return nil },
 	}
 
-	h := handlers.NewImportHandlerWithService(db, mockSvc, "echo", "/tmp", "")
+	h := handlers.NewImportHandlerWithService(db, mockSvc, "echo", "/tmp", "", nil)
 	router := gin.New()
+	addAdminMiddleware(router)
 	router.POST("/import/commit", h.Commit)
 
 	// Inject a GORM callback to force an error when updating ImportSession (simulates non-fatal save warning)
@@ -1555,6 +1602,8 @@ func TestGetStatus_DatabaseError(t *testing.T) {
 	w := httptest.NewRecorder()
 	c, _ := gin.CreateTestContext(w)
+	c.Set("role", "admin")
+	c.Set("userID", uint(1))
c.Request = httptest.NewRequest("GET", "/api/v1/import/status", nil) handler.GetStatus(c) @@ -1587,6 +1636,8 @@ func TestGetPreview_MountAlreadyCommitted(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + c.Set("role", "admin") + c.Set("userID", uint(1)) c.Request = httptest.NewRequest("GET", "/api/v1/import/preview", nil) handler.GetPreview(c) @@ -1611,6 +1662,8 @@ func TestUpload_MkdirAllFailure(t *testing.T) { reqBody := `{"content": "test.local { reverse_proxy localhost:8080 }", "filename": "test.caddy"}` w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + c.Set("role", "admin") + c.Set("userID", uint(1)) c.Request = httptest.NewRequest("POST", "/api/v1/import/upload", strings.NewReader(reqBody)) c.Request.Header.Set("Content-Type", "application/json") diff --git a/backend/internal/api/handlers/import_handler.go b/backend/internal/api/handlers/import_handler.go index 558bc831..af233532 100644 --- a/backend/internal/api/handlers/import_handler.go +++ b/backend/internal/api/handlers/import_handler.go @@ -48,28 +48,35 @@ type ImportHandler struct { importerservice ImporterService importDir string mountPath string + securityService *services.SecurityService } // NewImportHandler creates a new import handler. func NewImportHandler(db *gorm.DB, caddyBinary, importDir, mountPath string) *ImportHandler { + return NewImportHandlerWithDeps(db, caddyBinary, importDir, mountPath, nil) +} + +func NewImportHandlerWithDeps(db *gorm.DB, caddyBinary, importDir, mountPath string, securityService *services.SecurityService) *ImportHandler { return &ImportHandler{ db: db, proxyHostSvc: services.NewProxyHostService(db), importerservice: caddy.NewImporter(caddyBinary), importDir: importDir, mountPath: mountPath, + securityService: securityService, } } // NewImportHandlerWithService creates an import handler with a custom ProxyHostService. // This is primarily used for testing with mock services. 
-func NewImportHandlerWithService(db *gorm.DB, proxyHostSvc ProxyHostServiceInterface, caddyBinary, importDir, mountPath string) *ImportHandler { +func NewImportHandlerWithService(db *gorm.DB, proxyHostSvc ProxyHostServiceInterface, caddyBinary, importDir, mountPath string, securityService *services.SecurityService) *ImportHandler { return &ImportHandler{ db: db, proxyHostSvc: proxyHostSvc, importerservice: caddy.NewImporter(caddyBinary), importDir: importDir, mountPath: mountPath, + securityService: securityService, } } @@ -273,6 +280,10 @@ func (h *ImportHandler) GetPreview(c *gin.Context) { // Upload handles manual Caddyfile upload or paste. func (h *ImportHandler) Upload(c *gin.Context) { + if !requireAdmin(c) { + return + } + var req struct { Content string `json:"content" binding:"required"` Filename string `json:"filename"` @@ -311,6 +322,9 @@ func (h *ImportHandler) Upload(c *gin.Context) { } // #nosec G301 -- Import uploads directory needs group readability for processing if mkdirErr := os.MkdirAll(uploadsDir, 0o755); mkdirErr != nil { + if respondPermissionError(c, h.securityService, "import_upload_failed", mkdirErr, h.importDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create uploads directory"}) return } @@ -322,6 +336,9 @@ func (h *ImportHandler) Upload(c *gin.Context) { // #nosec G306 -- Caddyfile uploads need group readability for Caddy validation if writeErr := os.WriteFile(tempPath, []byte(normalizedContent), 0o644); writeErr != nil { middleware.GetRequestLogger(c).WithField("tempPath", util.SanitizeForLog(filepath.Base(tempPath))).WithError(writeErr).Error("Import Upload: failed to write temp file") + if respondPermissionError(c, h.securityService, "import_upload_failed", writeErr, h.importDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to write upload"}) return } @@ -435,6 +452,9 @@ func (h *ImportHandler) Upload(c *gin.Context) { } if err := h.db.Create(&session).Error; err 
!= nil { middleware.GetRequestLogger(c).WithError(err).Warn("Import Upload: failed to persist session") + if respondPermissionError(c, h.securityService, "import_upload_failed", err, h.importDir) { + return + } } c.JSON(http.StatusOK, gin.H{ @@ -470,6 +490,10 @@ func (h *ImportHandler) DetectImports(c *gin.Context) { // UploadMulti handles upload of main Caddyfile + multiple site files. func (h *ImportHandler) UploadMulti(c *gin.Context) { + if !requireAdmin(c) { + return + } + var req struct { Files []struct { Filename string `json:"filename" binding:"required"` @@ -504,6 +528,9 @@ func (h *ImportHandler) UploadMulti(c *gin.Context) { } // #nosec G301 -- Session directory with standard permissions for import processing if mkdirErr := os.MkdirAll(sessionDir, 0o755); mkdirErr != nil { + if respondPermissionError(c, h.securityService, "import_upload_failed", mkdirErr, h.importDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create session directory"}) return } @@ -528,6 +555,9 @@ func (h *ImportHandler) UploadMulti(c *gin.Context) { if dir := filepath.Dir(targetPath); dir != sessionDir { // #nosec G301 -- Subdirectory within validated session directory if mkdirErr := os.MkdirAll(dir, 0o755); mkdirErr != nil { + if respondPermissionError(c, h.securityService, "import_upload_failed", mkdirErr, h.importDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to create directory for %s", f.Filename)}) return } @@ -535,6 +565,9 @@ func (h *ImportHandler) UploadMulti(c *gin.Context) { // #nosec G306 -- Imported Caddyfile needs to be readable for processing if writeErr := os.WriteFile(targetPath, []byte(f.Content), 0o644); writeErr != nil { + if respondPermissionError(c, h.securityService, "import_upload_failed", writeErr, h.importDir) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to write file %s", f.Filename)}) return } @@ -663,6 +696,9 @@ func (h 
*ImportHandler) UploadMulti(c *gin.Context) { } if err := h.db.Create(&session).Error; err != nil { middleware.GetRequestLogger(c).WithError(err).Warn("Import UploadMulti: failed to persist session") + if respondPermissionError(c, h.securityService, "import_upload_failed", err, h.importDir) { + return + } } c.JSON(http.StatusOK, gin.H{ @@ -764,6 +800,10 @@ func safeJoin(baseDir, userPath string) (string, error) { // Commit finalizes the import with user's conflict resolutions. func (h *ImportHandler) Commit(c *gin.Context) { + if !requireAdmin(c) { + return + } + var req struct { SessionUUID string `json:"session_uuid" binding:"required"` Resolutions map[string]string `json:"resolutions"` // domain -> action (keep/skip, overwrite, rename) @@ -784,7 +824,7 @@ func (h *ImportHandler) Commit(c *gin.Context) { return } var result *caddy.ImportResult - if err := h.db.Where("uuid = ? AND status = ?", sid, "reviewing").First(&session).Error; err == nil { + if err := h.db.Where("uuid = ? AND status IN ?", sid, []string{"reviewing", "pending"}).First(&session).Error; err == nil { // DB session found if err := json.Unmarshal([]byte(session.ParsedData), &result); err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to parse import data"}) @@ -910,6 +950,9 @@ func (h *ImportHandler) Commit(c *gin.Context) { } if err := h.db.Save(&session).Error; err != nil { middleware.GetRequestLogger(c).WithError(err).Warn("Warning: failed to save import session") + if respondPermissionError(c, h.securityService, "import_commit_failed", err, h.importDir) { + return + } } c.JSON(http.StatusOK, gin.H{ @@ -922,6 +965,10 @@ func (h *ImportHandler) Commit(c *gin.Context) { // Cancel discards a pending import session. 
func (h *ImportHandler) Cancel(c *gin.Context) { + if !requireAdmin(c) { + return + } + sessionUUID := c.Query("session_uuid") if sessionUUID == "" { c.JSON(http.StatusBadRequest, gin.H{"error": "session_uuid required"}) @@ -937,7 +984,11 @@ func (h *ImportHandler) Cancel(c *gin.Context) { var session models.ImportSession if err := h.db.Where("uuid = ?", sid).First(&session).Error; err == nil { session.Status = "rejected" - h.db.Save(&session) + if saveErr := h.db.Save(&session).Error; saveErr != nil { + if respondPermissionError(c, h.securityService, "import_cancel_failed", saveErr, h.importDir) { + return + } + } c.JSON(http.StatusOK, gin.H{"message": "import cancelled"}) return } @@ -948,6 +999,9 @@ func (h *ImportHandler) Cancel(c *gin.Context) { if _, err := os.Stat(uploadsPath); err == nil { if err := os.Remove(uploadsPath); err != nil { logger.Log().WithError(err).Warn("Failed to remove upload file") + if respondPermissionError(c, h.securityService, "import_cancel_failed", err, h.importDir) { + return + } } c.JSON(http.StatusOK, gin.H{"message": "transient upload cancelled"}) return diff --git a/backend/internal/api/handlers/import_handler_coverage_test.go b/backend/internal/api/handlers/import_handler_coverage_test.go index 1a6ebe24..b59a7783 100644 --- a/backend/internal/api/handlers/import_handler_coverage_test.go +++ b/backend/internal/api/handlers/import_handler_coverage_test.go @@ -72,6 +72,10 @@ func TestUploadMulti_EmptyList(t *testing.T) { w := httptest.NewRecorder() _, r := gin.CreateTestContext(w) + r.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) r.POST("/upload-multi", h.UploadMulti) // Create JSON with empty files list @@ -116,6 +120,10 @@ func TestUploadMulti_FileServerDetected(t *testing.T) { w := httptest.NewRecorder() _, r := gin.CreateTestContext(w) + r.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) r.POST("/upload-multi", h.UploadMulti) req := map[string]interface{}{ @@ -155,6 +163,10 @@ func 
TestUploadMulti_NoSitesParsed(t *testing.T) { w := httptest.NewRecorder() _, r := gin.CreateTestContext(w) + r.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) r.POST("/upload-multi", h.UploadMulti) req := map[string]interface{}{ diff --git a/backend/internal/api/handlers/import_handler_sanitize_test.go b/backend/internal/api/handlers/import_handler_sanitize_test.go index 993606f8..8609f029 100644 --- a/backend/internal/api/handlers/import_handler_sanitize_test.go +++ b/backend/internal/api/handlers/import_handler_sanitize_test.go @@ -28,6 +28,10 @@ func TestImportUploadSanitizesFilename(t *testing.T) { router := gin.New() router.Use(middleware.RequestID()) + router.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) router.POST("/import/upload", svc.Upload) buf := &bytes.Buffer{} diff --git a/backend/internal/api/handlers/import_handler_test.go b/backend/internal/api/handlers/import_handler_test.go index 1c3d6092..c125cca1 100644 --- a/backend/internal/api/handlers/import_handler_test.go +++ b/backend/internal/api/handlers/import_handler_test.go @@ -106,6 +106,13 @@ func setupTestHandler(t *testing.T, db *gorm.DB) (*ImportHandler, *mockProxyHost return handler, mockSvc, mockImport } +func addAdminMiddleware(router *gin.Engine) { + router.Use(func(c *gin.Context) { + setAdminContext(c) + c.Next() + }) +} + // TestUpload_NormalizationSuccess verifies single-line Caddyfile formatting func TestUpload_NormalizationSuccess(t *testing.T) { testutil.WithTx(t, setupImportTestDB(t), func(tx *gorm.DB) { @@ -142,6 +149,7 @@ func TestUpload_NormalizationSuccess(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -190,6 +198,7 @@ func TestUpload_NormalizationFailure(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -230,6 
+239,7 @@ func TestUpload_PathTraversalBlocked(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -270,6 +280,7 @@ func TestUploadMulti_ArchiveExtraction(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -315,6 +326,7 @@ func TestUploadMulti_ConflictDetection(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -353,6 +365,7 @@ func TestCommit_TransientToImport(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -397,6 +410,7 @@ func TestCommit_RollbackOnError(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -429,6 +443,7 @@ func TestDetectImports_EmptyCaddyfile(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -573,6 +588,7 @@ func TestImportHandler_Upload_NullByteInjection(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -599,6 +615,7 @@ func TestImportHandler_DetectImports_MalformedFile(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) router.ServeHTTP(w, req) @@ -744,6 +761,7 @@ func TestImportHandler_Upload_InvalidSessionPaths(t *testing.T) { gin.SetMode(gin.TestMode) router := gin.New() + addAdminMiddleware(router) handler.RegisterRoutes(router.Group("/api/v1")) 
router.ServeHTTP(w, req) diff --git a/backend/internal/api/handlers/logs_handler_test.go b/backend/internal/api/handlers/logs_handler_test.go index a3fba55e..90872944 100644 --- a/backend/internal/api/handlers/logs_handler_test.go +++ b/backend/internal/api/handlers/logs_handler_test.go @@ -80,17 +80,22 @@ func TestLogsLifecycle(t *testing.T) { var logs []services.LogFile err := json.Unmarshal(resp.Body.Bytes(), &logs) require.NoError(t, err) - require.Len(t, logs, 2) // access.log and cpmp.log + require.GreaterOrEqual(t, len(logs), 2) - // Verify content of one log file - found := false + hasAccess := false + hasCharon := false for _, l := range logs { if l.Name == "access.log" { - found = true + hasAccess = true + require.Greater(t, l.Size, int64(0)) + } + if l.Name == "charon.log" { + hasCharon = true require.Greater(t, l.Size, int64(0)) } } - require.True(t, found) + require.True(t, hasAccess) + require.True(t, hasCharon) // 2. Read log req = httptest.NewRequest(http.MethodGet, "/api/v1/logs/access.log?limit=2", http.NoBody) diff --git a/backend/internal/api/handlers/notification_coverage_test.go b/backend/internal/api/handlers/notification_coverage_test.go index 063b5c6f..820feb63 100644 --- a/backend/internal/api/handlers/notification_coverage_test.go +++ b/backend/internal/api/handlers/notification_coverage_test.go @@ -23,6 +23,11 @@ func setupNotificationCoverageDB(t *testing.T) *gorm.DB { return db } +func setAdminContext(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) +} + // Notification Handler Tests func TestNotificationHandler_List_Error(t *testing.T) { @@ -36,6 +41,7 @@ func TestNotificationHandler_List_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("GET", "/notifications", http.NoBody) h.List(c) @@ -56,6 +64,7 @@ func TestNotificationHandler_List_UnreadOnly(t *testing.T) { w :=
httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("GET", "/notifications?unread=true", http.NoBody) h.List(c) @@ -74,6 +83,7 @@ func TestNotificationHandler_MarkAsRead_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} h.MarkAsRead(c) @@ -93,6 +103,7 @@ func TestNotificationHandler_MarkAllAsRead_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) h.MarkAllAsRead(c) @@ -113,6 +124,7 @@ func TestNotificationProviderHandler_List_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) h.List(c) @@ -128,6 +140,7 @@ func TestNotificationProviderHandler_Create_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers", bytes.NewBufferString("invalid json")) c.Request.Header.Set("Content-Type", "application/json") @@ -155,6 +168,7 @@ func TestNotificationProviderHandler_Create_DBError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -180,6 +194,7 @@ func TestNotificationProviderHandler_Create_InvalidTemplate(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -196,6 +211,7 @@ func TestNotificationProviderHandler_Update_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} c.Request = httptest.NewRequest("PUT", "/providers/test-id", 
bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -227,6 +243,7 @@ func TestNotificationProviderHandler_Update_InvalidTemplate(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: provider.ID}} c.Request = httptest.NewRequest("PUT", "/providers/"+provider.ID, bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -255,6 +272,7 @@ func TestNotificationProviderHandler_Update_DBError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} c.Request = httptest.NewRequest("PUT", "/providers/test-id", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -275,6 +293,7 @@ func TestNotificationProviderHandler_Delete_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} h.Delete(c) @@ -291,6 +310,7 @@ func TestNotificationProviderHandler_Test_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers/test", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -307,6 +327,7 @@ func TestNotificationProviderHandler_Templates(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) h.Templates(c) @@ -324,6 +345,7 @@ func TestNotificationProviderHandler_Preview_InvalidJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers/preview", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -349,6 +371,7 @@ func TestNotificationProviderHandler_Preview_WithData(t *testing.T) { w := 
httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers/preview", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -371,6 +394,7 @@ func TestNotificationProviderHandler_Preview_InvalidTemplate(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/providers/preview", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -392,6 +416,7 @@ func TestNotificationTemplateHandler_List_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) h.List(c) @@ -407,6 +432,7 @@ func TestNotificationTemplateHandler_Create_BadJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -432,6 +458,7 @@ func TestNotificationTemplateHandler_Create_DBError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -448,6 +475,7 @@ func TestNotificationTemplateHandler_Update_BadJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} c.Request = httptest.NewRequest("PUT", "/templates/test-id", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -474,6 +502,7 @@ func TestNotificationTemplateHandler_Update_DBError(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} c.Request = httptest.NewRequest("PUT", "/templates/test-id", 
bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -494,6 +523,7 @@ func TestNotificationTemplateHandler_Delete_Error(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Params = gin.Params{{Key: "id", Value: "test-id"}} h.Delete(c) @@ -510,6 +540,7 @@ func TestNotificationTemplateHandler_Preview_BadJSON(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates/preview", bytes.NewBufferString("invalid")) c.Request.Header.Set("Content-Type", "application/json") @@ -531,6 +562,7 @@ func TestNotificationTemplateHandler_Preview_TemplateNotFound(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates/preview", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -563,6 +595,7 @@ func TestNotificationTemplateHandler_Preview_WithStoredTemplate(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates/preview", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -584,6 +617,7 @@ func TestNotificationTemplateHandler_Preview_InvalidTemplate(t *testing.T) { w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("POST", "/templates/preview", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") diff --git a/backend/internal/api/handlers/notification_provider_handler.go b/backend/internal/api/handlers/notification_provider_handler.go index 783f2f3f..c39929cb 100644 --- a/backend/internal/api/handlers/notification_provider_handler.go +++ b/backend/internal/api/handlers/notification_provider_handler.go @@ -13,11 +13,17 @@ import ( ) type NotificationProviderHandler struct { - service 
*services.NotificationService + service *services.NotificationService + securityService *services.SecurityService + dataRoot string } func NewNotificationProviderHandler(service *services.NotificationService) *NotificationProviderHandler { - return &NotificationProviderHandler{service: service} + return NewNotificationProviderHandlerWithDeps(service, nil, "") +} + +func NewNotificationProviderHandlerWithDeps(service *services.NotificationService, securityService *services.SecurityService, dataRoot string) *NotificationProviderHandler { + return &NotificationProviderHandler{service: service, securityService: securityService, dataRoot: dataRoot} } func (h *NotificationProviderHandler) List(c *gin.Context) { @@ -30,6 +36,10 @@ func (h *NotificationProviderHandler) List(c *gin.Context) { } func (h *NotificationProviderHandler) Create(c *gin.Context) { + if !requireAdmin(c) { + return + } + var provider models.NotificationProvider if err := c.ShouldBindJSON(&provider); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) @@ -42,6 +52,9 @@ func (h *NotificationProviderHandler) Create(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } + if respondPermissionError(c, h.securityService, "notification_provider_save_failed", err, h.dataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create provider"}) return } @@ -49,6 +62,10 @@ func (h *NotificationProviderHandler) Create(c *gin.Context) { } func (h *NotificationProviderHandler) Update(c *gin.Context) { + if !requireAdmin(c) { + return + } + id := c.Param("id") var provider models.NotificationProvider if err := c.ShouldBindJSON(&provider); err != nil { @@ -62,6 +79,9 @@ func (h *NotificationProviderHandler) Update(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } + if respondPermissionError(c, h.securityService, "notification_provider_save_failed", err, h.dataRoot) { + return + } 
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update provider"}) return } @@ -69,8 +89,15 @@ func (h *NotificationProviderHandler) Update(c *gin.Context) { } func (h *NotificationProviderHandler) Delete(c *gin.Context) { + if !requireAdmin(c) { + return + } + id := c.Param("id") if err := h.service.DeleteProvider(id); err != nil { + if respondPermissionError(c, h.securityService, "notification_provider_delete_failed", err, h.dataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete provider"}) return } diff --git a/backend/internal/api/handlers/notification_provider_handler_test.go b/backend/internal/api/handlers/notification_provider_handler_test.go index 2469d339..77a1f950 100644 --- a/backend/internal/api/handlers/notification_provider_handler_test.go +++ b/backend/internal/api/handlers/notification_provider_handler_test.go @@ -26,6 +26,11 @@ func setupNotificationProviderTest(t *testing.T) (*gin.Engine, *gorm.DB) { handler := handlers.NewNotificationProviderHandler(service) r := gin.Default() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) api := r.Group("/api/v1") providers := api.Group("/notifications/providers") providers.GET("", handler.List) diff --git a/backend/internal/api/handlers/notification_template_handler.go b/backend/internal/api/handlers/notification_template_handler.go index 65c1847e..04cc3f22 100644 --- a/backend/internal/api/handlers/notification_template_handler.go +++ b/backend/internal/api/handlers/notification_template_handler.go @@ -9,11 +9,17 @@ import ( ) type NotificationTemplateHandler struct { - service *services.NotificationService + service *services.NotificationService + securityService *services.SecurityService + dataRoot string } func NewNotificationTemplateHandler(s *services.NotificationService) *NotificationTemplateHandler { - return &NotificationTemplateHandler{service: s} + return 
NewNotificationTemplateHandlerWithDeps(s, nil, "") +} + +func NewNotificationTemplateHandlerWithDeps(s *services.NotificationService, securityService *services.SecurityService, dataRoot string) *NotificationTemplateHandler { + return &NotificationTemplateHandler{service: s, securityService: securityService, dataRoot: dataRoot} } func (h *NotificationTemplateHandler) List(c *gin.Context) { @@ -26,12 +32,19 @@ func (h *NotificationTemplateHandler) List(c *gin.Context) { } func (h *NotificationTemplateHandler) Create(c *gin.Context) { + if !requireAdmin(c) { + return + } + var t models.NotificationTemplate if err := c.ShouldBindJSON(&t); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } if err := h.service.CreateTemplate(&t); err != nil { + if respondPermissionError(c, h.securityService, "notification_template_save_failed", err, h.dataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create template"}) return } @@ -39,6 +52,10 @@ func (h *NotificationTemplateHandler) Create(c *gin.Context) { } func (h *NotificationTemplateHandler) Update(c *gin.Context) { + if !requireAdmin(c) { + return + } + id := c.Param("id") var t models.NotificationTemplate if err := c.ShouldBindJSON(&t); err != nil { @@ -47,6 +64,9 @@ func (h *NotificationTemplateHandler) Update(c *gin.Context) { } t.ID = id if err := h.service.UpdateTemplate(&t); err != nil { + if respondPermissionError(c, h.securityService, "notification_template_save_failed", err, h.dataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update template"}) return } @@ -54,8 +74,15 @@ func (h *NotificationTemplateHandler) Update(c *gin.Context) { } func (h *NotificationTemplateHandler) Delete(c *gin.Context) { + if !requireAdmin(c) { + return + } + id := c.Param("id") if err := h.service.DeleteTemplate(id); err != nil { + if respondPermissionError(c, h.securityService, "notification_template_delete_failed", err, h.dataRoot) { 
+ return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete template"}) return } diff --git a/backend/internal/api/handlers/notification_template_handler_test.go b/backend/internal/api/handlers/notification_template_handler_test.go index 31fcdc25..acdfa469 100644 --- a/backend/internal/api/handlers/notification_template_handler_test.go +++ b/backend/internal/api/handlers/notification_template_handler_test.go @@ -26,6 +26,11 @@ func TestNotificationTemplateHandler_CRUDAndPreview(t *testing.T) { h := NewNotificationTemplateHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) api := r.Group("/api/v1") api.GET("/notifications/templates", h.List) api.POST("/notifications/templates", h.Create) @@ -89,6 +94,11 @@ func TestNotificationTemplateHandler_Create_InvalidJSON(t *testing.T) { svc := services.NewNotificationService(db) h := NewNotificationTemplateHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) r.POST("/api/templates", h.Create) req := httptest.NewRequest(http.MethodPost, "/api/templates", strings.NewReader(`{invalid}`)) @@ -105,6 +115,11 @@ func TestNotificationTemplateHandler_Update_InvalidJSON(t *testing.T) { svc := services.NewNotificationService(db) h := NewNotificationTemplateHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) r.PUT("/api/templates/:id", h.Update) req := httptest.NewRequest(http.MethodPut, "/api/templates/test-id", strings.NewReader(`{invalid}`)) @@ -121,6 +136,11 @@ func TestNotificationTemplateHandler_Preview_InvalidJSON(t *testing.T) { svc := services.NewNotificationService(db) h := NewNotificationTemplateHandler(svc) r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) r.POST("/api/templates/preview", h.Preview) req := 
httptest.NewRequest(http.MethodPost, "/api/templates/preview", strings.NewReader(`{invalid}`)) diff --git a/backend/internal/api/handlers/permission_helpers.go b/backend/internal/api/handlers/permission_helpers.go new file mode 100644 index 00000000..6a10a353 --- /dev/null +++ b/backend/internal/api/handlers/permission_helpers.go @@ -0,0 +1,110 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + + "github.com/gin-gonic/gin" + + "github.com/Wikid82/charon/backend/internal/models" + "github.com/Wikid82/charon/backend/internal/services" + "github.com/Wikid82/charon/backend/internal/util" +) + +func requireAdmin(c *gin.Context) bool { + if isAdmin(c) { + return true + } + c.JSON(http.StatusForbidden, gin.H{ + "error": "admin privileges required", + "error_code": "permissions_admin_only", + }) + return false +} + +func isAdmin(c *gin.Context) bool { + role, _ := c.Get("role") + roleStr, _ := role.(string) + return roleStr == "admin" +} + +func respondPermissionError(c *gin.Context, securityService *services.SecurityService, action string, err error, path string) bool { + code, ok := util.MapSaveErrorCode(err) + if !ok { + return false + } + + admin := isAdmin(c) + response := gin.H{ + "error": permissionErrorMessage(code), + "error_code": code, + } + + if admin { + if path != "" { + response["path"] = path + } + response["help"] = buildPermissionHelp(path) + } else { + response["help"] = "Check volume permissions or contact an administrator." 
+ } + + logPermissionAudit(securityService, c, action, code, path, admin) + c.JSON(http.StatusInternalServerError, response) + return true +} + +func permissionErrorMessage(code string) string { + switch code { + case "permissions_db_readonly": + return "database is read-only" + case "permissions_db_locked": + return "database is locked" + case "permissions_readonly": + return "filesystem is read-only" + case "permissions_write_denied": + return "permission denied" + default: + return "permission error" + } +} + +func buildPermissionHelp(path string) string { + uid := os.Geteuid() + gid := os.Getegid() + if path == "" { + return fmt.Sprintf("chown -R %d:%d ", uid, gid) + } + return fmt.Sprintf("chown -R %d:%d %s", uid, gid, path) +} + +func logPermissionAudit(securityService *services.SecurityService, c *gin.Context, action, code, path string, admin bool) { + if securityService == nil { + return + } + + details := map[string]any{ + "error_code": code, + "admin": admin, + } + if admin && path != "" { + details["path"] = path + } + detailsJSON, _ := json.Marshal(details) + + actor := "unknown" + if userID, ok := c.Get("userID"); ok { + actor = fmt.Sprintf("%v", userID) + } + + _ = securityService.LogAudit(&models.SecurityAudit{ + Actor: actor, + Action: action, + EventCategory: "permissions", + Details: string(detailsJSON), + IPAddress: c.ClientIP(), + UserAgent: c.Request.UserAgent(), + }) +} diff --git a/backend/internal/api/handlers/security_notifications.go b/backend/internal/api/handlers/security_notifications.go index 99d7acd7..2467f2f5 100644 --- a/backend/internal/api/handlers/security_notifications.go +++ b/backend/internal/api/handlers/security_notifications.go @@ -3,11 +3,14 @@ package handlers import ( "fmt" "net/http" + "net/mail" + "strings" "github.com/gin-gonic/gin" "github.com/Wikid82/charon/backend/internal/models" "github.com/Wikid82/charon/backend/internal/security" + "github.com/Wikid82/charon/backend/internal/services" ) // 
SecurityNotificationServiceInterface defines the interface for security notification service. @@ -18,12 +21,18 @@ type SecurityNotificationServiceInterface interface { // SecurityNotificationHandler handles notification settings endpoints. type SecurityNotificationHandler struct { - service SecurityNotificationServiceInterface + service SecurityNotificationServiceInterface + securityService *services.SecurityService + dataRoot string } // NewSecurityNotificationHandler creates a new handler instance. func NewSecurityNotificationHandler(service SecurityNotificationServiceInterface) *SecurityNotificationHandler { - return &SecurityNotificationHandler{service: service} + return NewSecurityNotificationHandlerWithDeps(service, nil, "") +} + +func NewSecurityNotificationHandlerWithDeps(service SecurityNotificationServiceInterface, securityService *services.SecurityService, dataRoot string) *SecurityNotificationHandler { + return &SecurityNotificationHandler{service: service, securityService: securityService, dataRoot: dataRoot} } // GetSettings retrieves the current notification settings. @@ -38,6 +47,10 @@ func (h *SecurityNotificationHandler) GetSettings(c *gin.Context) { // UpdateSettings updates the notification settings. 
func (h *SecurityNotificationHandler) UpdateSettings(c *gin.Context) { + if !requireAdmin(c) { + return + } + var config models.NotificationConfig if err := c.ShouldBindJSON(&config); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) @@ -66,10 +79,48 @@ func (h *SecurityNotificationHandler) UpdateSettings(c *gin.Context) { } } + if normalized, err := normalizeEmailRecipients(config.EmailRecipients); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } else { + config.EmailRecipients = normalized + } + if err := h.service.UpdateSettings(&config); err != nil { + if respondPermissionError(c, h.securityService, "security_notifications_save_failed", err, h.dataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update settings"}) return } c.JSON(http.StatusOK, gin.H{"message": "Settings updated successfully"}) } + +func normalizeEmailRecipients(input string) (string, error) { + trimmed := strings.TrimSpace(input) + if trimmed == "" { + return "", nil + } + + parts := strings.Split(trimmed, ",") + valid := make([]string, 0, len(parts)) + invalid := make([]string, 0) + for _, part := range parts { + candidate := strings.TrimSpace(part) + if candidate == "" { + continue + } + if _, err := mail.ParseAddress(candidate); err != nil { + invalid = append(invalid, candidate) + continue + } + valid = append(valid, candidate) + } + + if len(invalid) > 0 { + return "", fmt.Errorf("invalid email recipients: %s", strings.Join(invalid, ", ")) + } + + return strings.Join(valid, ", "), nil +} diff --git a/backend/internal/api/handlers/security_notifications_test.go b/backend/internal/api/handlers/security_notifications_test.go index 70602c07..269c6d95 100644 --- a/backend/internal/api/handlers/security_notifications_test.go +++ b/backend/internal/api/handlers/security_notifications_test.go @@ -137,6 +137,7 @@ func TestSecurityNotificationHandler_UpdateSettings_InvalidJSON(t *testing.T) { 
gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(malformedJSON)) c.Request.Header.Set("Content-Type", "application/json") @@ -182,6 +183,7 @@ func TestSecurityNotificationHandler_UpdateSettings_InvalidMinLogLevel(t *testin gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -233,6 +235,7 @@ func TestSecurityNotificationHandler_UpdateSettings_InvalidWebhookURL_SSRF(t *te gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -284,6 +287,7 @@ func TestSecurityNotificationHandler_UpdateSettings_PrivateIPWebhook(t *testing. 
gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -320,6 +324,7 @@ func TestSecurityNotificationHandler_UpdateSettings_ServiceError(t *testing.T) { gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -363,6 +368,7 @@ func TestSecurityNotificationHandler_UpdateSettings_Success(t *testing.T) { gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") @@ -411,6 +417,7 @@ func TestSecurityNotificationHandler_UpdateSettings_EmptyWebhookURL(t *testing.T gin.SetMode(gin.TestMode) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) + setAdminContext(c) c.Request = httptest.NewRequest("PUT", "/settings", bytes.NewBuffer(body)) c.Request.Header.Set("Content-Type", "application/json") diff --git a/backend/internal/api/handlers/settings_handler.go b/backend/internal/api/handlers/settings_handler.go index 73c88233..dd1c1f5c 100644 --- a/backend/internal/api/handlers/settings_handler.go +++ b/backend/internal/api/handlers/settings_handler.go @@ -33,6 +33,8 @@ type SettingsHandler struct { MailService *services.MailService CaddyManager CaddyConfigManager // For triggering config reload on security settings change Cerberus CacheInvalidator // For invalidating cache on security settings change + SecuritySvc *services.SecurityService + DataRoot string } func NewSettingsHandler(db *gorm.DB) *SettingsHandler { @@ -43,12 +45,14 @@ func NewSettingsHandler(db *gorm.DB) *SettingsHandler { } // NewSettingsHandlerWithDeps 
creates a SettingsHandler with all dependencies for config reload -func NewSettingsHandlerWithDeps(db *gorm.DB, caddyMgr CaddyConfigManager, cerberus CacheInvalidator) *SettingsHandler { +func NewSettingsHandlerWithDeps(db *gorm.DB, caddyMgr CaddyConfigManager, cerberus CacheInvalidator, securitySvc *services.SecurityService, dataRoot string) *SettingsHandler { return &SettingsHandler{ DB: db, MailService: services.NewMailService(db), CaddyManager: caddyMgr, Cerberus: cerberus, + SecuritySvc: securitySvc, + DataRoot: dataRoot, } } @@ -78,6 +82,10 @@ type UpdateSettingRequest struct { // UpdateSetting updates or creates a setting. func (h *SettingsHandler) UpdateSetting(c *gin.Context) { + if !requireAdmin(c) { + return + } + var req UpdateSettingRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) @@ -105,6 +113,9 @@ func (h *SettingsHandler) UpdateSetting(c *gin.Context) { // Upsert if err := h.DB.Where(models.Setting{Key: req.Key}).Assign(setting).FirstOrCreate(&setting).Error; err != nil { + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save setting"}) return } @@ -117,6 +128,9 @@ func (h *SettingsHandler) UpdateSetting(c *gin.Context) { Type: "bool", } if err := h.DB.Where(models.Setting{Key: cerberusSetting.Key}).Assign(cerberusSetting).FirstOrCreate(&cerberusSetting).Error; err != nil { + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to enable Cerberus"}) return } @@ -127,10 +141,16 @@ func (h *SettingsHandler) UpdateSetting(c *gin.Context) { Type: "bool", } if err := h.DB.Where(models.Setting{Key: legacyCerberus.Key}).Assign(legacyCerberus).FirstOrCreate(&legacyCerberus).Error; err != nil { + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, 
h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to enable Cerberus"}) return } if err := h.ensureSecurityConfigEnabled(); err != nil { + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to enable security config"}) return } @@ -142,6 +162,9 @@ func (h *SettingsHandler) UpdateSetting(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid admin_whitelist"}) return } + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update security config"}) return } @@ -176,9 +199,7 @@ func (h *SettingsHandler) UpdateSetting(c *gin.Context) { // PATCH /api/v1/config // Requires admin authentication func (h *SettingsHandler) PatchConfig(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -202,46 +223,49 @@ func (h *SettingsHandler) PatchConfig(c *gin.Context) { updates["feature.cerberus.enabled"] = "true" } - // Validate and apply each update - for key, value := range updates { - // Special validation for admin_whitelist (CIDR format) - if key == "security.admin_whitelist" { - if err := validateAdminWhitelist(value); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("Invalid admin_whitelist: %v", err)}) - return + if err := h.DB.Transaction(func(tx *gorm.DB) error { + for key, value := range updates { + if key == "security.admin_whitelist" { + if err := validateAdminWhitelist(value); err != nil { + return fmt.Errorf("invalid admin_whitelist: %w", err) + } + } + + setting := models.Setting{ + Key: key, + Value: value, + Category: strings.Split(key, ".")[0], + Type: "string", + } + + if err := tx.Where(models.Setting{Key: 
key}).Assign(setting).FirstOrCreate(&setting).Error; err != nil { + return fmt.Errorf("save setting %s: %w", key, err) } } - // Upsert setting - setting := models.Setting{ - Key: key, - Value: value, - Category: strings.Split(key, ".")[0], - Type: "string", - } - - if err := h.DB.Where(models.Setting{Key: key}).Assign(setting).FirstOrCreate(&setting).Error; err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Failed to save setting %s", key)}) - return - } - } - - if hasAdminWhitelist { - if err := h.syncAdminWhitelist(adminWhitelist); err != nil { - if errors.Is(err, services.ErrInvalidAdminCIDR) { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid admin_whitelist"}) - return + if hasAdminWhitelist { + if err := h.syncAdminWhitelistWithDB(tx, adminWhitelist); err != nil { + return err } - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update security config"}) - return } - } - if aclEnabled { - if err := h.ensureSecurityConfigEnabled(); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to enable security config"}) + if aclEnabled { + if err := h.ensureSecurityConfigEnabledWithDB(tx); err != nil { + return err + } + } + + return nil + }); err != nil { + if errors.Is(err, services.ErrInvalidAdminCIDR) || strings.Contains(err.Error(), "invalid admin_whitelist") { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid admin_whitelist"}) return } + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + } + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save settings"}) + return } // Trigger cache invalidation and Caddy reload for security settings @@ -277,6 +301,9 @@ func (h *SettingsHandler) PatchConfig(c *gin.Context) { // Return current config state var settings []models.Setting if err := h.DB.Find(&settings).Error; err != nil { + if respondPermissionError(c, h.SecuritySvc, "settings_save_failed", err, h.DataRoot) { + return + 
} c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to fetch updated config"}) return } @@ -291,19 +318,23 @@ func (h *SettingsHandler) PatchConfig(c *gin.Context) { } func (h *SettingsHandler) ensureSecurityConfigEnabled() error { + return h.ensureSecurityConfigEnabledWithDB(h.DB) +} + +func (h *SettingsHandler) ensureSecurityConfigEnabledWithDB(db *gorm.DB) error { var cfg models.SecurityConfig - err := h.DB.Where("name = ?", "default").First(&cfg).Error + err := db.Where("name = ?", "default").First(&cfg).Error if err != nil { if errors.Is(err, gorm.ErrRecordNotFound) { cfg = models.SecurityConfig{Name: "default", Enabled: true} - return h.DB.Create(&cfg).Error + return db.Create(&cfg).Error } return err } if cfg.Enabled { return nil } - return h.DB.Model(&cfg).Update("enabled", true).Error + return db.Model(&cfg).Update("enabled", true).Error } // flattenConfig converts nested map to flat key-value pairs with dot notation @@ -348,7 +379,11 @@ func validateAdminWhitelist(whitelist string) error { } func (h *SettingsHandler) syncAdminWhitelist(whitelist string) error { - securitySvc := services.NewSecurityService(h.DB) + return h.syncAdminWhitelistWithDB(h.DB, whitelist) +} + +func (h *SettingsHandler) syncAdminWhitelistWithDB(db *gorm.DB, whitelist string) error { + securitySvc := services.NewSecurityService(db) cfg, err := securitySvc.Get() if err != nil { if err != services.ErrSecurityConfigNotFound { @@ -408,9 +443,7 @@ func MaskPasswordForTest(password string) string { // UpdateSMTPConfig updates the SMTP configuration. 
func (h *SettingsHandler) UpdateSMTPConfig(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -436,6 +469,9 @@ func (h *SettingsHandler) UpdateSMTPConfig(c *gin.Context) { } if err := h.MailService.SaveSMTPConfig(config); err != nil { + if respondPermissionError(c, h.SecuritySvc, "smtp_save_failed", err, h.DataRoot) { + return + } c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save SMTP configuration: " + err.Error()}) return } @@ -445,9 +481,7 @@ func (h *SettingsHandler) UpdateSMTPConfig(c *gin.Context) { // TestSMTPConfig tests the SMTP connection. func (h *SettingsHandler) TestSMTPConfig(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -467,9 +501,7 @@ func (h *SettingsHandler) TestSMTPConfig(c *gin.Context) { // SendTestEmail sends a test email to verify the SMTP configuration. func (h *SettingsHandler) SendTestEmail(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -515,9 +547,7 @@ func (h *SettingsHandler) SendTestEmail(c *gin.Context) { // ValidatePublicURL validates a URL is properly formatted for use as the application URL. func (h *SettingsHandler) ValidatePublicURL(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -559,10 +589,7 @@ func (h *SettingsHandler) ValidatePublicURL(c *gin.Context) { // 3. Runtime protection: ssrfSafeDialer validates IPs again at connection time // This multi-layer approach satisfies both static analysis (CodeQL) and runtime security. 
func (h *SettingsHandler) TestPublicURL(c *gin.Context) { - // Admin-only access check - role, exists := c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } diff --git a/backend/internal/api/handlers/settings_handler_test.go b/backend/internal/api/handlers/settings_handler_test.go index 1d1ead74..c9be6a45 100644 --- a/backend/internal/api/handlers/settings_handler_test.go +++ b/backend/internal/api/handlers/settings_handler_test.go @@ -127,6 +127,16 @@ func setupSettingsTestDB(t *testing.T) *gorm.DB { return db } +func newAdminRouter() *gin.Engine { + router := gin.New() + router.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) + return router +} + func TestSettingsHandler_GetSettings(t *testing.T) { gin.SetMode(gin.TestMode) db := setupSettingsTestDB(t) @@ -135,7 +145,7 @@ func TestSettingsHandler_GetSettings(t *testing.T) { db.Create(&models.Setting{Key: "test_key", Value: "test_value", Category: "general", Type: "string"}) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.GET("/settings", handler.GetSettings) w := httptest.NewRecorder() @@ -159,7 +169,7 @@ func TestSettingsHandler_GetSettings_DatabaseError(t *testing.T) { _ = sqlDB.Close() handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.GET("/settings", handler.GetSettings) w := httptest.NewRecorder() @@ -178,7 +188,7 @@ func TestSettingsHandler_UpdateSettings(t *testing.T) { db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.POST("/settings", handler.UpdateSetting) // Test Create @@ -221,7 +231,7 @@ func TestSettingsHandler_UpdateSetting_SyncsAdminWhitelist(t *testing.T) { db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := 
newAdminRouter() router.POST("/settings", handler.UpdateSetting) payload := map[string]string{ @@ -248,7 +258,7 @@ func TestSettingsHandler_UpdateSetting_EnablesCerberusWhenACLEnabled(t *testing. db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.POST("/settings", handler.UpdateSetting) payload := map[string]string{ @@ -290,7 +300,7 @@ func TestSettingsHandler_PatchConfig_SyncsAdminWhitelist(t *testing.T) { db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -322,7 +332,7 @@ func TestSettingsHandler_PatchConfig_EnablesCerberusWhenACLEnabled(t *testing.T) db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -361,7 +371,7 @@ func TestSettingsHandler_UpdateSetting_DatabaseError(t *testing.T) { db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.POST("/settings", handler.UpdateSetting) // Close the database to force an error @@ -391,7 +401,7 @@ func TestSettingsHandler_Errors(t *testing.T) { db := setupSettingsTestDB(t) handler := handlers.NewSettingsHandler(db) - router := gin.New() + router := newAdminRouter() router.POST("/settings", handler.UpdateSetting) // Invalid JSON @@ -438,7 +448,7 @@ func TestSettingsHandler_GetSMTPConfig(t *testing.T) { db.Create(&models.Setting{Key: "smtp_from_address", Value: "noreply@example.com", Category: "smtp", Type: "string"}) db.Create(&models.Setting{Key: "smtp_encryption", Value: "starttls", Category: "smtp", Type: "string"}) - router := gin.New() + router := newAdminRouter() router.GET("/settings/smtp", handler.GetSMTPConfig) w := httptest.NewRecorder() @@ -459,7 +469,7 @@ func 
TestSettingsHandler_GetSMTPConfig_Empty(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.GET("/settings/smtp", handler.GetSMTPConfig) w := httptest.NewRecorder() @@ -479,7 +489,7 @@ func TestSettingsHandler_GetSMTPConfig_DatabaseError(t *testing.T) { sqlDB, _ := db.DB() _ = sqlDB.Close() - router := gin.New() + router := newAdminRouter() router.GET("/settings/smtp", handler.GetSMTPConfig) w := httptest.NewRecorder() @@ -493,7 +503,7 @@ func TestSettingsHandler_UpdateSMTPConfig_NonAdmin(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "user") c.Next() @@ -519,7 +529,7 @@ func TestSettingsHandler_UpdateSMTPConfig_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -538,7 +548,7 @@ func TestSettingsHandler_UpdateSMTPConfig_Success(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -573,7 +583,7 @@ func TestSettingsHandler_UpdateSMTPConfig_KeepExistingPassword(t *testing.T) { db.Create(&models.Setting{Key: "smtp_from_address", Value: "old@example.com", Category: "smtp", Type: "string"}) db.Create(&models.Setting{Key: "smtp_encryption", Value: "none", Category: "smtp", Type: "string"}) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -606,7 +616,7 @@ func TestSettingsHandler_TestSMTPConfig_NonAdmin(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() 
router.Use(func(c *gin.Context) { c.Set("role", "user") c.Next() @@ -624,7 +634,7 @@ func TestSettingsHandler_TestSMTPConfig_NotConfigured(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -652,7 +662,7 @@ func TestSettingsHandler_TestSMTPConfig_Success(t *testing.T) { db.Create(&models.Setting{Key: "smtp_port", Value: fmt.Sprintf("%d", port), Category: "smtp", Type: "number"}) db.Create(&models.Setting{Key: "smtp_encryption", Value: "none", Category: "smtp", Type: "string"}) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -674,7 +684,7 @@ func TestSettingsHandler_SendTestEmail_NonAdmin(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "user") c.Next() @@ -695,7 +705,7 @@ func TestSettingsHandler_SendTestEmail_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -714,7 +724,7 @@ func TestSettingsHandler_SendTestEmail_NotConfigured(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -746,7 +756,7 @@ func TestSettingsHandler_SendTestEmail_Success(t *testing.T) { db.Create(&models.Setting{Key: "smtp_from_address", Value: "noreply@example.com", Category: "smtp", Type: "string"}) db.Create(&models.Setting{Key: "smtp_encryption", Value: "none", Category: "smtp", Type: "string"}) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() 
@@ -780,7 +790,7 @@ func TestSettingsHandler_ValidatePublicURL_NonAdmin(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "user") c.Next() @@ -801,7 +811,7 @@ func TestSettingsHandler_ValidatePublicURL_InvalidFormat(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -838,7 +848,7 @@ func TestSettingsHandler_ValidatePublicURL_Success(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -878,7 +888,7 @@ func TestSettingsHandler_TestPublicURL_NonAdmin(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "user") c.Next() @@ -899,7 +909,7 @@ func TestSettingsHandler_TestPublicURL_NoRole(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() // No role set in context router.POST("/settings/test-url", handler.TestPublicURL) @@ -917,7 +927,7 @@ func TestSettingsHandler_TestPublicURL_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -936,7 +946,7 @@ func TestSettingsHandler_TestPublicURL_InvalidURL(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -961,7 +971,7 @@ func 
TestSettingsHandler_TestPublicURL_PrivateIPBlocked(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1017,7 +1027,7 @@ func TestSettingsHandler_TestPublicURL_Success(t *testing.T) { // Alternative: Refactor handler to accept injectable URL validator (future improvement). publicTestURL := "https://example.com" - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1045,7 +1055,7 @@ func TestSettingsHandler_TestPublicURL_DNSFailure(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1074,7 +1084,7 @@ func TestSettingsHandler_TestPublicURL_ConnectivityError(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1165,7 +1175,7 @@ func TestSettingsHandler_TestPublicURL_SSRFProtection(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1200,7 +1210,7 @@ func TestSettingsHandler_TestPublicURL_EmbeddedCredentials(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1228,7 +1238,7 @@ func TestSettingsHandler_TestPublicURL_EmptyURL(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ 
-1260,7 +1270,7 @@ func TestSettingsHandler_TestPublicURL_InvalidScheme(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1300,7 +1310,7 @@ func TestSettingsHandler_ValidatePublicURL_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1319,7 +1329,7 @@ func TestSettingsHandler_ValidatePublicURL_URLWithWarning(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1350,7 +1360,7 @@ func TestSettingsHandler_UpdateSMTPConfig_DatabaseError(t *testing.T) { sqlDB, _ := db.DB() _ = sqlDB.Close() - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() @@ -1379,7 +1389,7 @@ func TestSettingsHandler_TestPublicURL_IPv6LocalhostBlocked(t *testing.T) { gin.SetMode(gin.TestMode) handler, _ := setupSettingsHandlerWithMail(t) - router := gin.New() + router := newAdminRouter() router.Use(func(c *gin.Context) { c.Set("role", "admin") c.Next() diff --git a/backend/internal/api/handlers/system_permissions_handler.go b/backend/internal/api/handlers/system_permissions_handler.go new file mode 100644 index 00000000..94f3f661 --- /dev/null +++ b/backend/internal/api/handlers/system_permissions_handler.go @@ -0,0 +1,437 @@ +package handlers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/gin-gonic/gin" + + "github.com/Wikid82/charon/backend/internal/config" + "github.com/Wikid82/charon/backend/internal/models" + "github.com/Wikid82/charon/backend/internal/services" + 
"github.com/Wikid82/charon/backend/internal/util" +) + +type PermissionChecker interface { + Check(path, required string) util.PermissionCheck +} + +type OSChecker struct{} + +func (OSChecker) Check(path, required string) util.PermissionCheck { + return util.CheckPathPermissions(path, required) +} + +type SystemPermissionsHandler struct { + cfg config.Config + checker PermissionChecker + securityService *services.SecurityService +} + +type permissionsPathSpec struct { + Path string + Required string +} + +type permissionsRepairRequest struct { + Paths []string `json:"paths" binding:"required,min=1"` + GroupMode bool `json:"group_mode"` +} + +type permissionsRepairResult struct { + Path string `json:"path"` + Status string `json:"status"` + OwnerUID int `json:"owner_uid,omitempty"` + OwnerGID int `json:"owner_gid,omitempty"` + ModeBefore string `json:"mode_before,omitempty"` + ModeAfter string `json:"mode_after,omitempty"` + Message string `json:"message,omitempty"` + ErrorCode string `json:"error_code,omitempty"` +} + +func NewSystemPermissionsHandler(cfg config.Config, securityService *services.SecurityService, checker PermissionChecker) *SystemPermissionsHandler { + if checker == nil { + checker = OSChecker{} + } + return &SystemPermissionsHandler{ + cfg: cfg, + checker: checker, + securityService: securityService, + } +} + +func (h *SystemPermissionsHandler) GetPermissions(c *gin.Context) { + if !requireAdmin(c) { + h.logAudit(c, "permissions_diagnostics", "blocked", "permissions_admin_only", 0) + return + } + + paths := h.defaultPaths() + results := make([]util.PermissionCheck, 0, len(paths)) + for _, spec := range paths { + results = append(results, h.checker.Check(spec.Path, spec.Required)) + } + + h.logAudit(c, "permissions_diagnostics", "ok", "", len(results)) + c.JSON(http.StatusOK, gin.H{"paths": results}) +} + +func (h *SystemPermissionsHandler) RepairPermissions(c *gin.Context) { + if !requireAdmin(c) { + h.logAudit(c, "permissions_repair", "blocked", 
"permissions_admin_only", 0) + return + } + + if !h.cfg.SingleContainer { + h.logAudit(c, "permissions_repair", "blocked", "permissions_repair_disabled", 0) + c.JSON(http.StatusForbidden, gin.H{ + "error": "repair disabled", + "error_code": "permissions_repair_disabled", + }) + return + } + + if os.Geteuid() != 0 { + h.logAudit(c, "permissions_repair", "blocked", "permissions_non_root", 0) + c.JSON(http.StatusForbidden, gin.H{ + "error": "root privileges required", + "error_code": "permissions_non_root", + }) + return + } + + var req permissionsRepairRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + results := make([]permissionsRepairResult, 0, len(req.Paths)) + allowlist := h.allowlistRoots() + + for _, rawPath := range req.Paths { + result := h.repairPath(rawPath, req.GroupMode, allowlist) + results = append(results, result) + } + + h.logAudit(c, "permissions_repair", "ok", "", len(results)) + c.JSON(http.StatusOK, gin.H{"paths": results}) +} + +func (h *SystemPermissionsHandler) repairPath(rawPath string, groupMode bool, allowlist []string) permissionsRepairResult { + cleanPath, invalidCode := normalizePath(rawPath) + if invalidCode != "" { + return permissionsRepairResult{ + Path: rawPath, + Status: "error", + ErrorCode: invalidCode, + Message: "invalid path", + } + } + + info, err := os.Lstat(cleanPath) + if err != nil { + if os.IsNotExist(err) { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_missing_path", + Message: "path does not exist", + } + } + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_repair_failed", + Message: err.Error(), + } + } + + if info.Mode()&os.ModeSymlink != 0 { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_symlink_rejected", + Message: "symlink not allowed", + } + } + + hasSymlinkComponent, 
symlinkErr := pathHasSymlink(cleanPath) + if symlinkErr != nil { + if os.IsNotExist(symlinkErr) { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_missing_path", + Message: "path does not exist", + } + } + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_repair_failed", + Message: symlinkErr.Error(), + } + } + if hasSymlinkComponent { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_symlink_rejected", + Message: "symlink not allowed", + } + } + + resolved, err := filepath.EvalSymlinks(cleanPath) + if err != nil { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_repair_failed", + Message: err.Error(), + } + } + + if !isWithinAllowlist(resolved, allowlist) { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_outside_allowlist", + Message: "path outside allowlist", + } + } + + if !info.IsDir() && !info.Mode().IsRegular() { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_unsupported_type", + Message: "unsupported path type", + } + } + + uid := os.Geteuid() + gid := os.Getegid() + modeBefore := fmt.Sprintf("%04o", info.Mode().Perm()) + modeAfter := targetMode(info.IsDir(), groupMode) + + alreadyOwned := isOwnedBy(info, uid, gid) + alreadyMode := modeBefore == modeAfter + if alreadyOwned && alreadyMode { + return permissionsRepairResult{ + Path: cleanPath, + Status: "skipped", + OwnerUID: uid, + OwnerGID: gid, + ModeBefore: modeBefore, + ModeAfter: modeAfter, + Message: "ownership and mode already correct", + ErrorCode: "permissions_repair_skipped", + } + } + + if err := os.Chown(cleanPath, uid, gid); err != nil { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: mapRepairErrorCode(err), + Message: err.Error(), + } + } + + parsedMode, parseErr := 
parseMode(modeAfter) + if parseErr != nil { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: "permissions_repair_failed", + Message: parseErr.Error(), + } + } + if err := os.Chmod(cleanPath, parsedMode); err != nil { + return permissionsRepairResult{ + Path: cleanPath, + Status: "error", + ErrorCode: mapRepairErrorCode(err), + Message: err.Error(), + } + } + + return permissionsRepairResult{ + Path: cleanPath, + Status: "repaired", + OwnerUID: uid, + OwnerGID: gid, + ModeBefore: modeBefore, + ModeAfter: modeAfter, + Message: "ownership and mode updated", + } +} + +func (h *SystemPermissionsHandler) defaultPaths() []permissionsPathSpec { + dataRoot := filepath.Dir(h.cfg.DatabasePath) + return []permissionsPathSpec{ + {Path: dataRoot, Required: "rwx"}, + {Path: h.cfg.DatabasePath, Required: "rw"}, + {Path: filepath.Join(dataRoot, "backups"), Required: "rwx"}, + {Path: filepath.Join(dataRoot, "imports"), Required: "rwx"}, + {Path: filepath.Join(dataRoot, "caddy"), Required: "rwx"}, + {Path: filepath.Join(dataRoot, "crowdsec"), Required: "rwx"}, + {Path: filepath.Join(dataRoot, "geoip"), Required: "rwx"}, + {Path: h.cfg.ConfigRoot, Required: "rwx"}, + {Path: h.cfg.CaddyLogDir, Required: "rwx"}, + {Path: h.cfg.CrowdSecLogDir, Required: "rwx"}, + {Path: h.cfg.PluginsDir, Required: "r-x"}, + } +} + +func (h *SystemPermissionsHandler) allowlistRoots() []string { + dataRoot := filepath.Dir(h.cfg.DatabasePath) + return []string{ + dataRoot, + h.cfg.ConfigRoot, + h.cfg.CaddyLogDir, + h.cfg.CrowdSecLogDir, + } +} + +func (h *SystemPermissionsHandler) logAudit(c *gin.Context, action, result, code string, pathCount int) { + if h.securityService == nil { + return + } + payload := map[string]any{ + "result": result, + "error_code": code, + "path_count": pathCount, + "admin": isAdmin(c), + } + payloadJSON, _ := json.Marshal(payload) + + actor := "unknown" + if userID, ok := c.Get("userID"); ok { + actor = fmt.Sprintf("%v", userID) + } + + _ = 
h.securityService.LogAudit(&models.SecurityAudit{ + Actor: actor, + Action: action, + EventCategory: "permissions", + Details: string(payloadJSON), + IPAddress: c.ClientIP(), + UserAgent: c.Request.UserAgent(), + }) +} + +func normalizePath(rawPath string) (string, string) { + if rawPath == "" { + return "", "permissions_invalid_path" + } + if !filepath.IsAbs(rawPath) { + return "", "permissions_invalid_path" + } + clean := filepath.Clean(rawPath) + if clean == "." || clean == ".." { + return "", "permissions_invalid_path" + } + if containsParentReference(clean) { + return "", "permissions_invalid_path" + } + return clean, "" +} + +func containsParentReference(clean string) bool { + if clean == ".." { + return true + } + if strings.HasPrefix(clean, ".."+string(os.PathSeparator)) { + return true + } + if strings.Contains(clean, string(os.PathSeparator)+".."+string(os.PathSeparator)) { + return true + } + return strings.HasSuffix(clean, string(os.PathSeparator)+"..") +} + +func pathHasSymlink(path string) (bool, error) { + clean := filepath.Clean(path) + parts := strings.Split(clean, string(os.PathSeparator)) + current := string(os.PathSeparator) + for _, part := range parts { + if part == "" { + continue + } + current = filepath.Join(current, part) + info, err := os.Lstat(current) + if err != nil { + return false, err + } + if info.Mode()&os.ModeSymlink != 0 { + return true, nil + } + } + return false, nil +} + +func isWithinAllowlist(path string, allowlist []string) bool { + for _, root := range allowlist { + rel, err := filepath.Rel(root, path) + if err != nil { + continue + } + if rel == "." 
|| (!strings.HasPrefix(rel, ".."+string(os.PathSeparator)) && rel != "..") { + return true + } + } + return false +} + +func targetMode(isDir, groupMode bool) string { + if isDir { + if groupMode { + return "0770" + } + return "0700" + } + if groupMode { + return "0660" + } + return "0600" +} + +func parseMode(mode string) (os.FileMode, error) { + if mode == "" { + return 0, fmt.Errorf("mode required") + } + var parsed uint32 + if _, err := fmt.Sscanf(mode, "%o", &parsed); err != nil { + return 0, fmt.Errorf("parse mode: %w", err) + } + return os.FileMode(parsed), nil +} + +func isOwnedBy(info os.FileInfo, uid, gid int) bool { + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return false + } + return int(stat.Uid) == uid && int(stat.Gid) == gid +} + +func mapRepairErrorCode(err error) string { + switch { + case err == nil: + return "" + case errors.Is(err, syscall.EROFS): + return "permissions_readonly" + case errors.Is(err, syscall.EACCES) || os.IsPermission(err): + return "permissions_write_denied" + default: + return "permissions_repair_failed" + } +} diff --git a/backend/internal/api/handlers/system_permissions_handler_test.go b/backend/internal/api/handlers/system_permissions_handler_test.go new file mode 100644 index 00000000..04d2ae17 --- /dev/null +++ b/backend/internal/api/handlers/system_permissions_handler_test.go @@ -0,0 +1,107 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" + + "github.com/Wikid82/charon/backend/internal/config" + "github.com/Wikid82/charon/backend/internal/util" +) + +type stubPermissionChecker struct{} + +func (stubPermissionChecker) Check(path, required string) util.PermissionCheck { + return util.PermissionCheck{ + Path: path, + Required: required, + Exists: true, + Writable: true, + OwnerUID: 1000, + OwnerGID: 1000, + Mode: "0755", + } +} + +func TestSystemPermissionsHandler_GetPermissions_Admin(t 
*testing.T) { + gin.SetMode(gin.TestMode) + + cfg := config.Config{ + DatabasePath: "/app/data/charon.db", + ConfigRoot: "/config", + CaddyLogDir: "/var/log/caddy", + CrowdSecLogDir: "/var/log/crowdsec", + PluginsDir: "/app/plugins", + } + + h := NewSystemPermissionsHandler(cfg, nil, stubPermissionChecker{}) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Set("role", "admin") + c.Request = httptest.NewRequest(http.MethodGet, "/system/permissions", http.NoBody) + + h.GetPermissions(c) + + require.Equal(t, http.StatusOK, w.Code) + + var payload struct { + Paths []map[string]any `json:"paths"` + } + require.NoError(t, json.Unmarshal(w.Body.Bytes(), &payload)) + require.NotEmpty(t, payload.Paths) + + first := payload.Paths[0] + require.NotEmpty(t, first["path"]) + require.NotEmpty(t, first["required"]) + require.NotEmpty(t, first["mode"]) +} + +func TestSystemPermissionsHandler_GetPermissions_NonAdmin(t *testing.T) { + gin.SetMode(gin.TestMode) + + cfg := config.Config{} + h := NewSystemPermissionsHandler(cfg, nil, stubPermissionChecker{}) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Set("role", "user") + c.Request = httptest.NewRequest(http.MethodGet, "/system/permissions", http.NoBody) + + h.GetPermissions(c) + + require.Equal(t, http.StatusForbidden, w.Code) + + var payload map[string]string + require.NoError(t, json.Unmarshal(w.Body.Bytes(), &payload)) + require.Equal(t, "permissions_admin_only", payload["error_code"]) +} + +func TestSystemPermissionsHandler_RepairPermissions_NonRoot(t *testing.T) { + if os.Geteuid() == 0 { + t.Skip("test requires non-root execution") + } + + gin.SetMode(gin.TestMode) + + cfg := config.Config{SingleContainer: true} + h := NewSystemPermissionsHandler(cfg, nil, stubPermissionChecker{}) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Set("role", "admin") + c.Request = httptest.NewRequest(http.MethodPost, "/system/permissions/repair", http.NoBody) + + 
h.RepairPermissions(c) + + require.Equal(t, http.StatusForbidden, w.Code) + + var payload map[string]string + require.NoError(t, json.Unmarshal(w.Body.Bytes(), &payload)) + require.Equal(t, "permissions_non_root", payload["error_code"]) +} diff --git a/backend/internal/api/routes/routes.go b/backend/internal/api/routes/routes.go index 55c9dfb3..80264788 100644 --- a/backend/internal/api/routes/routes.go +++ b/backend/internal/api/routes/routes.go @@ -157,7 +157,8 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM // Backup routes backupService := services.NewBackupService(&cfg) backupService.Start() // Start cron scheduler for scheduled backups - backupHandler := handlers.NewBackupHandler(backupService) + securityService := services.NewSecurityService(db) + backupHandler := handlers.NewBackupHandlerWithDeps(backupService, securityService) // DB Health endpoint (uses backup service for last backup time) dbHealthHandler := handlers.NewDBHealthHandler(db, backupService) @@ -221,20 +222,26 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM protected.GET("/websocket/connections", wsStatusHandler.GetConnections) protected.GET("/websocket/stats", wsStatusHandler.GetStats) + dataRoot := filepath.Dir(cfg.DatabasePath) + // Security Notification Settings securityNotificationService := services.NewSecurityNotificationService(db) - securityNotificationHandler := handlers.NewSecurityNotificationHandler(securityNotificationService) + securityNotificationHandler := handlers.NewSecurityNotificationHandlerWithDeps(securityNotificationService, securityService, dataRoot) protected.GET("/security/notifications/settings", securityNotificationHandler.GetSettings) protected.PUT("/security/notifications/settings", securityNotificationHandler.UpdateSettings) + // System permissions diagnostics and repair + systemPermissionsHandler := handlers.NewSystemPermissionsHandler(cfg, securityService, nil) + 
protected.GET("/system/permissions", systemPermissionsHandler.GetPermissions) + protected.POST("/system/permissions/repair", systemPermissionsHandler.RepairPermissions) + // Audit Logs - securityService := services.NewSecurityService(db) auditLogHandler := handlers.NewAuditLogHandler(securityService) protected.GET("/audit-logs", auditLogHandler.List) protected.GET("/audit-logs/:uuid", auditLogHandler.Get) // Settings - with CaddyManager and Cerberus for security settings reload - settingsHandler := handlers.NewSettingsHandlerWithDeps(db, caddyManager, cerb) + settingsHandler := handlers.NewSettingsHandlerWithDeps(db, caddyManager, cerb, securityService, dataRoot) protected.GET("/settings", settingsHandler.GetSettings) protected.POST("/settings", settingsHandler.UpdateSetting) @@ -387,7 +394,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM protected.POST("/uptime/sync", uptimeHandler.Sync) // Notification Providers - notificationProviderHandler := handlers.NewNotificationProviderHandler(notificationService) + notificationProviderHandler := handlers.NewNotificationProviderHandlerWithDeps(notificationService, securityService, dataRoot) protected.GET("/notifications/providers", notificationProviderHandler.List) protected.POST("/notifications/providers", notificationProviderHandler.Create) protected.PUT("/notifications/providers/:id", notificationProviderHandler.Update) @@ -397,7 +404,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM protected.GET("/notifications/templates", notificationProviderHandler.Templates) // External notification templates (saved templates for providers) - notificationTemplateHandler := handlers.NewNotificationTemplateHandler(notificationService) + notificationTemplateHandler := handlers.NewNotificationTemplateHandlerWithDeps(notificationService, securityService, dataRoot) protected.GET("/notifications/external-templates", notificationTemplateHandler.List) 
protected.POST("/notifications/external-templates", notificationTemplateHandler.Create) protected.PUT("/notifications/external-templates/:id", notificationTemplateHandler.Update) @@ -640,7 +647,8 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM // RegisterImportHandler wires up import routes with config dependencies. func RegisterImportHandler(router *gin.Engine, db *gorm.DB, caddyBinary, importDir, mountPath string) { - importHandler := handlers.NewImportHandler(db, caddyBinary, importDir, mountPath) + securityService := services.NewSecurityService(db) + importHandler := handlers.NewImportHandlerWithDeps(db, caddyBinary, importDir, mountPath, securityService) api := router.Group("/api/v1") importHandler.RegisterRoutes(api) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 1599baff..1e2f9520 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -14,6 +14,7 @@ type Config struct { Environment string HTTPPort string DatabasePath string + ConfigRoot string FrontendDir string CaddyAdminAPI string CaddyConfigDir string @@ -23,6 +24,10 @@ type Config struct { JWTSecret string EncryptionKey string ACMEStaging bool + SingleContainer bool + PluginsDir string + CaddyLogDir string + CrowdSecLogDir string Debug bool Security SecurityConfig Emergency EmergencyConfig @@ -82,6 +87,7 @@ func Load() (Config, error) { Environment: getEnvAny("development", "CHARON_ENV", "CPM_ENV"), HTTPPort: getEnvAny("8080", "CHARON_HTTP_PORT", "CPM_HTTP_PORT"), DatabasePath: getEnvAny(filepath.Join("data", "charon.db"), "CHARON_DB_PATH", "CPM_DB_PATH"), + ConfigRoot: getEnvAny("/config", "CHARON_CADDY_CONFIG_ROOT"), FrontendDir: getEnvAny(filepath.Clean(filepath.Join("..", "frontend", "dist")), "CHARON_FRONTEND_DIR", "CPM_FRONTEND_DIR"), CaddyAdminAPI: getEnvAny("http://localhost:2019", "CHARON_CADDY_ADMIN_API", "CPM_CADDY_ADMIN_API"), CaddyConfigDir: getEnvAny(filepath.Join("data", 
"caddy"), "CHARON_CADDY_CONFIG_DIR", "CPM_CADDY_CONFIG_DIR"), @@ -91,6 +97,10 @@ func Load() (Config, error) { JWTSecret: getEnvAny("change-me-in-production", "CHARON_JWT_SECRET", "CPM_JWT_SECRET"), EncryptionKey: getEnvAny("", "CHARON_ENCRYPTION_KEY"), ACMEStaging: getEnvAny("", "CHARON_ACME_STAGING", "CPM_ACME_STAGING") == "true", + SingleContainer: strings.EqualFold(getEnvAny("true", "CHARON_SINGLE_CONTAINER_MODE"), "true"), + PluginsDir: getEnvAny("/app/plugins", "CHARON_PLUGINS_DIR"), + CaddyLogDir: getEnvAny("/var/log/caddy", "CHARON_CADDY_LOG_DIR"), + CrowdSecLogDir: getEnvAny("/var/log/crowdsec", "CHARON_CROWDSEC_LOG_DIR"), Security: loadSecurityConfig(), Emergency: loadEmergencyConfig(), Debug: getEnvAny("false", "CHARON_DEBUG", "CPM_DEBUG") == "true", diff --git a/backend/internal/models/notification_config.go b/backend/internal/models/notification_config.go index e3097c7b..9c3f0203 100644 --- a/backend/internal/models/notification_config.go +++ b/backend/internal/models/notification_config.go @@ -9,14 +9,16 @@ import ( // NotificationConfig stores configuration for security notifications. 
type NotificationConfig struct { - ID string `gorm:"primaryKey" json:"id"` - Enabled bool `json:"enabled"` - MinLogLevel string `json:"min_log_level"` // error, warn, info, debug - WebhookURL string `json:"webhook_url"` - NotifyWAFBlocks bool `json:"notify_waf_blocks"` - NotifyACLDenies bool `json:"notify_acl_denies"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID string `gorm:"primaryKey" json:"id"` + Enabled bool `json:"enabled"` + MinLogLevel string `json:"min_log_level"` // error, warn, info, debug + WebhookURL string `json:"webhook_url"` + NotifyWAFBlocks bool `json:"notify_waf_blocks"` + NotifyACLDenies bool `json:"notify_acl_denies"` + NotifyRateLimitHits bool `json:"notify_rate_limit_hits"` + EmailRecipients string `json:"email_recipients"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } // BeforeCreate sets the ID if not already set. diff --git a/backend/internal/services/log_service.go b/backend/internal/services/log_service.go index 7c92b0fd..b5c6f004 100644 --- a/backend/internal/services/log_service.go +++ b/backend/internal/services/log_service.go @@ -17,13 +17,14 @@ import ( ) type LogService struct { - LogDir string + LogDir string + CaddyLogDir string } func NewLogService(cfg *config.Config) *LogService { // Assuming logs are in data/logs relative to app root logDir := filepath.Join(filepath.Dir(cfg.DatabasePath), "logs") - return &LogService{LogDir: logDir} + return &LogService{LogDir: logDir, CaddyLogDir: cfg.CaddyLogDir} } func (s *LogService) logDirs() []string { @@ -42,15 +43,14 @@ func (s *LogService) logDirs() []string { } addDir(s.LogDir) + if s.CaddyLogDir != "" { + addDir(s.CaddyLogDir) + } if accessLogPath := os.Getenv("CHARON_CADDY_ACCESS_LOG"); accessLogPath != "" { addDir(filepath.Dir(accessLogPath)) } - if _, err := os.Stat("/var/log/caddy"); err == nil { - addDir("/var/log/caddy") - } - return dirs } diff --git 
a/backend/internal/services/security_notification_service.go b/backend/internal/services/security_notification_service.go index 6050bf46..e5fa7734 100644 --- a/backend/internal/services/security_notification_service.go +++ b/backend/internal/services/security_notification_service.go @@ -33,10 +33,12 @@ func (s *SecurityNotificationService) GetSettings() (*models.NotificationConfig, if err == gorm.ErrRecordNotFound { // Return default config if none exists return &models.NotificationConfig{ - Enabled: false, - MinLogLevel: "error", - NotifyWAFBlocks: true, - NotifyACLDenies: true, + Enabled: false, + MinLogLevel: "error", + NotifyWAFBlocks: true, + NotifyACLDenies: true, + NotifyRateLimitHits: true, + EmailRecipients: "", }, nil } return &config, err diff --git a/backend/internal/util/permissions.go b/backend/internal/util/permissions.go new file mode 100644 index 00000000..7fd2c16f --- /dev/null +++ b/backend/internal/util/permissions.go @@ -0,0 +1,151 @@ +package util + +import ( + "errors" + "fmt" + "os" + "strings" + "syscall" +) + +type PermissionCheck struct { + Path string `json:"path"` + Required string `json:"required"` + Exists bool `json:"exists"` + Writable bool `json:"writable"` + OwnerUID int `json:"owner_uid"` + OwnerGID int `json:"owner_gid"` + Mode string `json:"mode"` + Error string `json:"error,omitempty"` + ErrorCode string `json:"error_code,omitempty"` +} + +func CheckPathPermissions(path, required string) PermissionCheck { + result := PermissionCheck{ + Path: path, + Required: required, + } + + info, err := os.Stat(path) + if err != nil { + result.Writable = false + result.Error = err.Error() + result.ErrorCode = MapDiagnosticErrorCode(err) + return result + } + + result.Exists = true + + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + result.OwnerUID = int(stat.Uid) + result.OwnerGID = int(stat.Gid) + } + result.Mode = fmt.Sprintf("%04o", info.Mode().Perm()) + + if !info.IsDir() && !info.Mode().IsRegular() { + result.Writable = false + 
result.Error = "unsupported file type" + result.ErrorCode = "permissions_unsupported_type" + return result + } + + if strings.Contains(required, "w") { + if info.IsDir() { + probeFile, probeErr := os.CreateTemp(path, "permcheck-*") + if probeErr != nil { + result.Writable = false + result.Error = probeErr.Error() + result.ErrorCode = MapDiagnosticErrorCode(probeErr) + return result + } + if closeErr := probeFile.Close(); closeErr != nil { + result.Writable = false + result.Error = closeErr.Error() + result.ErrorCode = MapDiagnosticErrorCode(closeErr) + return result + } + if removeErr := os.Remove(probeFile.Name()); removeErr != nil { + result.Writable = false + result.Error = removeErr.Error() + result.ErrorCode = MapDiagnosticErrorCode(removeErr) + return result + } + result.Writable = true + return result + } + + file, openErr := os.OpenFile(path, os.O_WRONLY, 0) + if openErr != nil { + result.Writable = false + result.Error = openErr.Error() + result.ErrorCode = MapDiagnosticErrorCode(openErr) + return result + } + if closeErr := file.Close(); closeErr != nil { + result.Writable = false + result.Error = closeErr.Error() + result.ErrorCode = MapDiagnosticErrorCode(closeErr) + return result + } + result.Writable = true + return result + } + + result.Writable = false + return result +} + +func MapDiagnosticErrorCode(err error) string { + switch { + case err == nil: + return "" + case os.IsNotExist(err): + return "permissions_missing_path" + case errors.Is(err, syscall.EROFS): + return "permissions_readonly" + case errors.Is(err, syscall.EACCES) || os.IsPermission(err): + return "permissions_write_denied" + default: + return "permissions_write_failed" + } +} + +func MapSaveErrorCode(err error) (string, bool) { + switch { + case err == nil: + return "", false + case IsSQLiteReadOnlyError(err): + return "permissions_db_readonly", true + case IsSQLiteLockedError(err): + return "permissions_db_locked", true + case errors.Is(err, syscall.EROFS): + return 
"permissions_readonly", true + case errors.Is(err, syscall.EACCES) || os.IsPermission(err): + return "permissions_write_denied", true + case strings.Contains(strings.ToLower(err.Error()), "permission denied"): + return "permissions_write_denied", true + default: + return "", false + } +} + +func IsSQLiteReadOnlyError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + return strings.Contains(msg, "readonly") || + strings.Contains(msg, "read-only") || + strings.Contains(msg, "attempt to write a readonly database") || + strings.Contains(msg, "sqlite_readonly") +} + +func IsSQLiteLockedError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + return strings.Contains(msg, "database is locked") || + strings.Contains(msg, "sqlite_busy") || + strings.Contains(msg, "database locked") +} diff --git a/backend/internal/util/permissions_test.go b/backend/internal/util/permissions_test.go new file mode 100644 index 00000000..7b0b645d --- /dev/null +++ b/backend/internal/util/permissions_test.go @@ -0,0 +1,57 @@ +package util + +import ( + "errors" + "fmt" + "syscall" + "testing" +) + +func TestMapSaveErrorCode(t *testing.T) { + tests := []struct { + name string + err error + wantCode string + wantOK bool + }{ + { + name: "sqlite readonly", + err: errors.New("attempt to write a readonly database"), + wantCode: "permissions_db_readonly", + wantOK: true, + }, + { + name: "sqlite locked", + err: errors.New("database is locked"), + wantCode: "permissions_db_locked", + wantOK: true, + }, + { + name: "permission denied", + err: fmt.Errorf("write failed: %w", syscall.EACCES), + wantCode: "permissions_write_denied", + wantOK: true, + }, + { + name: "not a permission error", + err: errors.New("other error"), + wantCode: "", + wantOK: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + code, ok := MapSaveErrorCode(tt.err) + if code != tt.wantCode || ok != tt.wantOK { + 
t.Fatalf("MapSaveErrorCode() = (%q, %v), want (%q, %v)", code, ok, tt.wantCode, tt.wantOK) + } + }) + } +} + +func TestIsSQLiteReadOnlyError(t *testing.T) { + if !IsSQLiteReadOnlyError(errors.New("SQLITE_READONLY")) { + t.Fatalf("expected SQLITE_READONLY to be detected") + } +} diff --git a/docs/plans/current_spec.md b/docs/plans/current_spec.md index 2a3d0eb7..b9a567cf 100644 --- a/docs/plans/current_spec.md +++ b/docs/plans/current_spec.md @@ -1,801 +1,1018 @@ -# E2E Playwright Shard Timeout Investigation — Current Spec - -Last updated: 2026-02-10 - -## Goal -- Concise summary: investigate GitHub Actions run https://github.com/Wikid82/Charon/actions/runs/21865692694 where the E2E Playwright job reports Shard 3 stopping at ~30 minutes despite configured timeouts of ~40 minutes. Produce reproducible diagnostics, collect artifacts/logs, identify root cause hypotheses, and provide prioritized remediations and short-term unblock steps. - -## Phases -- Discover: collect logs and artifacts. -- Analyze: review config and correlate shard → tests. -- Remediate: short-term and long-term fixes. -- Verify: reproduce and confirm the fix. 
- --- - -## 1) Discover — exact places to collect logs & artifacts - -### GitHub Actions (run-level) -- Run page: https://github.com/Wikid82/Charon/actions/runs/21865692694 -- Run logs (zip): GET https://api.github.com/repos/Wikid82/Charon/actions/runs/21865692694/logs - - Programmatic commands: - ```bash - export GITHUB_OWNER=Wikid82 - export GITHUB_REPO=Charon - export RUN_ID=21865692694 - # Requires GITHUB_TOKEN set with repo access - curl -H "Accept: application/vnd.github+json" \ - -H "Authorization: token $GITHUB_TOKEN" \ - -L "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/actions/runs/$RUN_ID/logs" \ - -o run-${RUN_ID}-logs.zip - unzip -d run-${RUN_ID}-logs run-${RUN_ID}-logs.zip - ``` -- Artifacts list (API): - ```bash - curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/actions/runs/$RUN_ID/artifacts" | jq '.' - ``` -- gh CLI (interactive/script): - ```bash - gh run view $RUN_ID --repo $GITHUB_OWNER/$GITHUB_REPO --log > run-$RUN_ID-summary.log - gh run download $RUN_ID --repo $GITHUB_OWNER/$GITHUB_REPO --dir artifacts-$RUN_ID - ``` - -### GitHub Actions (job-level) -- List jobs for the run and find Playwright shard job(s): - ```bash - curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/actions/runs/$RUN_ID/jobs" | jq '.jobs[] | {id: .id, name: .name, runner_name: .runner_name, started_at: .started_at, completed_at: .completed_at}' - ``` -- For JOB_ID identified as the shard job, download job logs: - ```bash - curl -H "Authorization: token $GITHUB_TOKEN" -L \ - "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/actions/jobs/$JOB_ID/logs" -o job-${JOB_ID}-logs.zip - unzip -d job-${JOB_ID}-logs job-${JOB_ID}-logs.zip - ``` - -### Playwright test outputs used by this project -- Search and collect the following files in the repo root (or workflow-run directories): - - `playwright.config.ts`, `playwright.config.js`, `playwright.config.mjs` 
- - `package.json` scripts invoking Playwright (e.g., `test:e2e`, `e2e:ci`) - - `.github/workflows/*` steps that run Playwright -- Typical Playwright outputs to collect (per-shard): - - `/trace.zip` - - `/test-results.json` or `test-results/*` - - `/video/*` - - `/*.log` (stdout/stderr) - -Observed local example (for context): the developer ran -`npx playwright test --project=chromium --output=/tmp/playwright-chromium-output --reporter=list > /tmp/playwright-chromium.log 2>&1` — look for similar invocations in workflows/scripts. - -### Repository container logs (containers/) -- containers/charon: - - Files to check: `containers/charon/docker-compose.yml`, any `logs/` or `data/` directories under `containers/charon/`. - - Local commands (when reproducing): - ```bash - docker compose -f containers/charon/docker-compose.yml logs --no-color --timestamps > containers-charon-logs.txt - docker logs --timestamps --since "1h" charon-e2e > charon-e2e.log 2>&1 || true - ``` -- containers/caddy: - - Files: `containers/caddy/Caddyfile`, `containers/caddy/config/`, `containers/caddy/logs/` - - Local checks: - ```bash - docker logs --timestamps caddy > caddy.log 2>&1 || true - curl -sS http://127.0.0.1:2019/ || true # admin - curl -sS http://127.0.0.1:2020/ || true # emergency - ``` - ---- - -## 2) Analyze — specific files and config to review (exact paths) - -- Workflows (search these paths): - - `.github/workflows/*.yml` — likely candidates: `.github/workflows/e2e.yml`, `.github/workflows/ci.yml`, `.github/workflows/playwright.yml` (run `grep -R "playwright" .github/workflows || true`). - - Look for `timeout-minutes:` either at top-level workflow or under `jobs:.timeout-minutes`. - -- Playwright config files: - - `/projects/Charon/playwright.config.ts` - - `/projects/Charon/playwright.config.js` - - `/projects/Charon/playwright.config.mjs` - - Inspect `projects`, `workers`, `retries`, `outputDir`, `reporter` sections. 
- -- package.json and scripts: - - `/projects/Charon/package.json` — inspect `scripts` for e.g. `test:e2e`, `e2e:ci` and the exact Playwright CLI flags used by CI. - -- GitHub skill scripts & E2E runner: - - `.github/skills/scripts/skill-runner.sh` — used in `docs` and testing instructions; check for `docker-rebuild-e2e`, `test-e2e-playwright-coverage`. - - Commands: - ```bash - sed -n '1,240p' .github/skills/scripts/skill-runner.sh - grep -n "docker-rebuild-e2e\|test-e2e-playwright-coverage\|playwright" -n .github/skills || true - ``` - -- Makefile: - - `/projects/Charon/Makefile` — search for targets related to `e2e`, `playwright`, `rebuild`. - ---- - -## 3) Steps to download GitHub Actions logs & artifacts for run 21865692694 - -### Programmatic (API) -1. List artifacts for run: -```bash -curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/Wikid82/Charon/actions/runs/21865692694/artifacts" | jq '.' -``` -2. Download run logs (zip): -```bash -curl -H "Authorization: token $GITHUB_TOKEN" -L \ - "https://api.github.com/repos/Wikid82/Charon/actions/runs/21865692694/logs" -o run-21865692694-logs.zip -unzip -d run-21865692694-logs run-21865692694-logs.zip -``` -3. List jobs to find Playwright shard job id(s): -```bash -curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/Wikid82/Charon/actions/runs/21865692694/jobs" | jq '.jobs[] | {id: .id, name: .name, runner_name: .runner_name, started_at: .started_at, completed_at: .completed_at}' -``` -4. 
Download job logs by JOB_ID: -```bash -curl -H "Authorization: token $GITHUB_TOKEN" -L \ - "https://api.github.com/repos/Wikid82/Charon/actions/jobs/$JOB_ID/logs" -o job-$JOB_ID-logs.zip -unzip -d job-$JOB_ID-logs job-$JOB_ID-logs.zip -``` - -### Using gh CLI -```bash -gh run view 21865692694 --repo Wikid82/Charon --log > run-21865692694-summary.log -gh run download 21865692694 --repo Wikid82/Charon --dir artifacts-21865692694 -``` - -### Manual web UI -- Visit run page and download artifacts and job logs from the job view. - ---- - -## 4) How to locate shard-specific logs and correlate shard indices to tests - -- Typical patterns to inspect: - - Look for Playwright CLI flags in the job step (e.g., `--shard=INDEX/TOTAL`, `--output=/tmp/...`). - - If the job ran `npx playwright test --output=/tmp/...`, search the downloaded job logs for that exact command to find the shard index. - -- Commands to list tests assigned to a shard (dry-run): -```bash -# Show which tests a given shard would run (no execution) -npx playwright test --list --shard=INDEX/TOTAL - -# Or run with reporter=list (shows test items as executed) -npx playwright test --shard=INDEX/TOTAL --reporter=list -``` - -- Note: Playwright shard index is zero-based. If CI logs show `--shard=3/4`, double-check whether the team used zero-based numbering; confirm by re-running the `--list` command. - -Expected per-shard artifact names (if implemented): -- `e2e-shard--output` containing `trace.zip`, `video/*`, `test-results.json`, and shard-specific logs (stdout/stderr files). - ---- - -## 5) Runner/container logs to inspect - -- GitHub-hosted runner: review the Actions job logs for runner messages and any `Runner` diagnostic lines. You cannot access host-level logs. 
- -- Self-hosted runner (if used): retrieve host system logs (requires access to runner host): - ```bash - sudo journalctl -u actions.runner.* -n 1000 > runner-service-journal.log - sudo journalctl -k --since "1 hour ago" | grep -i oom > runner-kernel-oom.log || true - sudo journalctl -u docker.service -n 200 > docker-journal.log - ``` - -- Docker container logs (charon, caddy, charon-e2e): - ```bash - docker ps -a --filter "name=charon" --format "{{.Names}} {{.Status}}" > containers-ps.txt - docker logs --since "1h" charon-e2e > charon-e2e.log 2>&1 || true - docker logs --since "1h" caddy > caddy.log 2>&1 || true - ``` - -Check Caddy admin/emergency ports (2019 & 2020) to confirm the proxy was healthy during the test run: -```bash -curl -sS --max-time 5 http://127.0.0.1:2019/ || echo "admin not responding" -curl -sS --max-time 5 http://127.0.0.1:2020/ || echo "emergency not responding" -``` - ---- - -## 6) Hypotheses for why Shard 3 stopped at ~30m (descriptions + exact artifacts to search) - -H1 — Workflow/job timeout configured smaller than expected -- Search: - - `.github/workflows/*` for `timeout-minutes:` - - job logs for `Timeout` or `Job execution time exceeded` -- Commands: - ```bash - grep -n "timeout-minutes" .github/workflows -R || true - grep -i "timeout" -R run-${RUN_ID}-logs || true - ``` -- Confirmed by: `timeout-minutes: 30` or job logs showing `aborting execution due to timeout`. - -H2 — Runner preemption / connection loss -- Search job logs for: `Runner lost`, `The runner has been shutdown`, `Connection to the server was lost`. -- Commands: - ```bash - grep -iE "runner lost|runner.*shutdown|connection.*lost|Job canceled|cancelled by" -R run-${RUN_ID}-logs || true - ``` -- Confirmed by: runner disconnect lines and abrupt end of logs with no Playwright stack trace. - -H3 — E2E environment container (charon/caddy) died or became unhealthy -- Search container logs for crash/fatal/panic messages and timestamps matching the job stop time. 
-- Commands: - ```bash - docker ps -a --filter "name=charon" --format '{{.Names}} {{.Status}}' - docker logs charon-e2e --since "2h" | sed -n '1,200p' - grep -iE "panic|fatal|segfault|exited|health.*unhealthy|503|502" containers -R || true - ``` -- Confirmed by: container exit matching job finish time and Caddy returning 502/503 during run. - -H4 — Playwright/Node process killed by OOM -- Search for `Killed`, kernel `oom_reaper` lines, system `dmesg` outputs. -- Commands: - ```bash - grep -R "Killed" job-${JOB_ID}-logs || true - # on self-hosted runner host - sudo journalctl -k --since '2 hours ago' | grep -i oom || true - ``` -- Confirmed by: kernel OOM logs at same timestamp or `Killed` in job logs. - -H5 — Script-level early timeout (explicit `timeout 30m` or `kill`) -- Search `.github/skills` and workflow steps for `timeout 30m`, `timeout 1800`, or `kill` calls. -- Commands: - ```bash - grep -R "\btimeout\b\|kill -9\|kill -15\|pkill" -n .github || true - ``` -- Confirmed by: a script with `timeout 30m` or similar wrapper used in the job. - -H6 — Misinterpreted units or mis-configuration (seconds vs minutes) -- Search for numeric values used in scripts and steps (e.g., `1800` used where minutes expected). -- Commands: - ```bash - grep -R "\b1800\b\|\b3600\b\|timeout-minutes" -n .github || true - ``` -- Confirmed by: a value of `1800` where `timeout-minutes` or similar was expected to be minutes. - -For each hypothesis, the exact lines/entries returned by the grep/journal/docker commands are the evidence to confirm or refute it. Keep timestamps to correlate with the job start/completion times in the run logs. - ---- - -## 7) Prioritized remediation plan (short-term → long-term) - -### Short-term (unblock re-runs quickly) -1. Download and attach all logs/artifacts for run 21865692694 (use `gh run download`) and share with E2E test author. -2. Temporarily bump `timeout-minutes` for the failing workflow to 60 to allow full runs while diagnosing. -3. 
Add an `if: always()` step to the E2E job that collects diagnostics and uploads them as artifacts (free memory, `dmesg`, `ps aux`, `docker ps -a`, `docker logs charon-e2e`). -4. Re-run just the failing shard with added `DEBUG=pw:api` and `PWDEBUG=1` and persist shard outputs. - -### Medium-term -1. Persist per-shard Playwright outputs via `actions/upload-artifact@v4` for traces/videos/test-results. -2. Add Playwright `retries` for transient failures and `--trace`/`--video` options. -3. Add a CI smoke check before full shard execution to confirm env health. -4. If self-hosted, add runner health checks and alerting (memory, disk, Docker status). - -### Long-term -1. Implement stable test splitting based on historical test durations rather than equal-file sharding. -2. Introduce resource constraints and monitoring to protect against OOM and flapping containers. -3. Build a golden-minimal E2E smoke job that must pass before running full shards. - ---- - -## 8) Minimal reproduction checklist (local) - -1. Rebuild E2E image used by CI (per repo skill): -```bash -.github/skills/scripts/skill-runner.sh docker-rebuild-e2e -``` -2. Start the environment (example): -```bash -docker compose -f containers/charon/docker-compose.yml up -d -``` -3. Set base URL and run the same shard (replace INDEX/TOTAL with values from CI): -```bash -export PLAYWRIGHT_BASE_URL=http://localhost:5173 -DEBUG=pw:api PWDEBUG=1 \ - npx playwright test --shard=INDEX/TOTAL --project=chromium \ - --output=/tmp/playwright-shard-INDEX --reporter=list > /tmp/playwright-shard-INDEX.log 2>&1 -``` -4. 
If reproducing a timeout, immediately collect: -```bash -docker ps -a --format '{{.Names}} {{.Status}}' > reproduce-docker-ps.txt -docker logs --since '1h' charon-e2e > reproduce-charon-e2e.log || true -tail -n 500 /tmp/playwright-shard-INDEX.log > reproduce-pw-tail.log -``` - ---- - -## 9) Required workflow/scripts changes to improve diagnostics & prevent recurrence - -- Add `timeout-minutes: 60` to the E2E job in `.github/workflows/<workflow>.yml` (the workflow that runs the Playwright shards) while diagnosing; later set to a reasoned SLA (e.g., 50m). -- Add an `always()` step to collect diagnostics on failure and upload artifacts. Example YAML snippet: - ```yaml - - name: Collect diagnostics - if: always() - run: | - uptime > uptime.txt - free -m > free-m.txt - df -h > df-h.txt - ps aux > ps-aux.txt - docker ps -a > docker-ps.txt || true - docker logs --tail 500 charon-e2e > docker-charon-e2e.log || true - - uses: actions/upload-artifact@v4 - with: - name: e2e-diagnostics-${{ github.run_id }} - path: | - uptime.txt - free-m.txt - df-h.txt - ps-aux.txt - docker-ps.txt - docker-charon-e2e.log - ``` - -- Ensure each Playwright shard runs with `--output` pointing to a shard-specific path and upload that path as artifact: - - artifact name convention: `e2e-shard-${{ matrix.index }}-output`. - ---- - -## 10) People/roles to notify & recommended next actions - -- Notify: - - CI/Infra owner or person in `CODEOWNERS` for `.github/workflows` - - E2E test author(s) (owners of failing tests) - - Self-hosted runner owner (if runner_name in job JSON indicates self-hosted) - -- Recommended immediate actions for them: - 1. Download run artifacts and job logs for run 21865692694 and share them with the test author. - 2. Re-run the shard with `DEBUG=pw:api` and `PWDEBUG=1` enabled and ensure per-shard artifacts are uploaded. - 3. If self-hosted, check runner host kernel logs for OOM and Docker container exits at the job time. - ---- - -## 11) Verification steps (post-remediation) - -1. Re-run E2E workflow end-to-end; verify Shard 3 completes. -2.
Confirm artifacts `e2e-shard-3-output` exist and contain `trace.zip`, `video/*`, and `test-results.json`. -3. Confirm no `oom_reaper` or `Killed` messages in runner host logs during the run. - ---- - -## Appendix — quick extraction commands summary -```bash -# Download all artifacts and logs for RUN_ID -gh run download 21865692694 --repo Wikid82/Charon --dir ./artifacts-21865692694 - -# List jobs and find Playwright shard job(s) -curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/Wikid82/Charon/actions/runs/21865692694/jobs" | jq '.jobs[] | {id: .id, name: .name, runner_name: .runner_name, started_at: .started_at, completed_at: .completed_at}' - -# Download job logs for JOB_ID -curl -H "Authorization: token $GITHUB_TOKEN" -L \ - "https://api.github.com/repos/Wikid82/Charon/actions/jobs/$JOB_ID/logs" -o job-$JOB_ID-logs.zip -unzip -d job-$JOB_ID-logs job-$JOB_ID-logs.zip - -# Grep for likely causes -grep -iE "timeout|minut|runner lost|cancelled|Killed|OOM|oom_reaper|Out of memory|panic|fatal" -R run-21865692694-logs || true -``` - ---- - -## Next three immediate actions (checklist) -1. Run `gh run download 21865692694 --repo Wikid82/Charon --dir ./artifacts-21865692694` and unzip the run logs. -2. Search the downloaded logs for `timeout-minutes`, `Runner lost`, `Killed`, and `oom_reaper` to triage H1–H4. -3. Re-run the failing shard locally with `DEBUG=pw:api PWDEBUG=1` and `--output=/tmp/playwright-shard-INDEX`, capture outputs, and upload them as artifacts. - ---- - -If you want, I can now (A) download the run artifacts & logs for run 21865692694 using gh/API (requires your GITHUB_TOKEN) and list the job IDs, or (B) open the workflow files in `.github/workflows` and search for `timeout-minutes` and Playwright invocations. Which would you like me to do first? 
---- -post_title: "E2E Test Remediation Plan" +post_title: Permissions Integrity Plan author1: "Charon Team" -post_slug: "e2e-test-remediation-plan" -microsoft_alias: "charon-team" -featured_image: "https://wikid82.github.io/charon/assets/images/featured/charon.png" -categories: ["testing"] -tags: ["playwright", "e2e", "remediation", "security"] -ai_note: "true" -summary: "Phased remediation plan for Charon Playwright E2E tests, covering - inventory, dependencies, runtime estimates, and quick start commands." -post_date: "2026-01-28" +post_slug: permissions-integrity-plan-non-root +microsoft_alias: "charon" +featured_image: >- + https://wikid82.github.io/charon/assets/images/featured/charon.png +categories: + - security +tags: + - permissions + - non-root + - diagnostics + - settings +summary: "Plan to harden non-root permissions, add diagnostics, and align + saves." +post_date: "2026-02-11" --- -## 1. Introduction +## Permissions Integrity Plan — Non-Root Containers, Notifications, Saves,
+and Dropdown State -This plan replaces the current spec with a comprehensive, phased remediation -strategy for the Playwright E2E test suite under [tests](tests). The goal is to -stabilize execution, align dependencies, and sequence remediation work so that -core management flows, security controls, and integration workflows become -reliable in Docker-based E2E runs. +Last updated: 2026-02-11 -## 2. Research Findings +## 1) Introduction -### 2.1 Test Harness and Global Dependencies +Running Charon as a non-root container should feel like a locked garden gate: +secure, predictable, and fully functional. Today, permission mismatches on +mounted volumes can silently corrode core features—notifications, settings +saves, and dropdown selections—because persistence depends on writing to +`/app/data`, `/config`, and related paths. This plan focuses on a full, precise +remediation: map every write path, instrument permissions, tighten error +handling, and make the UI reveal permission failures in plain terms. -- Global setup and teardown are enforced by - [tests/global-setup.ts](tests/global-setup.ts), - [tests/auth.setup.ts](tests/auth.setup.ts), and - [tests/security-teardown.setup.ts](tests/security-teardown.setup.ts). -- Global setup validates the emergency token, checks health endpoints, and - resets security settings, which impacts all security-enforcement suites. -- Multiple suites depend on the emergency server (port 2020) and Cerberus - modules with explicit admin whitelist configuration. +**Objectives** +- Ensure all persistent paths are writable for non-root execution without + weakening security. +- Make permission errors visible and actionable from API to UI. +- Reduce multi-request settings saves to avoid partial writes and improve + reliability. +- Align notification settings fields between frontend and backend. +- Provide a clear path for operators to set correct volume ownership. 
-### 2.2 Test Inventory and Feature Areas +## Handoff Contract -- Core management flows: authentication, navigation, dashboard, proxy hosts, - certificates, access lists in [tests/core](tests/core). -- DNS providers and ACME workflows: [tests/dns-provider-crud.spec.ts] - (tests/dns-provider-crud.spec.ts), - [tests/dns-provider-types.spec.ts](tests/dns-provider-types.spec.ts), - [tests/manual-dns-provider.spec.ts](tests/manual-dns-provider.spec.ts). -- Monitoring: uptime and log streaming in - [tests/monitoring](tests/monitoring). -- Settings: system, account, SMTP, notifications, encryption, user management - in [tests/settings](tests/settings). -- Tasks and imports: backups, Caddyfile import flows, CrowdSec import, and log - viewing in [tests/tasks](tests/tasks). -- Security UI: dashboard, WAF, CrowdSec, headers, rate limiting, and audit logs - in [tests/security](tests/security). -- Security enforcement: ACL, WAF, rate limits, CrowdSec, emergency token, and - break-glass recovery in [tests/security-enforcement](tests/security-enforcement). -- Integration workflows: cross-feature scenarios in - [tests/integration](tests/integration). -- Browser-specific regressions for import flows in - [tests/webkit-specific](tests/webkit-specific) and - [tests/firefox-specific](tests/firefox-specific). -- Debug and diagnostics: certificates and Caddy import debug coverage in - [tests/debug/certificates-debug.spec.ts](tests/debug/certificates-debug.spec.ts), - [tests/tasks/caddy-import-gaps.spec.ts](tests/tasks/caddy-import-gaps.spec.ts), - [tests/tasks/caddy-import-cross-browser.spec.ts](tests/tasks/caddy-import-cross-browser.spec.ts), - and [tests/debug](tests/debug). -- UI triage and regression coverage: dropdown/modal coverage in - [tests/modal-dropdown-triage.spec.ts](tests/modal-dropdown-triage.spec.ts) and - [tests/proxy-host-dropdown-fix.spec.ts](tests/proxy-host-dropdown-fix.spec.ts). 
-- Shared utilities validation: wait helpers in - [tests/utils/wait-helpers.spec.ts](tests/utils/wait-helpers.spec.ts). +Use this contract to brief implementation and QA. All paths and schemas must +match the plan. -### 2.3 Dependency and Ordering Constraints - -- The security-enforcement suite assumes Cerberus can be toggled on, and its - final tests intentionally restore admin whitelist state - (see [tests/security-enforcement/zzzz-break-glass-recovery.spec.ts] - (tests/security-enforcement/zzzz-break-glass-recovery.spec.ts)). -- Admin whitelist blocking is designed to run last using a zzz prefix - (see [tests/security-enforcement/zzz-admin-whitelist-blocking.spec.ts] - (tests/security-enforcement/zzz-admin-whitelist-blocking.spec.ts)). -- Emergency server tests depend on port 2020 availability - (see [tests/security-enforcement/emergency-server](tests/security-enforcement/emergency-server)). -- Some import suites use real APIs and TestDataManager cleanup; others mock - requests. Remediation must avoid mixing mocked and real flows in a single - phase without clear isolation. - -### 2.4 Runtime and Flake Hotspots - -- Security-enforcement suites include extended retries, network propagation - delays, and rate limit loops. -- Import debug and gap-coverage suites perform real uploads, data creation, and - commit flows, making them sensitive to backend state and Caddy reload timing. -- Monitoring WebSocket tests require stable log streaming state. - -## 3. Technical Specifications - -### 3.1 Test Grouping and Shards - -- **Foundation:** global setup, auth storage state, security teardown. -- **Core UI:** authentication, navigation, dashboard, proxy hosts, certificates, - access lists. -- **Settings:** system, account, SMTP, notifications, encryption, users. -- **Tasks:** backups, logs, Caddyfile import, CrowdSec import. -- **Monitoring:** uptime monitoring and real-time logs. 
-- **Security UI:** Cerberus dashboard, WAF config, headers, rate limiting, - CrowdSec config, audit logs. -- **Security Enforcement:** ACL/WAF/CrowdSec/rate limit enforcement, emergency - token and break-glass recovery, admin whitelist blocking. -- **Integration:** proxy + cert, proxy + DNS, backup restore, import workflows, - multi-feature workflows. -- **Browser-specific:** WebKit and Firefox import regressions. -- **Debug/POC:** diagnostics and investigation suites (Caddy import debug). - -### 3.2 Dependency Graph (High-Level) - -```mermaid -flowchart TD - A[global-setup + auth.setup] --> B[Core UI + Settings] - A --> C[Tasks + Monitoring] - A --> D[Security UI] - D --> E[Security Enforcement] - E --> F[Break-Glass Recovery] - B --> G[Integration Workflows] - C --> G - G --> H[Browser-specific Suites] +```json +{ + "endpoints": { + "GET /api/v1/system/permissions": { + "response_schema": { + "paths": [ + { + "path": "/app/data", + "required": "rwx", + "writable": false, + "owner_uid": 1000, + "owner_gid": 1000, + "mode": "0755", + "error": "permission denied", + "error_code": "permissions_write_denied" + } + ] + } + }, + "POST /api/v1/system/permissions/repair": { + "request_schema": { + "paths": ["/app/data", "/config"], + "group_mode": false + }, + "response_schema": { + "paths": [ + { + "path": "/app/data", + "status": "repaired", + "owner_uid": 1000, + "owner_gid": 1000, + "mode_before": "0755", + "mode_after": "0700", + "message": "ownership and mode updated" + }, + { + "path": "/config", + "status": "error", + "error_code": "permissions_readonly", + "message": "read-only filesystem" + } + ] + } + } + } +} ``` -### 3.3 Runtime Estimates (Docker Mode) +## 2) Research Findings -| Group | Suite Examples | Expected Runtime | Prerequisites | +### 2.1 Runtime Permissions and Startup Flow + +- Container entrypoint: + [.docker/docker-entrypoint.sh](.docker/docker-entrypoint.sh) + - `is_root()` and `run_as_charon()` drop privileges using `gosu`. 
+ - Warns if `/app/data` or `/config` is not writable; does not repair unless + root. + - Creates `/app/data/caddy`, `/app/data/crowdsec`, `/app/data/geoip` and + `chown` only when root. + - If the container is started with `--user` (non-root), it cannot `chown` or + repair volume permissions. + +- Docker runtime image: [Dockerfile](Dockerfile) + - Creates `charon` user (`uid=1000`, `gid=1000`) and sets ownership of `/app`, + `/config`, `/var/log/crowdsec`, `/var/log/caddy`. + - Entry point starts as root, then drops privileges; this is good for dynamic + socket group handling but still depends on host volume ownership. + - Default environment points DB and data to `/app/data`. + +- Compose volumes: + [.docker/compose/docker-compose.yml](.docker/compose/docker-compose.yml) + - `cpm_data:/app/data` and `caddy_config:/config` are mounted without a user + override. + - `plugins_data:/app/plugins:ro` is read-only, so plugin operations should + never require writes there. + +### 2.2 Persistent Writes and Vulnerable Paths + +- Backend config creates directories with restrictive permissions (0700): + - [backend/internal/config/config.go](backend/internal/config/config.go) + - `Load()` calls `os.MkdirAll(filepath.Dir(cfg.DatabasePath), 0o700)` + - `os.MkdirAll(cfg.CaddyConfigDir, 0o700)` + - `os.MkdirAll(cfg.ImportDir, 0o700)` + - If `/app/data` is owned by root and container runs as non-root, startup can + fail or later writes can silently fail. + +- Database writes: + [backend/internal/database/database.go](backend/internal/database/database.go) + - SQLite file at `CHARON_DB_PATH` (default `/app/data/charon.db`). + - Read-only DB or directory permission failures block settings, notifications, + and any save flows. + +- Backups (writes to `/app/data/backups`): + - + [backend/internal/services/backup_service.go][backup-service-go] + - Uses `os.MkdirAll(backupDir, 0o700)` and `os.Create()` for ZIPs. 
+ +- Import workflows write under `/app/data/imports`: + - + [backend/internal/api/handlers/import_handler.go][import-handler-go] + - Writes to `imports/uploads/` using `os.MkdirAll(..., 0o755)` and + `os.WriteFile(..., 0o644)`. + +### 2.3 Notifications and Settings Persistence + +- Notification providers and templates are stored in DB: + - + [backend/internal/services/notification_service.go][notification-service-go] + - + [backend/internal/api/handlers/
+ notification_handler.go][notification-handler-go] + - UI: + [frontend/src/pages/Notifications.tsx][notifications-page-tsx] + +- Security notification settings are stored in DB: + - Backend: + [backend/internal/services/
+ security_notification_service.go][security-notification-service-go] + - Handler: + [backend/internal/api/handlers/
+ security_notifications.go][security-notifications-handler-go] + - UI modal: + [frontend/src/components/
+ SecurityNotificationSettingsModal.tsx][security-notification-modal-tsx] + +**Field mismatch discovered:** +- Frontend expects `notify_rate_limit_hits` and `email_recipients` and also + offers `min_log_level = fatal`. +- Backend model + [backend/internal/models/notification_config.go][notification-config-go] + only includes: + - `Enabled`, `MinLogLevel`, `NotifyWAFBlocks`, `NotifyACLDenies`, + `WebhookURL`. + - Handler validation allows `debug|info|warn|error` only. This mismatch can + cause failed saves or silent drops, and it is adjacent to permissions issues + because a permissions error amplifies the confusion. + +### 2.4 Settings and Dropdown Persistence + +- System settings save path: + - UI: + [frontend/src/pages/SystemSettings.tsx][system-settings-tsx] + - API client: [frontend/src/api/settings.ts](frontend/src/api/settings.ts) + - Handler: + [backend/internal/api/handlers/settings_handler.go][settings-handler-go] + - Current UI saves multiple settings via multiple requests; a write failure + mid-way can lead to partial persistence. + +- SMTP settings save path: + - UI: + [frontend/src/pages/SMTPSettings.tsx][smtp-settings-tsx] + - Backend: + [backend/internal/services/mail_service.go][mail-service-go] + +- Dropdowns use Radix Select: + - Component: + [frontend/src/components/ui/Select.tsx][select-component-tsx] + - If API writes fail, the UI state can appear to “stick” until a reload resets + it. + +### 2.5 Initial Hygiene Review + +- [.gitignore](.gitignore), [.dockerignore](.dockerignore), + [codecov.yml](codecov.yml) currently do not require changes for permissions + work. +- Dockerfile may require optional enhancements to accommodate PUID/PGID or a + dedicated permissions check, but no mandatory change is confirmed yet. 
+ +## 3) Technical Specifications + +### 3.1 Data Paths, Ownership, and Required Access + +| Path | Purpose | Required Access | Notes | | --- | --- | --- | --- | -| Foundation | global setup + auth | 1-2 min | Docker E2E container, emergency token | -| Core UI | core specs | 6-10 min | Auth storage state, clean data | -| Settings | settings specs | 6-10 min | Auth storage state | -| Tasks | backups/import/logs | 10-16 min | Auth storage state, API mocks and real flows | -| Monitoring | monitoring specs | 5-8 min | WebSocket stability | -| Security UI | security specs | 10-14 min | Cerberus enabled, admin whitelist | -| Security Enforcement | enforcement specs | 15-25 min | Emergency token, port 2020, admin whitelist | -| Integration | integration specs | 12-20 min | Stable core + settings + tasks | -| Browser-specific | firefox/webkit | 8-12 min | Import baseline stable | -| Debug/POC | caddy import debug | 4-6 min | Docker logs available | +| `/app/data` | Primary data root | rwx | Note A | +| `/app/data/charon.db` | SQLite DB | rw | DB and parent dir must be writable | +| `/app/data/backups` | Backup ZIPs | rwx | Created by backup service | +| `/app/data/imports` | Import uploads | rwx | Used by import handler | +| `/app/data/caddy` | Caddy state | rwx | Caddy writes certs and data | +| `/app/data/crowdsec` | CrowdSec persistent config | rwx | Note B | +| `/app/data/geoip` | GeoIP database | rwx | MaxMind GeoIP DB storage | +| `/config` | Caddy config | rwx | Managed by Caddy | +| `/var/log/caddy` | Caddy logs | rwx | Writable when file logging enabled | +| `/var/log/crowdsec` | CrowdSec logs | rwx | Local bouncer and agent logs | +| `/app/plugins` | Plugins | r-x | Should not be writable in production | -Assumed worker count: 4 (default) except security-enforcement which requires -`--workers=1`. Serial execution increases runtime for enforcement suites. +Notes: +- Note A: Must be owned by runtime user or group-writable. +- Note B: Entry point chown when root. 
-### 3.4 Environment Preconditions +### 3.2 Permission Readiness Diagnostics -- E2E container built and healthy via - `.github/skills/scripts/skill-runner.sh docker-rebuild-e2e`. -- Ports 8080 (UI/API) and 2020 (emergency server) reachable. -- `CHARON_EMERGENCY_TOKEN` configured and valid. -- Admin whitelist includes test runner ranges when Cerberus is enabled. -- Caddy admin health endpoints reachable for import workflows. +**Goal:** Provide definitive, machine-readable permission diagnostics for UI and +logs. -### 3.5 Emergency Server and Security Prerequisites +**Proposed API** -- Port 2020 (emergency server) available and reachable for - [tests/security-enforcement/emergency-server](tests/security-enforcement/emergency-server). -- Port 2019 is reserved for the Caddy admin API; use 2020 for emergency server - tests to avoid conflicts. -- Basic Auth credentials required for emergency server tests. Defaults in test - fixtures are `admin` / `changeme` and should match the E2E compose config. -- Admin whitelist bypass must be configured before enforcement tests that - toggle Cerberus settings. +- `GET /api/v1/system/permissions` + - Returns a list of paths, expected access, current uid/gid ownership, mode + bits, writeability, and a stable `error_code` when a check fails. + - Example response schema: + ```json + { + "paths": [ + { + "path": "/app/data", + "required": "rwx", + "writable": false, + "owner_uid": 1000, + "owner_gid": 1000, + "mode": "0755", + "error": "permission denied", + "error_code": "permissions_write_denied" + } + ] + } + ``` -## 4. Implementation Plan +**Writable determination (explicit, non-destructive):** +- For each path, perform `os.Stat` to capture owner/mode and to confirm the + path exists. +- If the `required` access does not include `w` (for example `r-x`), skip any + writeability probe, do not set `error_code`, and optionally set + `status=expected_readonly` to clarify that non-writable is expected. 
+- If the path is a directory, attempt a non-destructive writeability probe by + creating a temp file in the directory (`os.CreateTemp`) and then immediately + removing it. +- If the path is a file, attempt to open it with write permissions + (`os.OpenFile` with `os.O_WRONLY` or `os.O_RDWR`) without truncation and close + immediately. +- Do not modify file contents or truncate; no destructive writes are allowed. +- If any step fails, set `writable=false` and return a stable `error_code`. -### Phase 1: Foundation and Test Harness Reliability +**Error code coverage (explicit):** +- The `error_code` field SHALL be returned by diagnostics responses for both + `GET /api/v1/system/permissions` and `POST /api/v1/system/permissions/repair` + whenever a per-path check fails. +- For a `GET` diagnostics entry that is healthy, omit `error_code` and `error`. +- Diagnostics error mapping MUST distinguish read-only vs permission denied: + - `EROFS` -> `permissions_readonly` + - `EACCES` -> `permissions_write_denied` -Objective: Ensure the shared test harness is stable before touching feature -flows. +- `POST /api/v1/system/permissions/repair` (optional) + - Only enabled when process is root. + - Attempts to `chown` and `chmod` only for known safe paths. + - Returns a per-path remediation report. + - **Request schema (explicit):** + ```json + { + "paths": ["/app/data", "/config"], + "group_mode": false + } + ``` + - **Response schema (explicit):** + ```json + { + "paths": [ + { + "path": "/app/data", + "status": "repaired", + "owner_uid": 1000, + "owner_gid": 1000, + "mode_before": "0755", + "mode_after": "0700", + "message": "ownership and mode updated" + }, + { + "path": "/config", + "status": "error", + "error_code": "permissions_readonly", + "message": "read-only filesystem" + } + ] + } + ``` + - **Target ownership and mode rules (explicit):** + - Use runtime UID/GID (effective process UID/GID at time of request). 
+ - Directory mode: `0700` by default; `0770` when `group_mode=true`. + - File mode: `0600` by default; `0660` when `group_mode=true`. + - `group_mode` applies to all provided paths; per-path overrides are not + supported in this plan. + - **Per-path behavior and responses (explicit):** + - For each path in `paths`, validate and act independently. + - If a path is missing, return `status=error` with + `error_code=permissions_missing_path` and do not create it. + - If a path resolves to a directory, apply directory mode rules and + ownership updates. + - If a path resolves to a file, apply file mode rules and ownership + updates. + - If a path resolves to neither a file nor directory, return + `status=error` with `error_code=permissions_unsupported_type`. + - If a path is already correct, return `status=skipped` with a + `message` indicating no change. + - If any mutation fails (read-only FS, permission denied), return + `status=error` and include a stable `error_code`. + - **Allowlist + Symlink Safety:** + - **Allowlist roots (hard-coded, immutable):** + - `/app/data` + - `/config` + - `/var/log/caddy` + - `/var/log/crowdsec` + - Only allow subpaths that remain within these roots after + `filepath.Clean` and `filepath.EvalSymlinks` checks. + - Resolve each requested path with `filepath.EvalSymlinks` and reject any + that resolve outside the allowlist roots. + - Use `os.Lstat` to detect and reject symlinks before any mutation. + - Use no-follow semantics for any filesystem operations (reject if any path + component is a symlink). + - If a path is missing, return a per-path error instead of creating it. + - **Path Normalization (explicit):** + - Only accept absolute paths and reject relative inputs. + - Normalize with `filepath.Clean` before validation. + - Reject any path that resolves to `.` or contains `..` after normalization. + - Reject any request where normalization would change the intended path + outside the allowlist roots. 
-- Validate global setup and storage state creation - (see [tests/global-setup.ts](tests/global-setup.ts) and - [tests/auth.setup.ts](tests/auth.setup.ts)). -- Confirm emergency server availability and credentials for break-glass suites. -- Establish baseline run for core login/navigation suites. +**Scope:** +- Diagnostics SHALL include all persistent write paths listed in section 3.1, + including `/app/data/geoip`, `/var/log/caddy`, and `/var/log/crowdsec`. +- Any additional persistent write paths referenced elsewhere in this plan SHALL + be included in diagnostics as they are added. +- Diagnostics SHALL include `/app/plugins` as a read-only check with + `required: r-x`. A non-writable result for `/app/plugins` is expected and + MUST NOT be treated as a failure condition; skip the write probe and do not + include an `error_code`. -Estimated runtime: 2-4 minutes +**Backend placement:** +- New handler in `backend/internal/api/handlers/system_permissions_handler.go`. +- Utility in `backend/internal/util/permissions.go` for POSIX stat + access + checks. -Success criteria: +### 3.3 Access Control and Path Exposure -- Storage state created once and reused without re-auth flake. -- Emergency token validation passes and security reset executes. +**Goal:** Ensure diagnostics are admin-only and paths are not exposed to non- +admins. -### Phase 2: Core UI, Settings, Monitoring, and Task Flows +- `GET /api/v1/system/permissions` and `POST /api/v1/system/permissions/repair` + must be admin-only. +- Non-admin requests SHALL return `403` with a stable error code + `permissions_admin_only`. +- Full filesystem paths SHALL only be included for admins; non-admin errors must + omit or redact path details. -Objective: Remediate the highest-traffic user journeys and tasks. 
+**Redaction and authorization strategy (explicit):** +- Admin enforcement happens in the handler layer using the existing admin guard + middleware; handlers SHALL read the admin flag from request context and fail + closed if the flag is missing. +- Redaction happens in the error response builder at the handler boundary before + JSON serialization. Services return a structured error with optional `path` + and `detail` fields; the handler removes `path` and sensitive filesystem hints + for non-admins and replaces help text with a generic remediation message. +- The redaction decision SHALL not rely on client-provided hints; it must only + use server-side auth context. -- Core UI: authentication, navigation, dashboard, proxy hosts, certificates, - access lists (core CRUD and navigation). -- Settings: system, account, SMTP, notifications, encryption, users. -- Monitoring: uptime and real-time logs. -- Tasks: backups, logs viewing, and base Caddyfile import flows. -- Include modal/dropdown triage coverage and wait helpers validation. +**Non-admin response schema (redacted, brief):** +- Diagnostics (non-admin, 403): + ```json + { + "error": "admin privileges required", + "error_code": "permissions_admin_only" + } + ``` +- Repair (non-admin, 403): + ```json + { + "error": "admin privileges required", + "error_code": "permissions_admin_only" + } + ``` -Estimated runtime: 25-40 minutes +**Save endpoint access (admin-only):** +- Settings and configuration save endpoints SHALL remain admin-only where + applicable (e.g., system settings, SMTP settings, notification + providers/templates, security notification settings, imports, and backups). +- If any save endpoint is currently not admin-gated, the implementation MUST add + admin-only checks or explicitly document the exception in this plan before + implementation. -Success criteria: +#### 3.3.1 Admin-Gated Save Endpoints Checklist -- Core CRUD and navigation pass without retries. 
-- Monitoring WebSocket tests pass without timeouts. -- Backups and log viewing flows pass with mocks and deterministic waits. +For each endpoint below, confirm the current state and enforce admin-only access +unless explicitly documented as public. -### Phase 3: Security UI and Enforcement +- System settings save + - Current: Verify admin guard is enforced in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- SMTP settings save + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- Notification providers save/update/delete + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- Notification templates save/update/delete + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- Security notification settings save + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- Import create/upload + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. +- Backup create/restore + - Current: Verify admin guard in handler and service. + - Target: Admin-only with `403` and stable error code on failure. + - Verify: API call as non-admin returns `403` without write. -Objective: Stabilize Cerberus UI configuration and enforcement workflows. +### 3.4 Permission-Aware Error Mapping -- Security dashboard and configuration pages. 
-- WAF, headers, rate limiting, CrowdSec, audit logs.
-- Enforcement suites, including emergency token and whitelist blocking order.

+**Goal:** When a save fails, the user sees “why.”

-Estimated runtime: 30-45 minutes

+- Identify key persistence actions and wrap errors with permission hints:
+  - Settings saves: `SettingsHandler.UpdateSetting()` and `PatchConfig()`.
+  - SMTP saves: `MailService.SaveSMTPConfig()`.
+  - Notification providers/templates: `NotificationService.CreateProvider()`,
+    `UpdateProvider()`, `CreateTemplate()`, `UpdateTemplate()`.
+  - Security notification settings:
+    `SecurityNotificationService.UpdateSettings()`.
+  - Backup creation: `BackupService.CreateBackup()`.
+  - Import uploads: `ImportHandler.Upload()` and `UploadMulti()`.

-Success criteria:

+**Error behavior:**
+- If error is permission-related (`os.IsPermission`, SQLite read-only), return a
+  500 with a standard payload:
+  - `error`: short message
+  - `help`: actionable guidance using runtime UID/GID (e.g.,
+    `chown -R <uid>:<gid> /path/to/volume`)
+  - `path`: affected path (admin-only; omit or redact for non-admins)
+  - `code`: stable error code (required for permission-related save failures;
+    e.g., `permissions_write_failed`)

-- Security UI toggles and pages load without state leakage.
-- Enforcement suites pass with Cerberus enabled and whitelist configured.
-- Break-glass recovery restores bypass state for subsequent suites.

+**Audit logging:**
+- Log all diagnostics reads and repair attempts as audit events, including
+  requestor identity, admin flag, and outcome.
+- Log permission-related save failures (settings, notifications, imports,
+  backups, SMTP) as audit events with error codes and redacted path details for
+  non-admin contexts.
-### Phase 4: Integration, Browser-Specific, and Debug Suites +**SQLite read-only detection (explicit):** +- Map SQLite read-only failures by driver code when available (e.g., + `SQLITE_READONLY` and extended codes such as `SQLITE_READONLY_DB`, + `SQLITE_READONLY_DIRECTORY`). +- Also detect string-based error messages to cover driver variations (e.g., + `attempt to write a readonly database`, `readonly database`, `read-only + database`). +- If driver codes are unavailable, fall back to message matching + + `os.IsPermission` to produce the same standard payload. -Objective: Close cross-feature and browser-specific regressions. +### 3.4.1 Canonical Error-Code Catalog (Diagnostics + Repair + Save Failures) -- Integration workflows: proxy + cert, proxy + DNS, backup restore, import to - production, multi-feature workflows. -- Browser-specific Caddy import regressions (Firefox/WebKit). -- Debug/POC suites (Caddy import debug, diagnostics) run as opt-in, - including caddy-import-gaps and cross-browser import coverage. +**Goal:** Provide a single source of truth for error codes used by diagnostics, +repair, and persistence failures. All responses MUST use values from this +catalog. -Estimated runtime: 25-40 minutes +**Scope:** +- Diagnostics: `GET /api/v1/system/permissions` +- Repair: `POST /api/v1/system/permissions/repair` +- Save failures: settings, SMTP, notifications, security notifications, + imports, backups -Success criteria: +| Error Code | Scope | Meaning | +| --- | --- | --- | +| `permissions_admin_only` | Diagnostics/Repair/Save | Note 1 | +| `permissions_non_root` | Repair | Note 2 | +| `permissions_repair_disabled` | Repair | Note 3 | +| `permissions_missing_path` | Diagnostics/Repair | Path does not exist. | +| `permissions_unsupported_type` | Diagnostics/Repair | Note 4 | +| `permissions_outside_allowlist` | Repair | Note 5 | +| `permissions_symlink_rejected` | Repair | Path or a component is a symlink. 
| +| `permissions_invalid_path` | Diagnostics/Repair | Note 6 | +| `permissions_readonly` | Diagnostics/Repair/Save | Filesystem is read-only. | +| `permissions_write_denied` | Diagnostics/Save | Note 7 | +| `permissions_write_failed` | Save | Note 8 | +| `permissions_db_readonly` | Save | Note 9 | +| `permissions_db_locked` | Save | Note 10 | +| `permissions_repair_failed` | Repair | Note 11 | +| `permissions_repair_skipped` | Repair | No changes required for the path. | -- Integration workflows pass with stable TestDataManager cleanup. -- Browser-specific import tests show consistent API request handling. -- Debug suites remain optional and do not block core pipelines. +Notes: +- Note 1: Request requires admin privileges. +- Note 2: Repair endpoint invoked without root privileges. +- Note 3: Repair endpoint disabled because single-container mode is false. +- Note 4: Path is not a file or directory. +- Note 5: Path resolves outside allowlist roots. +- Note 6: Path is relative, normalizes to `.`/`..`, or fails validation. +- Note 7: Write probe or write operation denied. +- Note 8: Write operation failed for another permission-related reason. +- Note 9: SQLite database or directory is read-only. +- Note 10: SQLite database locked; treat as transient write failure. +- Note 11: Repair attempted but failed (non-permission errors). -## 5. Acceptance Criteria (EARS) +**Mapping rules (explicit):** +- Diagnostics uses `permissions_missing_path`, `permissions_write_denied`, + `permissions_readonly`, `permissions_invalid_path`, + `permissions_unsupported_type` as appropriate. +- Repair uses `permissions_admin_only`, `permissions_non_root`, or + `permissions_repair_disabled` when blocked, and otherwise maps to the per-path + codes above. +- Save failures use `permissions_db_readonly` when SQLite read-only is + detected; otherwise use `permissions_write_denied` or + `permissions_write_failed` depending on `os.IsPermission` and error context. 
+- Save failures SHALL always include an error code from this catalog. -- WHEN the E2E harness initializes, THE SYSTEM SHALL validate emergency token - and create a reusable auth state without flake. -- WHEN core management tests execute, THE SYSTEM SHALL complete CRUD flows - without manual retries or timeouts. -- WHEN security enforcement suites execute, THE SYSTEM SHALL apply Cerberus - settings with admin whitelist bypass and SHALL restore security state after - completion. -- WHEN integration workflows execute, THE SYSTEM SHALL complete cross-feature - journeys without data collisions or residual state. +### 3.5 Notification Settings Model Alignment -## 6. Quick Start Commands +**Goal:** Align UI fields with backend persistence. -```bash -# Rebuild and start E2E container -.github/skills/scripts/skill-runner.sh docker-rebuild-e2e +- Update + [backend/internal/models/notification_config.go][notification-config-go] + to include: + - `NotifyRateLimitHits bool` + - `EmailRecipients string` +- Update handler validation in + [backend/internal/api/handlers/
+ security_notifications.go][security-notifications-handler-go]: + - Keep backend validation to `debug|info|warn|error`. +- Update UI log level options to remove `fatal` and match backend validation. +- Update `SecurityNotificationService.GetSettings()` default struct to include + new fields. -# PHASE 1: Foundation -cd /projects/Charon -npx playwright test tests/global-setup.ts tests/auth.setup.ts --project=firefox +**EmailRecipients data format (explicit):** +- Input accepts a comma-separated list of email addresses. +- Split on `,`, trim whitespace for each entry, and drop empty values. +- Validate each email using existing backend validation rules. +- Store a normalized, comma-separated string joined with `, `. +- If validation fails, return a single error listing invalid entries. -# PHASE 2: Core UI, Settings, Tasks, Monitoring -# NOTE: PLAYWRIGHT_SKIP_SECURITY_DEPS=1 is automatically set in E2E scripts -# Security suites will NOT execute as dependencies -npx playwright test tests/core --project=firefox -npx playwright test tests/settings --project=firefox -npx playwright test tests/tasks --project=firefox -npx playwright test tests/monitoring --project=firefox +**Validation and UX notes:** +- UI helper text: "Use comma-separated emails, e.g. admin@example.com, + ops@example.com". +- Inline error highlights the invalid address(es) and does not save. +- Empty input is treated as "no recipients" and stored as an empty string. +- The UI must preserve the normalized format returned by the API. 
-# PHASE 3: Security UI and Enforcement (SERIAL) -npx playwright test tests/security --project=firefox -npx playwright test tests/security-enforcement --project=firefox --workers=1 +### 3.6 Reduce Settings Write Requests -# PHASE 4: Integration, Browser-Specific, Debug (Optional) -npx playwright test tests/integration --project=firefox -npx playwright test tests/firefox-specific --project=firefox -npx playwright test tests/webkit-specific --project=webkit -npx playwright test tests/debug --project=firefox -npx playwright test tests/tasks/caddy-import-gaps.spec.ts --project=firefox -``` +**Goal:** Fewer requests, fewer partial failures. -## 7. Risks and Mitigations +- Reuse existing `PATCH /api/v1/config` in + [backend/internal/api/handlers/settings_handler.go][settings-handler-go]. +- PATCH updates MUST be transactional and all-or-nothing. If any field update + fails (validation, DB write, or permission), the transaction must roll back + and the API must return a single failure response. +- Update + [frontend/src/pages/SystemSettings.tsx](frontend/src/pages/SystemSettings.tsx) + to send one patch request for all fields. +- Add failure-mode UI message that references permission diagnostics if present. -- Risk: Security suite state leaks across tests. Mitigation: enforce admin - whitelist reset and break-glass recovery ordering. -- Risk: File-name ordering (zzz-) not enforced without `--workers=1`. - Mitigation: document `--workers=1` requirement and make it mandatory in - CI and quick-start commands. -- Risk: Emergency server unavailable. Mitigation: gate enforcement suites on - health checks and document port 2020 requirements. -- Risk: Import suites combine mocked and real flows. Mitigation: isolate by - phase and keep debug suites opt-in. -- Risk: Missing test suites hide regressions. Mitigation: inventory now - includes all suites and maps them to phases. +### 3.7 UX Guidance for Non-Root Deployments -## 8. 
Dependencies and Impacted Files

+- Add a settings banner or toast when permissions fail, pointing to:
+  - `docker run` or `docker compose` examples
+  - `chown -R <uid>:<gid> /path/to/volume` using values from
+    diagnostics or configured `--user` / `CHARON_UID` / `CHARON_GID`
+  - Optionally `--user <uid>:<gid>` or PUID/PGID env if added

-- Harness: [tests/global-setup.ts](tests/global-setup.ts),
-  [tests/auth.setup.ts](tests/auth.setup.ts),
-  [tests/security-teardown.setup.ts](tests/security-teardown.setup.ts).
-- Core UI: [tests/core](tests/core).
-- Settings: [tests/settings](tests/settings).
-- Tasks: [tests/tasks](tests/tasks).
-- Monitoring: [tests/monitoring](tests/monitoring).
-- Security UI: [tests/security](tests/security).
-- Security enforcement: [tests/security-enforcement](tests/security-enforcement).
-- Integration: [tests/integration](tests/integration).
-- Browser-specific: [tests/firefox-specific](tests/firefox-specific),
-  [tests/webkit-specific](tests/webkit-specific).

+### 3.8 PUID/PGID and --user Behavior

-## 9. Confidence Score

+- If the container is started with `--user`, the entrypoint cannot `chown`
+  mounted volumes.
+- When `--user` is set, `CHARON_UID`/`CHARON_GID` (and any PUID/PGID
+  equivalents) SHALL be treated as no-ops and only used for logging.
+- Documentation must instruct operators to pre-create and `chown` host volumes
+  to the runtime UID/GID when using `--user`, based on the diagnostics-reported
+  UID/GID or the configured runtime values.

-Confidence: 79 percent

+**Directory permission modes (0700 vs group-writable):**
+- Default directory mode remains `0700` for single-user deployments.
+- When PUID/PGID or supplemental group access is used, directories MAY be
+  created as `0770` (or `0750` if group write is not required).
+- If group-writable directories are used, ensure the runtime user is in the
+  owning group and document the expected umask behavior.

-Rationale: The suite inventory and dependencies are well understood.
The main -unknowns are timing-sensitive security propagation and emergency server -availability in varied environments. +### 3.9 Risk Register and Mitigations -## Review Feedback & Required Additions +- **Risk:** Repair endpoint could be abused in a multi-tenant environment. + - **Mitigation:** Only enabled in single-container mode; root-only; allowlist + paths. +- **Risk:** Adding fields to NotificationConfig might break existing migrations. + - **Mitigation:** Use GORM AutoMigrate and default values. +- **Risk:** UI still masks failures due to optimistic updates. + - **Mitigation:** Ensure all mutations handle error states and show help text. -Summary: the spec is thorough and well-structured but is missing several concrete -forensic and reproduction details needed to reliably diagnose shard timeouts -and to make CI-side fixes repeatable. The items below add those missing -artifacts, commands, and prioritized mitigations. +### 3.9.1 Single-Container Mode Detection and Enforcement -1) Test-forensics (how to analyze Playwright traces & map failing tests to shards) -- Extract and open traces per-shard: unzip the artifact and run: - ```bash - unzip e2e-shard--output/trace.zip -d /tmp/trace-INDEX - npx playwright show-trace /tmp/trace-INDEX - ``` -- Use JSON reporter to map test IDs to trace files and timestamps: - ```bash - # run locally to produce a reporter JSON for the shard - npx playwright test --shard=INDEX/TOTAL --project=chromium --reporter=json --output=/tmp/playwright-shard-INDEX --trace=on > /tmp/playwright-shard-INDEX.json - jq '.suites[].specs[]?.tests[] | {title: .title, file: .location.file, line: .location.line, duration: .duration, annotations: .annotations}' /tmp/playwright-shard-INDEX.json - ``` -- Correlate test start/stop timestamps (from reporter JSON) with job logs and container logs to find the precise point where execution stopped. 
-- If only one test is hanging, use `--grep` or `--file` to re-run that test with `--trace=on --debug=pw:api` and capture trace and stdout. +**Goal:** Ensure repair operations are only enabled in single-container mode +and the system can deterministically report whether this mode is active. -2) CI / Workflow checks (where to inspect timeouts and cancellation causes) -- Inspect `.github/workflows/*.yml` for both top-level `timeout-minutes:` and job-level `jobs..timeout-minutes`. - ```bash - grep -n "timeout-minutes" .github/workflows -R || true - ``` -- From the run/job JSON (API) check `status` and `conclusion` fields and `cancelled_by` / `cancelled_at` times: - ```bash - curl -H "Authorization: token $GITHUB_TOKEN" \ - "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/actions/jobs/$JOB_ID" | jq '.' - ``` -- Search job logs for runner messages indicating preemption, OOM, or cancellation: - ```bash - grep -iE "Job canceled|cancelled|runner lost|Runner|Killed|OOM|oom_reaper|Timeout" -R job-$JOB_ID-logs || true - ``` -- Confirm whether the runner was `self-hosted` (job JSON `runner_name` / `runner_group_id`). If self-hosted, collect `journalctl` and docker host logs for the timestamp window. +**Detection (explicit):** +- Environment flag: `CHARON_SINGLE_CONTAINER_MODE`. +- Accepted values: `true|false` (case-insensitive). Any other value defaults + to `false` and logs a warning. +- Default: `true` in official Dockerfile and official compose examples. +- Non-container installs (binary on host) default to `false` unless explicitly + set. 
-3) Reproduction instructions (how to reproduce the shard locally exactly) -- Rebuild image used by CI (recommended to match CI): - ```bash - .github/skills/scripts/skill-runner.sh docker-rebuild-e2e - ``` -- Start E2E environment (use the same compose used in CI): - ```bash - docker compose -f containers/charon/docker-compose.yml up -d - ``` -- Environment variables to set (use the values CI uses): - - `PLAYWRIGHT_BASE_URL` – CI base URL (e.g. `http://localhost:8080` for Docker mode; `http://localhost:5173` for Vite dev). - - `CHARON_EMERGENCY_TOKEN` – emergency token used by tests. - - `PLAYWRIGHT_JOBS` or `PWDEBUG` as needed: `DEBUG=pw:api PWDEBUG=1`. - - Optional toggles used in CI: `PLAYWRIGHT_SKIP_SECURITY_DEPS=1`. -- Exact shard reproduction command (example matching CI): - ```bash - export PLAYWRIGHT_BASE_URL=http://localhost:8080 - export CHARON_EMERGENCY_TOKEN=changeme - DEBUG=pw:api PWDEBUG=1 \ - npx playwright test --shard=INDEX/TOTAL --project=chromium \ - --output=/tmp/playwright-shard-INDEX --reporter=json --trace=on > /tmp/playwright-shard-INDEX.log 2>&1 - ``` -- To re-run a single failing test found in JSON: - ```bash - npx playwright test tests/path/to/spec.ts -g "Exact test title" --project=chromium --trace=on --output=/tmp/playwright-single - ``` +**Enforcement (explicit):** +- The repair endpoint is disabled when single-container mode is `false`. +- The handler MUST return `403` with `permissions_repair_disabled` when the + mode check fails, and SHALL NOT attempt any filesystem mutations. +- Diagnostics remain available regardless of mode. -4) Required artifacts & evidence to collect (exact list and commands) -- Per-shard Playwright outputs: `trace.zip`, `video/*`, `test-results.json` or `reporter json` and shard stdout/stderr log. Ensure `--output` points to shard-specific path and upload as artifact. -- Job-level artifacts: GitHub Actions run logs ZIP, job logs ZIP, `gh run download` output. 
-- Runner/host diagnostics (self-hosted): `journalctl -u actions.runner.*`, `dmesg | grep -i oom`, `sudo journalctl -u docker.service`, `docker ps -a`, `docker logs --since` for charon-e2e and caddy. -- Capture a timestamped mapping file that lists: job start, shard start, last test start, last trace timestamp, job end. Example CSV header: `job_id,job_start,shard_index,shard_start, last_test_started_at, job_end, conclusion`. -- Attach a minimal repro package: Docker image tag, docker-compose file, the exact Playwright command-line, and the failing test id/title. +**Repair gating and precedence (explicit):** +1) Admin-only check first. If not admin, return `403` with + `permissions_admin_only`. +2) Single-container mode check second. If disabled, return `403` with + `permissions_repair_disabled`. +3) Root check third. If not root, return `403` with `permissions_non_root`. +4) Only after all gating checks pass, proceed to path validation and mutation. -5) Prioritization of fixes and quick mitigations (concrete) -- P0 (Immediate unblock): - - Temporarily increase `timeout-minutes` to 60 for failing workflow; add `if: always()` diagnostics step and artifact upload. - - Ensure each shard uses `--output` per-shard and is uploaded (`actions/upload-artifact`) so traces are available even on cancellation. - - Re-run failing shard locally with `DEBUG=pw:api PWDEBUG=1` and collect traces. -- P1 (Same-day): - - Add CI smoke healthcheck step that validates UI and emergency server before shards start (quick `curl` checks and a small Playwright smoke test). - - If self-hosted runner, add simple resource guard (systemd service restart prevention) and OOM monitoring alert. - - Configure Playwright retries for flaky tests (small number) and mark expensive suites as `--workers=1`. -- P2 (Next sprint): - - Implement historical-duration-based shard splitting to avoid heavy concentration in one shard. 
- - Add test-level tagging and targeted prioritization for long-running security-enforcement suites. - - Add CI-level telemetry: test-duration history, flaky-test dashboard. +**Placement:** +- Mode detection lives in `backend/internal/config` as a boolean flag on the + runtime config object. +- Enforcement happens in the permissions repair handler before any path + validation or mutation. +- Log the evaluated mode and source (explicit env vs default) once at startup. -Verdict: NEEDS CHANGES — the existing spec is a solid base, but add the forensic commands, reproducible shard reproduction steps, explicit artifact list, and CI checks above before marking this plan approved. +### 3.10 Spec-Driven Workflow Artifacts (Pre-Implementation Gate) -Actionable next steps (short list): -- Add the `always()` diagnostics step to `.github/workflows/.yml` and upload diagnostics as artifacts. -- Modify the E2E job to set `--output` to `e2e-shard-${{ matrix.index }}-output` and upload that path. -- Run `gh run download 21865692694` and extract the per-job logs; parse the job JSON to determine if the runner was self-hosted and collect host logs if so. -- Reproduce the failing shard locally using the exact commands above and attach `trace.zip` and JSON reporter output to the issue. +Before Phase 1 begins, update the following artifacts and confirm sign-off: -If you want, I can apply the small CI YAML snippets (diagnostics + upload) as a targeted patch or download the run artifacts now (requires `GITHUB_TOKEN`). +- `requirements.md` with new or refined EARS statements for permissions + diagnostics, admin-gated saves, and error mapping. +- `design.md` with new endpoints, data flow, error payloads, and non-root + permission remediation design. +- `tasks.md` with the phase plan, test order, verification tasks, and the + deterministic read-only DB simulation approach. 
+ +## 4) Implementation Plan (Phased, Minimal Requests) + +### Pre-Implementation Gate (Required) + +1) Update `requirements.md`, `design.md`, and `tasks.md` per section 3.10. +2) Confirm the deterministic read-only DB simulation approach and exact + invocation are documented in `tasks.md`. +3) Proceed only after the spec artifacts are updated and reviewed. + +### Phase 1 — Playwright & Diagnostic Ground Truth + +**Goal:** Define the expected UX and capture the failure state before changes. + +1) Add E2E coverage for permissions and save failures: + - Tests in `tests/settings/settings-permissions.spec.ts`: + - Simulate DB read-only deterministically (compose override only). + - Verify toast/error text on save failure. + - Tests for dropdown persistence in System Settings and SMTP: + - Ensure selections persist after reload when writes succeed. + - Ensure UI reverts with visible error on write failure. + - Security notification log level options: + - Ensure `fatal` is not present in the dropdown options. + + +1a) Deterministic failure simulation setup (aligned with E2E workflow): + - Use a docker-compose override to bind-mount a read-only DB file for E2E. + This is the single supported approach for deterministic DB read-only + simulation. + - Override file (example name): + `.docker/compose/docker-compose.e2e-readonly-db.override.yml`. + - Read-only DB setup sequence (explicit): + 1. Run the E2E rebuild skill first to ensure the base container and + baseline volumes are fresh and healthy. + 2. Start a one-off container (or job) with a writable volume. + 3. Run migrations and seed data to create the SQLite DB file in that + writable location. + 4. Stop the one-off container and bind-mount the DB file into the E2E + container as read-only using the override. 
+ - Exact invocation (Docker E2E mode): + ```bash + # Step 1: rebuild E2E container + .github/skills/scripts/skill-runner.sh docker-rebuild-e2e + + # Step 2: start with override + docker compose -f .docker/compose/docker-compose.yml \ + -f .docker/compose/docker-compose.e2e-readonly-db.override.yml up -d + ``` + - The override SHALL mount the DB file read-only and MUST NOT require any + application code changes or test-only flags. + - Teardown/cleanup after the test run (explicit): + 1. Stop and remove the override services/containers started for the + read-only run. + 2. Remove any override-specific volumes used for the read-only DB file + to avoid cross-test contamination. + 3. Re-run the E2E rebuild skill before the next E2E session to restore + the standard writable DB state. + - Add a planned VS Code task or skill-runner entry to make this workflow + one-command and discoverable (example task label: "Test: E2E Readonly + DB", command invoking the docker compose override sequence above). + +2) Add a health check step in tests for permissions endpoint once available. + +**Outputs:** New E2E baseline expectations for save behavior. + +### Phase 2 — Backend Permissions Diagnostics & Errors + +**Goal:** Make permission issues undeniable and actionable. + +1) Add system permissions handler and util: + - `backend/internal/api/handlers/system_permissions_handler.go` + - `backend/internal/util/permissions.go` + +2) Add standardized permission error mapping: + - Wrap DB and filesystem errors in settings, notifications, imports, backups. + +3) Extend security notifications model and defaults: + - Update `NotificationConfig` fields. + - Update handler validation for min log level or adjust UI. + +**Outputs:** A diagnostics API and consistent error payloads across persistence +paths. + +### Phase 3 — Frontend Save Flows and UI Messaging + +**Goal:** Reduce request count and surface errors clearly. 
+ +1) System Settings: + - Switch to `PATCH /api/v1/config` for multi-field save. + - On error, show permission hint if provided. + +2) Security Notification Settings modal: + - Align log level options with backend. + - Ensure new fields are saved and displayed. + +3) Notifications providers: + - Surface permission errors on save/update/delete. + +**Outputs:** Fewer save calls, better error clarity, stable dropdown +persistence. + +### Phase 4 — Integration and Testing + +1) Run Playwright E2E tests first, before any unit tests. +2) If the E2E environment changed, rebuild using the E2E Docker skill. +3) Ensure E2E tests cover permission failure UX and dropdown persistence. +4) Run unit tests only after E2E passes. +5) Enforce 100% patch coverage for all modified lines. +6) Record any coverage gaps in `tasks.md` before adding tests. + +### Phase 5 — Container & Volume Hardening + +**Goal:** Provide a clear, secure non-root path. + +1) Entrypoint improvements: + - When running as root, ensure `/app/data` ownership is corrected (not only + subdirs). + - Log UID/GID at startup. + +2) Optional PUID/PGID support: + - If `CHARON_UID`/`CHARON_GID` are set and the container is not started with + `--user`, re-map `charon` user or add supplemental group. + - If `--user` is set, log that PUID/PGID overrides are ignored and volume + ownership must be handled on the host. + +3) Dockerfile/Compose review: + - If PUID/PGID added, update Dockerfile and compose example. + +**Outputs:** Hardening changes that remove the “silent failure” path. + +### Phase 6 — Integration, Documentation, and Cleanup + +1) Add troubleshooting docs for non-root volumes. +2) Update any user guides referencing permissions. +3) Update API docs for new endpoints: + - Add `GET /api/v1/system/permissions` and + `POST /api/v1/system/permissions/repair` to + [docs/api.md](docs/api.md) with schemas, auth, and error codes. 
+4) Update documentation to reference `CHARON_SINGLE_CONTAINER_MODE`: + - Add the env var description and default behavior to the primary + configuration reference (include accepted values and fallback behavior). + - Add or update a Docker Compose example showing + `CHARON_SINGLE_CONTAINER_MODE=true` in the environment list. +5) Ensure `requirements.md`, `design.md`, and `tasks.md` are updated. +6) Finalize tests and ensure coverage targets are met. +7) Update [docs/features.md](docs/features.md) for any user-facing permissions + diagnostics or repair UX changes. + +## 5) Acceptance Criteria (EARS) + +- WHEN the container runs as non-root and a mounted volume is not writable, THE + SYSTEM SHALL expose a permissions diagnostic endpoint that reports the failing + path and required access. +- WHEN the permissions repair endpoint is called by a non-root process, THE + SYSTEM SHALL return `403` and SHALL NOT perform any filesystem mutation. +- WHEN the permissions repair endpoint is called by a non-admin user, THE SYSTEM + SHALL return `403` with `permissions_admin_only` and SHALL NOT perform any + filesystem mutation. +- WHEN the permissions repair endpoint is called while single-container mode is + disabled, THE SYSTEM SHALL return `403` with `permissions_repair_disabled` + and SHALL NOT perform any filesystem mutation. +- WHEN the permissions repair endpoint receives a path that is outside the + allowlist, THE SYSTEM SHALL reject the request with a clear error and SHALL + NOT touch the filesystem. +- WHEN the permissions repair endpoint receives a symlink or a path containing a + symlinked component, THE SYSTEM SHALL reject the request with a clear error + and SHALL NOT follow the link. +- WHEN the permissions repair endpoint receives a missing path, THE SYSTEM SHALL + return a per-path error and SHALL NOT create the path. 
+- WHEN the permissions repair endpoint receives a relative path or a path that + normalizes to `.` or `..`, THE SYSTEM SHALL reject the request and SHALL NOT + perform any filesystem mutation. +- WHEN a user saves system, SMTP, or notification settings and the DB is + read-only, THE SYSTEM SHALL return a clear error with a remediation hint. +- WHEN a user updates dropdown-based settings and persistence fails, THE SYSTEM + SHALL display an error and SHALL NOT silently pretend the save succeeded. +- WHEN the security notification log level options are displayed, THE SYSTEM + SHALL only present `debug`, `info`, `warn`, and `error`. +- WHEN security notification settings are saved, THE SYSTEM SHALL persist all + fields that the UI presents. +- WHEN settings updates include multiple fields, THE SYSTEM SHALL apply them in + a single request and a single transaction to avoid partial persistence. +- WHEN a non-admin user attempts to call a save endpoint, THE SYSTEM SHALL + return `403` with `permissions_admin_only` and SHALL NOT perform any write. +- WHEN permissions diagnostics or repair endpoints are called, THE SYSTEM SHALL + emit an audit log entry with outcome details. +- WHEN a permission-related save failure occurs, THE SYSTEM SHALL emit an audit + log entry with a stable error code and redacted path details for non-admin + contexts. +- WHEN a non-admin user receives a permission-related error, THE SYSTEM SHALL + redact filesystem path details from the response payload. + +## 6) Files and Components to Touch (Trace Map) + +**Backend** +- + [.docker/docker-entrypoint.sh][docker-entrypoint-sh] — permission checks + and potential ownership fixes. +- + [backend/internal/config/config.go][config-go] — data directory creation + behavior. +- + [backend/internal/api/handlers/settings_handler.go][settings-handler-go] + — permission-aware errors, PATCH usage. +- + [backend/internal/api/handlers/
+ security_notifications.go][security-notifications-handler-go] + — validation alignment. +- + [backend/internal/services/
+ security_notification_service.go][security-notification-service-go] + — defaults, persistence. +- + [backend/internal/models/notification_config.go][notification-config-go] + — new fields. +- + [backend/internal/services/mail_service.go][mail-service-go] — + permission-aware errors. +- + [backend/internal/services/notification_service.go][notification-service-go] + — permission-aware errors. +- + [backend/internal/services/backup_service.go][backup-service-go] — + permission-aware errors. +- + [backend/internal/util/permissions.go][permissions-util-go] — permission + diagnostics utility. +- + [backend/internal/api/handlers/import_handler.go][import-handler-go] — + permission-aware errors for uploads. + +**Frontend** +- + [frontend/src/pages/SystemSettings.tsx][system-settings-tsx] — batch save + via PATCH and better error UI. +- + [frontend/src/pages/SMTPSettings.tsx][smtp-settings-tsx] — permission error + messaging. +- + [frontend/src/pages/Notifications.tsx][notifications-page-tsx] — save error + handling. +- + [frontend/src/components/
+ SecurityNotificationSettingsModal.tsx][security-notification-modal-tsx] + — align fields. +- + [frontend/src/components/ui/Select.tsx][select-component-tsx] — no + functional change expected; verify for state persistence. + +**Infra** +- [Dockerfile](Dockerfile) +- [.docker/compose/docker-compose.yml](.docker/compose/docker-compose.yml) + +## 7) Repo Hygiene Review (Requested) + +- **.gitignore:** No change required unless we add new diagnostics artifacts + (e.g., `permissions-report.json`). If added, ignore them under root or + `test-results/`. +- **.dockerignore:** No change required. If we add new documentation files or + test artifacts, keep them excluded from the image. +- **codecov.yml:** No change required unless new diagnostics packages warrant + exclusions. +- **Dockerfile:** Potential update if PUID/PGID support is added; otherwise, no + change required. + +## 8) Unit Test Plan + +Backend unit tests (Go): +- Permissions diagnostics utility: validate stat parsing, writable checks, and + error mapping for missing paths and permission denied. +- Permissions endpoints: admin-only access (403 + `permissions_admin_only`) and + successful admin responses. +- Permissions repair endpoint: + - Rejects non-root execution with `403` and no filesystem changes. + - Rejects non-admin requests with `permissions_admin_only`. + - Rejects paths outside the allowlist safe roots. + - Rejects relative paths, `.` and `..` after normalization, and any request + where `filepath.Clean` produces an out-of-allowlist path. + - Rejects symlinks and symlinked path components via `Lstat` and + `EvalSymlinks` checks. + - Returns per-path errors for missing paths without creating them. +- Permission-aware error mapping: ensure DB read-only and `os.IsPermission` + errors map to the standard payload fields and redact path details for + non-admins.
+- Audit logging: verify diagnostics/repair calls and permission-related save + failures emit audit entries with redacted path details for non-admin contexts. +- Settings PATCH behavior: multi-field patch applies atomically in the + handler/service and returns a single failure when any persistence step fails. + +Frontend unit tests (Vitest): +- Diagnostics fetch handling: verify non-admin error messaging without path + details. +- Settings save errors: ensure error toast displays remediation text and UI + state does not silently persist on failure. + +## 9) Confidence Score + +Confidence: 80% + +Rationale: The permissions write paths are well mapped, and the root cause +(non-root + volume ownership mismatch) is a common pattern. The only uncertainty +is the exact user environment for the failure, which will be clarified once +diagnostics are in place. + +[backup-service-go]: + backend/internal/services/backup_service.go +[import-handler-go]: + backend/internal/api/handlers/import_handler.go +[notification-service-go]: + backend/internal/services/notification_service.go +[notification-handler-go]: + backend/internal/api/handlers/notification_handler.go +[notifications-page-tsx]: + frontend/src/pages/Notifications.tsx +[security-notification-service-go]: + backend/internal/services/security_notification_service.go +[security-notifications-handler-go]: + backend/internal/api/handlers/security_notifications.go +[security-notification-modal-tsx]: + frontend/src/components/SecurityNotificationSettingsModal.tsx +[notification-config-go]: + backend/internal/models/notification_config.go +[system-settings-tsx]: + frontend/src/pages/SystemSettings.tsx +[settings-handler-go]: + backend/internal/api/handlers/settings_handler.go +[smtp-settings-tsx]: + frontend/src/pages/SMTPSettings.tsx +[mail-service-go]: + backend/internal/services/mail_service.go +[select-component-tsx]: + frontend/src/components/ui/Select.tsx +[docker-entrypoint-sh]: + .docker/docker-entrypoint.sh
+[config-go]: + backend/internal/config/config.go +[permissions-util-go]: + backend/internal/util/permissions.go diff --git a/go.work b/go.work index ca05e7d8..1f9018ac 100644 --- a/go.work +++ b/go.work @@ -1,3 +1,3 @@ -go 1.26.0 +go 1.25.0 use ./backend diff --git a/tests/settings/notifications.spec.ts b/tests/settings/notifications.spec.ts index 6d3343db..85def2b9 100644 --- a/tests/settings/notifications.spec.ts +++ b/tests/settings/notifications.spec.ts @@ -920,14 +920,14 @@ test.describe('Notification Providers', () => { * Test: Edit external template * Priority: P2 */ - test('should edit external template', async ({ page }) => { - await test.step('Mock external templates API response', async () => { - await page.route('**/api/v1/notifications/external-templates', async (route, request) => { - if (request.method() === 'GET') { - await route.fulfill({ - status: 200, - contentType: 'application/json', - body: JSON.stringify([ + test('should edit external template', async ({ page }) => { + await test.step('Mock external templates API response', async () => { + await page.route(/\/api\/v1\/notifications\/external-templates\/?(?:\?.*)?$/, async (route, request) => { + if (request.method() === 'GET') { + await route.fulfill({ + status: 200, + contentType: 'application/json', + body: JSON.stringify([ { id: 'edit-template-id', name: 'Editable Template', @@ -936,7 +936,7 @@ test.describe('Notification Providers', () => { config: '{"old": "config"}', }, ]), - }); + }); } else { await route.continue(); } @@ -948,15 +948,13 @@ test.describe('Notification Providers', () => { await waitForLoadingComplete(page); }); - await test.step('Click Manage Templates button to show templates list', async () => { - // Find the toggle button for template management - const allButtons = page.getByRole('button'); - const manageBtn = allButtons.filter({ hasText: /manage.*templates/i }).first(); - - if (await manageBtn.isVisible({ timeout: 3000 }).catch(() => false)) { - await 
manageBtn.click(); - } - }); + await test.step('Click Manage Templates button to show templates list', async () => { + // Find the toggle button for template management + const allButtons = page.getByRole('button'); + const manageBtn = allButtons.filter({ hasText: /manage.*templates/i }).first(); + await expect(manageBtn).toBeVisible({ timeout: 5000 }); + await manageBtn.click(); + }); await test.step('Wait and verify templates list is visible', async () => { const templateText = page.getByText('Editable Template'); @@ -1013,9 +1011,9 @@ test.describe('Notification Providers', () => { * Test: Delete external template * Priority: P2 */ - test('should delete external template', async ({ page }) => { - await test.step('Mock external templates', async () => { - let templates = [ + test('should delete external template', async ({ page }) => { + await test.step('Mock external templates', async () => { + let templates = [ { id: 'delete-template-id', name: 'Template to Delete', @@ -1025,11 +1023,11 @@ test.describe('Notification Providers', () => { }, ]; - await page.route('**/api/v1/notifications/external-templates', async (route, request) => { - if (request.method() === 'GET') { - await route.fulfill({ - status: 200, - contentType: 'application/json', + await page.route(/\/api\/v1\/notifications\/external-templates\/?(?:\?.*)?$/, async (route, request) => { + if (request.method() === 'GET') { + await route.fulfill({ + status: 200, + contentType: 'application/json', body: JSON.stringify(templates), }); return; @@ -1053,13 +1051,13 @@ test.describe('Notification Providers', () => { }); }); - await test.step('Reload page', async () => { - // Wait for external templates fetch so list render is deterministic. - const templatesResponsePromise = waitForAPIResponse( - page, - /\/api\/v1\/notifications\/external-templates$/, - { status: 200 } - ); + await test.step('Reload page', async () => { + // Wait for external templates fetch so list render is deterministic. 
+ const templatesResponsePromise = waitForAPIResponse( + page, + /\/api\/v1\/notifications\/external-templates\/?(?:\?.*)?$/, + { status: 200 } + ); await page.reload(); await templatesResponsePromise; @@ -1089,11 +1087,11 @@ test.describe('Notification Providers', () => { /\/api\/v1\/notifications\/external-templates\/delete-template-id/, { status: 200 } ); - const refreshResponsePromise = waitForAPIResponse( - page, - /\/api\/v1\/notifications\/external-templates$/, - { status: 200 } - ); + const refreshResponsePromise = waitForAPIResponse( + page, + /\/api\/v1\/notifications\/external-templates\/?(?:\?.*)?$/, + { status: 200 } + ); const templateHeading = page.getByRole('heading', { name: 'Template to Delete', level: 4 }); const templateCard = templateHeading.locator('..').locator('..'); diff --git a/tests/settings/user-management.spec.ts b/tests/settings/user-management.spec.ts index 8cb69f83..b4e4af73 100644 --- a/tests/settings/user-management.spec.ts +++ b/tests/settings/user-management.spec.ts @@ -12,7 +12,7 @@ * @see /projects/Charon/frontend/src/pages/UsersPage.tsx */ -import { test, expect, loginUser, TEST_PASSWORD } from '../fixtures/auth-fixtures'; +import { test, expect, loginUser, logoutUser, TEST_PASSWORD } from '../fixtures/auth-fixtures'; import { waitForLoadingComplete, waitForToast, @@ -409,26 +409,46 @@ test.describe('User Management', () => { * Priority: P1 */ test('should show invite URL preview', async ({ page }) => { - await test.step('Open invite modal', async () => { + const inviteModal = await test.step('Open invite modal', async () => { const inviteButton = page.getByRole('button', { name: /invite.*user/i }); await inviteButton.click(); + return await waitForModal(page, /invite.*user/i); }); await test.step('Enter valid email', async () => { - const emailInput = page.getByPlaceholder(/user@example/i); + // Debounced API call triggers invite URL preview + const previewResponsePromise = waitForAPIResponse( + page, + 
/\/api\/v1\/users\/preview-invite-url\/?(?:\?.*)?$/, + { status: 200 } + ); + + const emailInput = inviteModal.getByPlaceholder(/user@example/i); await emailInput.fill('preview-test@example.com'); + await previewResponsePromise; }); await test.step('Wait for URL preview to appear', async () => { - // URL preview appears after debounced API call - // Wait for invite generation (triggers after email is filled) - await page.waitForTimeout(500); + // When app.public_url is not configured, the backend returns an empty preview URL + // and the UI shows a warning with a link to system settings. + const warningAlert = inviteModal + .getByRole('alert') + .filter({ hasText: /application url is not configured/i }) + .first(); + const previewUrlText = inviteModal + .locator('div.font-mono') + .filter({ hasText: /accept-invite\?token=/i }) + .first(); - const urlPreview = page.locator('input[readonly]').filter({ - hasText: /accept.*invite|token/i, - }); + await expect(warningAlert.or(previewUrlText).first()).toBeVisible({ timeout: 5000 }); - await expect(urlPreview.first()).toBeVisible({ timeout: 5000 }); + if (await warningAlert.isVisible().catch(() => false)) { + const configureLink = warningAlert.getByRole('link', { name: /configure.*application url/i }); + await expect(configureLink).toBeVisible(); + await expect(configureLink).toHaveAttribute('href', '/settings/system'); + } else { + await expect(previewUrlText).toBeVisible(); + } }); }); @@ -635,37 +655,37 @@ test.describe('User Management', () => { * Test: Add permitted hosts * Priority: P0 */ - test('should add permitted hosts', async ({ page, testData }) => { - // SKIP: Depends on settings (permissions) button which is not yet implemented - const testUser = await testData.createUser({ - name: 'Add Hosts Test', - email: `add-hosts-${Date.now()}@test.local`, - password: TEST_PASSWORD, - role: 'user', - }); + test('should add permitted hosts', async ({ page, testData }) => { + // SKIP: Depends on settings (permissions) 
button which is not yet implemented + const testUser = await testData.createUser({ + name: 'Add Hosts Test', + email: `add-hosts-${Date.now()}@test.local`, + password: TEST_PASSWORD, + role: 'user', + }); - await test.step('Open permissions modal', async () => { - await page.reload(); - await waitForLoadingComplete(page); + const permissionsModal = await test.step('Open permissions modal', async () => { + await page.reload(); + await waitForLoadingComplete(page); - const userRow = page.getByRole('row').filter({ - hasText: testUser.email, + const userRow = page.getByRole('row').filter({ + hasText: testUser.email, }); const permissionsButton = userRow.locator('button').filter({ has: page.locator('svg.lucide-settings'), - }); + }); - await permissionsButton.first().click(); - await page.waitForTimeout(500); - }); + await permissionsButton.first().click(); + return await waitForModal(page, /permissions/i); + }); - await test.step('Check a host to add', async () => { - const hostCheckboxes = page.locator('input[type="checkbox"]'); - const count = await hostCheckboxes.count(); + await test.step('Check a host to add', async () => { + const hostCheckboxes = permissionsModal.locator('input[type="checkbox"]'); + const count = await hostCheckboxes.count(); - if (count === 0) { - // No hosts to add - return + if (count === 0) { + // No hosts to add - return return; } @@ -674,12 +694,12 @@ test.describe('User Management', () => { await firstCheckbox.check(); } await expect(firstCheckbox).toBeChecked(); - }); + }); - await test.step('Save changes', async () => { - const saveButton = page.getByRole('button', { name: /save/i }); - await saveButton.click(); - }); + await test.step('Save changes', async () => { + const saveButton = permissionsModal.getByRole('button', { name: /save/i }); + await saveButton.click(); + }); await test.step('Verify success', async () => { await waitForToast(page, /updated|saved|success/i, { type: 'success' }); @@ -690,36 +710,36 @@ test.describe('User 
Management', () => { * Test: Remove permitted hosts * Priority: P1 */ - test('should remove permitted hosts', async ({ page, testData }) => { - const testUser = await testData.createUser({ - name: 'Remove Hosts Test', - email: `remove-hosts-${Date.now()}@test.local`, - password: TEST_PASSWORD, - role: 'user', - }); + test('should remove permitted hosts', async ({ page, testData }) => { + const testUser = await testData.createUser({ + name: 'Remove Hosts Test', + email: `remove-hosts-${Date.now()}@test.local`, + password: TEST_PASSWORD, + role: 'user', + }); - await test.step('Open permissions modal', async () => { - await page.reload(); - await waitForLoadingComplete(page); + const permissionsModal = await test.step('Open permissions modal', async () => { + await page.reload(); + await waitForLoadingComplete(page); - const userRow = page.getByRole('row').filter({ - hasText: testUser.email, + const userRow = page.getByRole('row').filter({ + hasText: testUser.email, }); const permissionsButton = userRow.locator('button').filter({ has: page.locator('svg.lucide-settings'), - }); + }); - await permissionsButton.first().click(); - await page.waitForTimeout(500); - }); + await permissionsButton.first().click(); + return await waitForModal(page, /permissions/i); + }); - await test.step('Uncheck a checked host', async () => { - const hostCheckboxes = page.locator('input[type="checkbox"]'); - const count = await hostCheckboxes.count(); + await test.step('Uncheck a checked host', async () => { + const hostCheckboxes = permissionsModal.locator('input[type="checkbox"]'); + const count = await hostCheckboxes.count(); - if (count === 0) { - return; + if (count === 0) { + return; } // First check a box, then uncheck it @@ -733,12 +753,12 @@ test.describe('User Management', () => { await firstCheckbox.uncheck(); await expect(firstCheckbox).not.toBeChecked(); - }); + }); - await test.step('Save changes', async () => { - const saveButton = page.getByRole('button', { name: /save/i }); 
- await saveButton.click(); - }); + await test.step('Save changes', async () => { + const saveButton = permissionsModal.getByRole('button', { name: /save/i }); + await saveButton.click(); + }); await test.step('Verify success', async () => { await waitForToast(page, /updated|saved|success/i, { type: 'success' }); @@ -1129,12 +1149,7 @@ test.describe('User Management', () => { // Skip: Admin access control is enforced via routing/middleware, not visible error messages test('should require admin role for access', async ({ page, regularUser }) => { await test.step('Logout current admin', async () => { - // Navigate to logout or click logout button - const logoutButton = page.getByText(/logout/i); - if (await logoutButton.isVisible()) { - await logoutButton.click(); - await page.waitForURL(/\/login/); - } + await logoutUser(page); }); await test.step('Login as regular user', async () => { @@ -1142,18 +1157,30 @@ test.describe('User Management', () => { await waitForLoadingComplete(page); }); - await test.step('Attempt to access users page', async () => { + const listUsersResponse = await test.step('Attempt to access users page', async () => { + const responsePromise = page.waitForResponse( + (response) => + response.request().method() === 'GET' && + /\/api\/v1\/users\/?(?:\?.*)?$/.test(response.url()) && + !/\/api\/v1\/users\/preview-invite-url\/?(?:\?.*)?$/.test(response.url()), + { timeout: 15000 } + ).catch(() => null); + await page.goto('/users', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + return await responsePromise; }); await test.step('Verify access denied or redirect', async () => { // Should either redirect to home/dashboard or show error const currentUrl = page.url(); const isRedirected = !currentUrl.includes('/users'); - const hasError = await page.getByText(/access.*denied|not.*authorized|forbidden/i).isVisible({ timeout: 3000 }).catch(() => false); + const hasForbiddenResponse = listUsersResponse?.status() === 403; + const 
hasError = await page + .getByText(/admin access required|access.*denied|not.*authorized|forbidden/i) + .isVisible({ timeout: 3000 }) + .catch(() => false); - expect(isRedirected || hasError).toBeTruthy(); + expect(isRedirected || hasForbiddenResponse || hasError).toBeTruthy(); }); }); @@ -1164,29 +1191,34 @@ test.describe('User Management', () => { // Skip: Admin access control is enforced via routing/middleware, not visible error messages test('should show error for regular user access', async ({ page, regularUser }) => { await test.step('Logout and login as regular user', async () => { - const logoutButton = page.getByText(/logout/i); - if (await logoutButton.isVisible()) { - await logoutButton.click(); - await page.waitForURL(/\/login/); - } + await logoutUser(page); await loginUser(page, regularUser); await waitForLoadingComplete(page); }); - await test.step('Navigate to users page directly', async () => { + const listUsersResponse = await test.step('Navigate to users page directly', async () => { + const responsePromise = page.waitForResponse( + (response) => + response.request().method() === 'GET' && + /\/api\/v1\/users\/?(?:\?.*)?$/.test(response.url()) && + !/\/api\/v1\/users\/preview-invite-url\/?(?:\?.*)?$/.test(response.url()), + { timeout: 15000 } + ).catch(() => null); + await page.goto('/users', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + return await responsePromise; }); await test.step('Verify error message or redirect', async () => { // Check for error toast, error page, or redirect - const errorMessage = page.getByText(/access.*denied|unauthorized|forbidden|permission/i); + const errorMessage = page.getByText(/admin access required|access.*denied|unauthorized|forbidden|permission/i); const hasError = await errorMessage.isVisible({ timeout: 3000 }).catch(() => false); const isRedirected = !page.url().includes('/users'); + const hasForbiddenResponse = listUsersResponse?.status() === 403; - expect(hasError || 
isRedirected).toBeTruthy(); + expect(hasError || isRedirected || hasForbiddenResponse).toBeTruthy(); }); }); diff --git a/tests/tasks/logs-viewing.spec.ts b/tests/tasks/logs-viewing.spec.ts index 66ad2b4d..e50970f1 100644 --- a/tests/tasks/logs-viewing.spec.ts +++ b/tests/tasks/logs-viewing.spec.ts @@ -194,20 +194,20 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await expect(page.getByTestId('log-file-list')).toBeVisible(); }); - test('should show list of available log files', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should show list of available log files', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + const logFilesPromise = waitForAPIResponse(page, '/api/v1/logs', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); - // Wait for API response - await waitForAPIResponse(page, '/api/v1/logs', { status: 200 }); + await logFilesPromise; - // Verify all log files are displayed in the list - await expect(page.getByText('access.log')).toBeVisible(); - await expect(page.getByText('error.log')).toBeVisible(); - await expect(page.getByText('caddy.log')).toBeVisible(); + // Verify all log files are displayed in the list + await expect(page.getByText('access.log')).toBeVisible(); + await expect(page.getByText('error.log')).toBeVisible(); + await expect(page.getByText('caddy.log')).toBeVisible(); }); test('should display log filters section', async ({ page, authenticatedUser }) => { @@ -242,35 +242,37 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await expect(page.getByText('0.24 MB')).toBeVisible(); }); - test('should load log content when file selected', async ({ page, authenticatedUser }) => { - await 
loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should load log content when file selected', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); - // The first file (access.log) is auto-selected - wait for content - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + // The first file (access.log) is auto-selected - wait for content + await initialContentPromise; - // Verify log table is displayed - await expect(page.getByTestId('log-table')).toBeVisible(); - }); - }); + // Verify log table is displayed + await expect(page.getByTestId('log-table')).toBeVisible(); + }); + }); test.describe('Log Content Display', () => { - test('should display log entries in table format', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should display log entries in table format', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); - // Wait for auto-selected log content to load - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + // Wait for auto-selected log content to load + await initialContentPromise; - // Verify table structure - const logTable = page.getByTestId('log-table'); - await 
expect(logTable).toBeVisible(); + // Verify table structure + const logTable = page.getByTestId('log-table'); + await expect(logTable).toBeVisible(); // Verify table has expected columns await expect(page.getByRole('columnheader', { name: /time/i })).toBeVisible(); @@ -278,35 +280,38 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await expect(page.getByRole('columnheader', { name: /method/i })).toBeVisible(); }); - test('should show timestamp, level, method, uri, status', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should show timestamp, level, method, uri, status', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); - // Wait for content to load - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + // Wait for content to load + await initialContentPromise; - // Verify log entry content is displayed - // The mock data includes 192.168.1.100 as remote_ip in first entry - await expect(page.getByText('192.168.1.100')).toBeVisible(); - await expect(page.getByText('GET')).toBeVisible(); - await expect(page.getByText('/api/v1/users')).toBeVisible(); - await expect(page.getByTestId('status-200')).toBeVisible(); + // Verify log entry content is displayed + // The mock data includes 192.168.1.100 as remote_ip in first entry + const entryRow = page.getByRole('row').filter({ hasText: '192.168.1.100' }).first(); + await expect(entryRow).toBeVisible(); + await expect(entryRow.getByRole('cell', { name: 'GET' })).toBeVisible(); + await 
expect(entryRow.getByText('/api/v1/users')).toBeVisible(); + await expect(entryRow.getByTestId('status-200')).toBeVisible(); }); - test('should highlight error entries with distinct styling', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should highlight error entries with distinct styling', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; - // Find the 502 error status badge - should have red styling class - const errorStatus = page.getByTestId('status-502'); - await expect(errorStatus).toBeVisible(); + // Find the 502 error status badge - should have red styling class + const errorStatus = page.getByTestId('status-502'); + await expect(errorStatus).toBeVisible(); // Verify error has red styling (bg-red or similar) await expect(errorStatus).toHaveClass(/red/); @@ -342,9 +347,11 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { }); }); + // Set up the response wait BEFORE navigation to avoid missing fast mocked responses. 
+ const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await initialContentPromise; // Initial state - page 1 expect(capturedOffset).toBe(0); @@ -363,8 +370,8 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { expect(capturedOffset).toBe(50); }); - test('should display page info correctly', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should display page info correctly', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); const largeEntrySet = generateMockEntries(150); @@ -372,7 +379,7 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { const url = new URL(route.request().url()); const offset = parseInt(url.searchParams.get('offset') || '0'); const limit = parseInt(url.searchParams.get('limit') || '50'); @@ -387,22 +394,23 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { offset, }, }); - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; - // Verify page info displays correctly - const pageInfo = page.getByTestId('page-info'); - await expect(pageInfo).toBeVisible(); + // Verify page info displays correctly + const pageInfo = 
page.getByTestId('page-info'); + await expect(pageInfo).toBeVisible(); // Should show "Showing 1 - 50 of 150" or similar format await expect(pageInfo).toContainText(/1.*50.*150/); }); - test('should disable prev button on first page and next on last', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should disable prev button on first page and next on last', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); const entries = generateMockEntries(75); // 2 pages (50 + 25) @@ -410,7 +418,7 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { const url = new URL(route.request().url()); const offset = parseInt(url.searchParams.get('offset') || '0'); const limit = parseInt(url.searchParams.get('limit') || '50'); @@ -425,11 +433,12 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { offset, }, }); - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; const prevButton = getPrevButton(page); const nextButton = getNextButton(page); @@ -451,8 +460,8 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { }); test.describe('Search and Filter', () => { - test('should filter logs by search text', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should filter logs by search text', async ({ page, authenticatedUser }) => { + await loginUser(page, 
authenticatedUser); let capturedSearch = ''; @@ -460,7 +469,7 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { const url = new URL(route.request().url()); capturedSearch = url.searchParams.get('search') || ''; @@ -483,11 +492,12 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { offset: 0, }, }); - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; // Type in search input const searchInput = page.getByTestId('search-input'); @@ -506,8 +516,8 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { expect(capturedSearch).toBe('users'); }); - test('should filter logs by log level', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should filter logs by log level', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); let capturedLevel = ''; @@ -515,7 +525,7 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { const url = new URL(route.request().url()); capturedLevel = url.searchParams.get('level') || ''; @@ -534,11 +544,12 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { offset: 0, }, }); - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - 
await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; // Select Error level from dropdown using data-testid const levelSelect = page.getByTestId('level-select'); @@ -558,13 +569,14 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { }); test.describe('Download', () => { - test('should download log file successfully', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); - await setupLogMocks(page); + test('should download log file successfully', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); + await setupLogMocks(page); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; // Verify download button is visible and enabled const downloadButton = page.getByTestId('download-button'); @@ -576,14 +588,14 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { await expect(downloadButton).not.toHaveAttribute('disabled'); }); - test('should handle download error gracefully', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should handle download error gracefully', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); await page.route('**/api/v1/logs', async (route) => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await 
page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { if (!route.request().url().includes('/download')) { await route.fulfill({ status: 200, @@ -598,11 +610,12 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { } else { await route.continue(); } - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; // Verify download button is present and properly rendered const downloadButton = page.getByTestId('download-button'); @@ -614,14 +627,14 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { }); test.describe('Edge Cases', () => { - test('should handle empty log content gracefully', async ({ page, authenticatedUser }) => { - await loginUser(page, authenticatedUser); + test('should handle empty log content gracefully', async ({ page, authenticatedUser }) => { + await loginUser(page, authenticatedUser); await page.route('**/api/v1/logs', async (route) => { await route.fulfill({ status: 200, json: mockLogFiles }); }); - await page.route('**/api/v1/logs/access.log*', async (route) => { + await page.route('**/api/v1/logs/access.log*', async (route) => { await route.fulfill({ status: 200, json: { @@ -632,11 +645,12 @@ test.describe('Logs Page - WebKit Compatible Tests', () => { offset: 0, }, }); - }); + }); - await page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); - await waitForLoadingComplete(page); - await waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + const initialContentPromise = waitForAPIResponse(page, '/api/v1/logs/access.log', { status: 200 }); + await 
page.goto('/tasks/logs', { waitUntil: 'domcontentloaded' }); + await waitForLoadingComplete(page); + await initialContentPromise; // Should show "No logs found" or similar message await expect(page.getByText(/no logs found|no.*matching/i)).toBeVisible(); diff --git a/tests/utils/TestDataManager.ts b/tests/utils/TestDataManager.ts index 97d2cf91..babd588e 100644 --- a/tests/utils/TestDataManager.ts +++ b/tests/utils/TestDataManager.ts @@ -28,7 +28,7 @@ * ``` */ -import { APIRequestContext, type APIResponse } from '@playwright/test'; +import { APIRequestContext, type APIResponse, request as playwrightRequest } from '@playwright/test'; import * as crypto from 'crypto'; /** @@ -162,6 +162,7 @@ export class TestDataManager { private resources: ManagedResource[] = []; private namespace: string; private request: APIRequestContext; + private baseURLPromise: Promise | null = null; /** * Creates a new TestDataManager instance @@ -176,6 +177,33 @@ export class TestDataManager { : `test-${crypto.randomUUID()}`; } + private async getBaseURL(): Promise { + if (this.baseURLPromise) { + return await this.baseURLPromise; + } + + this.baseURLPromise = (async () => { + const envBaseURL = process.env.PLAYWRIGHT_BASE_URL; + if (envBaseURL) { + try { + return new URL(envBaseURL).origin; + } catch { + return envBaseURL; + } + } + + try { + const response = await this.request.get('/api/v1/health'); + return new URL(response.url()).origin; + } catch { + // Default matches playwright.config.js non-coverage baseURL + return 'http://127.0.0.1:8080'; + } + })(); + + return await this.baseURLPromise; + } + /** * Sanitizes a test name for use in identifiers * Keeps it short to avoid overly long domain names @@ -474,20 +502,36 @@ export class TestDataManager { createdAt: new Date(), }); - // Automatically log in the user and return token - const loginResponse = await this.request.post('/api/v1/auth/login', { - data: { email: namespacedEmail, password: data.password }, + // Automatically log in 
the user and return token. + // + // IMPORTANT: Do NOT log in using the manager's request context. + // The request context is expected to remain admin-authenticated so later + // operations (and automatic cleanup) can delete resources regardless of + // the created user's role. + const loginContext = await playwrightRequest.newContext({ + baseURL: await this.getBaseURL(), + extraHTTPHeaders: { + Accept: 'application/json', + 'Content-Type': 'application/json', + }, }); - if (!loginResponse.ok()) { - // User created but login failed - still return user info - console.warn(`User created but login failed: ${await loginResponse.text()}`); - return { id: result.id, email: namespacedEmail, token: '' }; + try { + const loginResponse = await loginContext.post('/api/v1/auth/login', { + data: { email: namespacedEmail, password: data.password }, + }); + + if (!loginResponse.ok()) { + // User created but login failed - still return user info + console.warn(`User created but login failed: ${await loginResponse.text()}`); + return { id: result.id, email: namespacedEmail, token: '' }; + } + + const { token } = await loginResponse.json(); + return { id: result.id, email: namespacedEmail, token }; + } finally { + await loginContext.dispose(); } - - const { token } = await loginResponse.json(); - - return { id: result.id, email: namespacedEmail, token }; } /**