diff --git a/.codecov.yml b/.codecov.yml index 106f47a0..5e3521ae 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,5 +1,7 @@ -# Codecov configuration - require 75% overall coverage by default -# Adjust target as needed +# ============================================================================= +# Codecov Configuration +# Require 75% overall coverage, exclude test files and non-source code +# ============================================================================= coverage: status: @@ -11,30 +13,78 @@ coverage: # Fail CI if Codecov upload/report indicates a problem require_ci_to_pass: yes -# Exclude folders from Codecov +# ----------------------------------------------------------------------------- +# Exclude from coverage reporting +# ----------------------------------------------------------------------------- ignore: - - "**/tests/*" - - "**/test/*" - - "**/__tests__/*" + # Test files + - "**/tests/**" + - "**/test/**" + - "**/__tests__/**" - "**/test_*.go" - "**/*_test.go" - "**/*.test.ts" - "**/*.test.tsx" - - "docs/*" - - ".github/*" - - "scripts/*" - - "tools/*" - - "frontend/node_modules/*" - - "frontend/dist/*" - - "frontend/coverage/*" - - "backend/cmd/seed/*" - - "backend/cmd/api/*" - - "backend/data/*" - - "backend/coverage/*" + - "**/*.spec.ts" + - "**/*.spec.tsx" + - "**/vitest.config.ts" + - "**/vitest.setup.ts" + + # E2E tests + - "**/e2e/**" + - "**/integration/**" + + # Documentation + - "docs/**" + - "*.md" + + # CI/CD & Config + - ".github/**" + - "scripts/**" + - "tools/**" + - "*.yml" + - "*.yaml" + - "*.json" + + # Frontend build artifacts & dependencies + - "frontend/node_modules/**" + - "frontend/dist/**" + - "frontend/coverage/**" + - "frontend/test-results/**" + - "frontend/public/**" + + # Backend non-source files + - "backend/cmd/seed/**" + - "backend/cmd/api/**" + - "backend/data/**" + - "backend/coverage/**" + - "backend/bin/**" - "backend/*.cover" - "backend/*.out" + - "backend/*.html" + - "backend/codeql-db/**" + + # 
Docker-only code (not testable in CI) - "backend/internal/services/docker_service.go" - "backend/internal/api/handlers/docker_handler.go" - - "codeql-db/*" + + # CodeQL artifacts + - "codeql-db/**" + - "codeql-db-*/**" + - "codeql-agent-results/**" + - "codeql-custom-queries-*/**" - "*.sarif" - - "*.md" + + # Config files (no logic) + - "**/tailwind.config.js" + - "**/postcss.config.js" + - "**/eslint.config.js" + - "**/vite.config.ts" + - "**/tsconfig*.json" + + # Type definitions only + - "**/*.d.ts" + + # Import/data directories + - "import/**" + - "data/**" diff --git a/.dockerignore b/.dockerignore index ec257925..48f5be27 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,22 @@ -# Version control -.git +# ============================================================================= +# .dockerignore - Exclude files from Docker build context +# Keep this file in sync with .gitignore where applicable +# ============================================================================= + +# ----------------------------------------------------------------------------- +# Version Control & CI/CD +# ----------------------------------------------------------------------------- +.git/ .gitignore .github/ +.pre-commit-config.yaml +.codecov.yml +.goreleaser.yaml +.sourcery.yml -# Python +# ----------------------------------------------------------------------------- +# Python (pre-commit, tooling) +# ----------------------------------------------------------------------------- __pycache__/ *.py[cod] *$py.class @@ -15,99 +28,172 @@ env/ ENV/ .pytest_cache/ .coverage -*.cover .hypothesis/ htmlcov/ *.egg-info/ -# Node/Frontend build artifacts +# ----------------------------------------------------------------------------- +# Node/Frontend - Build in Docker, not from host +# ----------------------------------------------------------------------------- frontend/node_modules/ frontend/coverage/ -frontend/coverage.out +frontend/test-results/ frontend/dist/ frontend/.vite/ 
frontend/*.tsbuildinfo frontend/frontend/ +frontend/e2e/ -# Go/Backend -backend/coverage.txt +# Root-level node artifacts (eslint config runner) +node_modules/ +package-lock.json +package.json + +# ----------------------------------------------------------------------------- +# Go/Backend - Build artifacts & coverage +# ----------------------------------------------------------------------------- +backend/bin/ +backend/api backend/*.out backend/*.cover +backend/*.html backend/coverage/ -backend/coverage.*.out -backend/coverage_*.out +backend/coverage*.out +backend/coverage*.txt +backend/*.coverage.out +backend/handler_coverage.txt +backend/handlers.out +backend/services.test +backend/test-output.txt +backend/tr_no_cover.txt +backend/nohup.out backend/package.json backend/package-lock.json -# Databases (runtime) -backend/data/*.db -backend/data/**/*.db -backend/cmd/api/data/*.db +# Backend data (created at runtime) +backend/data/ +backend/codeql-db/ +backend/.venv/ +backend/.vscode/ + +# ----------------------------------------------------------------------------- +# Databases (created at runtime) +# ----------------------------------------------------------------------------- +*.db *.sqlite *.sqlite3 -cpm.db +data/ charon.db +cpm.db -# IDE +# ----------------------------------------------------------------------------- +# IDE & Editor +# ----------------------------------------------------------------------------- .vscode/ +.vscode.backup*/ .idea/ *.swp *.swo *~ +*.xcf +Chiron.code-workspace -# Logs +# ----------------------------------------------------------------------------- +# Logs & Temp Files +# ----------------------------------------------------------------------------- .trivy_logs/ *.log logs/ +nohup.out -# Environment +# ----------------------------------------------------------------------------- +# Environment Files +# ----------------------------------------------------------------------------- .env .env.local .env.*.local +!.env.example -# OS +# 
----------------------------------------------------------------------------- +# OS Files +# ----------------------------------------------------------------------------- .DS_Store Thumbs.db -# Documentation +# ----------------------------------------------------------------------------- +# Documentation (not needed in image) +# ----------------------------------------------------------------------------- docs/ *.md !README.md +!CONTRIBUTING.md +!LICENSE -# Docker +# ----------------------------------------------------------------------------- +# Docker Compose (not needed inside image) +# ----------------------------------------------------------------------------- docker-compose*.yml **/Dockerfile.* -# CI/CD -.github/ -.pre-commit-config.yaml -.codecov.yml -.goreleaser.yaml - -# GoReleaser artifacts +# ----------------------------------------------------------------------------- +# GoReleaser & dist artifacts +# ----------------------------------------------------------------------------- dist/ -# Scripts +# ----------------------------------------------------------------------------- +# Scripts & Tools (not needed in image) +# ----------------------------------------------------------------------------- scripts/ tools/ create_issues.sh cookies.txt +cookies.txt.bak +test.caddyfile +Makefile -# Testing artifacts +# ----------------------------------------------------------------------------- +# Testing & Coverage Artifacts +# ----------------------------------------------------------------------------- +coverage/ coverage.out *.cover *.crdownload +*.sarif -# Project Documentation -ACME_STAGING_IMPLEMENTATION.md +# ----------------------------------------------------------------------------- +# CodeQL & Security Scanning (large, not needed) +# ----------------------------------------------------------------------------- +codeql-db/ +codeql-db-*/ +codeql-agent-results/ +codeql-custom-queries-*/ +codeql-*.sarif +codeql-results*.sarif +.codeql/ + +# 
----------------------------------------------------------------------------- +# Import Directory (user data) +# ----------------------------------------------------------------------------- +import/ + +# ----------------------------------------------------------------------------- +# Project Documentation & Planning (not needed in image) +# ----------------------------------------------------------------------------- +*.md.bak +ACME_STAGING_IMPLEMENTATION.md* ARCHITECTURE_PLAN.md BULK_ACL_FEATURE.md -DOCKER_TASKS.md +DOCKER_TASKS.md* DOCUMENTATION_POLISH_SUMMARY.md GHCR_MIGRATION_SUMMARY.md -ISSUE_*_IMPLEMENTATION.md +ISSUE_*_IMPLEMENTATION.md* PHASE_*_SUMMARY.md PROJECT_BOARD_SETUP.md PROJECT_PLANNING.md SECURITY_IMPLEMENTATION_PLAN.md VERSIONING_IMPLEMENTATION.md +QA_AUDIT_REPORT*.md +VERSION.md +eslint.config.js +go.work +go.work.sum diff --git a/.github/agents/Backend_Dev.agent.md b/.github/agents/Backend_Dev.agent.md index 49689d74..5b3400b9 100644 --- a/.github/agents/Backend_Dev.agent.md +++ b/.github/agents/Backend_Dev.agent.md @@ -1,4 +1,4 @@ -name: Backend_Dev +name: Backend Dev description: Senior Go Engineer focused on high-performance, secure backend implementation. argument-hint: The specific backend task from the Plan (e.g., "Implement ProxyHost CRUD endpoints") # ADDED 'list_dir' below so Step 1 works diff --git a/.github/agents/DevOps.agent.md b/.github/agents/DevOps.agent.md index 4e0ca575..2793d327 100644 --- a/.github/agents/DevOps.agent.md +++ b/.github/agents/DevOps.agent.md @@ -1,4 +1,4 @@ -name: Dev_Ops +name: Dev Ops description: DevOps specialist that debugs GitHub Actions, CI pipelines, and Docker builds. argument-hint: The workflow issue (e.g., "Why did the last build fail?" 
or "Fix the Docker push error") tools: ['run_terminal_command', 'read_file', 'write_file', 'search', 'list_dir'] diff --git a/.github/agents/Doc_Writer.agent.md b/.github/agents/Doc_Writer.agent.md index 79bd40e8..5c739cfe 100644 --- a/.github/agents/Doc_Writer.agent.md +++ b/.github/agents/Doc_Writer.agent.md @@ -1,4 +1,4 @@ -name: Docs_Writer +name: Docs Writer description: User Advocate and Writer focused on creating simple, layman-friendly documentation. argument-hint: The feature to document (e.g., "Write the guide for the new Real-Time Logs") tools: ['search', 'read_file', 'write_file', 'list_dir', 'changes'] @@ -20,6 +20,7 @@ Your goal is to translate "Engineer Speak" into simple, actionable instructions. - **ELI5 (Explain Like I'm 5)**: Use simple words. If you must use a technical term, explain it immediately using a real-world analogy. - **Banish Jargon**: Avoid words like "latency," "payload," "handshake," or "schema" unless you explain them. - **Focus on Action**: Structure text as: "Do this -> Get that result." +- **Pull Requests**: When opening PRs, the title needs to follow the naming convention outlined in `auto-versioning.md` to make sure new versions are generated correctly upon merge. diff --git a/.github/agents/Frontend_Dev.agent.md b/.github/agents/Frontend_Dev.agent.md index 97ecfd47..1e19e94e 100644 --- a/.github/agents/Frontend_Dev.agent.md +++ b/.github/agents/Frontend_Dev.agent.md @@ -1,4 +1,4 @@ -name: Frontend_Dev +name: Frontend Dev description: Senior React/UX Engineer focused on seamless user experiences and clean component architecture. argument-hint: The specific frontend task from the Plan (e.g., "Create Proxy Host Form") # ADDED 'list_dir' below so Step 1 works diff --git a/.github/agents/Managment.agent.md b/.github/agents/Managment.agent.md new file mode 100644 index 00000000..adfcad78 --- /dev/null +++ b/.github/agents/Managment.agent.md @@ -0,0 +1,50 @@ +name: Management +description: Engineering Director. 
Delegates ALL research and execution. DO NOT ask it to debug code directly. +argument-hint: The high-level goal (e.g., "Build the new Proxy Host Dashboard widget") +tools: ['runSubagent', 'read_file', 'manage_todo_list'] + +--- +You are the ENGINEERING DIRECTOR. +**YOUR OPERATING MODEL: AGGRESSIVE DELEGATION.** +You are "lazy" in the smartest way possible. You never do what a subordinate can do. + + +1. **Initialize**: ALWAYS read `.github/copilot-instructions.md` first to load global project rules. +2. **Team Roster**: + - `Planning`: The Architect. (Delegate research & planning here). + - `Backend_Dev`: The Engineer. (Delegate Go implementation here). + - `Frontend_Dev`: The Designer. (Delegate React implementation here). + - `QA_Security`: The Auditor. (Delegate verification here). + - `Docs_Writer`: The Scribe. (Delegate docs here). + + + +1. **Phase 1: Assessment & Delegation (NO RESEARCH)**: + - **Read Instructions**: Read `.github/copilot-instructions.md`. + - **Identify Goal**: Understand the user's request. + - **STOP**: Do not look at the code. Do not run `list_dir`. + - **Action**: Immediately call `Planning` subagent. + - *Prompt*: "Research the necessary files for '{user_request}' and write a comprehensive plan detailing as many specifics as possible to `docs/plans/current_spec.md`. Be an artist with directions and descriptions. Include file names, function names, and component names wherever possible." + +2. **Phase 2: Approval Gate**: + - **Read Plan**: Read `docs/plans/current_spec.md` (You are allowed to read Markdown). + - **Present**: Summarize the plan to the user. + - **Ask**: "Plan created. Shall I authorize the construction?" + +3. **Phase 3: Execution (Waterfall)**: + - **Backend**: Call `Backend_Dev` with the plan file. + - **Frontend**: Call `Frontend_Dev` with the plan file. + +4. **Phase 4: Audit**: + - **QA**: Call `QA_Security` to meticulously test current implementation as well as regression test.
Run all linting, security tasks, and manual pre-commit checks. Write a report to `docs/reports/qa_report.md`. Start back at Phase 1 if issues are found. +5. **Phase 5: Closure**: + - **Docs**: Call `Docs_Writer`. + - **Final Report**: Summarize the successful subagent runs. + + + +- **SOURCE CODE BAN**: You are FORBIDDEN from reading `.go`, `.tsx`, `.ts`, or `.css` files. You may ONLY read `.md` (Markdown) files. +- **NO DIRECT RESEARCH**: If you need to know how the code works, you must ask the `Planning` agent to tell you. +- **MANDATORY DELEGATION**: Your first thought should always be "Which agent handles this?", not "How do I solve this?" +- **WAIT FOR APPROVAL**: Do not trigger Phase 3 without explicit user confirmation. + diff --git a/.github/agents/Planning.agent.md b/.github/agents/Planning.agent.md index 7e998537..78a3ff8c 100644 --- a/.github/agents/Planning.agent.md +++ b/.github/agents/Planning.agent.md @@ -6,7 +6,7 @@ tools: ['search', 'runSubagent', 'usages', 'problems', 'changes', 'fetch', 'gith --- You are a PRINCIPAL SOFTWARE ARCHITECT and TECHNICAL PRODUCT MANAGER. -Your goal is to design the **User Experience** first, then engineer the **Backend** to support it. +Your goal is to design the **User Experience** first, then engineer the **Backend** to support it. Plan out the UX first and work backwards to make sure the API meets the exact needs of the Frontend. When you need a subagent to perform a task, use the `#runSubagent` tool. Specify the exact name of the subagent you want to use within the instruction 1. **Context Loading (CRITICAL)**: @@ -26,6 +26,7 @@ Your goal is to design the **User Experience** first, then engineer the **Backen 4. **Review**: - Ask the user for confirmation. 
+ diff --git a/.github/agents/QA_Security.agent.md b/.github/agents/QA_Security.agent.md index e0aa239b..95bc7998 100644 --- a/.github/agents/QA_Security.agent.md +++ b/.github/agents/QA_Security.agent.md @@ -1,4 +1,4 @@ -name: QA_Security +name: QA and Security description: Security Engineer and QA specialist focused on breaking the implementation. argument-hint: The feature or endpoint to audit (e.g., "Audit the new Proxy Host creation flow") tools: ['search', 'runSubagent', 'read_file', 'run_terminal_command', 'usages', 'write_file', 'list_dir', 'run_task'] @@ -10,7 +10,7 @@ Your job is to act as an ADVERSARY. The Developer says "it works"; your job is t - **Project**: Charon (Reverse Proxy) - **Priority**: Security, Input Validation, Error Handling. -- **Tools**: `go test`, `trivy` (if available), manual edge-case analysis. +- **Tools**: `go test`, `trivy` (if available), pre-commit, manual edge-case analysis. @@ -26,7 +26,7 @@ Your job is to act as an ADVERSARY. The Developer says "it works"; your job is t 3. **Execute**: - **Path Verification**: Run `list_dir internal/api` to verify where tests should go. - **Creation**: Write a new test file (e.g., `internal/api/tests/audit_test.go`) to test the *flow*. - - **Run**: Execute `go test ./internal/api/tests/...` (or specific path). Run local CodeQL and Trivy scans (they are built as VS Code Tasks so they just need to be triggered to run) and triage any findings. + - **Run**: Execute `go test ./internal/api/tests/...` (or specific path). Run local CodeQL and Trivy scans (they are built as VS Code Tasks so they just need to be triggered to run), pre-commit all files, and triage any findings. - **Cleanup**: If the test was temporary, delete it. If it's valuable, keep it. 
diff --git a/.github/agents/SubagentUsage.md b/.github/agents/SubagentUsage.md new file mode 100644 index 00000000..76185269 --- /dev/null +++ b/.github/agents/SubagentUsage.md @@ -0,0 +1,60 @@ +## Subagent Usage Templates and Orchestration + +This helper provides the Management agent with templates to create robust and repeatable `runSubagent` calls. + +1) Basic runSubagent Template +``` +runSubagent({ + prompt: "", + description: "", + metadata: { + plan_file: "docs/plans/current_spec.md", + files_to_change: ["..."], + commands_to_run: ["..."], + tests_to_run: ["..."], + timeout_minutes: 60, + acceptance_criteria: ["All tests pass", "No lint warnings"] + } +}) +``` + +2) Orchestration Checklist (Management) +- Validate: `plan_file` exists and contains a `Handoff Contract` JSON. +- Kickoff: call `Planning` to create the plan if not present. +- Run: execute `Backend Dev` then `Frontend Dev` sequentially. +- Parallel: run `QA and Security`, `DevOps` and `Doc Writer` in parallel for CI / QA checks and documentation. +- Return: a JSON summary with `subagent_results`, `overall_status`, and aggregated artifacts. + +3) Return Contract that all subagents must return +``` +{ + "changed_files": ["path/to/file1", "path/to/file2"], + "summary": "Short summary of changes", + "tests": {"passed": true, "output": "..."}, + "artifacts": ["..."], + "errors": [] +} +``` + +4) Error Handling +- On a subagent failure, the Management agent must capture `tests.output` and decide to retry (1 retry maximum), or request a revert/rollback. +- Clearly mark the `status` as `failed`, and include `errors` and `failing_tests` in the `summary`. + +5) Example: Run a full Feature Implementation +``` +// 1. Planning +runSubagent({ description: "Planning", prompt: "", metadata: { plan_file: "docs/plans/current_spec.md" } }) + +// 2. 
Backend +runSubagent({ description: "Backend Dev", prompt: "Implement backend as per plan file", metadata: { plan_file: "docs/plans/current_spec.md", commands_to_run: ["cd backend && go test ./..."] } }) + +// 3. Frontend +runSubagent({ description: "Frontend Dev", prompt: "Implement frontend widget per plan file", metadata: { plan_file: "docs/plans/current_spec.md", commands_to_run: ["cd frontend && npm run build"] } }) + +// 4. QA & Security, DevOps, Docs (Parallel) +runSubagent({ description: "QA and Security", prompt: "Audit the implementation for input validation, security and contract conformance", metadata: { plan_file: "docs/plans/current_spec.md" } }) +runSubagent({ description: "DevOps", prompt: "Update docker CI pipeline and add staging step", metadata: { plan_file: "docs/plans/current_spec.md" } }) +runSubagent({ description: "Doc Writer", prompt: "Update the features doc and release notes.", metadata: { plan_file: "docs/plans/current_spec.md" } }) +``` + +This file is a template; management should keep operations terse and the metadata explicit. Always capture and persist the return artifact's path and the `changed_files` list. 
diff --git a/.github/workflows/auto-versioning.yml b/.github/workflows/auto-versioning.yml index f8bb9b08..8f911d11 100644 --- a/.github/workflows/auto-versioning.yml +++ b/.github/workflows/auto-versioning.yml @@ -17,18 +17,24 @@ jobs: with: fetch-depth: 0 - - name: Generate semantic version (fallback script) + - name: Calculate Semantic Version id: semver - run: | - # Ensure git tags are fetched - git fetch --tags --quiet || true - # Get latest tag or default to v0.0.0 - TAG=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0") - echo "Detected latest tag: $TAG" - # Set outputs for downstream steps - echo "version=$TAG" >> $GITHUB_OUTPUT - echo "release_notes=Fallback: using latest tag only" >> $GITHUB_OUTPUT - echo "changed=false" >> $GITHUB_OUTPUT + uses: paulhatch/semantic-version@v5.4.0 + with: + # The prefix to use to create tags + tag_prefix: "v" + # A string which, if present in the git log, indicates that a major version increase is required + major_pattern: "(MAJOR)" + # A string which, if present in the git log, indicates that a minor version increase is required + minor_pattern: "(feat)" + # Pattern to determine formatting + version_format: "${major}.${minor}.${patch}" + # If no tags are found, this version is used + version_from_branch: "0.0.0" + # This helps it search through history to find the last tag + search_commit_body: true + # Important: This enables the output 'changed' which your other steps rely on + enable_prerelease_mode: false - name: Show version run: | @@ -96,7 +102,7 @@ jobs: with: tag_name: ${{ steps.determine_tag.outputs.tag }} name: Release ${{ steps.determine_tag.outputs.tag }} - body: ${{ steps.semver.outputs.release_notes }} + generate_release_notes: true make_latest: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 80f998d7..63785fb8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,10 @@ -# Python +# 
============================================================================= +# .gitignore - Files to exclude from version control +# ============================================================================= + +# ----------------------------------------------------------------------------- +# Python (pre-commit, tooling) +# ----------------------------------------------------------------------------- __pycache__/ *.py[cod] *$py.class @@ -14,25 +20,44 @@ ENV/ .hypothesis/ htmlcov/ +# ----------------------------------------------------------------------------- # Node/Frontend +# ----------------------------------------------------------------------------- node_modules/ frontend/node_modules/ backend/node_modules/ frontend/dist/ frontend/coverage/ +frontend/test-results/ frontend/.vite/ frontend/*.tsbuildinfo +frontend/frontend/ -# Go/Backend +# ----------------------------------------------------------------------------- +# Go/Backend - Build artifacts & coverage +# ----------------------------------------------------------------------------- backend/api +backend/bin/ backend/*.out backend/*.cover +backend/*.html backend/coverage/ -backend/coverage.*.out -backend/coverage_*.out +backend/coverage*.out +backend/coverage*.txt +backend/*.coverage.out +backend/handler_coverage.txt +backend/handlers.out +backend/services.test +backend/test-output.txt +backend/tr_no_cover.txt +backend/nohup.out backend/charon +backend/codeql-db/ +backend/.venv/ +# ----------------------------------------------------------------------------- # Databases +# ----------------------------------------------------------------------------- *.db *.sqlite *.sqlite3 @@ -42,80 +67,105 @@ backend/cmd/api/data/*.db cpm.db charon.db -# IDE +# ----------------------------------------------------------------------------- +# IDE & Editor +# ----------------------------------------------------------------------------- .idea/ *.swp *.swo *~ .DS_Store +*.xcf +.vscode/launch.json +.vscode.backup*/ - -# 
Logs -.trivy_logs +# ----------------------------------------------------------------------------- +# Logs & Temp Files +# ----------------------------------------------------------------------------- +.trivy_logs/ *.log logs/ npm-debug.log* yarn-debug.log* yarn-error.log* +nohup.out -# Environment +# ----------------------------------------------------------------------------- +# Environment Files +# ----------------------------------------------------------------------------- .env .env.* !.env.example -# OS +# ----------------------------------------------------------------------------- +# OS Files +# ----------------------------------------------------------------------------- Thumbs.db -*.xcf -# Caddy +# ----------------------------------------------------------------------------- +# Caddy Runtime Data +# ----------------------------------------------------------------------------- backend/data/caddy/ +data/ -# Docker +# ----------------------------------------------------------------------------- +# Docker Overrides +# ----------------------------------------------------------------------------- docker-compose.override.yml +# ----------------------------------------------------------------------------- # GoReleaser +# ----------------------------------------------------------------------------- dist/ -# Testing +# ----------------------------------------------------------------------------- +# Testing & Coverage +# ----------------------------------------------------------------------------- coverage/ coverage.out *.xml -.trivy_logs/ -.trivy_logs/trivy-report.txt -backend/coverage.txt - -# CodeQL -codeql-db/ -codeql-results.sarif -**.sarif -codeql-results-js.sarif -codeql-results-go.sarif *.crdownload -.vscode/launch.json -# More CodeQL/analysis artifacts and DBs +# ----------------------------------------------------------------------------- +# CodeQL & Security Scanning +# ----------------------------------------------------------------------------- 
+codeql-db/ codeql-db-*/ -codeql-db-js/ -codeql-db-go/ +codeql-agent-results/ +codeql-custom-queries-*/ +codeql-results*.sarif codeql-*.sarif +*.sarif .codeql/ .codeql/** -# Scripts (project-specific) +# ----------------------------------------------------------------------------- +# Scripts & Temp Files (project-specific) +# ----------------------------------------------------------------------------- create_issues.sh cookies.txt +cookies.txt.bak +test.caddyfile -# Project Documentation (keep important docs, ignore implementation notes) -ACME_STAGING_IMPLEMENTATION.md +# ----------------------------------------------------------------------------- +# Project Documentation (implementation notes - not needed in repo) +# ----------------------------------------------------------------------------- +*.md.bak +ACME_STAGING_IMPLEMENTATION.md* ARCHITECTURE_PLAN.md -BULK_ACL_FEATURE.md -DOCKER_TASKS.md +DOCKER_TASKS.md* DOCUMENTATION_POLISH_SUMMARY.md GHCR_MIGRATION_SUMMARY.md -ISSUE_*_IMPLEMENTATION.md +ISSUE_*_IMPLEMENTATION.md* PHASE_*_SUMMARY.md PROJECT_BOARD_SETUP.md PROJECT_PLANNING.md -SECURITY_IMPLEMENTATION_PLAN.md VERSIONING_IMPLEMENTATION.md backend/internal/api/handlers/import_handler.go.bak + +# ----------------------------------------------------------------------------- +# Import Directory (user uploads) +# ----------------------------------------------------------------------------- +import/ +test-results/charon.hatfieldhosted.com.har +test-results/local.har diff --git a/.vscode/settings.json b/.vscode/settings.json index dc0405a6..652fc6a4 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -8,24 +8,22 @@ ] , "gopls": { - "buildFlags": ["-tags=ignore", "-mod=mod"], "env": { "GOWORK": "off", "GOFLAGS": "-mod=mod", - "GOTOOLCHAIN": "none" + "GOTOOLCHAIN": "auto" }, "directoryFilters": [ "-**/pkg/mod/**", "-**/go/pkg/mod/**", - "-**/root/go/pkg/mod/**", - "-**/golang.org/toolchain@**" + "-**/root/go/pkg/mod/**" ] }, "go.buildFlags": 
["-tags=ignore", "-mod=mod"], "go.toolsEnvVars": { "GOWORK": "off", "GOFLAGS": "-mod=mod", - "GOTOOLCHAIN": "none" + "GOTOOLCHAIN": "auto" }, "files.watcherExclude": { "**/pkg/mod/**": true, @@ -39,5 +37,7 @@ }, "githubPullRequests.ignoredPullRequestBranches": [ "main" - ] + ], + // Toggle workspace-specific keybindings (used by .vscode/keybindings.json) + "charon.workspaceKeybindingsEnabled": true } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 1231d3cd..e4001a6b 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -216,3 +216,73 @@ }, "problemMatcher": ["$go"] } + , + { + "label": "Frontend: Lint Fix", + "type": "shell", + "command": "cd frontend && npm run lint -- --fix", + "group": "test", + "presentation": { + "reveal": "always", + "panel": "shared" + }, + "problemMatcher": [] + }, + { + "label": "Lint: GolangCI-Lint Fix", + "type": "shell", + "command": "cd backend && docker run --rm -v $(pwd):/app:rw -w /app golangci/golangci-lint:latest golangci-lint run --fix -v", + "group": "test", + "presentation": { + "reveal": "always", + "panel": "new" + }, + "problemMatcher": ["$go"] + }, + { + "label": "Frontend: Run All Tests & Scans", + "dependsOn": [ + "Frontend: Type Check", + "Frontend: Test Coverage", + "Run CodeQL Scan (Local)" + ], + "dependsOrder": "sequence", + "group": "test", + "presentation": { + "reveal": "always", + "panel": "shared" + } + }, + { + "label": "Backend: Run All Tests & Scans", + "dependsOn": [ + "Backend: Go Test Coverage", + "Backend: Run Benchmarks (Quick)", + "Run Security Scan (govulncheck)", + "Lint: GolangCI-Lint", + "Lint: Go Race Detector" + ], + "dependsOrder": "sequence", + "group": "test", + "presentation": { + "reveal": "always", + "panel": "new" + } + }, + { + "label": "Lint: Apply Fixes", + "dependsOn": [ + "Frontend: Lint Fix", + "Lint: GolangCI-Lint Fix", + "Lint: Hadolint (Dockerfile)", + "Run Pre-commit (Staged Files)" + ], + "dependsOrder": "sequence", + "group": "test", + "presentation": { + 
"reveal": "always", + "panel": "new" + } + } + ] + } diff --git a/BULK_ACL_FEATURE.md b/BULK_ACL_FEATURE.md new file mode 100644 index 00000000..0eebe8fb --- /dev/null +++ b/BULK_ACL_FEATURE.md @@ -0,0 +1,177 @@ +# Bulk ACL Application Feature + +## Overview +Implemented a bulk ACL (Access Control List) application feature that allows users to quickly apply or remove access lists from multiple proxy hosts at once, eliminating the need to edit each host individually. + +## User Workflow Improvements + +### Previous Workflow (Manual) +1. Create proxy hosts +2. Create access list +3. **Edit each host individually** to apply the ACL (tedious for many hosts) + +### New Workflow (Bulk) +1. Create proxy hosts +2. Create access list +3. **Select multiple hosts** → Bulk Actions → Apply/Remove ACL (one operation) + +## Implementation Details + +### Backend (`backend/internal/api/handlers/proxy_host_handler.go`) + +**New Endpoint**: `PUT /api/v1/proxy-hosts/bulk-update-acl` + +**Request Body**: +```json +{ + "host_uuids": ["uuid-1", "uuid-2", "uuid-3"], + "access_list_id": 42 // or null to remove ACL +} +``` + +**Response**: +```json +{ + "updated": 2, + "errors": [ + {"uuid": "uuid-3", "error": "proxy host not found"} + ] +} +``` + +**Features**: +- Updates multiple hosts in a single database transaction +- Applies Caddy config once for all updates (efficient) +- Partial failure handling (returns both successes and errors) +- Validates host existence before applying ACL +- Supports both applying and removing ACLs (null = remove) + +### Frontend + +#### API Client (`frontend/src/api/proxyHosts.ts`) +```typescript +export const bulkUpdateACL = async ( + hostUUIDs: string[], + accessListID: number | null +): Promise +``` + +#### React Query Hook (`frontend/src/hooks/useProxyHosts.ts`) +```typescript +const { bulkUpdateACL, isBulkUpdating } = useProxyHosts() + +// Usage +await bulkUpdateACL(['uuid-1', 'uuid-2'], 42) // Apply ACL 42 +await bulkUpdateACL(['uuid-1', 'uuid-2'], 
null) // Remove ACL +``` + +#### UI Components (`frontend/src/pages/ProxyHosts.tsx`) + +**Multi-Select Checkboxes**: +- Checkbox column added to proxy hosts table +- "Select All" checkbox in table header +- Individual checkboxes per row + +**Bulk Actions UI**: +- "Bulk Actions" button appears when hosts are selected +- Shows count of selected hosts +- Opens modal with ACL selection dropdown + +**Modal Features**: +- Lists all enabled access lists +- "Remove Access List" option (sets null) +- Real-time feedback on success/failure +- Toast notifications for user feedback + +## Testing + +### Backend Tests (`proxy_host_handler_test.go`) +- ✅ `TestProxyHostHandler_BulkUpdateACL_Success` - Apply ACL to multiple hosts +- ✅ `TestProxyHostHandler_BulkUpdateACL_RemoveACL` - Remove ACL (null value) +- ✅ `TestProxyHostHandler_BulkUpdateACL_PartialFailure` - Mixed success/failure +- ✅ `TestProxyHostHandler_BulkUpdateACL_EmptyUUIDs` - Validation error +- ✅ `TestProxyHostHandler_BulkUpdateACL_InvalidJSON` - Malformed request + +### Frontend Tests +**API Tests** (`proxyHosts-bulk.test.ts`): +- ✅ Apply ACL to multiple hosts +- ✅ Remove ACL with null value +- ✅ Handle partial failures +- ✅ Handle empty host list +- ✅ Propagate API errors + +**Hook Tests** (`useProxyHosts-bulk.test.tsx`): +- ✅ Apply ACL via mutation +- ✅ Remove ACL via mutation +- ✅ Query invalidation after success +- ✅ Error handling +- ✅ Loading state tracking + +**Test Results**: +- Backend: All tests passing (106+ tests) +- Frontend: All tests passing (132 tests) + +## Usage Examples + +### Example 1: Apply ACL to Multiple Hosts +```typescript +// Select hosts in UI +setSelectedHosts(new Set(['host-1-uuid', 'host-2-uuid', 'host-3-uuid'])) + +// User clicks "Bulk Actions" → Selects ACL from dropdown +await bulkUpdateACL(['host-1-uuid', 'host-2-uuid', 'host-3-uuid'], 5) + +// Result: "Access list applied to 3 host(s)" +``` + +### Example 2: Remove ACL from Hosts +```typescript +// User selects "Remove Access List" 
from dropdown +await bulkUpdateACL(['host-1-uuid', 'host-2-uuid'], null) + +// Result: "Access list removed from 2 host(s)" +``` + +### Example 3: Partial Failure Handling +```typescript +const result = await bulkUpdateACL(['valid-uuid', 'invalid-uuid'], 10) + +// result = { +// updated: 1, +// errors: [{ uuid: 'invalid-uuid', error: 'proxy host not found' }] +// } + +// Toast: "Updated 1 host(s), 1 failed" +``` + +## Benefits + +1. **Time Savings**: Apply ACLs to dozens of hosts in one click vs. editing each individually +2. **User-Friendly**: Clear visual feedback with checkboxes and selection count +3. **Error Resilient**: Partial failures don't block the entire operation +4. **Efficient**: Single Caddy config reload for all updates +5. **Flexible**: Supports both applying and removing ACLs +6. **Well-Tested**: Comprehensive test coverage for all scenarios + +## Future Enhancements (Optional) + +- Add bulk ACL application from Access Lists page (when creating/editing ACL) +- Bulk enable/disable hosts +- Bulk delete hosts +- Bulk certificate assignment +- Filter hosts before selection (e.g., "Select all hosts without ACL") + +## Related Files Modified + +### Backend +- `backend/internal/api/handlers/proxy_host_handler.go` (+73 lines) +- `backend/internal/api/handlers/proxy_host_handler_test.go` (+140 lines) + +### Frontend +- `frontend/src/api/proxyHosts.ts` (+19 lines) +- `frontend/src/hooks/useProxyHosts.ts` (+11 lines) +- `frontend/src/pages/ProxyHosts.tsx` (+95 lines) +- `frontend/src/api/__tests__/proxyHosts-bulk.test.ts` (+93 lines, new file) +- `frontend/src/hooks/__tests__/useProxyHosts-bulk.test.tsx` (+149 lines, new file) + +**Total**: ~580 lines added (including tests) diff --git a/Dockerfile b/Dockerfile index 84028be8..be85a870 100644 --- a/Dockerfile +++ b/Dockerfile @@ -122,6 +122,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ --with github.com/corazawaf/coraza-caddy/v2 \ --with github.com/hslatman/caddy-crowdsec-bouncer \ --with 
github.com/zhangjiayin/caddy-geoip2 \ + --with github.com/mholt/caddy-ratelimit \ --output /tmp/caddy-temp || true; \ # Find the build directory BUILDDIR=$(ls -td /tmp/buildenv_* 2>/dev/null | head -1); \ @@ -151,6 +152,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ --with github.com/corazawaf/coraza-caddy/v2 \ --with github.com/hslatman/caddy-crowdsec-bouncer \ --with github.com/zhangjiayin/caddy-geoip2 \ + --with github.com/mholt/caddy-ratelimit \ --output /usr/bin/caddy; \ fi; \ rm -rf /tmp/buildenv_* /tmp/caddy-temp; \ diff --git a/Makefile b/Makefile index cefd5d23..d260af2d 100644 --- a/Makefile +++ b/Makefile @@ -29,6 +29,16 @@ install: @echo "Installing frontend dependencies..." cd frontend && npm install +# Install Go 1.25.5 system-wide and setup GOPATH/bin +install-go: + @echo "Installing Go 1.25.5 and gopls (requires sudo)" + sudo ./scripts/install-go-1.25.5.sh + +# Clear Go and gopls caches +clear-go-cache: + @echo "Clearing Go and gopls caches" + ./scripts/clear-go-cache.sh + # Run all tests test: @echo "Running backend tests..." diff --git a/SECURITY_IMPLEMENTATION_PLAN.md b/SECURITY_IMPLEMENTATION_PLAN.md new file mode 100644 index 00000000..1909458d --- /dev/null +++ b/SECURITY_IMPLEMENTATION_PLAN.md @@ -0,0 +1,113 @@ +# Security Services Implementation Plan + +## Overview +This document outlines the plan to implement a modular Security Dashboard in Charon (previously 'CPM+'). The goal is to provide optional, high-value security integrations (CrowdSec, WAF, ACLs, Rate Limiting) while keeping the core Docker image lightweight. + +## Core Philosophy +1. **Optionality**: All security services are disabled by default. +2. **Environment Driven**: Activation is controlled via `CHARON_SECURITY_*` environment variables (legacy `CPM_SECURITY_*` names supported for backward compatibility). +3. **Minimal Footprint**: + * Lightweight Caddy modules (WAF, Bouncers) are compiled into the binary (negligible size impact). 
+  * Heavy standalone agents (e.g., CrowdSec Agent) are only installed at runtime if explicitly enabled in "Local" mode.
+4. **Unified Dashboard**: A single pane of glass in the UI to view status and configuration.
+
+---
+
+## 1. Environment Variables
+We will introduce a new set of environment variables to control these services.
+
+| Variable | Values | Description |
+| :--- | :--- | :--- |
+| `CHARON_SECURITY_CROWDSEC_MODE` (legacy `CPM_SECURITY_CROWDSEC_MODE`) | `disabled` (default), `local`, `external` | `local` installs agent inside container; `external` uses remote agent. |
+| `CHARON_SECURITY_CROWDSEC_API_URL` (legacy `CPM_SECURITY_CROWDSEC_API_URL`) | URL (e.g., `http://crowdsec:8080`) | Required if mode is `external`. |
+| `CHARON_SECURITY_CROWDSEC_API_KEY` (legacy `CPM_SECURITY_CROWDSEC_API_KEY`) | String | Required if mode is `external`. |
+| `CHARON_SECURITY_WAF_MODE` (legacy `CPM_SECURITY_WAF_MODE`) | `disabled` (default), `enabled` | Enables Coraza WAF with OWASP Core Rule Set (CRS). |
+| `CHARON_SECURITY_RATELIMIT_MODE` (legacy `CPM_SECURITY_RATELIMIT_MODE`) | `disabled` (default), `enabled` | Enables global rate limiting controls. |
+| `CHARON_SECURITY_ACL_MODE` (legacy `CPM_SECURITY_ACL_MODE`) | `disabled` (default), `enabled` | Enables IP-based Access Control Lists. |
+
+---
+
+## 2. Backend Implementation
+
+### A. Dockerfile Updates
+We need to compile the necessary Caddy modules into our binary. This adds minimal size overhead but enables the features natively.
+* **Action**: Update `Dockerfile` `caddy-builder` stage to include:
+  * `github.com/corazawaf/coraza-caddy/v2` (WAF)
+  * `github.com/hslatman/caddy-crowdsec-bouncer` (CrowdSec Bouncer) and `github.com/mholt/caddy-ratelimit` (Rate Limiting)
+
+### B. Configuration Management (`internal/config`)
+* **Action**: Update `Config` struct to parse `CHARON_SECURITY_*` variables while still accepting `CPM_SECURITY_*` as legacy fallbacks.
+* **Action**: Create `SecurityConfig` struct to hold these values.
+
+### C. 
Runtime Installation (`docker-entrypoint.sh`) +To satisfy the "install locally" requirement for CrowdSec without bloating the image: +* **Action**: Modify `docker-entrypoint.sh` to check `CHARON_SECURITY_CROWDSEC_MODE` (and fallback to `CPM_SECURITY_CROWDSEC_MODE`). +* **Logic**: If `local`, execute `apk add --no-cache crowdsec` (and dependencies) before starting the app. This keeps the base image small for users who don't use it. + +### D. API Endpoints (`internal/api`) +* **New Endpoint**: `GET /api/v1/security/status` + * Returns the enabled/disabled state of each service. + * Returns basic metrics if available (e.g., "WAF: Active", "CrowdSec: Connected"). + +--- + +## 3. Frontend Implementation + +### A. Navigation +* **Action**: Add "Security" item to the Sidebar in `Layout.tsx`. + +### B. Security Dashboard (`src/pages/Security.tsx`) +* **Layout**: Grid of cards representing each service. +* **Empty State**: If all services are disabled, show a clean "Security Not Enabled" state with a link to the GitHub Pages documentation on how to enable them. + +### C. Service Cards +1. **CrowdSec Card**: + * **Status**: Active (Local/External) / Disabled. + * **Content**: If Local, show basic stats (last push, alerts). If External, show connection status. + * **Action**: Link to CrowdSec Console or Dashboard. +2. **WAF Card**: + * **Status**: Active / Disabled. + * **Content**: "OWASP CRS Loaded". +3. **Access Control Lists (ACL)**: + * **Status**: Active / Disabled. + * **Action**: "Manage Blocklists" (opens modal/page to edit IP lists). +4. **Rate Limiting**: + * **Status**: Active / Disabled. + * **Action**: "Configure Limits" (opens modal to set global requests/second). + +--- + +## 4. Service-Specific Logic + +### CrowdSec +* **Local**: + * Installs CrowdSec agent via `apk`. + * Generates `acquis.yaml` to read Caddy logs. + * Configures Caddy bouncer to talk to `localhost:8080`. 
+* **External**: + * Configures Caddy bouncer to talk to `CPM_SECURITY_CROWDSEC_API_URL`. + +### WAF (Coraza) +* **Implementation**: + * When enabled, inject `coraza_waf` directive into the global Caddyfile or per-host. + * Use default OWASP Core Rule Set (CRS). + +### IP ACLs +* **Implementation**: + * Create a snippet `(ip_filter)` in Caddyfile. + * Use `@matcher` with `remote_ip` to block/allow IPs. + * UI allows adding CIDR ranges to this list. + +### Rate Limiting +* **Implementation**: + * Use `rate_limit` directive. + * Allow user to define "zones" (e.g., API, Static) in the UI. + +--- + +## 5. Documentation +* **New Doc**: `docs/security.md` +* **Content**: + * Explanation of each service. + * How to configure Env Vars. + * Trade-offs of "Local" CrowdSec (startup time vs convenience). diff --git a/backend/go.mod b/backend/go.mod index 53db9249..cd482b42 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -5,6 +5,7 @@ go 1.25.5 require ( github.com/containrrr/shoutrrr v0.8.0 github.com/docker/docker v28.5.2+incompatible + github.com/gin-contrib/gzip v1.2.5 github.com/gin-gonic/gin v1.11.0 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/uuid v1.6.0 @@ -21,7 +22,8 @@ require ( require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.1 // indirect github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.6 // indirect @@ -34,15 +36,16 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect github.com/gin-contrib/sse v1.1.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr 
v1.2.2 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.27.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/goccy/go-yaml v1.18.0 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -68,7 +71,7 @@ require ( github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.54.1 // indirect + github.com/quic-go/quic-go v0.55.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.3.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect @@ -77,9 +80,8 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect go.opentelemetry.io/otel/metric v1.38.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/mock v0.5.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/arch v0.20.0 // indirect + golang.org/x/arch v0.22.0 // indirect golang.org/x/mod v0.29.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/sync v0.18.0 // indirect @@ -87,7 +89,7 @@ require ( golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.14.0 // indirect golang.org/x/tools v0.38.0 // indirect - google.golang.org/protobuf v1.36.9 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 516d18a8..17e69a8f 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,11 +1,17 @@ +cloud.google.com/go/compute v1.14.0/go.mod 
h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= -github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w= +github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc= github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= @@ -20,8 +26,10 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 
h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec= github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -37,8 +45,12 @@ github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= -github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gin-contrib/gzip v1.2.5 h1:fIZs0S+l17pIu1P5XRJOo/YNqfIuPCrZZ3TWB7pjckI= +github.com/gin-contrib/gzip v1.2.5/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw= github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= github.com/gin-contrib/sse v1.1.0/go.mod 
h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= @@ -54,14 +66,15 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= -github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= @@ -75,14 +88,20 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= @@ -95,6 +114,7 @@ github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -102,6 +122,7 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= @@ -119,6 +140,7 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo/v2 v2.9.5 
h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= @@ -143,17 +165,26 @@ github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzM github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= -github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= +github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -161,10 +192,13 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= @@ -187,18 +221,19 @@ go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOV go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI= +golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -206,23 +241,27 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf 
v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -234,3 +273,4 @@ gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/internal/api/handlers/certificate_handler.go b/backend/internal/api/handlers/certificate_handler.go index c9cacc76..24c3ab67 100644 --- a/backend/internal/api/handlers/certificate_handler.go +++ b/backend/internal/api/handlers/certificate_handler.go @@ -4,9 +4,12 @@ import ( "fmt" "net/http" "strconv" + "sync" + "time" "github.com/gin-gonic/gin" + "github.com/Wikid82/charon/backend/internal/logger" "github.com/Wikid82/charon/backend/internal/services" "github.com/Wikid82/charon/backend/internal/util" ) @@ -18,26 +21,32 @@ type BackupServiceInterface interface { DeleteBackup(filename string) error GetBackupPath(filename string) (string, error) 
RestoreBackup(filename string) error + GetAvailableSpace() (int64, error) } type CertificateHandler struct { service *services.CertificateService backupService BackupServiceInterface notificationService *services.NotificationService + // Rate limiting for notifications + notificationMu sync.Mutex + lastNotificationTime map[uint]time.Time } func NewCertificateHandler(service *services.CertificateService, backupService BackupServiceInterface, ns *services.NotificationService) *CertificateHandler { return &CertificateHandler{ - service: service, - backupService: backupService, - notificationService: ns, + service: service, + backupService: backupService, + notificationService: ns, + lastNotificationTime: make(map[uint]time.Time), } } func (h *CertificateHandler) List(c *gin.Context) { certs, err := h.service.ListCertificates() if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + logger.Log().WithError(err).Error("failed to list certificates") + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list certificates"}) return } @@ -98,7 +107,8 @@ func (h *CertificateHandler) Upload(c *gin.Context) { cert, err := h.service.UploadCertificate(name, certPEM, keyPEM) if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + logger.Log().WithError(err).Error("failed to upload certificate") + c.JSON(http.StatusBadRequest, gin.H{"error": "failed to upload certificate"}) return } @@ -127,9 +137,16 @@ func (h *CertificateHandler) Delete(c *gin.Context) { return } + // Validate ID range + if id == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid id"}) + return + } + // Check if certificate is in use before proceeding inUse, err := h.service.IsCertificateInUse(uint(id)) if err != nil { + logger.Log().WithError(err).WithField("certificate_id", id).Error("failed to check certificate usage") c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to check certificate usage"}) return } @@ -140,7 
+157,17 @@ func (h *CertificateHandler) Delete(c *gin.Context) { // Create backup before deletion if h.backupService != nil { + // Check disk space before backup (require at least 100MB free) + if availableSpace, err := h.backupService.GetAvailableSpace(); err != nil { + logger.Log().WithError(err).Warn("unable to check disk space, proceeding with backup") + } else if availableSpace < 100*1024*1024 { + logger.Log().WithField("available_bytes", availableSpace).Warn("low disk space, skipping backup") + c.JSON(http.StatusInsufficientStorage, gin.H{"error": "insufficient disk space for backup"}) + return + } + if _, err := h.backupService.CreateBackup(); err != nil { + logger.Log().WithError(err).Error("failed to create backup before deletion") c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create backup before deletion"}) return } @@ -152,21 +179,31 @@ func (h *CertificateHandler) Delete(c *gin.Context) { c.JSON(http.StatusConflict, gin.H{"error": "certificate is in use by one or more proxy hosts"}) return } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + logger.Log().WithError(err).WithField("certificate_id", id).Error("failed to delete certificate") + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete certificate"}) return } - // Send Notification + // Send Notification with rate limiting (1 per cert per 10 seconds) if h.notificationService != nil { - h.notificationService.SendExternal(c.Request.Context(), - "cert", - "Certificate Deleted", - fmt.Sprintf("Certificate ID %d deleted", id), - map[string]interface{}{ - "ID": id, - "Action": "deleted", - }, - ) + h.notificationMu.Lock() + lastTime, exists := h.lastNotificationTime[uint(id)] + if !exists || time.Since(lastTime) > 10*time.Second { + h.lastNotificationTime[uint(id)] = time.Now() + h.notificationMu.Unlock() + h.notificationService.SendExternal(c.Request.Context(), + "cert", + "Certificate Deleted", + fmt.Sprintf("Certificate ID %d deleted", id), 
+ map[string]interface{}{ + "ID": id, + "Action": "deleted", + }, + ) + } else { + h.notificationMu.Unlock() + logger.Log().WithField("certificate_id", id).Debug("notification rate limited") + } } c.JSON(http.StatusOK, gin.H{"message": "certificate deleted"}) diff --git a/backend/internal/api/handlers/certificate_handler_coverage_test.go b/backend/internal/api/handlers/certificate_handler_coverage_test.go index 21f93025..f6a00be7 100644 --- a/backend/internal/api/handlers/certificate_handler_coverage_test.go +++ b/backend/internal/api/handlers/certificate_handler_coverage_test.go @@ -21,6 +21,7 @@ func TestCertificateHandler_List_DBError(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.GET("/api/certificates", h.List) @@ -38,6 +39,7 @@ func TestCertificateHandler_Delete_InvalidID(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.DELETE("/api/certificates/:id", h.Delete) @@ -56,6 +58,7 @@ func TestCertificateHandler_Delete_NotFound(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.DELETE("/api/certificates/:id", h.Delete) @@ -78,6 +81,7 @@ func TestCertificateHandler_Delete_NoBackupService(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) // Wait for background sync goroutine to complete to avoid race with -race flag // NewCertificateService spawns a goroutine that immediately queries the DB @@ -115,6 +119,7 @@ func TestCertificateHandler_Delete_CheckUsageDBError(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", 
db) h := NewCertificateHandler(svc, nil, nil) r.DELETE("/api/certificates/:id", h.Delete) @@ -137,6 +142,7 @@ func TestCertificateHandler_List_WithCertificates(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.GET("/api/certificates", h.List) diff --git a/backend/internal/api/handlers/certificate_handler_security_test.go b/backend/internal/api/handlers/certificate_handler_security_test.go new file mode 100644 index 00000000..351098b8 --- /dev/null +++ b/backend/internal/api/handlers/certificate_handler_security_test.go @@ -0,0 +1,208 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "gorm.io/driver/sqlite" + "gorm.io/gorm" + + "github.com/Wikid82/charon/backend/internal/models" + "github.com/Wikid82/charon/backend/internal/services" +) + +// TestCertificateHandler_Delete_RequiresAuth tests that delete requires authentication +func TestCertificateHandler_Delete_RequiresAuth(t *testing.T) { + db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), &gorm.Config{}) + if err != nil { + t.Fatalf("failed to open db: %v", err) + } + + if err := db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{}); err != nil { + t.Fatalf("failed to migrate: %v", err) + } + + gin.SetMode(gin.TestMode) + r := gin.New() + // Add a middleware that rejects all unauthenticated requests + r.Use(func(c *gin.Context) { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) + }) + svc := services.NewCertificateService("/tmp", db) + h := NewCertificateHandler(svc, nil, nil) + r.DELETE("/api/certificates/:id", h.Delete) + + req := httptest.NewRequest(http.MethodDelete, "/api/certificates/1", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("expected 401 Unauthorized 
without auth, got %d", w.Code) + } +} + +// TestCertificateHandler_List_RequiresAuth tests that list requires authentication +func TestCertificateHandler_List_RequiresAuth(t *testing.T) { + db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), &gorm.Config{}) + if err != nil { + t.Fatalf("failed to open db: %v", err) + } + + if err := db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{}); err != nil { + t.Fatalf("failed to migrate: %v", err) + } + + gin.SetMode(gin.TestMode) + r := gin.New() + // Add a middleware that rejects all unauthenticated requests + r.Use(func(c *gin.Context) { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) + }) + svc := services.NewCertificateService("/tmp", db) + h := NewCertificateHandler(svc, nil, nil) + r.GET("/api/certificates", h.List) + + req := httptest.NewRequest(http.MethodGet, "/api/certificates", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("expected 401 Unauthorized without auth, got %d", w.Code) + } +} + +// TestCertificateHandler_Upload_RequiresAuth tests that upload requires authentication +func TestCertificateHandler_Upload_RequiresAuth(t *testing.T) { + db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), &gorm.Config{}) + if err != nil { + t.Fatalf("failed to open db: %v", err) + } + + if err := db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{}); err != nil { + t.Fatalf("failed to migrate: %v", err) + } + + gin.SetMode(gin.TestMode) + r := gin.New() + // Add a middleware that rejects all unauthenticated requests + r.Use(func(c *gin.Context) { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) + }) + svc := services.NewCertificateService("/tmp", db) + h := NewCertificateHandler(svc, nil, nil) + r.POST("/api/certificates", h.Upload) + + req := httptest.NewRequest(http.MethodPost, "/api/certificates", 
nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("expected 401 Unauthorized without auth, got %d", w.Code) + } +} + +// TestCertificateHandler_Delete_DiskSpaceCheck tests the disk space check before backup +func TestCertificateHandler_Delete_DiskSpaceCheck(t *testing.T) { + db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), &gorm.Config{}) + if err != nil { + t.Fatalf("failed to open db: %v", err) + } + + if err := db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{}); err != nil { + t.Fatalf("failed to migrate: %v", err) + } + + // Create a certificate + cert := models.SSLCertificate{ + UUID: "test-cert", + Name: "test", + Provider: "custom", + Domains: "test.com", + } + if err := db.Create(&cert).Error; err != nil { + t.Fatalf("failed to create cert: %v", err) + } + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(mockAuthMiddleware()) + svc := services.NewCertificateService("/tmp", db) + + // Mock backup service that reports low disk space + mockBackup := &mockBackupService{ + availableSpaceFunc: func() (int64, error) { + return 50 * 1024 * 1024, nil // 50MB (less than 100MB required) + }, + } + + h := NewCertificateHandler(svc, mockBackup, nil) + r.DELETE("/api/certificates/:id", h.Delete) + + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/api/certificates/%d", cert.ID), nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + if w.Code != http.StatusInsufficientStorage { + t.Fatalf("expected 507 Insufficient Storage with low disk space, got %d", w.Code) + } +} + +// TestCertificateHandler_Delete_NotificationRateLimiting tests rate limiting +func TestCertificateHandler_Delete_NotificationRateLimiting(t *testing.T) { + db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), &gorm.Config{}) + if err != nil { + t.Fatalf("failed to open db: %v", err) + } + + if err := 
db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{}); err != nil { + t.Fatalf("failed to migrate: %v", err) + } + + // Create certificates + cert1 := models.SSLCertificate{UUID: "test-1", Name: "test1", Provider: "custom", Domains: "test1.com"} + cert2 := models.SSLCertificate{UUID: "test-2", Name: "test2", Provider: "custom", Domains: "test2.com"} + if err := db.Create(&cert1).Error; err != nil { + t.Fatalf("failed to create cert1: %v", err) + } + if err := db.Create(&cert2).Error; err != nil { + t.Fatalf("failed to create cert2: %v", err) + } + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(mockAuthMiddleware()) + svc := services.NewCertificateService("/tmp", db) + + mockBackup := &mockBackupService{ + createFunc: func() (string, error) { + return "backup.zip", nil + }, + } + + h := NewCertificateHandler(svc, mockBackup, nil) + r.DELETE("/api/certificates/:id", h.Delete) + + // Delete first cert + req1 := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/api/certificates/%d", cert1.ID), nil) + w1 := httptest.NewRecorder() + r.ServeHTTP(w1, req1) + + if w1.Code != http.StatusOK { + t.Fatalf("first delete failed: got %d", w1.Code) + } + + // Delete second cert (different ID, should not be rate limited) + req2 := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/api/certificates/%d", cert2.ID), nil) + w2 := httptest.NewRecorder() + r.ServeHTTP(w2, req2) + + if w2.Code != http.StatusOK { + t.Fatalf("second delete failed: got %d", w2.Code) + } + + // The test passes if both deletions succeed + // Rate limiting is per-certificate ID, so different certs should not interfere +} diff --git a/backend/internal/api/handlers/certificate_handler_test.go b/backend/internal/api/handlers/certificate_handler_test.go index 4b5f6e55..b72a4080 100644 --- a/backend/internal/api/handlers/certificate_handler_test.go +++ b/backend/internal/api/handlers/certificate_handler_test.go @@ -24,10 +24,20 @@ import ( "github.com/Wikid82/charon/backend/internal/services" ) +// 
mockAuthMiddleware adds a mock user to the context for testing +func mockAuthMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + c.Set("user", map[string]interface{}{"id": 1, "username": "testuser"}) + c.Next() + } +} + func setupCertTestRouter(t *testing.T, db *gorm.DB) *gin.Engine { t.Helper() gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) @@ -92,6 +102,7 @@ func TestDeleteCertificate_CreatesBackup(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) // Mock BackupService @@ -145,6 +156,7 @@ func TestDeleteCertificate_BackupFailure(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) // Mock BackupService that fails @@ -198,6 +210,7 @@ func TestDeleteCertificate_InUse_NoBackup(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) // Mock BackupService @@ -227,7 +240,8 @@ func TestDeleteCertificate_InUse_NoBackup(t *testing.T) { // Mock BackupService for testing type mockBackupService struct { - createFunc func() (string, error) + createFunc func() (string, error) + availableSpaceFunc func() (int64, error) } func (m *mockBackupService) CreateBackup() (string, error) { @@ -253,6 +267,14 @@ func (m *mockBackupService) RestoreBackup(filename string) error { return fmt.Errorf("not implemented") } +func (m *mockBackupService) GetAvailableSpace() (int64, error) { + if m.availableSpaceFunc != nil { + return m.availableSpaceFunc() + } + // Default: return 1GB available + return 1024 * 1024 * 1024, nil +} + // Test List handler func TestCertificateHandler_List(t *testing.T) { db, err := gorm.Open(sqlite.Open(fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name())), 
&gorm.Config{}) @@ -266,6 +288,8 @@ func TestCertificateHandler_List(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.GET("/api/certificates", h.List) @@ -292,6 +316,7 @@ func TestCertificateHandler_Upload_MissingName(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.POST("/api/certificates", h.Upload) @@ -319,6 +344,7 @@ func TestCertificateHandler_Upload_MissingCertFile(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.POST("/api/certificates", h.Upload) @@ -349,6 +375,7 @@ func TestCertificateHandler_Upload_MissingKeyFile(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) svc := services.NewCertificateService("/tmp", db) h := NewCertificateHandler(svc, nil, nil) r.POST("/api/certificates", h.Upload) @@ -376,6 +403,7 @@ func TestCertificateHandler_Upload_Success(t *testing.T) { gin.SetMode(gin.TestMode) r := gin.New() + r.Use(mockAuthMiddleware()) // Create a mock CertificateService that returns a created certificate // Create a temporary services.CertificateService with a temp dir and DB diff --git a/backend/internal/api/handlers/crowdsec_decisions_test.go b/backend/internal/api/handlers/crowdsec_decisions_test.go new file mode 100644 index 00000000..3d8b48c7 --- /dev/null +++ b/backend/internal/api/handlers/crowdsec_decisions_test.go @@ -0,0 +1,450 @@ +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// 
mockCommandExecutor is a mock implementation of CommandExecutor for testing +type mockCommandExecutor struct { + output []byte + err error + calls [][]string // Track all calls made +} + +func (m *mockCommandExecutor) Execute(ctx context.Context, name string, args ...string) ([]byte, error) { + call := append([]string{name}, args...) + m.calls = append(m.calls, call) + return m.output, m.err +} + +func TestListDecisions_Success(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte(`[{"id":1,"origin":"cscli","type":"ban","scope":"ip","value":"192.168.1.100","duration":"4h","scenario":"manual 'ban' from 'localhost'","created_at":"2025-12-05T10:00:00Z","until":"2025-12-05T14:00:00Z"}]`), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/crowdsec/decisions", nil) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + decisions := resp["decisions"].([]interface{}) + assert.Len(t, decisions, 1) + + decision := decisions[0].(map[string]interface{}) + assert.Equal(t, "192.168.1.100", decision["value"]) + assert.Equal(t, "ban", decision["type"]) + assert.Equal(t, "ip", decision["scope"]) + + // Verify cscli was called with correct args + require.Len(t, mockExec.calls, 1) + assert.Equal(t, []string{"cscli", "decisions", "list", "-o", "json"}, mockExec.calls[0]) +} + +func TestListDecisions_EmptyList(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte("null"), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() 
+ g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/crowdsec/decisions", nil) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + decisions := resp["decisions"].([]interface{}) + assert.Len(t, decisions, 0) + assert.Equal(t, float64(0), resp["total"]) +} + +func TestListDecisions_CscliError(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + err: errors.New("cscli not found"), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/crowdsec/decisions", nil) + r.ServeHTTP(w, req) + + // Should return 200 with empty list and error message + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + decisions := resp["decisions"].([]interface{}) + assert.Len(t, decisions, 0) + assert.Contains(t, resp["error"], "cscli not available") +} + +func TestListDecisions_InvalidJSON(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte("invalid json"), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/crowdsec/decisions", nil) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "failed to parse decisions") +} + +func TestBanIP_Success(t *testing.T) 
{ + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte(""), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + payload := BanIPRequest{ + IP: "192.168.1.100", + Duration: "24h", + Reason: "suspicious activity", + } + b, _ := json.Marshal(payload) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/crowdsec/ban", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + assert.Equal(t, "banned", resp["status"]) + assert.Equal(t, "192.168.1.100", resp["ip"]) + assert.Equal(t, "24h", resp["duration"]) + + // Verify cscli was called with correct args + require.Len(t, mockExec.calls, 1) + assert.Equal(t, "cscli", mockExec.calls[0][0]) + assert.Equal(t, "decisions", mockExec.calls[0][1]) + assert.Equal(t, "add", mockExec.calls[0][2]) + assert.Equal(t, "-i", mockExec.calls[0][3]) + assert.Equal(t, "192.168.1.100", mockExec.calls[0][4]) + assert.Equal(t, "-d", mockExec.calls[0][5]) + assert.Equal(t, "24h", mockExec.calls[0][6]) + assert.Equal(t, "-R", mockExec.calls[0][7]) + assert.Equal(t, "manual ban: suspicious activity", mockExec.calls[0][8]) +} + +func TestBanIP_DefaultDuration(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte(""), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + payload := BanIPRequest{ + IP: "10.0.0.1", + } + b, _ := json.Marshal(payload) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, 
"/api/v1/admin/crowdsec/ban", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + // Duration should default to 24h + assert.Equal(t, "24h", resp["duration"]) + + // Verify cscli was called with default duration + require.Len(t, mockExec.calls, 1) + assert.Equal(t, "24h", mockExec.calls[0][6]) +} + +func TestBanIP_MissingIP(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + payload := map[string]string{} + b, _ := json.Marshal(payload) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/crowdsec/ban", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "ip is required") +} + +func TestBanIP_EmptyIP(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + payload := BanIPRequest{ + IP: " ", + } + b, _ := json.Marshal(payload) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/crowdsec/ban", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "ip cannot be empty") +} + +func TestBanIP_CscliError(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + err: errors.New("cscli failed"), + } + + h := 
NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + payload := BanIPRequest{ + IP: "192.168.1.100", + } + b, _ := json.Marshal(payload) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/crowdsec/ban", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "failed to ban IP") +} + +func TestUnbanIP_Success(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte(""), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, "/api/v1/admin/crowdsec/ban/192.168.1.100", nil) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + assert.Equal(t, "unbanned", resp["status"]) + assert.Equal(t, "192.168.1.100", resp["ip"]) + + // Verify cscli was called with correct args + require.Len(t, mockExec.calls, 1) + assert.Equal(t, []string{"cscli", "decisions", "delete", "-i", "192.168.1.100"}, mockExec.calls[0]) +} + +func TestUnbanIP_CscliError(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + err: errors.New("cscli failed"), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodDelete, "/api/v1/admin/crowdsec/ban/192.168.1.100", nil) + r.ServeHTTP(w, req) + 
+ assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "failed to unban IP") +} + +func TestListDecisions_MultipleDecisions(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + mockExec := &mockCommandExecutor{ + output: []byte(`[ + {"id":1,"origin":"cscli","type":"ban","scope":"ip","value":"192.168.1.100","duration":"4h","scenario":"manual ban","created_at":"2025-12-05T10:00:00Z"}, + {"id":2,"origin":"crowdsec","type":"ban","scope":"ip","value":"10.0.0.50","duration":"1h","scenario":"ssh-bf","created_at":"2025-12-05T11:00:00Z"}, + {"id":3,"origin":"cscli","type":"ban","scope":"range","value":"172.16.0.0/24","duration":"24h","scenario":"manual ban","created_at":"2025-12-05T12:00:00Z"} + ]`), + } + + h := NewCrowdsecHandler(db, &fakeExec{}, "/bin/false", tmpDir) + h.CmdExec = mockExec + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/crowdsec/decisions", nil) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var resp map[string]interface{} + err := json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + decisions := resp["decisions"].([]interface{}) + assert.Len(t, decisions, 3) + assert.Equal(t, float64(3), resp["total"]) + + // Verify each decision + d1 := decisions[0].(map[string]interface{}) + assert.Equal(t, "192.168.1.100", d1["value"]) + assert.Equal(t, "cscli", d1["origin"]) + + d2 := decisions[1].(map[string]interface{}) + assert.Equal(t, "10.0.0.50", d2["value"]) + assert.Equal(t, "crowdsec", d2["origin"]) + assert.Equal(t, "ssh-bf", d2["scenario"]) + + d3 := decisions[2].(map[string]interface{}) + assert.Equal(t, "172.16.0.0/24", d3["value"]) + assert.Equal(t, "range", d3["scope"]) +} + +func TestBanIP_InvalidJSON(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupCrowdDB(t) + tmpDir := t.TempDir() + + h := NewCrowdsecHandler(db, 
&fakeExec{}, "/bin/false", tmpDir) + + r := gin.New() + g := r.Group("/api/v1") + h.RegisterRoutes(g) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/crowdsec/ban", bytes.NewReader([]byte("invalid json"))) + req.Header.Set("Content-Type", "application/json") + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "ip is required") +} diff --git a/backend/internal/api/handlers/crowdsec_handler.go b/backend/internal/api/handlers/crowdsec_handler.go index 2647bac1..a3b98c80 100644 --- a/backend/internal/api/handlers/crowdsec_handler.go +++ b/backend/internal/api/handlers/crowdsec_handler.go @@ -4,15 +4,18 @@ import ( "archive/tar" "compress/gzip" "context" + "encoding/json" "fmt" - "github.com/Wikid82/charon/backend/internal/logger" "io" "net/http" "os" + "os/exec" "path/filepath" "strings" "time" + "github.com/Wikid82/charon/backend/internal/logger" + "github.com/gin-gonic/gin" "gorm.io/gorm" ) @@ -24,16 +27,37 @@ type CrowdsecExecutor interface { Status(ctx context.Context, configDir string) (running bool, pid int, err error) } +// CommandExecutor abstracts command execution for testing +type CommandExecutor interface { + Execute(ctx context.Context, name string, args ...string) ([]byte, error) +} + +// RealCommandExecutor executes commands using os/exec +type RealCommandExecutor struct{} + +// Execute runs a command and returns its output +func (r *RealCommandExecutor) Execute(ctx context.Context, name string, args ...string) ([]byte, error) { + cmd := exec.CommandContext(ctx, name, args...) + return cmd.Output() +} + // CrowdsecHandler manages CrowdSec process and config imports. 
type CrowdsecHandler struct { DB *gorm.DB Executor CrowdsecExecutor + CmdExec CommandExecutor BinPath string DataDir string } func NewCrowdsecHandler(db *gorm.DB, exec CrowdsecExecutor, binPath, dataDir string) *CrowdsecHandler { - return &CrowdsecHandler{DB: db, Executor: exec, BinPath: binPath, DataDir: dataDir} + return &CrowdsecHandler{ + DB: db, + Executor: exec, + CmdExec: &RealCommandExecutor{}, + BinPath: binPath, + DataDir: dataDir, + } } // Start starts the CrowdSec process. @@ -290,6 +314,149 @@ func (h *CrowdsecHandler) WriteFile(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"status": "written", "backup": backupDir}) } +// CrowdSecDecision represents a ban decision from CrowdSec +type CrowdSecDecision struct { + ID int64 `json:"id"` + Origin string `json:"origin"` + Type string `json:"type"` + Scope string `json:"scope"` + Value string `json:"value"` + Duration string `json:"duration"` + Scenario string `json:"scenario"` + CreatedAt time.Time `json:"created_at"` + Until string `json:"until,omitempty"` +} + +// cscliDecision represents the JSON output from cscli decisions list +type cscliDecision struct { + ID int64 `json:"id"` + Origin string `json:"origin"` + Type string `json:"type"` + Scope string `json:"scope"` + Value string `json:"value"` + Duration string `json:"duration"` + Scenario string `json:"scenario"` + CreatedAt string `json:"created_at"` + Until string `json:"until"` +} + +// ListDecisions calls cscli to get current decisions (banned IPs) +func (h *CrowdsecHandler) ListDecisions(c *gin.Context) { + ctx := c.Request.Context() + output, err := h.CmdExec.Execute(ctx, "cscli", "decisions", "list", "-o", "json") + if err != nil { + // If cscli is not available or returns error, return empty list with warning + logger.Log().WithError(err).Warn("Failed to execute cscli decisions list") + c.JSON(http.StatusOK, gin.H{"decisions": []CrowdSecDecision{}, "error": "cscli not available or failed"}) + return + } + + // Handle empty output (no 
decisions) + if len(output) == 0 || string(output) == "null" || string(output) == "null\n" { + c.JSON(http.StatusOK, gin.H{"decisions": []CrowdSecDecision{}, "total": 0}) + return + } + + // Parse JSON output + var rawDecisions []cscliDecision + if err := json.Unmarshal(output, &rawDecisions); err != nil { + logger.Log().WithError(err).WithField("output", string(output)).Warn("Failed to parse cscli decisions output") + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to parse decisions"}) + return + } + + // Convert to our format + decisions := make([]CrowdSecDecision, 0, len(rawDecisions)) + for _, d := range rawDecisions { + var createdAt time.Time + if d.CreatedAt != "" { + createdAt, _ = time.Parse(time.RFC3339, d.CreatedAt) + } + decisions = append(decisions, CrowdSecDecision{ + ID: d.ID, + Origin: d.Origin, + Type: d.Type, + Scope: d.Scope, + Value: d.Value, + Duration: d.Duration, + Scenario: d.Scenario, + CreatedAt: createdAt, + Until: d.Until, + }) + } + + c.JSON(http.StatusOK, gin.H{"decisions": decisions, "total": len(decisions)}) +} + +// BanIPRequest represents the request body for banning an IP +type BanIPRequest struct { + IP string `json:"ip" binding:"required"` + Duration string `json:"duration"` + Reason string `json:"reason"` +} + +// BanIP adds a manual ban for an IP address +func (h *CrowdsecHandler) BanIP(c *gin.Context) { + var req BanIPRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "ip is required"}) + return + } + + // Validate IP format (basic check) + ip := strings.TrimSpace(req.IP) + if ip == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "ip cannot be empty"}) + return + } + + // Default duration to 24h if not specified + duration := req.Duration + if duration == "" { + duration = "24h" + } + + // Build reason string + reason := "manual ban" + if req.Reason != "" { + reason = fmt.Sprintf("manual ban: %s", req.Reason) + } + + ctx := c.Request.Context() + args := 
[]string{"decisions", "add", "-i", ip, "-d", duration, "-R", reason, "-t", "ban"} + _, err := h.CmdExec.Execute(ctx, "cscli", args...) + if err != nil { + logger.Log().WithError(err).WithField("ip", ip).Warn("Failed to execute cscli decisions add") + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to ban IP"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "banned", "ip": ip, "duration": duration}) +} + +// UnbanIP removes a ban for an IP address +func (h *CrowdsecHandler) UnbanIP(c *gin.Context) { + ip := c.Param("ip") + if ip == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "ip parameter required"}) + return + } + + // Sanitize IP + ip = strings.TrimSpace(ip) + + ctx := c.Request.Context() + args := []string{"decisions", "delete", "-i", ip} + _, err := h.CmdExec.Execute(ctx, "cscli", args...) + if err != nil { + logger.Log().WithError(err).WithField("ip", ip).Warn("Failed to execute cscli decisions delete") + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to unban IP"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "unbanned", "ip": ip}) +} + // RegisterRoutes registers crowdsec admin routes under protected group func (h *CrowdsecHandler) RegisterRoutes(rg *gin.RouterGroup) { rg.POST("/admin/crowdsec/start", h.Start) @@ -300,4 +467,8 @@ func (h *CrowdsecHandler) RegisterRoutes(rg *gin.RouterGroup) { rg.GET("/admin/crowdsec/files", h.ListFiles) rg.GET("/admin/crowdsec/file", h.ReadFile) rg.POST("/admin/crowdsec/file", h.WriteFile) + // Decision management endpoints (Banned IP Dashboard) + rg.GET("/admin/crowdsec/decisions", h.ListDecisions) + rg.POST("/admin/crowdsec/ban", h.BanIP) + rg.DELETE("/admin/crowdsec/ban/:ip", h.UnbanIP) } diff --git a/backend/internal/api/handlers/feature_flags_handler.go b/backend/internal/api/handlers/feature_flags_handler.go index 4f4c9d6a..2afdd6f3 100644 --- a/backend/internal/api/handlers/feature_flags_handler.go +++ b/backend/internal/api/handlers/feature_flags_handler.go @@ 
-23,11 +23,8 @@ func NewFeatureFlagsHandler(db *gorm.DB) *FeatureFlagsHandler { // defaultFlags lists the canonical feature flags we expose. var defaultFlags = []string{ - "feature.global.enabled", "feature.cerberus.enabled", "feature.uptime.enabled", - "feature.notifications.enabled", - "feature.docker.enabled", } // GetFlags returns a map of feature flag -> bool. DB setting takes precedence @@ -70,8 +67,8 @@ func (h *FeatureFlagsHandler) GetFlags(c *gin.Context) { } } - // Default false - result[key] = false + // Default true for core optional features + result[key] = true } c.JSON(http.StatusOK, result) diff --git a/backend/internal/api/handlers/feature_flags_handler_coverage_test.go b/backend/internal/api/handlers/feature_flags_handler_coverage_test.go index 63c95c76..82468d59 100644 --- a/backend/internal/api/handlers/feature_flags_handler_coverage_test.go +++ b/backend/internal/api/handlers/feature_flags_handler_coverage_test.go @@ -14,17 +14,248 @@ import ( "github.com/Wikid82/charon/backend/internal/models" ) -func TestFeatureFlags_UpdateFlags_InvalidPayload(t *testing.T) { +func TestFeatureFlagsHandler_GetFlags_DBPrecedence(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Set a flag in DB + db.Create(&models.Setting{ + Key: "feature.cerberus.enabled", + Value: "false", + Type: "bool", + Category: "feature", + }) + + // Set env var that should be ignored (DB takes precedence) + t.Setenv("FEATURE_CERBERUS_ENABLED", "true") + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // DB value (false) should take precedence over env (true) + assert.False(t, flags["feature.cerberus.enabled"]) +} + +func 
TestFeatureFlagsHandler_GetFlags_EnvFallback(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Set env var (no DB value exists) + t.Setenv("FEATURE_CERBERUS_ENABLED", "false") + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // Env value should be used + assert.False(t, flags["feature.cerberus.enabled"]) +} + +func TestFeatureFlagsHandler_GetFlags_EnvShortForm(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Set short form env var (CERBERUS_ENABLED instead of FEATURE_CERBERUS_ENABLED) + t.Setenv("CERBERUS_ENABLED", "false") + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // Short form env value should be used + assert.False(t, flags["feature.cerberus.enabled"]) +} + +func TestFeatureFlagsHandler_GetFlags_EnvNumeric(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Set numeric env var (1/0 instead of true/false) + t.Setenv("FEATURE_UPTIME_ENABLED", "0") + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // "0" should be 
parsed as false + assert.False(t, flags["feature.uptime.enabled"]) +} + +func TestFeatureFlagsHandler_GetFlags_DefaultTrue(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // No DB value, no env var - should default to true + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // All flags should default to true + assert.True(t, flags["feature.cerberus.enabled"]) + assert.True(t, flags["feature.uptime.enabled"]) +} + +func TestFeatureFlagsHandler_GetFlags_AllDefaultFlagsPresent(t *testing.T) { + gin.SetMode(gin.TestMode) db := setupFlagsDB(t) h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + // Ensure all default flags are present + for _, key := range defaultFlags { + _, ok := flags[key] + assert.True(t, ok, "expected flag %s to be present", key) + } +} + +func TestFeatureFlagsHandler_UpdateFlags_Success(t *testing.T) { gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + h := NewFeatureFlagsHandler(db) r := gin.New() r.PUT("/api/v1/feature-flags", h.UpdateFlags) - // Send invalid JSON - req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader([]byte("invalid"))) + payload := map[string]bool{ + "feature.cerberus.enabled": false, + "feature.uptime.enabled": true, + } + b, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", 
bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + // Verify DB persistence + var s1 models.Setting + err := db.Where("key = ?", "feature.cerberus.enabled").First(&s1).Error + require.NoError(t, err) + assert.Equal(t, "false", s1.Value) + assert.Equal(t, "bool", s1.Type) + assert.Equal(t, "feature", s1.Category) + + var s2 models.Setting + err = db.Where("key = ?", "feature.uptime.enabled").First(&s2).Error + require.NoError(t, err) + assert.Equal(t, "true", s2.Value) +} + +func TestFeatureFlagsHandler_UpdateFlags_Upsert(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Create existing setting + db.Create(&models.Setting{ + Key: "feature.cerberus.enabled", + Value: "true", + Type: "bool", + Category: "feature", + }) + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.PUT("/api/v1/feature-flags", h.UpdateFlags) + + // Update existing setting + payload := map[string]bool{ + "feature.cerberus.enabled": false, + } + b, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + // Verify update + var s models.Setting + err := db.Where("key = ?", "feature.cerberus.enabled").First(&s).Error + require.NoError(t, err) + assert.Equal(t, "false", s.Value) + + // Verify only one record exists + var count int64 + db.Model(&models.Setting{}).Where("key = ?", "feature.cerberus.enabled").Count(&count) + assert.Equal(t, int64(1), count) +} + +func TestFeatureFlagsHandler_UpdateFlags_InvalidJSON(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.PUT("/api/v1/feature-flags", h.UpdateFlags) + + req := httptest.NewRequest(http.MethodPut, 
"/api/v1/feature-flags", bytes.NewReader([]byte("invalid json"))) req.Header.Set("Content-Type", "application/json") w := httptest.NewRecorder() r.ServeHTTP(w, req) @@ -32,227 +263,199 @@ func TestFeatureFlags_UpdateFlags_InvalidPayload(t *testing.T) { assert.Equal(t, http.StatusBadRequest, w.Code) } -func TestFeatureFlags_UpdateFlags_IgnoresInvalidKeys(t *testing.T) { +func TestFeatureFlagsHandler_UpdateFlags_OnlyAllowedKeys(t *testing.T) { + gin.SetMode(gin.TestMode) db := setupFlagsDB(t) - require.NoError(t, db.AutoMigrate(&models.Setting{})) h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) r := gin.New() r.PUT("/api/v1/feature-flags", h.UpdateFlags) - // Try to update a non-whitelisted key - payload := []byte(`{"invalid.key": true, "feature.global.enabled": true}`) - req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader(payload)) + // Try to set a key not in defaultFlags + payload := map[string]bool{ + "feature.cerberus.enabled": false, + "feature.invalid.key": true, // Should be ignored + } + b, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + // Verify allowed key was saved + var s1 models.Setting + err := db.Where("key = ?", "feature.cerberus.enabled").First(&s1).Error + require.NoError(t, err) + + // Verify disallowed key was NOT saved + var s2 models.Setting + err = db.Where("key = ?", "feature.invalid.key").First(&s2).Error + assert.Error(t, err) +} + +func TestFeatureFlagsHandler_UpdateFlags_EmptyPayload(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.PUT("/api/v1/feature-flags", h.UpdateFlags) + + payload := map[string]bool{} + b, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPut, 
"/api/v1/feature-flags", bytes.NewReader(b)) req.Header.Set("Content-Type", "application/json") w := httptest.NewRecorder() r.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - - // Verify invalid key was NOT saved - var s models.Setting - err := db.Where("key = ?", "invalid.key").First(&s).Error - assert.Error(t, err) // Should not exist - - // Valid key should be saved - err = db.Where("key = ?", "feature.global.enabled").First(&s).Error - assert.NoError(t, err) - assert.Equal(t, "true", s.Value) } -func TestFeatureFlags_EnvFallback_ShortVariant(t *testing.T) { - // Test the short env variant (CERBERUS_ENABLED instead of FEATURE_CERBERUS_ENABLED) - t.Setenv("CERBERUS_ENABLED", "true") - - db := OpenTestDB(t) - h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) - - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - // Parse response - var flags map[string]bool - err := json.Unmarshal(w.Body.Bytes(), &flags) - require.NoError(t, err) - - // Should be true via short env fallback - assert.True(t, flags["feature.cerberus.enabled"]) -} - -func TestFeatureFlags_EnvFallback_WithValue1(t *testing.T) { - // Test env fallback with "1" as value - t.Setenv("FEATURE_UPTIME_ENABLED", "1") - - db := OpenTestDB(t) - h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) - - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) - assert.True(t, flags["feature.uptime.enabled"]) -} - -func TestFeatureFlags_EnvFallback_WithValue0(t *testing.T) { - // Test env fallback with "0" as value (should be false) - 
t.Setenv("FEATURE_DOCKER_ENABLED", "0") - - db := OpenTestDB(t) - h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) - - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) - assert.False(t, flags["feature.docker.enabled"]) -} - -func TestFeatureFlags_DBTakesPrecedence(t *testing.T) { - // Test that DB value takes precedence over env - t.Setenv("FEATURE_NOTIFICATIONS_ENABLED", "false") - - db := setupFlagsDB(t) - // Set DB value to true - db.Create(&models.Setting{Key: "feature.notifications.enabled", Value: "true", Type: "bool", Category: "feature"}) - - h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) - - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) - // DB value (true) should take precedence over env (false) - assert.True(t, flags["feature.notifications.enabled"]) -} - -func TestFeatureFlags_DBValueVariations(t *testing.T) { - db := setupFlagsDB(t) - - // Test various DB value formats - testCases := []struct { - key string +func TestFeatureFlagsHandler_GetFlags_DBValueVariants(t *testing.T) { + tests := []struct { + name string dbValue string expected bool }{ - {"feature.global.enabled", "1", true}, - {"feature.cerberus.enabled", "yes", true}, - {"feature.uptime.enabled", "TRUE", true}, - {"feature.notifications.enabled", "false", false}, - {"feature.docker.enabled", "0", false}, + {"lowercase true", "true", true}, + {"uppercase TRUE", "TRUE", true}, + {"mixed case True", "True", true}, + {"numeric 1", "1", true}, + {"yes", "yes", true}, + 
{"YES uppercase", "YES", true}, + {"lowercase false", "false", false}, + {"numeric 0", "0", false}, + {"no", "no", false}, + {"empty string", "", false}, + {"random string", "random", false}, + {"whitespace padded true", " true ", true}, + {"whitespace padded false", " false ", false}, } - for _, tc := range testCases { - db.Create(&models.Setting{Key: tc.key, Value: tc.dbValue, Type: "bool", Category: "feature"}) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) - h := NewFeatureFlagsHandler(db) + // Set flag with test value + db.Create(&models.Setting{ + Key: "feature.cerberus.enabled", + Value: tt.dbValue, + Type: "bool", + Category: "feature", + }) - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) - assert.Equal(t, http.StatusOK, w.Code) + require.Equal(t, http.StatusOK, w.Code) - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) - for _, tc := range testCases { - assert.Equal(t, tc.expected, flags[tc.key], "flag %s expected %v", tc.key, tc.expected) + assert.Equal(t, tt.expected, flags["feature.cerberus.enabled"], + "dbValue=%q should result in %v", tt.dbValue, tt.expected) + }) } } -func TestFeatureFlags_UpdateMultipleFlags(t *testing.T) { +func TestFeatureFlagsHandler_GetFlags_EnvValueVariants(t *testing.T) { + tests := []struct { + name string + envValue string + expected bool + }{ + {"true string", "true", true}, + {"TRUE uppercase", "TRUE", true}, + {"1 numeric", "1", true}, + {"false 
string", "false", false}, + {"FALSE uppercase", "FALSE", false}, + {"0 numeric", "0", false}, + {"invalid value defaults to numeric check", "invalid", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + // Set env var (no DB value) + t.Setenv("FEATURE_CERBERUS_ENABLED", tt.envValue) + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.GET("/api/v1/feature-flags", h.GetFlags) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var flags map[string]bool + err := json.Unmarshal(w.Body.Bytes(), &flags) + require.NoError(t, err) + + assert.Equal(t, tt.expected, flags["feature.cerberus.enabled"], + "envValue=%q should result in %v", tt.envValue, tt.expected) + }) + } +} + +func TestFeatureFlagsHandler_UpdateFlags_BoolValues(t *testing.T) { + tests := []struct { + name string + value bool + dbExpect string + }{ + {"true", true, "true"}, + {"false", false, "false"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + db := setupFlagsDB(t) + + h := NewFeatureFlagsHandler(db) + r := gin.New() + r.PUT("/api/v1/feature-flags", h.UpdateFlags) + + payload := map[string]bool{ + "feature.cerberus.enabled": tt.value, + } + b, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + + var s models.Setting + err := db.Where("key = ?", "feature.cerberus.enabled").First(&s).Error + require.NoError(t, err) + assert.Equal(t, tt.dbExpect, s.Value) + }) + } +} + +func TestFeatureFlagsHandler_NewFeatureFlagsHandler(t *testing.T) { db := setupFlagsDB(t) - h := NewFeatureFlagsHandler(db) - gin.SetMode(gin.TestMode) - r := 
gin.New() - r.PUT("/api/v1/feature-flags", h.UpdateFlags) - r.GET("/api/v1/feature-flags", h.GetFlags) - - // Update multiple flags at once - payload := []byte(`{ - "feature.global.enabled": true, - "feature.cerberus.enabled": false, - "feature.uptime.enabled": true - }`) - req := httptest.NewRequest(http.MethodPut, "/api/v1/feature-flags", bytes.NewReader(payload)) - req.Header.Set("Content-Type", "application/json") - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - // Verify by getting flags - req = httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w = httptest.NewRecorder() - r.ServeHTTP(w, req) - - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) - - assert.True(t, flags["feature.global.enabled"]) - assert.False(t, flags["feature.cerberus.enabled"]) - assert.True(t, flags["feature.uptime.enabled"]) -} - -func TestFeatureFlags_ShortEnvFallback_WithUnparseable(t *testing.T) { - // Test short env fallback with a value that's not directly parseable as bool - // but is "1" which should be treated as true - t.Setenv("GLOBAL_ENABLED", "1") - - db := OpenTestDB(t) - h := NewFeatureFlagsHandler(db) - - gin.SetMode(gin.TestMode) - r := gin.New() - r.GET("/api/v1/feature-flags", h.GetFlags) - - req := httptest.NewRequest(http.MethodGet, "/api/v1/feature-flags", nil) - w := httptest.NewRecorder() - r.ServeHTTP(w, req) - - assert.Equal(t, http.StatusOK, w.Code) - - var flags map[string]bool - json.Unmarshal(w.Body.Bytes(), &flags) - assert.True(t, flags["feature.global.enabled"]) + assert.NotNil(t, h) + assert.NotNil(t, h.DB) + assert.Equal(t, db, h.DB) } diff --git a/backend/internal/api/handlers/proxy_host_handler.go b/backend/internal/api/handlers/proxy_host_handler.go index 2f70fa49..9807c820 100644 --- a/backend/internal/api/handlers/proxy_host_handler.go +++ b/backend/internal/api/handlers/proxy_host_handler.go @@ -297,6 +297,14 @@ func (h *ProxyHostHandler) Update(c *gin.Context) { } 
} + // Sync associated uptime monitor with updated proxy host values + if h.uptimeService != nil { + if err := h.uptimeService.SyncMonitorForHost(host.ID); err != nil { + middleware.GetRequestLogger(c).WithError(err).WithField("host_id", host.ID).Warn("Failed to sync uptime monitor for host") + // Don't fail the request if sync fails - the host update succeeded + } + } + c.JSON(http.StatusOK, host) } diff --git a/backend/internal/api/routes/routes.go b/backend/internal/api/routes/routes.go index 756924d4..c3d03f3a 100644 --- a/backend/internal/api/routes/routes.go +++ b/backend/internal/api/routes/routes.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/gin-contrib/gzip" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -23,6 +24,9 @@ import ( // Register wires up API routes and performs automatic migrations. func Register(router *gin.Engine, db *gorm.DB, cfg config.Config) error { + // Enable gzip compression for API responses (reduces payload size ~70%) + router.Use(gzip.Gzip(gzip.DefaultCompression)) + // Apply security headers middleware globally // This sets CSP, HSTS, X-Frame-Options, etc. 
securityHeadersCfg := middleware.SecurityHeadersConfig{ @@ -242,15 +246,32 @@ func Register(router *gin.Engine, db *gorm.DB, cfg config.Config) error { go func() { // Wait a bit for server to start time.Sleep(30 * time.Second) - // Initial sync - if err := uptimeService.SyncMonitors(); err != nil { - logger.Log().WithError(err).Error("Failed to sync monitors") + + // Initial sync if enabled + var s models.Setting + enabled := true + if err := db.Where("key = ?", "feature.uptime.enabled").First(&s).Error; err == nil { + enabled = s.Value == "true" + } + + if enabled { + if err := uptimeService.SyncMonitors(); err != nil { + logger.Log().WithError(err).Error("Failed to sync monitors") + } } ticker := time.NewTicker(1 * time.Minute) for range ticker.C { - _ = uptimeService.SyncMonitors() - uptimeService.CheckAll() + // Check feature flag each tick + enabled := true + if err := db.Where("key = ?", "feature.uptime.enabled").First(&s).Error; err == nil { + enabled = s.Value == "true" + } + + if enabled { + _ = uptimeService.SyncMonitors() + uptimeService.CheckAll() + } } }() @@ -284,6 +305,27 @@ func Register(router *gin.Engine, db *gorm.DB, cfg config.Config) error { crowdsecExec := handlers.NewDefaultCrowdsecExecutor() crowdsecHandler := handlers.NewCrowdsecHandler(db, crowdsecExec, "crowdsec", crowdsecDataDir) crowdsecHandler.RegisterRoutes(protected) + + // Access Lists + accessListHandler := handlers.NewAccessListHandler(db) + protected.GET("/access-lists/templates", accessListHandler.GetTemplates) + protected.GET("/access-lists", accessListHandler.List) + protected.POST("/access-lists", accessListHandler.Create) + protected.GET("/access-lists/:id", accessListHandler.Get) + protected.PUT("/access-lists/:id", accessListHandler.Update) + protected.DELETE("/access-lists/:id", accessListHandler.Delete) + protected.POST("/access-lists/:id/test", accessListHandler.TestIP) + + // Certificate routes + // Use cfg.CaddyConfigDir + "/data" for cert service so we scan the 
actual Caddy storage + // where ACME and certificates are stored (e.g. /data). + caddyDataDir := cfg.CaddyConfigDir + "/data" + logger.Log().WithField("caddy_data_dir", caddyDataDir).Info("Using Caddy data directory for certificates scan") + certService := services.NewCertificateService(caddyDataDir, db) + certHandler := handlers.NewCertificateHandler(certService, backupService, notificationService) + protected.GET("/certificates", certHandler.List) + protected.POST("/certificates", certHandler.Upload) + protected.DELETE("/certificates/:id", certHandler.Delete) } // Caddy Manager already created above @@ -294,27 +336,6 @@ func Register(router *gin.Engine, db *gorm.DB, cfg config.Config) error { remoteServerHandler := handlers.NewRemoteServerHandler(remoteServerService, notificationService) remoteServerHandler.RegisterRoutes(api) - // Access Lists - accessListHandler := handlers.NewAccessListHandler(db) - protected.GET("/access-lists/templates", accessListHandler.GetTemplates) - protected.GET("/access-lists", accessListHandler.List) - protected.POST("/access-lists", accessListHandler.Create) - protected.GET("/access-lists/:id", accessListHandler.Get) - protected.PUT("/access-lists/:id", accessListHandler.Update) - protected.DELETE("/access-lists/:id", accessListHandler.Delete) - protected.POST("/access-lists/:id/test", accessListHandler.TestIP) - - // Certificate routes - // Use cfg.CaddyConfigDir + "/data" for cert service so we scan the actual Caddy storage - // where ACME and certificates are stored (e.g. /data). 
- caddyDataDir := cfg.CaddyConfigDir + "/data" - logger.Log().WithField("caddy_data_dir", caddyDataDir).Info("Using Caddy data directory for certificates scan") - certService := services.NewCertificateService(caddyDataDir, db) - certHandler := handlers.NewCertificateHandler(certService, backupService, notificationService) - api.GET("/certificates", certHandler.List) - api.POST("/certificates", certHandler.Upload) - api.DELETE("/certificates/:id", certHandler.Delete) - // Initial Caddy Config Sync go func() { // Wait for Caddy to be ready (max 30 seconds) diff --git a/backend/internal/caddy/config.go b/backend/internal/caddy/config.go index 6cdb1775..4e08e6a4 100644 --- a/backend/internal/caddy/config.go +++ b/backend/internal/caddy/config.go @@ -706,18 +706,25 @@ func buildACLHandler(acl *models.AccessList, adminWhitelist string) (Handler, er return nil, nil } -// buildCrowdSecHandler returns a placeholder CrowdSec handler. In a future -// implementation this can be replaced with a proper Caddy plugin integration -// to call into a local CrowdSec agent. +// buildCrowdSecHandler returns a CrowdSec handler for the caddy-crowdsec-bouncer plugin. +// The plugin expects api_url and optionally api_key fields. +// For local mode, we use the local LAPI address at http://localhost:8080. func buildCrowdSecHandler(host *models.ProxyHost, secCfg *models.SecurityConfig, crowdsecEnabled bool) (Handler, error) { // Only add a handler when the computed runtime flag indicates CrowdSec is enabled. - // The computed flag incorporates runtime overrides and global Cerberus enablement. 
if !crowdsecEnabled { return nil, nil } - // For now, the local-only mode is supported; crowdsecEnabled implies 'local' + h := Handler{"handler": "crowdsec"} - h["mode"] = "local" + + // caddy-crowdsec-bouncer expects api_url and api_key + // For local mode, use the local LAPI address + if secCfg != nil && secCfg.CrowdSecAPIURL != "" { + h["api_url"] = secCfg.CrowdSecAPIURL + } else { + h["api_url"] = "http://localhost:8080" + } + return h, nil } @@ -817,15 +824,30 @@ func buildWAFHandler(host *models.ProxyHost, rulesets []models.SecurityRuleSet, return h, nil } -// buildRateLimitHandler returns a placeholder for a rate-limit handler. -// Real implementation should use the relevant Caddy module/plugin when available. +// buildRateLimitHandler returns a rate-limit handler using the caddy-ratelimit module. +// The module is registered as http.handlers.rate_limit and expects: +// - handler: "rate_limit" +// - rate_limits: map of named rate limit zones with key, window, and max_events +// See: https://github.com/mholt/caddy-ratelimit +// +// Note: The rateLimitEnabled flag is already checked by the caller (GenerateConfig). +// This function only validates that the config has positive request/window values. func buildRateLimitHandler(host *models.ProxyHost, secCfg *models.SecurityConfig) (Handler, error) { - // If host has custom rate limit metadata we could parse and construct it. 
+ if secCfg == nil { + return nil, nil + } + if secCfg.RateLimitRequests <= 0 || secCfg.RateLimitWindowSec <= 0 { + return nil, nil + } + + // caddy-ratelimit format h := Handler{"handler": "rate_limit"} - if secCfg != nil && secCfg.RateLimitRequests > 0 && secCfg.RateLimitWindowSec > 0 { - h["requests"] = secCfg.RateLimitRequests - h["window_sec"] = secCfg.RateLimitWindowSec - h["burst"] = secCfg.RateLimitBurst + h["rate_limits"] = map[string]interface{}{ + "static": map[string]interface{}{ + "key": "{http.request.remote.host}", + "window": fmt.Sprintf("%ds", secCfg.RateLimitWindowSec), + "max_events": secCfg.RateLimitRequests, + }, } return h, nil } diff --git a/backend/internal/caddy/config_crowdsec_test.go b/backend/internal/caddy/config_crowdsec_test.go new file mode 100644 index 00000000..27818eea --- /dev/null +++ b/backend/internal/caddy/config_crowdsec_test.go @@ -0,0 +1,164 @@ +package caddy + +import ( + "encoding/json" + "testing" + + "github.com/Wikid82/charon/backend/internal/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBuildCrowdSecHandler_Disabled(t *testing.T) { + // When crowdsecEnabled is false, should return nil + h, err := buildCrowdSecHandler(nil, nil, false) + require.NoError(t, err) + assert.Nil(t, h) +} + +func TestBuildCrowdSecHandler_EnabledWithoutConfig(t *testing.T) { + // When crowdsecEnabled is true but no secCfg, should use default localhost URL + h, err := buildCrowdSecHandler(nil, nil, true) + require.NoError(t, err) + require.NotNil(t, h) + + assert.Equal(t, "crowdsec", h["handler"]) + assert.Equal(t, "http://localhost:8080", h["api_url"]) +} + +func TestBuildCrowdSecHandler_EnabledWithEmptyAPIURL(t *testing.T) { + // When crowdsecEnabled is true but CrowdSecAPIURL is empty, should use default + secCfg := &models.SecurityConfig{ + CrowdSecAPIURL: "", + } + h, err := buildCrowdSecHandler(nil, secCfg, true) + require.NoError(t, err) + require.NotNil(t, h) + + assert.Equal(t, 
"crowdsec", h["handler"]) + assert.Equal(t, "http://localhost:8080", h["api_url"]) +} + +func TestBuildCrowdSecHandler_EnabledWithCustomAPIURL(t *testing.T) { + // When crowdsecEnabled is true and CrowdSecAPIURL is set, should use custom URL + secCfg := &models.SecurityConfig{ + CrowdSecAPIURL: "http://crowdsec-lapi:8081", + } + h, err := buildCrowdSecHandler(nil, secCfg, true) + require.NoError(t, err) + require.NotNil(t, h) + + assert.Equal(t, "crowdsec", h["handler"]) + assert.Equal(t, "http://crowdsec-lapi:8081", h["api_url"]) +} + +func TestBuildCrowdSecHandler_JSONFormat(t *testing.T) { + // Test that the handler produces valid JSON matching caddy-crowdsec-bouncer schema + secCfg := &models.SecurityConfig{ + CrowdSecAPIURL: "http://localhost:8080", + } + h, err := buildCrowdSecHandler(nil, secCfg, true) + require.NoError(t, err) + require.NotNil(t, h) + + // Marshal to JSON and verify structure + b, err := json.Marshal(h) + require.NoError(t, err) + s := string(b) + + // Verify expected JSON content + assert.Contains(t, s, `"handler":"crowdsec"`) + assert.Contains(t, s, `"api_url":"http://localhost:8080"`) + // Should NOT contain old "mode" field + assert.NotContains(t, s, `"mode"`) +} + +func TestBuildCrowdSecHandler_WithHost(t *testing.T) { + // Test that host parameter is accepted (even if not currently used) + host := &models.ProxyHost{ + UUID: "test-uuid", + DomainNames: "example.com", + } + secCfg := &models.SecurityConfig{ + CrowdSecAPIURL: "http://custom-crowdsec:8080", + } + + h, err := buildCrowdSecHandler(host, secCfg, true) + require.NoError(t, err) + require.NotNil(t, h) + + assert.Equal(t, "crowdsec", h["handler"]) + assert.Equal(t, "http://custom-crowdsec:8080", h["api_url"]) +} + +func TestGenerateConfig_WithCrowdSec(t *testing.T) { + // Test that CrowdSec handler is included in generated config when enabled + hosts := []models.ProxyHost{ + { + UUID: "test-uuid", + DomainNames: "example.com", + ForwardHost: "app", + ForwardPort: 8080, + 
Enabled: true, + }, + } + + secCfg := &models.SecurityConfig{ + CrowdSecMode: "local", + CrowdSecAPIURL: "http://localhost:8080", + } + + // crowdsecEnabled=true should include the handler + config, err := GenerateConfig(hosts, "/tmp/caddy-data", "admin@example.com", "", "", false, true, false, false, false, "", nil, nil, nil, secCfg) + require.NoError(t, err) + require.NotNil(t, config.Apps.HTTP) + + server := config.Apps.HTTP.Servers["charon_server"] + require.NotNil(t, server) + require.Len(t, server.Routes, 1) + + route := server.Routes[0] + // Handlers should include crowdsec + reverse_proxy + require.GreaterOrEqual(t, len(route.Handle), 2) + + // Find the crowdsec handler + var foundCrowdSec bool + for _, h := range route.Handle { + if h["handler"] == "crowdsec" { + foundCrowdSec = true + // Verify it has api_url + assert.Equal(t, "http://localhost:8080", h["api_url"]) + break + } + } + require.True(t, foundCrowdSec, "crowdsec handler should be present") +} + +func TestGenerateConfig_CrowdSecDisabled(t *testing.T) { + // Test that CrowdSec handler is NOT included when disabled + hosts := []models.ProxyHost{ + { + UUID: "test-uuid", + DomainNames: "example.com", + ForwardHost: "app", + ForwardPort: 8080, + Enabled: true, + }, + } + + // crowdsecEnabled=false should NOT include the handler + config, err := GenerateConfig(hosts, "/tmp/caddy-data", "admin@example.com", "", "", false, false, false, false, false, "", nil, nil, nil, nil) + require.NoError(t, err) + require.NotNil(t, config.Apps.HTTP) + + server := config.Apps.HTTP.Servers["charon_server"] + require.NotNil(t, server) + require.Len(t, server.Routes, 1) + + route := server.Routes[0] + + // Verify no crowdsec handler + for _, h := range route.Handle { + assert.NotEqual(t, "crowdsec", h["handler"], "crowdsec handler should not be present when disabled") + } +} diff --git a/backend/internal/caddy/config_extra_test.go b/backend/internal/caddy/config_extra_test.go index 62f6ef70..7ee0c455 100644 --- 
a/backend/internal/caddy/config_extra_test.go +++ b/backend/internal/caddy/config_extra_test.go @@ -225,7 +225,8 @@ func TestGenerateConfig_SecurityPipeline_Order(t *testing.T) { // Provide rulesets and paths so WAF handler is created with directives rulesets := []models.SecurityRuleSet{{Name: "owasp-crs"}} rulesetPaths := map[string]string{"owasp-crs": "/tmp/owasp.conf"} - secCfg := &models.SecurityConfig{CrowdSecMode: "local"} + // Set rate limit values so rate_limit handler is included (uses caddy-ratelimit format) + secCfg := &models.SecurityConfig{CrowdSecMode: "local", RateLimitRequests: 100, RateLimitWindowSec: 60} cfg, err := GenerateConfig([]models.ProxyHost{host}, "/tmp/caddy-data", "", "", "", false, true, true, true, true, "", rulesets, rulesetPaths, nil, secCfg) require.NoError(t, err) route := cfg.Apps.HTTP.Servers["charon_server"].Routes[0] diff --git a/backend/internal/caddy/config_generate_additional_test.go b/backend/internal/caddy/config_generate_additional_test.go index 675070af..039ee623 100644 --- a/backend/internal/caddy/config_generate_additional_test.go +++ b/backend/internal/caddy/config_generate_additional_test.go @@ -53,7 +53,8 @@ func TestGenerateConfig_SecurityPipeline_Order_Locations(t *testing.T) { // Provide rulesets and paths so WAF handler is created with directives rulesets := []models.SecurityRuleSet{{Name: "owasp-crs"}} rulesetPaths := map[string]string{"owasp-crs": "/tmp/owasp.conf"} - sec := &models.SecurityConfig{CrowdSecMode: "local"} + // Set rate limit values so rate_limit handler is included (uses caddy-ratelimit format) + sec := &models.SecurityConfig{CrowdSecMode: "local", RateLimitRequests: 100, RateLimitWindowSec: 60} cfg, err := GenerateConfig([]models.ProxyHost{host}, "/tmp/caddy-data", "", "", "", false, true, true, true, true, "", rulesets, rulesetPaths, nil, sec) require.NoError(t, err) @@ -364,15 +365,20 @@ func TestGenerateConfig_RateLimitFromSecCfg(t *testing.T) { found := false for _, h := range route.Handle 
{ if hn, ok := h["handler"].(string); ok && hn == "rate_limit" { - if req, ok := h["requests"].(int); ok && req == 10 { - if win, ok := h["window_sec"].(int); ok && win == 60 { - found = true - break + // Check caddy-ratelimit format: rate_limits.static.max_events and window + if rateLimits, ok := h["rate_limits"].(map[string]interface{}); ok { + if static, ok := rateLimits["static"].(map[string]interface{}); ok { + if maxEvents, ok := static["max_events"].(int); ok && maxEvents == 10 { + if window, ok := static["window"].(string); ok && window == "60s" { + found = true + break + } + } } } } } - require.True(t, found, "rate_limit handler with configured values should be present") + require.True(t, found, "rate_limit handler with caddy-ratelimit format should be present") } func TestGenerateConfig_CrowdSecHandlerFromSecCfg(t *testing.T) { @@ -384,13 +390,14 @@ func TestGenerateConfig_CrowdSecHandlerFromSecCfg(t *testing.T) { found := false for _, h := range route.Handle { if hn, ok := h["handler"].(string); ok && hn == "crowdsec" { - if mode, ok := h["mode"].(string); ok && mode == "local" { + // caddy-crowdsec-bouncer expects api_url field + if apiURL, ok := h["api_url"].(string); ok && apiURL == "http://cs.local" { found = true break } } } - require.True(t, found, "crowdsec handler with api_url and mode should be present") + require.True(t, found, "crowdsec handler with api_url should be present") } func TestGenerateConfig_EmptyHostsAndNoFrontend(t *testing.T) { diff --git a/backend/internal/caddy/config_test.go b/backend/internal/caddy/config_test.go index a837cb3e..43bb4588 100644 --- a/backend/internal/caddy/config_test.go +++ b/backend/internal/caddy/config_test.go @@ -311,3 +311,133 @@ func TestBuildACLHandler_AdminWhitelistParsing(t *testing.T) { require.Contains(t, s2, "1.2.3.0/24") require.Contains(t, s2, "192.168.0.1/32") } + +func TestBuildRateLimitHandler_Disabled(t *testing.T) { + // Test nil secCfg returns nil handler + h, err := 
buildRateLimitHandler(nil, nil) + require.NoError(t, err) + require.Nil(t, h) +} + +func TestBuildRateLimitHandler_InvalidValues(t *testing.T) { + // Test zero requests returns nil handler + secCfg := &models.SecurityConfig{ + RateLimitRequests: 0, + RateLimitWindowSec: 60, + } + h, err := buildRateLimitHandler(nil, secCfg) + require.NoError(t, err) + require.Nil(t, h) + + // Test zero window returns nil handler + secCfg2 := &models.SecurityConfig{ + RateLimitRequests: 100, + RateLimitWindowSec: 0, + } + h, err = buildRateLimitHandler(nil, secCfg2) + require.NoError(t, err) + require.Nil(t, h) + + // Test negative values returns nil handler + secCfg3 := &models.SecurityConfig{ + RateLimitRequests: -1, + RateLimitWindowSec: 60, + } + h, err = buildRateLimitHandler(nil, secCfg3) + require.NoError(t, err) + require.Nil(t, h) +} + +func TestBuildRateLimitHandler_ValidConfig(t *testing.T) { + // Test valid configuration produces correct caddy-ratelimit format + secCfg := &models.SecurityConfig{ + RateLimitRequests: 100, + RateLimitWindowSec: 60, + } + h, err := buildRateLimitHandler(nil, secCfg) + require.NoError(t, err) + require.NotNil(t, h) + + // Verify handler type + require.Equal(t, "rate_limit", h["handler"]) + + // Verify rate_limits structure + rateLimits, ok := h["rate_limits"].(map[string]interface{}) + require.True(t, ok, "rate_limits should be a map") + + staticZone, ok := rateLimits["static"].(map[string]interface{}) + require.True(t, ok, "static zone should be a map") + + // Verify caddy-ratelimit specific fields + require.Equal(t, "{http.request.remote.host}", staticZone["key"]) + require.Equal(t, "60s", staticZone["window"]) + require.Equal(t, 100, staticZone["max_events"]) +} + +func TestBuildRateLimitHandler_JSONFormat(t *testing.T) { + // Test that the handler produces valid JSON matching caddy-ratelimit schema + secCfg := &models.SecurityConfig{ + RateLimitRequests: 30, + RateLimitWindowSec: 10, + } + h, err := buildRateLimitHandler(nil, secCfg) + 
require.NoError(t, err) + require.NotNil(t, h) + + // Marshal to JSON and verify structure + b, err := json.Marshal(h) + require.NoError(t, err) + s := string(b) + + // Verify expected JSON content + require.Contains(t, s, `"handler":"rate_limit"`) + require.Contains(t, s, `"rate_limits"`) + require.Contains(t, s, `"static"`) + require.Contains(t, s, `"key":"{http.request.remote.host}"`) + require.Contains(t, s, `"window":"10s"`) + require.Contains(t, s, `"max_events":30`) +} + +func TestGenerateConfig_WithRateLimiting(t *testing.T) { + // Test that rate limiting is included in generated config when enabled + hosts := []models.ProxyHost{ + { + UUID: "test-uuid", + DomainNames: "example.com", + ForwardHost: "app", + ForwardPort: 8080, + Enabled: true, + }, + } + + secCfg := &models.SecurityConfig{ + RateLimitEnable: true, + RateLimitRequests: 60, + RateLimitWindowSec: 60, + } + + // rateLimitEnabled=true should include the handler + config, err := GenerateConfig(hosts, "/tmp/caddy-data", "admin@example.com", "", "", false, false, false, true, false, "", nil, nil, nil, secCfg) + require.NoError(t, err) + require.NotNil(t, config.Apps.HTTP) + + server := config.Apps.HTTP.Servers["charon_server"] + require.NotNil(t, server) + require.Len(t, server.Routes, 1) + + route := server.Routes[0] + // Handlers should include rate_limit + reverse_proxy + require.GreaterOrEqual(t, len(route.Handle), 2) + + // Find the rate_limit handler + var foundRateLimit bool + for _, h := range route.Handle { + if h["handler"] == "rate_limit" { + foundRateLimit = true + // Verify it has the correct structure + require.NotNil(t, h["rate_limits"]) + break + } + } + require.True(t, foundRateLimit, "rate_limit handler should be present") +} diff --git a/backend/internal/caddy/manager.go b/backend/internal/caddy/manager.go index 19e0b867..7656e130 100644 --- a/backend/internal/caddy/manager.go +++ b/backend/internal/caddy/manager.go @@ -69,11 +69,38 @@ func (m *Manager) ApplyConfig(ctx 
context.Context) error { acmeEmail = acmeEmailSetting.Value } - // Fetch SSL Provider setting + // Fetch SSL Provider setting and parse it var sslProviderSetting models.Setting - var sslProvider string + var sslProviderVal string if err := m.db.Where("key = ?", "caddy.ssl_provider").First(&sslProviderSetting).Error; err == nil { - sslProvider = sslProviderSetting.Value + sslProviderVal = sslProviderSetting.Value + } + + // Determine effective provider and staging flag based on the setting value + effectiveProvider := "" + effectiveStaging := false // Default to production + + switch sslProviderVal { + case "letsencrypt-staging": + effectiveProvider = "letsencrypt" + effectiveStaging = true + case "letsencrypt-prod": + effectiveProvider = "letsencrypt" + effectiveStaging = false + case "zerossl": + effectiveProvider = "zerossl" + effectiveStaging = false + case "auto": + effectiveProvider = "" // "both" (auto-select between Let's Encrypt and ZeroSSL) + effectiveStaging = false + default: + // Empty or unrecognized value: fallback to environment variable for backward compatibility + effectiveProvider = "" + if sslProviderVal == "" { + effectiveStaging = m.acmeStaging // Respect env var if setting is unset + } else { + effectiveStaging = false // Unknown value defaults to production + } } // Compute effective security flags (re-read runtime overrides) @@ -194,7 +221,7 @@ func (m *Manager) ApplyConfig(ctx context.Context) error { } } - config, err := generateConfigFunc(hosts, filepath.Join(m.configDir, "data"), acmeEmail, m.frontendDir, sslProvider, m.acmeStaging, crowdsecEnabled, wafEnabled, rateLimitEnabled, aclEnabled, adminWhitelist, rulesets, rulesetPaths, decisions, &secCfg) + config, err := generateConfigFunc(hosts, filepath.Join(m.configDir, "data"), acmeEmail, m.frontendDir, effectiveProvider, effectiveStaging, crowdsecEnabled, wafEnabled, rateLimitEnabled, aclEnabled, adminWhitelist, rulesets, rulesetPaths, decisions, &secCfg) if err != nil { return 
fmt.Errorf("generate config: %w", err) } diff --git a/backend/internal/caddy/manager_ssl_provider_test.go b/backend/internal/caddy/manager_ssl_provider_test.go new file mode 100644 index 00000000..7ba7cdcf --- /dev/null +++ b/backend/internal/caddy/manager_ssl_provider_test.go @@ -0,0 +1,341 @@ +package caddy + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wikid82/charon/backend/internal/config" + "github.com/Wikid82/charon/backend/internal/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/driver/sqlite" + "gorm.io/gorm" +) + +// mockGenerateConfigFunc creates a mock config generator that captures parameters +func mockGenerateConfigFunc(capturedProvider *string, capturedStaging *bool) func([]models.ProxyHost, string, string, string, string, bool, bool, bool, bool, bool, string, []models.SecurityRuleSet, map[string]string, []models.SecurityDecision, *models.SecurityConfig) (*Config, error) { + return func(hosts []models.ProxyHost, storageDir string, acmeEmail string, frontendDir string, sslProvider string, acmeStaging bool, crowdsecEnabled bool, wafEnabled bool, rateLimitEnabled bool, aclEnabled bool, adminWhitelist string, rulesets []models.SecurityRuleSet, rulesetPaths map[string]string, decisions []models.SecurityDecision, secCfg *models.SecurityConfig) (*Config, error) { + *capturedProvider = sslProvider + *capturedStaging = acmeStaging + return &Config{Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{}}}}, nil + } +} + +// TestManager_ApplyConfig_SSLProvider_Auto tests the "auto" SSL provider setting +func TestManager_ApplyConfig_SSLProvider_Auto(t *testing.T) { + // Track the parameters passed to generateConfigFunc + var capturedProvider string + var capturedStaging bool + + // Mock generateConfigFunc to capture parameters + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + 
generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + // Mock Caddy Admin API + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + var config Config + err := json.NewDecoder(r.Body).Decode(&config) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + // Setup DB + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + // Set SSL Provider to "auto" + db.Create(&models.Setting{Key: "caddy.ssl_provider", Value: "auto"}) + + // Setup Manager + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", false, config.SecurityConfig{}) + + // Create a host + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + // Apply Config + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + // Verify that the correct parameters were passed + assert.Equal(t, "", capturedProvider, "auto should map to empty provider (both)") + assert.False(t, capturedStaging, "auto should default to production") +} + +// TestManager_ApplyConfig_SSLProvider_LetsEncryptStaging tests the "letsencrypt-staging" SSL provider setting +func TestManager_ApplyConfig_SSLProvider_LetsEncryptStaging(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) 
+ + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + db.Create(&models.Setting{Key: "caddy.ssl_provider", Value: "letsencrypt-staging"}) + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", false, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "letsencrypt", capturedProvider) + assert.True(t, capturedStaging, "letsencrypt-staging should enable staging") +} + +// TestManager_ApplyConfig_SSLProvider_LetsEncryptProd tests the "letsencrypt-prod" SSL provider setting +func TestManager_ApplyConfig_SSLProvider_LetsEncryptProd(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) 
+ require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + db.Create(&models.Setting{Key: "caddy.ssl_provider", Value: "letsencrypt-prod"}) + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", false, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "letsencrypt", capturedProvider) + assert.False(t, capturedStaging, "letsencrypt-prod should use production") +} + +// TestManager_ApplyConfig_SSLProvider_ZeroSSL tests the "zerossl" SSL provider setting +func TestManager_ApplyConfig_SSLProvider_ZeroSSL(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + db.Create(&models.Setting{Key: "caddy.ssl_provider", Value: "zerossl"}) + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", false, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: 
"127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "zerossl", capturedProvider) + assert.False(t, capturedStaging, "zerossl should use production") +} + +// TestManager_ApplyConfig_SSLProvider_Empty tests empty/missing SSL provider setting +func TestManager_ApplyConfig_SSLProvider_Empty(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + // No SSL provider setting created - should use env var for staging + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + // Set acmeStaging to true via env var simulation + manager := NewManager(client, db, tmpDir, "", true, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "", capturedProvider, "empty should default to auto (both)") + assert.True(t, capturedStaging, "empty should respect env var for staging") +} + +// TestManager_ApplyConfig_SSLProvider_EmptyWithNoStaging tests empty SSL provider with staging=false in env +func 
TestManager_ApplyConfig_SSLProvider_EmptyWithNoStaging(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", false, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "", capturedProvider) + assert.False(t, capturedStaging, "empty with staging=false should default to production") +} + +// TestManager_ApplyConfig_SSLProvider_Unknown tests unrecognized SSL provider value +func TestManager_ApplyConfig_SSLProvider_Unknown(t *testing.T) { + var capturedProvider string + var capturedStaging bool + + originalGenerateConfig := generateConfigFunc + defer func() { generateConfigFunc = originalGenerateConfig }() + generateConfigFunc = mockGenerateConfigFunc(&capturedProvider, &capturedStaging) + + caddyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/load" && r.Method == "POST" { + w.WriteHeader(http.StatusOK) + return + } + 
w.WriteHeader(http.StatusNotFound) + })) + defer caddyServer.Close() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{})) + + db.Create(&models.Setting{Key: "caddy.ssl_provider", Value: "unknown-provider"}) + + tmpDir := t.TempDir() + client := NewClient(caddyServer.URL) + manager := NewManager(client, db, tmpDir, "", true, config.SecurityConfig{}) + + host := models.ProxyHost{ + DomainNames: "example.com", + ForwardHost: "127.0.0.1", + ForwardPort: 8080, + } + db.Create(&host) + + err = manager.ApplyConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "", capturedProvider, "unknown value should default to auto (both)") + assert.False(t, capturedStaging, "unknown value should default to production (not respect env var)") +} diff --git a/backend/internal/cerberus/cerberus.go b/backend/internal/cerberus/cerberus.go index 8832b90c..cc05d481 100644 --- a/backend/internal/cerberus/cerberus.go +++ b/backend/internal/cerberus/cerberus.go @@ -48,12 +48,18 @@ func (c *Cerberus) IsEnabled() bool { // Check database setting (runtime toggle) only if db is provided if c.db != nil { var s models.Setting + // Check feature flag + if err := c.db.Where("key = ?", "feature.cerberus.enabled").First(&s).Error; err == nil { + return strings.EqualFold(s.Value, "true") + } + // Fallback to legacy setting for backward compatibility if err := c.db.Where("key = ?", "security.cerberus.enabled").First(&s).Error; err == nil { return strings.EqualFold(s.Value, "true") } } - return false + // Default to true (Optional Features spec) + return true } // Middleware returns a Gin middleware that enforces Cerberus checks when enabled. 
diff --git a/backend/internal/cerberus/cerberus_isenabled_test.go b/backend/internal/cerberus/cerberus_isenabled_test.go index b7b60471..43202bc4 100644 --- a/backend/internal/cerberus/cerberus_isenabled_test.go +++ b/backend/internal/cerberus/cerberus_isenabled_test.go @@ -51,9 +51,19 @@ func TestIsEnabled_CrowdSecModeLocal(t *testing.T) { require.True(t, c.IsEnabled()) } -func TestIsEnabled_DBSetting(t *testing.T) { +func TestIsEnabled_DBSetting_FeatureFlag(t *testing.T) { db := setupDBForTest(t) - // insert setting to database + // Test new feature flag key + s := models.Setting{Key: "feature.cerberus.enabled", Value: "true"} + require.NoError(t, db.Create(&s).Error) + cfg := config.SecurityConfig{} + c := cerberus.New(cfg, db) + require.True(t, c.IsEnabled()) +} + +func TestIsEnabled_DBSetting_LegacyKey(t *testing.T) { + db := setupDBForTest(t) + // Test backward compatibility with legacy key s := models.Setting{Key: "security.cerberus.enabled", Value: "true"} require.NoError(t, db.Create(&s).Error) cfg := config.SecurityConfig{} @@ -61,9 +71,19 @@ func TestIsEnabled_DBSetting(t *testing.T) { require.True(t, c.IsEnabled()) } +func TestIsEnabled_DBSetting_FeatureFlagTakesPrecedence(t *testing.T) { + db := setupDBForTest(t) + // Feature flag should take precedence over legacy key + require.NoError(t, db.Create(&models.Setting{Key: "feature.cerberus.enabled", Value: "false"}).Error) + require.NoError(t, db.Create(&models.Setting{Key: "security.cerberus.enabled", Value: "true"}).Error) + cfg := config.SecurityConfig{} + c := cerberus.New(cfg, db) + require.False(t, c.IsEnabled()) +} + func TestIsEnabled_DBSettingCaseInsensitive(t *testing.T) { db := setupDBForTest(t) - s := models.Setting{Key: "security.cerberus.enabled", Value: "TrUe"} + s := models.Setting{Key: "feature.cerberus.enabled", Value: "TrUe"} require.NoError(t, db.Create(&s).Error) cfg := config.SecurityConfig{} c := cerberus.New(cfg, db) @@ -72,15 +92,16 @@ func 
TestIsEnabled_DBSettingCaseInsensitive(t *testing.T) { func TestIsEnabled_DBSettingFalse(t *testing.T) { db := setupDBForTest(t) - s := models.Setting{Key: "security.cerberus.enabled", Value: "false"} + s := models.Setting{Key: "feature.cerberus.enabled", Value: "false"} require.NoError(t, db.Create(&s).Error) cfg := config.SecurityConfig{} c := cerberus.New(cfg, db) require.False(t, c.IsEnabled()) } -func TestIsEnabled_DefaultFalse(t *testing.T) { +func TestIsEnabled_DefaultTrue(t *testing.T) { cfg := config.SecurityConfig{} c := cerberus.New(cfg, nil) - require.False(t, c.IsEnabled()) + // Default to true per Optional Features spec + require.True(t, c.IsEnabled()) } diff --git a/backend/internal/cerberus/cerberus_test.go b/backend/internal/cerberus/cerberus_test.go index 0756bd84..6895c5bb 100644 --- a/backend/internal/cerberus/cerberus_test.go +++ b/backend/internal/cerberus/cerberus_test.go @@ -42,6 +42,9 @@ func TestCerberus_IsEnabled_DBSetting(t *testing.T) { func TestCerberus_IsEnabled_Disabled(t *testing.T) { db := setupTestDB(t) + // Per Optional Features spec: when no DB setting exists and no config modes are enabled, + // Cerberus defaults to true (enabled). To test disabled state, we must set DB flag to false. + db.Create(&models.Setting{Key: "feature.cerberus.enabled", Value: "false"}) cfg := config.SecurityConfig{CerberusEnabled: false} cerb := cerberus.New(cfg, db) t.Logf("cfg: %+v", cfg) diff --git a/backend/internal/database/database.go b/backend/internal/database/database.go index 2c343922..8bd0428d 100644 --- a/backend/internal/database/database.go +++ b/backend/internal/database/database.go @@ -1,18 +1,57 @@ package database import ( + "database/sql" "fmt" + "strings" "gorm.io/driver/sqlite" "gorm.io/gorm" ) -// Connect opens a SQLite database connection. +// Connect opens a SQLite database connection with optimized settings. +// Uses WAL mode for better concurrent read/write performance. 
func Connect(dbPath string) (*gorm.DB, error) { - db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{}) + // Add SQLite performance pragmas if not already present + dsn := dbPath + if !strings.Contains(dsn, "?") { + dsn += "?" + } else { + dsn += "&" + } + // WAL mode: better concurrent access, faster writes + // busy_timeout: wait up to 5s instead of failing immediately on lock + // cache: shared cache for better memory usage + // synchronous=NORMAL: good balance of safety and speed + dsn += "_journal_mode=WAL&_busy_timeout=5000&_synchronous=NORMAL&_cache_size=-64000" + + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{ + // Skip default transaction for single operations (faster) + SkipDefaultTransaction: true, + // Prepare statements for reuse + PrepareStmt: true, + }) if err != nil { return nil, fmt.Errorf("open database: %w", err) } + // Configure connection pool + sqlDB, err := db.DB() + if err != nil { + return nil, fmt.Errorf("get underlying db: %w", err) + } + configurePool(sqlDB) + return db, nil } + +// configurePool sets connection pool settings for SQLite. +// SQLite handles concurrency differently than server databases, +// so we use conservative settings. 
+func configurePool(sqlDB *sql.DB) { + // SQLite is file-based, so we limit connections + // but keep some idle for reuse + sqlDB.SetMaxOpenConns(1) // SQLite only allows one writer at a time + sqlDB.SetMaxIdleConns(1) // Keep one connection ready + sqlDB.SetConnMaxLifetime(0) // Don't close idle connections +} diff --git a/backend/internal/services/backup_service.go b/backend/internal/services/backup_service.go index ccf803cf..c9788998 100644 --- a/backend/internal/services/backup_service.go +++ b/backend/internal/services/backup_service.go @@ -8,6 +8,7 @@ import ( "path/filepath" "sort" "strings" + "syscall" "time" "github.com/Wikid82/charon/backend/internal/config" @@ -267,3 +268,13 @@ func (s *BackupService) unzip(src, dest string) error { } return nil } + +// GetAvailableSpace returns the available disk space in bytes for the backup directory +func (s *BackupService) GetAvailableSpace() (int64, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(s.BackupDir, &stat); err != nil { + return 0, fmt.Errorf("failed to get disk space: %w", err) + } + // Available blocks * block size = available bytes + return int64(stat.Bavail) * int64(stat.Bsize), nil +} diff --git a/backend/internal/services/certificate_service_test.go b/backend/internal/services/certificate_service_test.go index 4838dfec..2d27b3c0 100644 --- a/backend/internal/services/certificate_service_test.go +++ b/backend/internal/services/certificate_service_test.go @@ -441,6 +441,36 @@ func TestCertificateService_DeleteCertificate_Errors(t *testing.T) { assert.Equal(t, gorm.ErrRecordNotFound, err) }) + t.Run("delete certificate in use returns ErrCertInUse", func(t *testing.T) { + // Create certificate + domain := "in-use.com" + expiry := time.Now().Add(24 * time.Hour) + certPEM := generateTestCert(t, domain, expiry) + cert, err := cs.UploadCertificate("In Use", string(certPEM), "FAKE KEY") + require.NoError(t, err) + + // Create proxy host using this certificate + ph := models.ProxyHost{ + UUID: 
"test-ph", + Name: "Test Host", + DomainNames: "in-use.com", + ForwardHost: "localhost", + ForwardPort: 8080, + CertificateID: &cert.ID, + } + require.NoError(t, db.Create(&ph).Error) + + // Attempt to delete certificate - should fail with ErrCertInUse + err = cs.DeleteCertificate(cert.ID) + assert.Error(t, err) + assert.Equal(t, ErrCertInUse, err) + + // Verify certificate still exists + var dbCert models.SSLCertificate + err = db.First(&dbCert, "id = ?", cert.ID).Error + assert.NoError(t, err) + }) + t.Run("delete certificate when file already removed", func(t *testing.T) { // Create and upload cert domain := "to-delete.com" @@ -741,6 +771,122 @@ func TestCertificateService_CertificateWithSANs(t *testing.T) { }) } +func TestCertificateService_IsCertificateInUse(t *testing.T) { + tmpDir := t.TempDir() + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared", t.Name()) + db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.SSLCertificate{}, &models.ProxyHost{})) + + cs := newTestCertificateService(tmpDir, db) + + t.Run("certificate not in use", func(t *testing.T) { + // Create certificate without any proxy hosts + domain := "unused.com" + expiry := time.Now().Add(24 * time.Hour) + certPEM := generateTestCert(t, domain, expiry) + cert, err := cs.UploadCertificate("Unused", string(certPEM), "FAKE KEY") + require.NoError(t, err) + + inUse, err := cs.IsCertificateInUse(cert.ID) + assert.NoError(t, err) + assert.False(t, inUse) + }) + + t.Run("certificate used by one proxy host", func(t *testing.T) { + // Create certificate + domain := "used.com" + expiry := time.Now().Add(24 * time.Hour) + certPEM := generateTestCert(t, domain, expiry) + cert, err := cs.UploadCertificate("Used", string(certPEM), "FAKE KEY") + require.NoError(t, err) + + // Create proxy host using this certificate + ph := models.ProxyHost{ + UUID: "ph-1", + Name: "Test Host 1", + DomainNames: "used.com", + ForwardHost: "localhost", + 
ForwardPort: 8080, + CertificateID: &cert.ID, + } + require.NoError(t, db.Create(&ph).Error) + + inUse, err := cs.IsCertificateInUse(cert.ID) + assert.NoError(t, err) + assert.True(t, inUse) + }) + + t.Run("certificate used by multiple proxy hosts", func(t *testing.T) { + // Create certificate + domain := "shared.com" + expiry := time.Now().Add(24 * time.Hour) + certPEM := generateTestCert(t, domain, expiry) + cert, err := cs.UploadCertificate("Shared", string(certPEM), "FAKE KEY") + require.NoError(t, err) + + // Create multiple proxy hosts using this certificate + for i := 1; i <= 3; i++ { + ph := models.ProxyHost{ + UUID: fmt.Sprintf("ph-shared-%d", i), + Name: fmt.Sprintf("Test Host %d", i), + DomainNames: fmt.Sprintf("host%d.shared.com", i), + ForwardHost: "localhost", + ForwardPort: 8080 + i, + CertificateID: &cert.ID, + } + require.NoError(t, db.Create(&ph).Error) + } + + inUse, err := cs.IsCertificateInUse(cert.ID) + assert.NoError(t, err) + assert.True(t, inUse) + }) + + t.Run("non-existent certificate", func(t *testing.T) { + inUse, err := cs.IsCertificateInUse(99999) + assert.NoError(t, err) // No error, just returns false + assert.False(t, inUse) + }) + + t.Run("certificate freed after proxy host deletion", func(t *testing.T) { + // Create certificate + domain := "freed.com" + expiry := time.Now().Add(24 * time.Hour) + certPEM := generateTestCert(t, domain, expiry) + cert, err := cs.UploadCertificate("Freed", string(certPEM), "FAKE KEY") + require.NoError(t, err) + + // Create proxy host using this certificate + ph := models.ProxyHost{ + UUID: "ph-freed", + Name: "Test Host Freed", + DomainNames: "freed.com", + ForwardHost: "localhost", + ForwardPort: 8080, + CertificateID: &cert.ID, + } + require.NoError(t, db.Create(&ph).Error) + + // Verify in use + inUse, err := cs.IsCertificateInUse(cert.ID) + assert.NoError(t, err) + assert.True(t, inUse) + + // Delete the proxy host + require.NoError(t, db.Delete(&ph).Error) + + // Verify no longer in use + 
inUse, err = cs.IsCertificateInUse(cert.ID) + assert.NoError(t, err) + assert.False(t, inUse) + + // Now deletion should succeed + err = cs.DeleteCertificate(cert.ID) + assert.NoError(t, err) + }) +} + func TestCertificateService_CacheBehavior(t *testing.T) { t.Run("cache returns consistent results", func(t *testing.T) { tmpDir := t.TempDir() diff --git a/backend/internal/services/uptime_service.go b/backend/internal/services/uptime_service.go index e9a222b5..e39d013d 100644 --- a/backend/internal/services/uptime_service.go +++ b/backend/internal/services/uptime_service.go @@ -3,8 +3,8 @@ package services import ( "context" "encoding/json" + "errors" "fmt" - "github.com/Wikid82/charon/backend/internal/logger" "net" "net/http" "net/url" @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/Wikid82/charon/backend/internal/logger" "github.com/Wikid82/charon/backend/internal/models" "github.com/Wikid82/charon/backend/internal/util" "gorm.io/gorm" @@ -806,6 +807,47 @@ func (s *UptimeService) FlushPendingNotifications() { } } +// SyncMonitorForHost updates the uptime monitor linked to a specific proxy host. +// This should be called when a proxy host is edited to keep the monitor in sync. +// Returns nil if no monitor exists for the host (does not create one). 
+func (s *UptimeService) SyncMonitorForHost(hostID uint) error { + var host models.ProxyHost + if err := s.DB.First(&host, hostID).Error; err != nil { + return err + } + + var monitor models.UptimeMonitor + if err := s.DB.Where("proxy_host_id = ?", hostID).First(&monitor).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil // No monitor to sync + } + return err + } + + // Update monitor fields based on current proxy host values + domains := strings.Split(host.DomainNames, ",") + firstDomain := "" + if len(domains) > 0 { + firstDomain = strings.TrimSpace(domains[0]) + } + + scheme := "http" + if host.SSLForced { + scheme = "https" + } + + newName := host.Name + if newName == "" { + newName = firstDomain + } + + monitor.Name = newName + monitor.URL = fmt.Sprintf("%s://%s", scheme, firstDomain) + monitor.UpstreamHost = host.ForwardHost + + return s.DB.Save(&monitor).Error +} + // CRUD for Monitors func (s *UptimeService) ListMonitors() ([]models.UptimeMonitor, error) { diff --git a/backend/internal/services/uptime_service_test.go b/backend/internal/services/uptime_service_test.go index 0ff0d262..5fa85341 100644 --- a/backend/internal/services/uptime_service_test.go +++ b/backend/internal/services/uptime_service_test.go @@ -1201,3 +1201,156 @@ func TestFormatDuration(t *testing.T) { assert.Equal(t, tc.expected, result, "formatDuration(%v)", tc.input) } } + +func TestUptimeService_SyncMonitorForHost(t *testing.T) { + t.Run("updates monitor when proxy host is edited", func(t *testing.T) { + db := setupUptimeTestDB(t) + ns := NewNotificationService(db) + us := NewUptimeService(db, ns) + + // Create a proxy host + host := models.ProxyHost{ + UUID: "sync-test-1", + Name: "Original Name", + DomainNames: "original.example.com", + ForwardHost: "10.0.0.1", + ForwardPort: 8080, + SSLForced: false, + Enabled: true, + } + db.Create(&host) + + // Sync monitors to create the uptime monitor + err := us.SyncMonitors() + assert.NoError(t, err) + + // Verify 
monitor was created with original values + var monitor models.UptimeMonitor + err = db.Where("proxy_host_id = ?", host.ID).First(&monitor).Error + assert.NoError(t, err) + assert.Equal(t, "Original Name", monitor.Name) + assert.Equal(t, "http://original.example.com", monitor.URL) + assert.Equal(t, "10.0.0.1", monitor.UpstreamHost) + + // Update the proxy host + host.Name = "Updated Name" + host.DomainNames = "updated.example.com" + host.ForwardHost = "10.0.0.2" + host.SSLForced = true + db.Save(&host) + + // Call SyncMonitorForHost + err = us.SyncMonitorForHost(host.ID) + assert.NoError(t, err) + + // Verify monitor was updated + err = db.Where("proxy_host_id = ?", host.ID).First(&monitor).Error + assert.NoError(t, err) + assert.Equal(t, "Updated Name", monitor.Name) + assert.Equal(t, "https://updated.example.com", monitor.URL) + assert.Equal(t, "10.0.0.2", monitor.UpstreamHost) + }) + + t.Run("returns nil when no monitor exists", func(t *testing.T) { + db := setupUptimeTestDB(t) + ns := NewNotificationService(db) + us := NewUptimeService(db, ns) + + // Create a proxy host without creating a monitor + host := models.ProxyHost{ + UUID: "no-monitor-test", + Name: "No Monitor Host", + DomainNames: "nomonitor.example.com", + ForwardHost: "10.0.0.3", + ForwardPort: 8080, + Enabled: true, + } + db.Create(&host) + + // Call SyncMonitorForHost - should return nil without error + err := us.SyncMonitorForHost(host.ID) + assert.NoError(t, err) + + // Verify no monitor was created + var count int64 + db.Model(&models.UptimeMonitor{}).Where("proxy_host_id = ?", host.ID).Count(&count) + assert.Equal(t, int64(0), count) + }) + + t.Run("returns error when host does not exist", func(t *testing.T) { + db := setupUptimeTestDB(t) + ns := NewNotificationService(db) + us := NewUptimeService(db, ns) + + // Call SyncMonitorForHost with non-existent host ID + err := us.SyncMonitorForHost(99999) + assert.Error(t, err) + }) + + t.Run("uses domain name when proxy host name is empty", func(t 
*testing.T) { + db := setupUptimeTestDB(t) + ns := NewNotificationService(db) + us := NewUptimeService(db, ns) + + // Create a proxy host with a name + host := models.ProxyHost{ + UUID: "empty-name-test", + Name: "Has Name", + DomainNames: "domain.example.com", + ForwardHost: "10.0.0.4", + ForwardPort: 8080, + Enabled: true, + } + db.Create(&host) + + // Sync monitors + err := us.SyncMonitors() + assert.NoError(t, err) + + // Clear the host name + host.Name = "" + db.Save(&host) + + // Call SyncMonitorForHost + err = us.SyncMonitorForHost(host.ID) + assert.NoError(t, err) + + // Verify monitor uses domain name + var monitor models.UptimeMonitor + err = db.Where("proxy_host_id = ?", host.ID).First(&monitor).Error + assert.NoError(t, err) + assert.Equal(t, "domain.example.com", monitor.Name) + }) + + t.Run("handles multiple domains correctly", func(t *testing.T) { + db := setupUptimeTestDB(t) + ns := NewNotificationService(db) + us := NewUptimeService(db, ns) + + // Create a proxy host with multiple domains + host := models.ProxyHost{ + UUID: "multi-domain-test", + Name: "Multi Domain", + DomainNames: "first.example.com, second.example.com, third.example.com", + ForwardHost: "10.0.0.5", + ForwardPort: 8080, + SSLForced: true, + Enabled: true, + } + db.Create(&host) + + // Sync monitors + err := us.SyncMonitors() + assert.NoError(t, err) + + // Call SyncMonitorForHost + err = us.SyncMonitorForHost(host.ID) + assert.NoError(t, err) + + // Verify monitor uses first domain + var monitor models.UptimeMonitor + err = db.Where("proxy_host_id = ?", host.ID).First(&monitor).Error + assert.NoError(t, err) + assert.Equal(t, "https://first.example.com", monitor.URL) + }) +} diff --git a/docs/api.md b/docs/api.md index 9aff26d5..f4731de0 100644 --- a/docs/api.md +++ b/docs/api.md @@ -187,6 +187,95 @@ Response 200: `{ "deleted": true }` --- +### SSL Certificates + +#### List All Certificates + +```http +GET /certificates +``` + +**Response 200:** +```json +[ + { + "id": 1, + 
"uuid": "cert-uuid-123", + "name": "My Custom Cert", + "provider": "custom", + "domains": "example.com, www.example.com", + "expires_at": "2026-01-01T00:00:00Z", + "created_at": "2025-01-01T10:00:00Z" + } +] +``` + +#### Upload Certificate + +```http +POST /certificates/upload +Content-Type: multipart/form-data +``` + +**Request Body:** +- `name` (required) - Certificate name +- `certificate_file` (required) - Certificate file (.crt or .pem) +- `key_file` (required) - Private key file (.key or .pem) + +**Response 201:** +```json +{ + "id": 1, + "uuid": "cert-uuid-123", + "name": "My Custom Cert", + "provider": "custom", + "domains": "example.com" +} +``` + +#### Delete Certificate + +Delete a certificate. Requires that the certificate is not currently in use by any proxy hosts. + +```http +DELETE /certificates/:id +``` + +**Parameters:** +- `id` (path) - Certificate ID (numeric) + +**Response 200:** +```json +{ + "message": "certificate deleted" +} +``` + +**Response 400:** +```json +{ + "error": "invalid id" +} +``` + +**Response 409:** +```json +{ + "error": "certificate is in use by one or more proxy hosts" +} +``` + +**Response 500:** +```json +{ + "error": "failed to delete certificate" +} +``` + +**Note:** A backup is automatically created before deletion. The certificate files are removed from disk along with the database record. + +--- + ### Proxy Hosts #### List All Proxy Hosts diff --git a/docs/features.md index 2cf9ec57..bb6962a9 100644 --- a/docs/features.md +++ b/docs/features.md @@ -4,6 +4,54 @@ Here's everything Charon can do for you, explained simply. --- +## ⚙️ Optional Features + +Charon includes optional features that can be toggled on or off based on your needs. All features are enabled by default, giving you the full Charon experience from the start. + +### What Are Optional Features? + +**What it does:** Lets you enable or disable major features like security monitoring and uptime checks.
+ +**Why you care:** If you don't need certain features, turning them off keeps your sidebar cleaner and saves system resources. + +**Where to find it:** Go to **System Settings** → Scroll to **Optional Features** + +### Available Optional Features + +#### Cerberus Security Suite +- **What it is:** Complete security system including CrowdSec integration, country blocking, WAF protection, and access control +- **When enabled:** Security menu appears in sidebar, all protection features are active +- **When disabled:** Security menu is hidden, all protection stops, but configuration data is preserved +- **Default:** Enabled + +#### Uptime Monitoring +- **What it is:** Background checks that monitor if your websites are responding +- **When enabled:** Uptime menu appears in sidebar, automatic checks run every minute +- **When disabled:** Uptime menu is hidden, background checks stop, but uptime history is preserved +- **Default:** Enabled + +### What Happens When Disabled? + +When you disable a feature: + +- ✅ **Sidebar item is hidden** — Keeps your navigation clean +- ✅ **Background jobs stop** — Saves CPU and memory resources +- ✅ **API requests are blocked** — Feature-specific endpoints return appropriate errors +- ✅ **Configuration data is preserved** — Your settings remain intact if you re-enable the feature + +**Important:** Disabling a feature does NOT delete your data. All your security rules, uptime history, and configurations stay safe in the database. You can re-enable features at any time without losing anything. + +### How to Toggle Features + +1. Go to **System Settings** +2. Scroll to the **Optional Features** section +3. Toggle the switch for the feature you want to enable/disable +4. Changes take effect immediately + +**Note:** Both features default to enabled when you first install Charon. This gives you full functionality out of the box. 
+ +--- + ## 🔐 SSL Certificates (The Green Lock) **What it does:** Makes browsers show a green lock next to your website address. @@ -11,6 +59,52 @@ Here's everything Charon can do for you, explained simply. **Why you care:** Without it, browsers scream "NOT SECURE!" and people won't trust your site. **What you do:** Nothing. Charon gets free certificates from Let's Encrypt and renews them automatically. +### Choose Your SSL Provider + +**What it does:** Lets you select which Certificate Authority (CA) issues your SSL certificates. + +**Why you care:** Different providers have different rate limits and reliability. You also get a staging option for testing. + +**Where to find it:** Go to System Settings → SSL Provider dropdown + +**Available options:** + +- **Auto (Recommended)** — The smart default. Tries Let's Encrypt first, automatically falls back to ZeroSSL if there are any issues. Best reliability with zero configuration. + +- **Let's Encrypt (Prod)** — Uses only Let's Encrypt production servers. Choose this if you specifically need Let's Encrypt certificates and have no rate limit concerns. + +- **Let's Encrypt (Staging)** — For testing purposes only. Issues certificates that browsers won't trust, but lets you test your configuration without hitting rate limits. See [Testing SSL Certificates](acme-staging.md) for details. + +- **ZeroSSL** — Uses only ZeroSSL as your certificate provider. Choose this if you prefer ZeroSSL or are hitting Let's Encrypt rate limits. + +**Recommended setting:** Leave it on "Auto (Recommended)" unless you have a specific reason to change it. The auto mode gives you the best of both worlds—Let's Encrypt's speed with ZeroSSL as a backup.
+ +**When to change it:** +- Testing configurations → Use "Let's Encrypt (Staging)" +- Hitting rate limits → Switch to "ZeroSSL" +- Specific CA requirement → Choose that specific provider +- Otherwise → Keep "Auto" +### Smart Certificate Cleanup + +**What it does:** When you delete websites, Charon asks if you want to delete unused certificates too. + +**Why you care:** Custom and staging certificates can pile up over time. This helps you keep things tidy. + +**How it works:** +- Delete a website → Charon checks if its certificate is used elsewhere +- If the certificate is custom or staging (not Let's Encrypt) and orphaned → you get a prompt +- Choose to keep or delete the certificate +- Default is "keep" (safe choice) + +**When it prompts:** +- ✅ Custom certificates you uploaded +- ✅ Staging certificates (for testing) +- ❌ Let's Encrypt certificates (managed automatically) + +**What you do:** +- See the prompt after clicking Delete on a proxy host +- Check the box if you want to delete the orphaned certificate +- Leave unchecked to keep the certificate (in case you need it later) --- @@ -137,6 +231,18 @@ When you change security settings, you see Cerberus—the three-headed guard dog --- +## 📊 Uptime Monitoring + +**What it does:** Automatically checks if your websites are responding every minute. + +**Why you care:** Get visibility into uptime history and response times for all your proxy hosts. + +**What you do:** View the "Uptime" page in the sidebar. Uptime checks run automatically in the background. + +**Optional:** You can disable this feature in System Settings → Optional Features if you don't need it. Your uptime history will be preserved. + +--- + ## 📋 Logs & Monitoring **What it does:** Shows you what's happening with your proxy. @@ -165,17 +271,7 @@ When you change security settings, you see Cerberus—the three-headed guard dog **What you do:** Nothing—WebSockets work automatically.
---  -## 📊 Uptime Monitoring (Coming Soon) - -**What it does:** Checks if your websites are responding. - -**Why you care:** Get notified when something goes down. - -**Status:** Coming in a future update. - ---- ## 📱 Mobile-Friendly Interface diff --git a/docs/getting-started.md index 45efbc48..19c3995b 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -93,12 +93,14 @@ For this to work, you need: If you have both, Charon will automatically: -- Request a free SSL certificate from Let's Encrypt +- Request a free SSL certificate from a trusted provider - Install it - Renew it before it expires **You don't do anything.** It just works. +By default, Charon uses "Auto" mode, which tries Let's Encrypt first and automatically falls back to ZeroSSL if needed. You can change this in System Settings if you want to use a specific certificate provider. + **Testing without a domain?** See [Testing SSL Certificates](acme-staging.md) for a practice mode. --- @@ -125,6 +127,10 @@ In your domain provider's control panel: Wait 5-10 minutes for it to update. +### "Can I change which certificate provider is used?" + +Yes! Go to **System Settings** and look for the **SSL Provider** dropdown. The default "Auto" mode works best for most users, but you can choose a specific provider if needed. See [Features](features.md#choose-your-ssl-provider) for details. + ### "Can I use this for apps on different computers?" Yes! Just use the other computer's IP address in the "Forward To" field.
@@ -148,7 +154,8 @@ Now that you have the basics: - **[See All Features](features.md)** — Discover what else Charon can do - **[Import Your Old Config](import-guide.md)** — Bring your existing Caddy setup -- **[Turn On Security](security.md)** — Block attackers (optional but recommended) +- **[Configure Optional Features](features.md#%EF%B8%8F-optional-features)** — Enable/disable features like security and uptime monitoring +- **[Turn On Security](security.md)** — Block attackers (enabled by default, highly recommended) --- diff --git a/docs/issues/hectate.md b/docs/issues/hectate.md new file mode 100644 index 00000000..0672a627 --- /dev/null +++ b/docs/issues/hectate.md @@ -0,0 +1,168 @@ +# Hecate: Tunnel & Pathway Manager + +## 1. Overview +**Hecate** is the internal module within Charon responsible for managing third-party tunneling services. It serves as the "Goddess of Pathways," allowing Charon to route traffic not just to local ports, but through encrypted tunnels to remote networks without exposing ports on the public internet. + +## 2. Architecture + +Hecate is not a separate binary; it is a **Go package** (`internal/hecate`) running within the main Charon daemon. + +### 2.1 The Provider Interface +To support multiple services (Tailscale, Cloudflare, Netbird), Hecate uses a strict Interface pattern. + +```go +type TunnelProvider interface { + // Name returns the unique ID of the provider (e.g., "tailscale-01") + Name() string + + // Status returns the current health (Connected, Connecting, Error) + Status() TunnelState + + // Start initiates the tunnel daemon + Start(ctx context.Context) error + + // Stop gracefully terminates the connection + Stop() error + + // GetAddress returns the internal IP/DNS routed through the tunnel + GetAddress() string +} +``` + +### 2.2 Supported Integrations (Phase 1) + +#### Cloudflare Tunnels (cloudflared) +- **Mechanism**: Charon manages the `cloudflared` binary via `os/exec`. 
+- **Config**: User provides the Token via the UI. +- **Outcome**: Exposes Charon directly to the edge without opening port 80/443 on the router. + +#### Tailscale / Headscale +- **Mechanism**: Uses `tsnet` (Tailscale's Go library) to embed the node directly into Charon, OR manages the `tailscaled` socket. +- **Outcome**: Charon becomes a node on the Mesh VPN. + +## 3. Dashboard Implementation (Unified UI) + +**Hecate does NOT have a separate "Tunnels" tab.** +Instead, it is fully integrated into the **Remote Servers** dashboard to provide a unified experience for managing connectivity. + +### 3.1 "Add Server" Workflow +When a user clicks "Add Server" in the dashboard, they are presented with a **Connection Type** dropdown that determines how Charon reaches the target. + +#### Connection Types: +1. **Direct / Manual (Existing)** + * **Use Case**: The server is on the same LAN or reachable via a static IP/DNS. + * **Fields**: `Host`, `Port`, `TLS Toggle`. + * **Backend**: Standard TCP dialer. + +2. **Orthrus Agent (New)** + * **Use Case**: The server is behind a NAT/Firewall and cannot accept inbound connections. + * **Workflow**: + * User selects "Orthrus Agent". + * Charon generates a unique `AUTH_KEY`. + * UI displays a `docker-compose.yml` snippet pre-filled with the key and `CHARON_LINK`. + * User deploys the agent on the remote host. + * Hecate waits for the incoming WebSocket connection. + +3. **Cloudflare Tunnel (Future)** + * **Use Case**: Exposing a service via Cloudflare's edge network. + * **Fields**: `Tunnel Token`. + * **Backend**: Hecate spawns/manages the `cloudflared` process. + +### 3.2 Hecate's Role +Hecate acts as the invisible backend engine for these non-direct connection types. It manages the lifecycle of the tunnels and agents, while the UI simply shows the status (Online/Offline) of the "Server". 
+ +### 3.3 Install Options & UX Snippets +When a user selects `Orthrus Agent` or chooses a `Managed Tunnel` flow, the UI should offer multiple installation options so both containerized and non-containerized environments are supported. + +Provide these install options as tabs/snippets in the `Add Server` flow: + +- **Docker Compose**: A one-file snippet the user can copy/paste (already covered in `orthrus` docs). +- **Standalone Binary + systemd**: Download URL, SHA256, install+`systemd` unit snippet for Linux hosts. +- **Tarball + Installer**: For offline installs with checksum verification. +- **Deb / RPM**: `apt`/`yum` install commands (when packages are available). +- **Homebrew**: `brew tap` + `brew install` for macOS / Linuxbrew users. +- **Kubernetes DaemonSet**: YAML for fleet or cluster-based deployments. + +UI Requirements: +- Show the generated `AUTH_KEY` prominently and a single-copy button. +- Provide checksum and GPG signature links for any downloadable artifact. +- Offer a small troubleshooting panel with commands like `journalctl -u orthrus -f` and `systemctl status orthrus`. +- Allow the user to copy a recommended sidecar snippet that runs a VPN client (e.g., Tailscale) next to Orthrus when desired. + + +## 4. API Endpoints +- `GET /api/hecate/status` - Returns health of all tunnels. +- `POST /api/hecate/configure` - Accepts auth tokens and provider types. +- `POST /api/hecate/logs` - Streams logs from the underlying tunnel binary (e.g., cloudflared logs) for debugging. + +## 5. Security (Cerberus Integration) +Traffic entering through Hecate must still pass through Cerberus. +- Tunnels terminate **before** the middleware chain. +- Requests from a Cloudflare Tunnel are tagged `source:tunnel` and subjected to the same WAF rules as standard traffic. + +## 6. Implementation Details + +### 6.1 Process Supervision +Hecate will act as a process supervisor for external binaries like `cloudflared`. 
+- **Supervisor Pattern**: A `TunnelManager` struct will maintain a map of active `TunnelProvider` instances. +- **Lifecycle**: + - On startup, `TunnelManager` loads enabled configs from the DB. + - It launches the binary using `os/exec`. + - It monitors the process state. If the process exits unexpectedly, it triggers a **Restart Policy** (Exponential Backoff: 5s, 10s, 30s, 1m). +- **Graceful Shutdown**: When Charon shuts down, Hecate must send `SIGTERM` to all child processes and wait (with timeout) for them to exit. + +### 6.2 Secrets Management +API tokens and sensitive credentials must not be stored in plaintext. +- **Encryption**: Sensitive fields (like Cloudflare Tokens) will be encrypted at rest in the SQLite database using AES-GCM. +- **Key Management**: An encryption key will be generated on first run and stored in `data/keys/hecate.key` (secured with 600 permissions), or provided via `CHARON_SECRET_KEY` env var. + +### 6.3 Logging & Observability +- **Capture**: The `TunnelProvider` implementation will attach to the `Stdout` and `Stderr` pipes of the child process. +- **Storage**: + - **Hot Logs**: A circular buffer (Ring Buffer) in memory (last 1000 lines) for real-time dashboard viewing. + - **Cold Logs**: Rotated log files stored in `data/logs/tunnels/.log`. +- **Streaming**: The frontend will consume logs via a WebSocket endpoint (`/api/ws/hecate/logs/:id`) or Server-Sent Events (SSE) to display real-time output. + +### 6.4 Frontend Components +- **TunnelStatusBadge**: Visual indicator (Green=Connected, Yellow=Starting, Red=Error/Stopped). +- **LogViewer**: A terminal-like component (using `xterm.js` or a virtualized list) to display the log stream. +- **ConfigForm**: A dynamic form that renders fields based on the selected provider (e.g., "Token" for Cloudflare, "Auth Key" for Tailscale). + +## 7. Database Schema + +We will introduce a new GORM model `TunnelConfig` in `internal/models`. 
+ +```go +package models + +import ( + "time" + "github.com/google/uuid" + "gorm.io/datatypes" +) + +type TunnelProviderType string + +const ( + ProviderCloudflare TunnelProviderType = "cloudflare" + ProviderTailscale TunnelProviderType = "tailscale" +) + +type TunnelConfig struct { + ID uuid.UUID `gorm:"type:uuid;primaryKey" json:"id"` + Name string `gorm:"not null" json:"name"` // User-friendly name (e.g., "Home Lab Tunnel") + Provider TunnelProviderType `gorm:"not null" json:"provider"` + + // EncryptedCredentials stores the API token or Auth Key. + // It is encrypted at rest and decrypted only when starting the process. + EncryptedCredentials []byte `gorm:"not null" json:"-"` + + // Configuration stores provider-specific settings (JSON). + // e.g., Cloudflare specific flags, region settings, etc. + Configuration datatypes.JSON `json:"configuration"` + + IsActive bool `gorm:"default:false" json:"is_active"` // User's desired state + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} +``` diff --git a/docs/issues/orthrus.md b/docs/issues/orthrus.md new file mode 100644 index 00000000..d503d403 --- /dev/null +++ b/docs/issues/orthrus.md @@ -0,0 +1,236 @@ +# Orthrus: Remote Socket Proxy Agent + +## 1. Overview +**Orthrus** is a lightweight, standalone agent designed to run on remote servers. Named after the brother of Cerberus, its job is to guard the remote resource and securely transport it back to Charon. + +It eliminates the need for SSH tunneling or complex port forwarding by utilizing the tunneling protocols managed by Hecate. + +## 2. Operational Logic +Orthrus operates in **Reverse Mode**. It does not listen on a public port. Instead, it dials *out* to the tunneling network to connect with Charon. +--- + +### 2.1 Core Functions +1. **Docker Socket Proxy:** Securely proxies the remote server's `/var/run/docker.sock` so Charon can auto-discover containers on the remote host. +2.
**Service Proxy:** Proxies specific localhost ports (e.g., a database on port 5432) over the tunnel. + +## 3. Technical Implementation + +### 3.1 Tech Stack +* **Language:** Go (Golang) +* **Base Image:** `scratch` or `alpine` (Goal: < 20MB image size) + +### 3.2 Configuration (Environment Variables) +Orthrus is configured entirely via Environment Variables for easy Docker Compose deployment. + +| Variable | Description | +| :--- | :--- | +| `ORTHRUS_NAME` | Unique identifier for this agent (e.g., `vps-london-01`) | +| `ORTHRUS_MODE` | `socket` (Docker Socket) or `port` (Specific Port) | +| `CHARON_LINK` | The IP/DNS of the main Charon server (e.g., `100.x.y.z:8080` or `charon.example.com`) | +| `AUTH_KEY` | A shared secret or JWT generated by Charon to authorize this agent | + +### 3.3 External Connectivity +**Orthrus does NOT manage VPNs or network tunnels internally.** + +It relies entirely on the host operating system for network connectivity. +1. **User Responsibility**: The user must ensure the host running Orthrus can reach the `CHARON_LINK` address. +2. **VPNs**: If you are using Tailscale, WireGuard, or ZeroTier, you must install and configure the VPN client on the **Host OS** (or a sidecar container). Orthrus simply dials the IP provided in `CHARON_LINK`. +3. **Reverse Mode**: Orthrus initiates the connection. Charon waits for the incoming handshake. This means you do not need to open inbound ports on the Orthrus side, but Charon must be reachable. + +### 3.4 The "Leash" Protocol (Communication) +Orthrus communicates with Charon via a custom gRPC stream or WebSocket called "The Leash." + +1. **Handshake**: Orthrus connects to `Charon:InternalIP`. +2. **Auth**: Orthrus presents the `AUTH_KEY`. +3. **Registration**: Orthrus tells Charon: *"I have access to Docker Network X and Port Y."* +4. **Tunneling**: Charon requests a resource; Orthrus pipes the data securely over "The Leash." + +## 4. 
Deployment Example (Docker Compose) + +```yaml +services: + orthrus: + image: wikid82/orthrus:latest + container_name: orthrus-agent + restart: always + environment: + - ORTHRUS_NAME=remote-media-server + - CHARON_LINK=100.x.y.z:8080 + - AUTH_KEY=ch_xxxxx_secret + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + # No ports required! +``` + +## 5. Security Considerations +* **Read-Only Socket**: By default, Orthrus mounts the Docker socket as Read-Only to prevent Charon (or a compromised Charon) from destroying the remote server. +* **Mutual TLS (mTLS)**: All communication between Charon and Orthrus should be encrypted via mTLS if not running inside an encrypted VPN (like Tailscale). + +## 6. Implementation Details + +### 6.1 Communication Architecture +Orthrus uses a **Reverse Tunnel** architecture established via **WebSockets** with **Yamux** multiplexing. + +1. **Transport**: Secure WebSocket (`wss://`) initiates the connection from Orthrus to Charon. This bypasses inbound firewall rules on the remote network. +2. **Multiplexing**: [Yamux](https://github.com/hashicorp/yamux) is used over the WebSocket stream to create multiple logical channels. + * **Control Channel (Stream ID 0)**: Handles heartbeats, configuration updates, and command signals. + * **Data Channels (Stream ID > 0)**: Ephemeral streams created for each proxied request (e.g., a single HTTP request to the Docker socket or a TCP connection to a database). + +### 6.2 Authentication & Security +* **Token-Based Handshake**: The `AUTH_KEY` is passed in the `Authorization` header during the WebSocket Upgrade request. +* **mTLS (Mutual TLS)**: + * **Charon as CA**: Charon maintains an internal Certificate Authority. + * **Enrollment**: On first connect with a valid `AUTH_KEY`, Orthrus generates a private key and sends a CSR. Charon signs it and returns the certificate. 
+ * **Rotation**: Orthrus monitors certificate expiry and initiates a renewal request over the Control Channel 24 hours before expiration. +* **Encryption**: All traffic is TLS 1.3 encrypted. + +### 6.3 Docker Socket Proxying (The "Muzzle") +To prevent security risks, Orthrus does not blindly pipe traffic to `/var/run/docker.sock`. It implements an application-level filter (The "Muzzle"): +1. **Parser**: Intercepts HTTP requests destined for the socket. +2. **Allowlist**: Only permits safe methods/endpoints (e.g., `GET /v1.xx/containers/json`, `GET /v1.xx/info`). +3. **Blocking**: Rejects `POST`, `DELETE`, `PUT` requests (unless explicitly configured to allow specific actions like "Restart Container") with a `403 Forbidden`. + +### 6.4 Heartbeat & Health +* **Mechanism**: Orthrus sends a custom "Ping" packet over the Control Channel every 5 seconds. +* **Timeout**: Charon expects a "Ping" within 10 seconds. If missed, the agent is marked `Offline`. +* **Reconnection**: Orthrus implements exponential backoff (1s, 2s, 4s... max 30s) to reconnect if the link is severed. + +## 7. Protocol Specification ("The Leash") + +### 7.1 Handshake +```http +GET /api/v1/orthrus/connect HTTP/1.1 +Host: charon.example.com +Upgrade: websocket +Connection: Upgrade +Authorization: Bearer <AUTH_KEY> +X-Orthrus-Version: 1.0.0 +X-Orthrus-ID: <ORTHRUS_NAME> +``` + +### 7.2 Message Types (Control Channel) +Messages are Protobuf-encoded for efficiency. + +* `HEARTBEAT`: `{ timestamp: int64, load_avg: float, memory_usage: int }` +* `PROXY_REQUEST`: Sent by Charon to request a new stream. `{ stream_id: int, target_type: "docker"|"tcp", target_addr: "localhost:5432" }` +* `CONFIG_UPDATE`: Sent by Charon to update allowlists or rotation policies. + +### 7.3 Data Flow +1. **Charon** receives a request for a remote container (e.g., user views logs). +2. **Charon** sends `PROXY_REQUEST` on Control Channel. +3. **Orthrus** accepts, opens a new Yamux stream. +4. **Orthrus** dials the local Docker socket. +5. 
**Orthrus** pipes the stream, applying "The Muzzle" filter in real-time. + +## 8. Repository Structure (Monorepo) + +Orthrus resides in the **same repository** as Charon to ensure protocol synchronization and simplified CI/CD. + +### 8.1 Directory Layout +To maintain a lightweight footprint (< 20MB), Orthrus uses a separate Go module within the `agent/` directory. This prevents it from inheriting Charon's heavy backend dependencies (GORM, SQLite, etc.). + +```text +/projects/Charon +├── go.work # Manages the workspace (includes ./backend and ./agent) +├── backend/ # The Main Server (Heavy) +│ ├── go.mod +│ └── ... +├── agent/ # Orthrus (Lightweight) +│ ├── go.mod # Separate dependencies (Standard Lib + Yamux) +│ ├── main.go +│ └── Dockerfile # Separate build process +└── protocol/ # Shared Definitions (Protobufs) + ├── go.mod + └── leash.proto +``` + +### 8.2 Build Strategy +* **Charon**: Built from `backend/Dockerfile`. +* **Orthrus**: Built from `agent/Dockerfile`. +* **CI/CD**: A single GitHub Action workflow builds and pushes both images (`charon:latest` and `orthrus:latest`) synchronously. + +## 9. Packaging & Install Options + +Orthrus should be distributed in multiple formats so users can choose one that fits their environment and security posture. + +### 9.1 Supported Distribution Formats +- **Docker / Docker Compose**: easiest for container-based hosts. +- **Standalone static binary (recommended)**: small, copy to `/usr/local/bin`, run via `systemd`. +- **Deb / RPM packages**: for managed installs via `apt`/`yum`. +- **Homebrew formula**: for macOS / Linuxbrew users. +- **Tarball with installer**: for offline or custom installs. +- **Kubernetes DaemonSet**: for fleet deployment inside clusters. 
+ +### 9.2 Quick Install Snippets (copyable) + +1) Docker Compose + +```yaml +version: "3.8" +services: + orthrus: + image: wikid82/orthrus:latest + restart: always + environment: + - ORTHRUS_NAME=remote-media-server + - CHARON_LINK=100.x.y.z:8080 + - AUTH_KEY=REPLACE_WITH_AUTH_KEY + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro +``` + +2) Standalone binary + `systemd` (Linux) + +```bash +# download and install +curl -L https://example.com/orthrus/latest/orthrus-linux-amd64 -o /usr/local/bin/orthrus +chmod +x /usr/local/bin/orthrus + +# systemd unit (/etc/systemd/system/orthrus.service) +cat > /etc/systemd/system/orthrus.service <<'EOF' +[Unit] +Description=Orthrus agent +After=network.target + +[Service] +Environment=ORTHRUS_NAME=remote-media-server +Environment=CHARON_LINK=100.x.y.z:8080 +Environment=AUTH_KEY=REPLACE_WITH_AUTH_KEY +ExecStart=/usr/local/bin/orthrus +Restart=on-failure +User=root + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable --now orthrus +``` + +3) Tarball + install script + +```bash +curl -L -o orthrus.tar.gz https://example.com/orthrus/vX.Y.Z/orthrus-linux-amd64.tar.gz +sha256sum orthrus.tar.gz # compare with UI-provided hash +tar -xzf orthrus.tar.gz -C /usr/local/bin +chmod +x /usr/local/bin/orthrus +# then use the systemd unit above +``` + +4) Homebrew (macOS / Linuxbrew) + +``` +brew tap wikid82/charon +brew install orthrus +``` + +5) Kubernetes DaemonSet + +Provide a DaemonSet YAML referencing the `orthrus` image and the required env vars (`AUTH_KEY`, `CHARON_LINK`), optionally mounting the Docker socket or using hostNetworking. + +### 9.3 Security & UX Notes +- Provide SHA256 checksums and GPG signatures for binary downloads. +- Avoid recommending `curl | sh`; prefer explicit steps and checksum verification. +- The Hecate UI should present each snippet as a selectable tab with a copy button and an inline checksum. 
+- Offer a one-click `AUTH_KEY` regenerate action in the UI and mark old keys revoked. diff --git a/docs/plans/current_spec.md b/docs/plans/current_spec.md index 0ad50afb..23ada78b 100644 --- a/docs/plans/current_spec.md +++ b/docs/plans/current_spec.md @@ -1,98 +1,66 @@ -## 📋 Plan: Security Hardening, User Gateway & Identity +# Plan: Refactor Feature Flags to Optional Features -### 🧐 UX & Context Analysis +## Overview +Refactor the existing "Feature Flags" system into a user-friendly "Optional Features" section in System Settings. This involves renaming, consolidating toggles (Cerberus, Uptime), and enforcing behavior (hiding sidebar items, stopping background jobs) when features are disabled. -This plan expands on the initial security hardening to include a full **Identity Provider (IdP)** feature set. This allows Charon to manage users, invite them via email, and let them log in using external providers (SSO), while providing seamless access to downstream apps. +## User Requirements +1. **Rename**: 'Feature Flags' -> 'Optional Features'. +2. **Cerberus**: Move global toggle to 'Optional Features'. +3. **Uptime**: Add toggle to 'Optional Features'. +4. **Cleanup**: Remove unused flags (`feature.global.enabled`, `feature.notifications.enabled`, `feature.docker.enabled`). +5. **Behavior**: + - **Default**: Cerberus and Uptime ON. + - **OFF State**: Hide from Sidebar, stop background jobs, block notifications. + - **Persistence**: Do NOT delete data when disabled. -#### 1. The User Gateway (Forward Auth) -* **Scenario:** Admin shares `jellyseerr.example.com` with a friend. -* **Flow:** - 1. Friend visits `jellyseerr.example.com`. - 2. Redirected to Charon Login. - 3. Logs in via **Plex / Google / GitHub** OR Local Account. - 4. Charon verifies access. - 5. Charon redirects back to Jellyseerr, injecting `X-Forwarded-User: friend@email.com`. - 6. **Magic:** Jellyseerr (configured for header auth) sees the header and logs the friend in automatically. 
**No second login.** +## Implementation Details -#### 2. User Onboarding (SMTP & Invites) -* **Problem:** Admin shouldn't set passwords manually. -* **Solution:** Admin enters email -> Charon sends Invite Link -> User clicks link -> User sets Password & Name. +### 1. Backend Changes -#### 3. User-Centric Permissions (Allow/Block Lists) -* **Concept:** Instead of managing groups, Admin manages permissions *per user*. -* **UX:** - * Go to **Users** -> Edit User -> **Permissions** Tab. - * **Mode:** Toggle between **"Allow All (Blacklist)"** or **"Deny All (Whitelist)"**. - * **Exceptions:** Multi-select list of Proxy Hosts. - * *Example:* Set Mode to "Deny All", select "Jellyseerr". User can ONLY access Jellyseerr. - * *Example:* Set Mode to "Allow All", select "Home Assistant". User can access everything EXCEPT Home Assistant. +#### `backend/internal/api/handlers/feature_flags_handler.go` +- Update `defaultFlags` list: + - Keep: `feature.cerberus.enabled`, `feature.uptime.enabled` + - Remove: `feature.global.enabled`, `feature.notifications.enabled`, `feature.docker.enabled` +- Ensure defaults are `true` if not set in DB or Env. -### 🤝 Handoff Contract (The Truth) +#### `backend/internal/cerberus/cerberus.go` +- Update `IsEnabled()` to check `feature.cerberus.enabled` instead of `security.cerberus.enabled`. +- Maintain backward compatibility or migrate existing setting if necessary (or just switch to the new key). -#### 1. Auth Verification (Internal API for Caddy) -* **Endpoint:** `GET /api/auth/verify` -* **Response Headers:** - * `X-Forwarded-User`: The user's email or username. - * `X-Forwarded-Groups`: (Future) User roles/groups. +#### `backend/internal/api/routes/routes.go` +- **Uptime Background Job**: + - In the `go func()` that runs the ticker: + - Check `feature.uptime.enabled` before running `uptimeService.CheckAll()`. + - If disabled, skip the check. 
+- **Cerberus Middleware**: + - The middleware already calls `IsEnabled()`, so updating `cerberus.go` is sufficient. -#### 2. SMTP Configuration -```json -// POST /api/settings/smtp -{ - "host": "smtp.gmail.com", - "port": 587, - "username": "admin@example.com", - "password": "app-password", - "from_address": "Charon ", - "encryption": "starttls" // none, ssl, starttls -} -``` +### 2. Frontend Changes -#### 3. User Permissions -```json -// POST /api/users -{ - "email": "friend@example.com", - "role": "user", - "permission_mode": "deny_all", // or "allow_all" - "permitted_hosts": [1, 4, 5] // List of ProxyHost IDs to treat as exceptions -} -``` +#### `frontend/src/pages/SystemSettings.tsx` +- **Rename Card**: Change "Feature Flags" to "Optional Features". +- **Consolidate Toggles**: + - Remove "Enable Cerberus Security" from "General Configuration". + - Render specific toggles for "Cerberus Security" and "Uptime Monitoring" in the "Optional Features" card. + - Use `feature.cerberus.enabled` and `feature.uptime.enabled` keys. + - Add user-friendly descriptions for each. +- **Remove Generic List**: Instead of iterating over all keys, explicitly render the supported optional features to control order and presentation. -### 🏗️ Phase 1: Security Hardening (Quick Wins) -1. **Secure Headers:** `Content-Security-Policy`, `Strict-Transport-Security`, `X-Frame-Options`. -2. **Cookie Security:** `HttpOnly`, `Secure`, `SameSite=Strict`. +#### `frontend/src/components/Layout.tsx` +- **Fetch Flags**: Use `getFeatureFlags` (or a new hook) to get current state. +- **Conditional Rendering**: + - Hide "Uptime" nav item if `feature.uptime.enabled` is false. + - Hide "Security" nav group if `feature.cerberus.enabled` is false. -### 🏗️ Phase 2: Backend Core (User & SMTP) -1. **Models:** - * `User`: Add `InviteToken`, `InviteExpires`, `PermissionMode` (string), `Permissions` (Many-to-Many with ProxyHost). - * `ProxyHost`: Add `ForwardAuthEnabled` (bool). 
- * `Setting`: Add keys for `smtp_host`, `smtp_port`, etc. -2. **Logic:** - * `internal/services/mail`: Implement SMTP sender. - * `internal/api/handlers/user.go`: Add `InviteUser` handler and Permission logic. +### 3. Migration / Data Integrity +- Existing `security.cerberus.enabled` setting in DB should be migrated to `feature.cerberus.enabled` or the code should handle the transition. +- **Action**: We will switch to `feature.cerberus.enabled`. The user can re-enable it if it defaults to off, but we'll try to default it to ON in the handler. -### 🏗️ Phase 3: SSO Implementation -1. **Library:** Use `github.com/markbates/goth` or `golang.org/x/oauth2`. -2. **Models:** `SocialAccount` (UserID, Provider, ProviderID, Email). -3. **Routes:** - * `GET /auth/:provider`: Start OAuth flow. - * `GET /auth/:provider/callback`: Handle return, create/link user, set session. +## Step-by-Step Execution -### 🏗️ Phase 4: Forward Auth Integration -1. **Caddy:** Configure `forward_auth` directive to point to Charon API. -2. **Logic:** `VerifyAccess` handler: - * Check if User is logged in. - * Fetch User's `PermissionMode` and `Permissions`. - * If `allow_all`: Grant access UNLESS host is in `Permissions`. - * If `deny_all`: Deny access UNLESS host is in `Permissions`. - -### 🎨 Phase 5: Frontend Implementation -1. **Settings:** New "SMTP" and "SSO" tabs in Settings page. -2. **User List:** "Invite User" button. -3. **User Edit:** New "Permissions" tab with "Allow/Block" toggle and Host selector. -4. **Login Page:** Add "Sign in with Google/Plex/GitHub" buttons. - -### 📚 Phase 6: Documentation -1. **SSO Guides:** How to get Client IDs from Google/GitHub. -2. **Header Auth:** Guide on configuring Jellyseerr/Grafana to trust Charon. +1. **Backend**: Update `feature_flags_handler.go` to clean up flags and set defaults. +2. **Backend**: Update `cerberus.go` to use new flag key. +3. **Backend**: Update `routes.go` to gate Uptime background job. +4. 
**Frontend**: Update `SystemSettings.tsx` UI. +5. **Frontend**: Update `Layout.tsx` sidebar logic. +6. **Verify**: Test toggling features and checking sidebar/background behavior. diff --git a/docs/plans/current_spec.md.bak b/docs/plans/current_spec.md.bak new file mode 100644 index 00000000..f702f56c --- /dev/null +++ b/docs/plans/current_spec.md.bak @@ -0,0 +1,216 @@ + + + + +# Current Plan: Aggregated Host Statuses + +This feature adds a backend endpoint that returns aggregated health information for upstream hosts +and a frontend Dashboard widget to display the aggregated view. The goal is to provide host-level +health at-a-glance to help identify server-wide outages and quickly navigate to affected services. + +## Summary +- Endpoint: `GET /api/v1/uptime/hosts/aggregated` (authenticated) +- Backend: Service method + handler + route + GORM query, small in-memory cache, server-side filters +- Frontend: API client, custom React Query hook, `HostStatusesWidget` in Dashboard, demo/test pages +- Acceptance: Auth respects accessible hosts, accurate counts, performance (fast aggregate queries) + +## HandOff JSON contract (Truth) +Request: `GET /api/v1/uptime/hosts/aggregated` +- Query Params (optional): + - `status` (string): filter results by host status: up|down|pending|maintenance + - `q` (string): search text (host or name) + - `sort_by` (string): `monitor_count|down_count|avg_latency|last_check` (default: `down_count`) + - `order` (string): `asc|desc` (default: `desc`) + - `page` (int): pagination page (default 1) + - `per_page` (int): items per page (default 50) + +Response: 200 JSON +```json +{ + "aggregated_hosts": [ + { + "id": "uuid", + "host": "10.0.0.12", + "name": "web-01", + "status": "down", + "monitor_count": 3, + "counts": { "up": 1, "down": 2, "pending": 0, "maintenance": 0 }, + "avg_latency_ms": 257, + "last_check": "2025-12-05T09:54:54Z", + "last_status_change": "2025-12-05T09:53:44Z", + "affected_monitors": [ + { "id": "mon-1", "name": 
"example-api", "status": "down", "last_check": "2025-12-05T09:54:54Z" }, + { "id": "mon-2", "name": "webapp", "status": "down", "last_check": "2025-12-05T09:52:14Z" } + ], + "uptime_24h": 99.3 + } + ], + "meta": { "page": 1, "per_page": 50, "total": 1 } +} +``` + +Notes: +- All timestamps are ISO 8601 UTC. +- Field names use snake_case (server -> frontend contract per project guidelines). +- Only accessible hosts are returned to the authenticated caller (utilize existing auth handlers). + +## Backend Requirements +1. Database + - Ensure index on `uptime_monitors(uptime_host_id)`, `uptime_monitors(status)`, and `uptime_monitors(last_check)`. + - No model changes required for `UptimeHost` or `UptimeMonitor` unless we want an `avg_latency` column cached (optional). + +2. Service (in `internal/services/uptime_service.go`) + - Add method: `GetAggregatedHostStatuses(filters AggregationFilter) ([]AggregatedHost, error)`. + - Implementation detail: + - Query should join `uptime_hosts` and `uptime_monitors` and run a `GROUP BY uptime_host_id`. + - Use a SELECT that computes: monitor_count, up_count, down_count, pending_count, maintenance_count, avg_latency, last_check (MAX), last_status_change (MAX). + - Provide a parameter to include a limited list of affected monitors (eg. top N by last_check) and optional `uptime_24h` calculation where a heartbeat history exists. + - Return GORM structs matching the `AggregatedHost` DTO. + +3. Handler (in `internal/api/handlers/uptime_handler.go`) + - Add `func (h *UptimeHandler) AggregatedHosts(c *gin.Context)` that: + - Binds query params; validates and normalizes them. + - Calls `service.GetAggregatedHostStatuses(filters)`. + - Filters the results using `authMiddleware` (maintain accessible hosts list or `authHandler.GetAccessibleHosts` logic). + - Caches the result for `CHARON_UPTIME_AGGREGATION_TTL` (default 30s). Cache strategy: package global in `services` with simple `sync.Map` + TTL. 
+ - Produces a 200 JSON with the contract above. + - Add unit tests and integration tests verifying results and auth scoping. + +4. Routes + - Register under protected group in `internal/api/routes/routes.go`: + - `protected.GET('/uptime/hosts/aggregated', uptimeHandler.AggregatedHosts)` + +5. Observability + - Add a Prometheus counter/metric: `charon_uptime_aggregated_requests_total` (labels: status, cache_hit true/false). + - Add logs for aggregation errors. + +6. Security + - Ensure only authenticated users can access aggregated endpoint. + - Respect `authHandler.GetAccessibleHosts` (or similar) to filter hosts the user should see. + +7. Tests + - Unit tests for service logic calculating aggregates (mock DB / in-memory DB fixtures). + - Handler integration tests using the testdb and router that verify JSON response structure, pagination, filters, and auth filtering. + - Perf tests: basic benchmark to ensure aggregation query completes within acceptable time for 10k monitors (e.g. < 200ms unless run on dev env; document specifics). + +## Frontend Requirements +1. API client changes (`frontend/src/api/uptime.ts`) + - Add `export const getAggregatedHosts = async (params?: AggregationQueryParams) => client.get('/uptime/hosts/aggregated', { params }).then(r => r.data)` + - Add new TypeScript types for `AggregatedHost`, `AggregatedHostCounts`, `AffectedMonitor`. + +2. React Query Hook (`frontend/src/hooks/useAggregatedHosts.ts`) + - `useAggregatedHosts` should accept params similar to query params (filters), and accept `enabled` flag. + - Use TanStack Query with `refetchInterval: 30_000` and `staleTime: 30_000` to match backend TTL. + +3. Dashboard Widget (`frontend/src/components/Dashboard/HostStatusesWidget.tsx`) + - Shows high-level summary: total hosts, down_count, up_count, pending. + - Clickable host rows navigate to the uptime or host detail page. + - Visuals: small status badge, host name, counts, avg latency, last check time. 
+ - Accessible: all interactive elements keyboard and screen-reader navigable. + - Fallback: if the aggregated endpoint is not found or returns 403, display a short explanatory message with a link to uptime page. + +4. Dashboard Page Update (`frontend/src/pages/Dashboard.tsx`) + - Add `HostStatusesWidget` to the Dashboard layout (prefer 2nd column near `UptimeWidget`). + +5. Tests + - Unit tests for `HostStatusesWidget` rendering different states. + - Mock API responses for `useAggregatedHosts` using the existing test utilities. + - Add Storybook story if used in repo (optional). + +6. Styling + - Keep styling consistent with `UptimeWidget` (dark-card, status badges, mini bars). + +## Acceptance Criteria +1. API + - `GET /api/v1/uptime/hosts/aggregated` returns aggregated host objects in the correct format. + - Query params `status`, `q`, `sort_by`, `order`, `page`, `per_page` work as expected. + - The endpoint respects user-specific host access permissions. + - Endpoint adheres to TTL caching; cache invalidation occurs after TTL or when underlying monitor status change triggers invalidation. + +2. Backend Tests + - Unit tests cover all aggregation branches and logic (e.g. zero-monitor host, mixed statuses, all down host). + - Integration tests validate auth-scoped responses. + +3. Frontend UI + - Widget displays host-level counts and shows a list of top N hosts with status badges. + - Clicking a host navigates to the uptime or host detail page. + - Widget refreshes according to TTL and reacts to manual refreshes. + - UI has automated tests covering rendering with typical API responses, filtering and pagination UI behavior. + +4. Performance + - Aggregation query responds within acceptable time for typical deployments (document target; e.g. < 200ms for 5k monitors), or we add a follow-up plan to add precomputation. 
+## Example API Contract (Sample Request + Response) +Request: +```http +GET /api/v1/uptime/hosts/aggregated?sort_by=down_count&order=desc&page=1&per_page=20 +Authorization: Bearer <token> +``` + +Response: +```json +{ + "aggregated_hosts": [ + { + "id": "39b6f7c2-2a5c-47d7-9c9d-1d7f1977dabc", + "host": "10.0.10.12", + "name": "production-web-1", + "status": "down", + "monitor_count": 3, + "counts": {"up": 1, "down": 2, "pending": 0, "maintenance": 0}, + "avg_latency_ms": 257, + "last_check": "2025-12-05T09:54:54Z", + "last_status_change": "2025-12-05T09:53:44Z", + "affected_monitors": [ + {"id":"m-01","name":"api.example","status":"down","last_check":"2025-12-05T09:54:54Z","latency":105}, + {"id":"m-02","name":"www.example","status":"down","last_check":"2025-12-05T09:52:14Z","latency":401} + ], + "uptime_24h": 98.77 + } + ], + "meta": {"page":1,"per_page":20,"total":1} +} +``` + +## Error cases +- 401 Unauthorized — Invalid or missing token. +- 403 Forbidden — Caller lacks host access. +- 500 Internal Server Error — DB / aggregation error. + +## Observability & Operational Notes +- Metrics: `charon_uptime_aggregated_requests_total`, `charon_uptime_aggregated_cache_hits_total`. +- Cache TTL: default 30s via `CHARON_UPTIME_AGGREGATION_TTL` env var. +- Logging: Rate-limited errors and aggregation durations logged to the general logger. + +## Follow-ups & Optional Enhancements +1. Add an endpoint-level `since` parameter that returns delta/trend information (e.g. change in down_count in last 24 hours). +2. Background precompute task (materialized aggregated table) for very large installations. +3. Add a configuration to show `affected_monitors` collapsed/expanded per host for faster page loads. 
+ +## Short List of Files To Change +- Backend: + - backend/internal/services/uptime_service.go (add aggregation method) + - backend/internal/api/handlers/uptime_handler.go (add handler method) + - backend/internal/api/routes/routes.go (register new route) + - backend/internal/services/uptime_service_test.go (add tests) + - backend/internal/api/handlers/uptime_handler_test.go (add handler tests) + - backend/internal/models/uptime.go / uptime_host.go (index recommendations or small schema updates if needed) + +- Frontend: + - frontend/src/api/uptime.ts (add `getAggregatedHosts`) + - frontend/src/hooks/useAggregatedHosts.ts (new hook) + - frontend/src/components/Dashboard/HostStatusesWidget.tsx (new widget) + - frontend/src/pages/Dashboard.tsx (add widget) + - frontend/src/components/__tests__/HostStatusesWidget.test.tsx (new tests) + +--- +If you want, I can now scaffold the backend service method + handler and the frontend API client and widget as a follow-up PR. diff --git a/docs/plans/sample_orchestration_plan.md b/docs/plans/sample_orchestration_plan.md new file mode 100644 index 00000000..3cc52cad --- /dev/null +++ b/docs/plans/sample_orchestration_plan.md @@ -0,0 +1,44 @@ + + +# Plan: Aggregated Host Statuses Endpoint + Dashboard Widget + +## 1) Title +Implement `/api/v1/host_statuses` backend endpoint and the `CharonStatusWidget` frontend component. + +## 2) Overview +This feature provides an aggregated view of the number of proxy hosts and the number of hosts that are up/down. The backend exposes an endpoint returning aggregated counts, and the frontend consumes the endpoint and presents a dashboard widget. + +## 3) Handoff Contract (Example) +**GET** /api/v1/stats/host_statuses + +Response (200): +```json +{ + "total_proxy_hosts": 12, + "hosts_up": 10, + "hosts_down": 2 +} +``` + +## 4) Backend Requirements + - Add a new read-only route `GET /api/v1/stats/host_statuses` under `internal/api/handlers/`. 
+ - Implement the handler to use existing models/services and return the aggregated counts in JSON. + - Add unit tests under `backend/internal/services` and the handler's folder. + +## 5) Frontend Requirements + - Add `frontend/src/components/CharonStatusWidget.tsx` to render the widget using the endpoint or existing monitors if no endpoint is present. + - Add a hook and update the API client if necessary: `frontend/src/api/stats.ts` with `getHostStatuses()`. + - Add unit tests: vitest for the component and the hook. + +## 6) Acceptance Criteria +- Backend: `go test ./...` passes. +- Frontend: `npm run type-check` and `npm run build` pass. +- All unit tests pass and new coverage for added code is included. + +## 7) Artifacts +- `docs/plans/current_spec.md` (the plan file) +- `backend` changed files including handler and tests +- `frontend` changed files including component and tests diff --git a/docs/plans/security_features_spec.md b/docs/plans/security_features_spec.md new file mode 100644 index 00000000..61140aa5 --- /dev/null +++ b/docs/plans/security_features_spec.md @@ -0,0 +1,396 @@ +# 📋 Plan: Security Features Deep Dive - Issues #17, #18, #19 + +**Created**: December 5, 2025 +**Status**: Analysis Complete - Implementation Assessment + +--- + +## 🧐 Executive Summary + +After a comprehensive analysis of the CrowdSec (#17), WAF (#18), and Rate Limiting (#19) features, the findings show that **all three features are substantially implemented** with working frontend UIs, backend APIs, and Caddy integration. However, each has specific gaps that need to be addressed for full production readiness. 
+ +--- + +## 📊 Implementation Status Matrix + +| Feature | Backend | Frontend | Caddy Integration | Testing | Status | +|---------|---------|----------|-------------------|---------|--------| +| **CrowdSec (#17)** | ✅ 90% | ✅ 90% | ⚠️ 70% | ⚠️ 50% | Near Complete | +| **WAF (#18)** | ✅ 95% | ✅ 95% | ✅ 85% | ✅ 80% | Near Complete | +| **Rate Limiting (#19)** | ⚠️ 60% | ✅ 90% | ⚠️ 40% | ⚠️ 30% | Needs Work | + +--- + +## 🔍 Issue #17: CrowdSec Integration (Critical) + +### What's Implemented ✅ + +**Backend:** +- CrowdSec handler (`crowdsec_handler.go`) with: + - Start/Stop process control via `CrowdsecExecutor` interface + - Status monitoring endpoint + - Import/Export configuration (tar.gz) + - File listing/reading/writing for config files + - Routes registered at `/admin/crowdsec/*` + +- Security handler integration: + - `security.crowdsec.mode` setting (disabled/local) + - `security.crowdsec.enabled` runtime override + - CrowdSec enabled flag computed in `computeEffectiveFlags()` + +**Frontend:** +- `CrowdSecConfig.tsx` page with: + - Mode selection (disabled/local) + - Import configuration (file upload) + - Export configuration (download) + - File editor for config files + - Loading states and error handling + +**Docker:** +- CrowdSec binary installed at `/usr/local/bin/crowdsec` +- Config directory at `/app/data/crowdsec` +- `caddy-crowdsec-bouncer` plugin compiled into Caddy + +### Gaps Identified ❌ + +1. **Banned IP Dashboard** - Not implemented + - Need `/api/v1/crowdsec/decisions` endpoint to list banned IPs + - Need frontend UI to display and manage banned IPs + +2. **Manual IP Ban/Unban** - Partially implemented + - `SecurityDecision` model exists but manual CrowdSec bans not wired + - Need `/api/v1/crowdsec/ban` and `/api/v1/crowdsec/unban` endpoints + +3. **Scenario/Collection Management** - Not implemented + - No UI for managing CrowdSec scenarios or collections + - Backend would need to interact with CrowdSec CLI or API + +4. 
**CrowdSec Log Parsing Setup** - Not implemented + - Need to configure CrowdSec to parse Caddy logs + - Acquisition config not auto-generated + +5. **Caddy Integration Handler** - Placeholder only + - `buildCrowdSecHandler()` returns `Handler{"handler": "crowdsec"}` but Caddy's `caddy-crowdsec-bouncer` expects different configuration: + ```json + { + "handler": "crowdsec", + "api_url": "http://localhost:8080", + "api_key": "..." + } + ``` + +### Acceptance Criteria Assessment + +| Criteria | Status | +|----------|--------| +| CrowdSec blocks malicious IPs automatically | ⚠️ Partial - bouncer configured but handler incomplete | +| Banned IPs visible in dashboard | ❌ Not implemented | +| Can manually ban/unban IPs | ⚠️ Partial - backend exists but not wired | +| CrowdSec status visible | ✅ Implemented | + +--- + +## 🔍 Issue #18: WAF Integration (High Priority) + +### What's Implemented ✅ + +**Backend:** +- `SecurityRuleSet` model for storing WAF rules +- `SecurityConfig.WAFMode` (disabled/monitor/block) +- `SecurityConfig.WAFRulesSource` for ruleset selection +- `buildWAFHandler()` generates Coraza handler config: + ```go + h := Handler{"handler": "waf"} + h["directives"] = fmt.Sprintf("Include %s", rulesetPath) + ``` +- Ruleset files written to `/app/data/caddy/coraza/rulesets/` +- `SecRuleEngine On/DetectionOnly` auto-prepended based on mode +- Security service CRUD for rulesets + +**Frontend:** +- `WafConfig.tsx` with: + - Rule set CRUD (create, edit, delete) + - Mode selection (blocking/detection) + - WAF presets (OWASP CRS, SQLi protection, XSS protection, Bad Bots) + - Source URL or inline content support + - Rule count display + +**Docker:** +- `coraza-caddy/v2` plugin compiled into Caddy + +**Testing:** +- Integration test `coraza_integration_test.go` +- Unit tests for WAF handler building + +### Gaps Identified ❌ + +1. 
**WAF Logging and Alerts** - Partially implemented + - Coraza logs to Caddy but not parsed/displayed in UI + - No WAF-specific notifications + +2. **WAF Statistics Dashboard** - Not implemented + - Need metrics collection (requests blocked, attack types) + - Prometheus metrics defined in docs but not implemented + +3. **Paranoia Level Selector** - Not implemented + - OWASP CRS paranoia levels (1-4) not exposed in UI + - Would need `SecAction "id:900000,setvar:tx.paranoia_level=2"` + +4. **Per-Host WAF Toggle** - Partially implemented + - `host.AdvancedConfig` can reference `ruleset_name` but no UI + - Need checkbox in ProxyHostForm for "Enable WAF" + +5. **Rule Exclusion System** - Not implemented + - No UI for excluding specific rules that cause false positives + - Would need `SecRuleRemoveById` directive management + +### Acceptance Criteria Assessment + +| Criteria | Status | +|----------|--------| +| WAF blocks common attacks (SQLi, XSS) | ✅ Working with Coraza | +| Can enable/disable per host | ⚠️ Via advanced config only | +| False positives manageable | ❌ No exclusion UI | +| WAF events logged and visible | ⚠️ Logs exist but not in UI | + +--- + +## 🔍 Issue #19: Rate Limiting (High Priority) + +### What's Implemented ✅ + +**Backend:** +- `SecurityConfig` model fields: + ```go + RateLimitEnable bool + RateLimitBurst int + RateLimitRequests int + RateLimitWindowSec int + ``` +- `security.rate_limit.enabled` setting +- `buildRateLimitHandler()` generates config: + ```go + h := Handler{"handler": "rate_limit"} + h["requests"] = secCfg.RateLimitRequests + h["window_sec"] = secCfg.RateLimitWindowSec + h["burst"] = secCfg.RateLimitBurst + ``` + +**Frontend:** +- `RateLimiting.tsx` with: + - Enable/disable toggle + - Requests per second input + - Burst allowance input + - Window (seconds) input + - Save configuration + +### Gaps Identified ❌ + +1. 
**Caddy Rate Limit Directive** - **CRITICAL GAP** + - Caddy doesn't have a built-in `rate_limit` handler + - Need to use `caddy-ratelimit` module or Caddy's `respond` with headers + - Current handler is a no-op placeholder + +2. **Rate Limit Presets** - Not implemented + - Issue specifies presets: login, API, standard + - Need predefined configurations + +3. **Per-IP Rate Limiting** - Not implemented correctly + - Handler exists but Caddy module not compiled in + - Need `github.com/mholt/caddy-ratelimit` in Dockerfile + +4. **Per-Endpoint Rate Limits** - Not implemented + - No UI for path-specific rate limits + - Would need rate limit rules per route + +5. **Bypass List (Trusted IPs)** - Not implemented + - Admin whitelist exists but not connected to rate limiting + +6. **Rate Limit Violation Logging** - Not implemented + - No logging when rate limits are hit + +7. **Rate Limit Testing Tool** - Not implemented + - No way to test rate limits from UI + +### Acceptance Criteria Assessment + +| Criteria | Status | +|----------|--------| +| Rate limits prevent brute force | ❌ Handler is placeholder | +| Presets work correctly | ❌ Not implemented | +| Legitimate traffic not affected | ⚠️ No bypass list | +| Rate limit hits logged | ❌ Not implemented | + +--- + +## 🤝 Handoff Contracts (API Specifications) + +### CrowdSec Banned IPs API + +```json +// GET /api/v1/crowdsec/decisions +{ + "response": { + "decisions": [ + { + "id": "uuid", + "ip": "192.168.1.100", + "reason": "ssh-bf", + "duration": "4h", + "created_at": "2025-12-05T10:00:00Z", + "source": "crowdsec" + } + ], + "total": 15 + } +} + +// POST /api/v1/crowdsec/ban +{ + "request": { + "ip": "192.168.1.100", + "duration": "24h", + "reason": "Manual ban - suspicious activity" + }, + "response": { + "success": true, + "decision_id": "uuid" + } +} + +// DELETE /api/v1/crowdsec/ban/:ip +{ + "response": { + "success": true + } +} +``` + +### Rate Limit Caddy Integration Fix + +The rate limit handler needs to output 
proper Caddy JSON: + +```json +// Correct Caddy rate_limit handler format (requires caddy-ratelimit module) +{ + "handler": "rate_limit", + "rate_limits": { + "static": { + "match": [{"method": ["GET", "POST"]}], + "key": "{http.request.remote.host}", + "window": "1m", + "max_events": 60 + } + } +} +``` + +--- + +## 🏗️ Implementation Phases + +### Phase 1: Rate Limiting Fix (Critical - Blocking Beta) + +**Backend Changes:** +1. Add `github.com/mholt/caddy-ratelimit` to Dockerfile xcaddy build +2. Fix `buildRateLimitHandler()` to output correct Caddy JSON format +3. Add rate limit bypass using admin whitelist + +**Frontend Changes:** +1. Add presets dropdown (Login: 5/min, API: 100/min, Standard: 30/min) +2. Add bypass IP list input (reuse admin whitelist) + +### Phase 2: CrowdSec Completeness (High Priority) + +**Backend Changes:** +1. Create `/api/v1/crowdsec/decisions` endpoint (call cscli) +2. Create `/api/v1/crowdsec/ban` and `unban` endpoints +3. Fix `buildCrowdSecHandler()` to include proper bouncer config +4. Auto-generate acquisition.yaml for Caddy log parsing + +**Frontend Changes:** +1. Add "Banned IPs" tab to CrowdSecConfig page +2. Add "Ban IP" button with duration selector +3. Add "Unban" action to each banned IP row + +### Phase 3: WAF Enhancements (Medium Priority) + +**Backend Changes:** +1. Add paranoia level to SecurityConfig model +2. Add rule exclusion list to SecurityRuleSet model +3. Parse Coraza logs for WAF events + +**Frontend Changes:** +1. Add paranoia level slider (1-4) to WAF config +2. Add "Enable WAF" checkbox to ProxyHostForm +3. Add rule exclusion UI (list of rule IDs to exclude) +4. Add WAF events log viewer + +### Phase 4: Testing & QA + +1. Create integration tests for each feature +2. Add E2E tests for security flows +3. 
Manual penetration testing + +--- + +## 🕵️ QA & Security Considerations + +### CrowdSec Security +- Ensure API key not exposed in logs +- Validate IP inputs to prevent injection +- Rate limit the ban/unban endpoints themselves + +### WAF Security +- Validate ruleset content (no malicious directives) +- Prevent path traversal in ruleset file paths +- Test for WAF bypass techniques + +### Rate Limiting Security +- Prevent bypass via IP spoofing (X-Forwarded-For) +- Ensure rate limits apply to all methods +- Test distributed rate limiting behavior + +--- + +## 📚 Documentation Updates Needed + +1. Update `docs/cerberus.md` with actual implementation status +2. Update `docs/security.md` user guide with new features +3. Add rate limiting configuration guide +4. Add CrowdSec setup wizard documentation + +--- + +## 🎯 Priority Order + +1. **Rate Limiting Caddy Module** - Blocking issue, handler is no-op +2. **CrowdSec Banned IP Dashboard** - High visibility feature +3. **WAF Per-Host Toggle** - User expectation from issue +4. **CrowdSec Manual Ban/Unban** - Security operations feature +5. **WAF Rule Exclusions** - False positive management +6. 
**Rate Limit Presets** - UX improvement + +--- + +## Summary: What Works vs What Doesn't + +### ✅ Working Now +- WAF rule management and blocking (Coraza integration) +- CrowdSec process control (start/stop/status) +- CrowdSec config import/export +- Rate limiting UI and settings storage +- Security status API reporting + +### ⚠️ Partially Working +- CrowdSec bouncer (handler exists but config incomplete) +- Per-host WAF (via advanced config only) +- Rate limiting settings (stored but not enforced) + +### ❌ Not Working / Missing +- Rate limiting actual enforcement (Caddy module missing) +- CrowdSec banned IP dashboard +- Manual IP ban/unban +- WAF rule exclusions +- Rate limit presets +- WAF paranoia levels diff --git a/docs/plans/ui_ux_bugfixes_spec.md b/docs/plans/ui_ux_bugfixes_spec.md new file mode 100644 index 00000000..6b9e9910 --- /dev/null +++ b/docs/plans/ui_ux_bugfixes_spec.md @@ -0,0 +1,525 @@ +# 📋 Plan: UI/UX and Backend Bug Fixes - Multi-Issue Resolution + +**Created**: December 5, 2025 +**Status**: Planning Complete - Ready for Implementation + +--- + +## 🧐 UX & Context Analysis + +The user has identified **12 distinct issues** affecting usability, consistency, and functionality. These span both frontend (UI/UX) and backend (API/data) concerns. 
+ +### Issue Summary Matrix + +| # | Issue | Category | Severity | Component | +|---|-------|----------|----------|-----------| +| 1 | Uptime card not updated when editing proxy host | Backend/Frontend | High | ProxyHostForm, UptimeService | +| 2 | Certificates missing delete action | Frontend | Medium | CertificateList | +| 3 | Inconsistent app sizing between IP and domain access | Frontend/CSS | Medium | index.css, Layout | +| 4 | Notification Provider template dropdown invisible text | Frontend | High | Notifications | +| 5 | Templates should be in Provider section with assignment | Frontend | Medium | Notifications | +| 6 | Banner/header sizing issues (tiny on desktop, huge on mobile) | Frontend | Medium | Layout | +| 7 | Mobile drawer icon should be on left side | Frontend | Low | Layout | +| 8 | Mobile view should show logo instead of banner | Frontend | Low | Layout | +| 9 | CrowdSec card buttons truncated on smaller screens | Frontend | Medium | Security | +| 10 | /security/crowdsec shows blank page | Frontend | High | CrowdSecConfig, Layout | +| 11 | Reorganize sidebar: Users → Account Management under Settings | Frontend | Medium | Layout, Router | +| 12 | Missing loading overlay when adding/removing ACL from proxy host | Frontend | High | ProxyHosts, useProxyHosts | + +--- + +## 🤝 Handoff Contracts (API Specifications) + +### Issue #1: Uptime Card Sync on Proxy Host Edit + +**Problem**: When editing a proxy host (e.g., changing name or domain), the associated UptimeMonitor is not updated. Users must manually delete and recreate uptime cards. + +**Current Behavior**: `syncMonitors()` only creates new monitors or updates name; it doesn't handle domain/URL changes properly. 
+ +**Required Backend Changes**: + +```json +// PUT /api/v1/proxy-hosts/:uuid +// Backend should automatically trigger uptime monitor sync when relevant fields change +{ + "request_payload": { + "name": "Updated Name", + "domain_names": "new.example.com", + "forward_host": "192.168.1.100", + "forward_port": 8080 + }, + "response_success": { + "uuid": "abc123", + "name": "Updated Name", + "domain_names": "new.example.com", + "uptime_monitor_synced": true + } +} +``` + +**Implementation**: Modify `updateProxyHost` handler to call `UptimeService.SyncMonitorForHost(hostID)` after successful update. The sync should update URL, name, and upstream_host on the linked monitor. + +--- + +### Issue #2: Certificate Delete Action + +**Problem**: Certificates page shows no actions in the table. Delete button only appears conditionally and conditions are too restrictive. + +**Frontend Fix**: Always show delete action for custom and staging certificates. Improve visibility logic. + +```tsx +// CertificateList.tsx - Actions column should always render delete for deletable certs +// Current condition is too restrictive: +// cert.id && (cert.provider === 'custom' || cert.issuer?.toLowerCase().includes('staging')) +// AND status check: cert.status !== 'valid' && cert.status !== 'expiring' + +// New condition: Remove status restriction, allow delete for custom/staging certs +// Only block if certificate is in use by a proxy host +``` + +--- + +### Issue #3: Inconsistent App Sizing + +**Root Cause**: `body { zoom: 0.75; }` in `index.css` causes different rendering based on browser zoom behavior when accessed via IP vs domain. + +**Solution**: Remove the `zoom: 0.75` property and instead use proper CSS scaling or adjust layout max-widths for consistent sizing. 
+ +```css +/* BEFORE */ +body { + zoom: 0.75; /* REMOVE THIS */ +} + +/* AFTER */ +body { + margin: 0; + min-width: 320px; + min-height: 100vh; + /* Use consistent font sizing and container widths instead */ +} +``` + +--- + +### Issue #4: Notification Template Dropdown Invisible Text + +**Problem**: In `ProviderForm`, the template ` + +// AFTER + + + + + + + {externalTemplates.map(t => )} + + + + + {templateSelection === 'custom' && ( + <> +