Merge pull request #754 from Wikid82/feature/beta-release

Enable and test Gotify and Custom Webhook notifications
This commit is contained in:
Jeremy
2026-02-26 22:31:53 -05:00
committed by GitHub
167 changed files with 11267 additions and 2945 deletions
+2
View File
@@ -32,6 +32,8 @@ services:
#- CPM_SECURITY_RATELIMIT_ENABLED=false
#- CPM_SECURITY_ACL_ENABLED=false
- FEATURE_CERBERUS_ENABLED=true
# Docker socket group access: copy docker-compose.override.example.yml
# to docker-compose.override.yml and set your host's docker GID.
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro # For local container discovery
- crowdsec_data:/app/data/crowdsec
+2
View File
@@ -27,6 +27,8 @@ services:
- FEATURE_CERBERUS_ENABLED=true
# Emergency "break-glass" token for security reset when ACL blocks access
- CHARON_EMERGENCY_TOKEN=03e4682c1164f0c1cb8e17c99bd1a2d9156b59824dde41af3bb67c513e5c5e92
# Docker socket group access: copy docker-compose.override.example.yml
# to docker-compose.override.yml and set your host's docker GID.
extra_hosts:
- "host.docker.internal:host-gateway"
cap_add:
@@ -0,0 +1,26 @@
# Docker Compose override — copy to docker-compose.override.yml to activate.
#
# Use case: grant the container access to the host Docker socket so that
# Charon can discover running containers.
#
# 1. cp docker-compose.override.example.yml docker-compose.override.yml
# 2. Uncomment the service that matches your compose file:
# - "charon" for docker-compose.local.yml
# - "app" for docker-compose.dev.yml
# 3. Replace <GID> with the output of: stat -c '%g' /var/run/docker.sock
# 4. docker compose up -d
services:
# Uncomment for docker-compose.local.yml
charon:
group_add:
- "<GID>" # e.g. "988" — run: stat -c '%g' /var/run/docker.sock
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Uncomment for docker-compose.dev.yml
app:
group_add:
- "<GID>" # e.g. "988" — run: stat -c '%g' /var/run/docker.sock
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -85,6 +85,7 @@ services:
- playwright_data:/app/data
- playwright_caddy_data:/data
- playwright_caddy_config:/config
- /var/run/docker.sock:/var/run/docker.sock:ro # For container discovery in tests
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:8080/api/v1/health"]
interval: 5s
@@ -111,6 +112,7 @@ services:
volumes:
- playwright_crowdsec_data:/var/lib/crowdsec/data
- playwright_crowdsec_config:/etc/crowdsec
- /var/run/docker.sock:/var/run/docker.sock:ro # For container discovery in tests
healthcheck:
test: ["CMD", "cscli", "version"]
interval: 10s
@@ -49,6 +49,8 @@ services:
# True tmpfs for E2E test data - fresh on every run, in-memory only
# mode=1777 allows any user to write (container runs as non-root)
- /app/data:size=100M,mode=1777
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro # For container discovery in tests
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:8080/api/v1/health || exit 1"]
interval: 5s
+16 -15
View File
@@ -27,30 +27,24 @@ get_group_by_gid() {
}
create_group_with_gid() {
local gid="$1"
local name="$2"
if command -v addgroup >/dev/null 2>&1; then
addgroup -g "$gid" "$name" 2>/dev/null || true
addgroup -g "$1" "$2" 2>/dev/null || true
return
fi
if command -v groupadd >/dev/null 2>&1; then
groupadd -g "$gid" "$name" 2>/dev/null || true
groupadd -g "$1" "$2" 2>/dev/null || true
fi
}
add_user_to_group() {
local user="$1"
local group="$2"
if command -v addgroup >/dev/null 2>&1; then
addgroup "$user" "$group" 2>/dev/null || true
addgroup "$1" "$2" 2>/dev/null || true
return
fi
if command -v usermod >/dev/null 2>&1; then
usermod -aG "$group" "$user" 2>/dev/null || true
usermod -aG "$2" "$1" 2>/dev/null || true
fi
}
@@ -142,8 +136,15 @@ if [ -S "/var/run/docker.sock" ] && is_root; then
fi
fi
elif [ -S "/var/run/docker.sock" ]; then
echo "Note: Docker socket mounted but container is running non-root; skipping docker.sock group setup."
echo " If Docker discovery is needed, run with matching group permissions (e.g., --group-add)"
DOCKER_SOCK_GID=$(stat -c '%g' /var/run/docker.sock 2>/dev/null || echo "unknown")
echo "Note: Docker socket mounted (GID=$DOCKER_SOCK_GID) but container is running non-root; skipping docker.sock group setup."
echo " If Docker discovery is needed, add 'group_add: [\"$DOCKER_SOCK_GID\"]' to your compose service."
if [ "$DOCKER_SOCK_GID" = "0" ]; then
if [ "${ALLOW_DOCKER_SOCK_GID_0:-false}" != "true" ]; then
echo "⚠️ WARNING: Docker socket GID is 0 (root group). group_add: [\"0\"] grants root-group access."
echo " Set ALLOW_DOCKER_SOCK_GID_0=true to acknowledge this risk."
fi
fi
else
echo "Note: Docker socket not found. Docker container discovery will be unavailable."
fi
@@ -191,7 +192,7 @@ if command -v cscli >/dev/null; then
echo "Initializing persistent CrowdSec configuration..."
# Check if .dist has content
if [ -d "/etc/crowdsec.dist" ] && [ -n "$(ls -A /etc/crowdsec.dist 2>/dev/null)" ]; then
if [ -d "/etc/crowdsec.dist" ] && find /etc/crowdsec.dist -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null | grep -q .; then
echo "Copying config from /etc/crowdsec.dist..."
if ! cp -r /etc/crowdsec.dist/* "$CS_CONFIG_DIR/"; then
echo "ERROR: Failed to copy config from /etc/crowdsec.dist"
@@ -208,7 +209,7 @@ if command -v cscli >/dev/null; then
exit 1
fi
echo "✓ Successfully initialized config from .dist directory"
elif [ -d "/etc/crowdsec" ] && [ ! -L "/etc/crowdsec" ] && [ -n "$(ls -A /etc/crowdsec 2>/dev/null)" ]; then
elif [ -d "/etc/crowdsec" ] && [ ! -L "/etc/crowdsec" ] && find /etc/crowdsec -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null | grep -q .; then
echo "Copying config from /etc/crowdsec (fallback)..."
if ! cp -r /etc/crowdsec/* "$CS_CONFIG_DIR/"; then
echo "ERROR: Failed to copy config from /etc/crowdsec (fallback)"
@@ -248,7 +249,7 @@ if command -v cscli >/dev/null; then
echo "Expected: /etc/crowdsec -> /app/data/crowdsec/config"
echo "This indicates a critical build-time issue. Symlink must be created at build time as root."
echo "DEBUG: Directory check:"
ls -la /etc/ | grep crowdsec || echo " (no crowdsec entry found)"
find /etc -mindepth 1 -maxdepth 1 -name '*crowdsec*' -exec ls -ld {} \; 2>/dev/null || echo " (no crowdsec entry found)"
exit 1
fi
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
+55
View File
@@ -0,0 +1,55 @@
version: 1
effective_date: 2026-02-25
scope:
- local pre-commit manual security hooks
- github actions security workflows
defaults:
blocking:
- critical
- high
medium:
mode: risk-based
default_action: report
require_sla: true
default_sla_days: 14
escalation:
trigger: high-signal class or repeated finding
action: require issue + owner + due date
low:
action: report
codeql:
severity_mapping:
error: high_or_critical
warning: medium_or_lower
note: informational
blocking_levels:
- error
warning_policy:
default_action: report
escalation_high_signal_rule_ids:
- go/request-forgery
- js/missing-rate-limiting
- js/insecure-randomness
trivy:
blocking_severities:
- CRITICAL
- HIGH
medium_policy:
action: report
escalation: issue-with-sla
grype:
blocking_severities:
- Critical
- High
medium_policy:
action: report
escalation: issue-with-sla
enforcement_contract:
codeql_local_vs_ci: "local and ci block on codeql error-level findings only"
supply_chain_medium: "medium vulnerabilities are non-blocking by default and require explicit triage"
auth_regression_guard: "state-changing routes must remain protected by auth middleware"
+3 -1
View File
@@ -3,6 +3,8 @@ name: Go Benchmark
on:
pull_request:
push:
branches:
- main
workflow_dispatch:
concurrency:
@@ -33,7 +35,7 @@ jobs:
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
+4 -2
View File
@@ -3,6 +3,8 @@ name: Upload Coverage to Codecov
on:
pull_request:
push:
branches:
- main
workflow_dispatch:
inputs:
run_backend:
@@ -17,7 +19,7 @@ on:
type: boolean
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.run_id }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
@@ -43,7 +45,7 @@ jobs:
ref: ${{ github.sha }}
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
+63 -10
View File
@@ -4,7 +4,7 @@ on:
pull_request:
branches: [main, nightly, development]
push:
branches: [main, nightly, development, 'feature/**', 'fix/**']
branches: [main]
workflow_dispatch:
schedule:
- cron: '0 3 * * 1' # Mondays 03:00 UTC
@@ -57,7 +57,7 @@ jobs:
- name: Setup Go
if: matrix.language == 'go'
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version: 1.26.0
cache-dependency-path: backend/go.sum
@@ -122,10 +122,28 @@ jobs:
exit 1
fi
# shellcheck disable=SC2016
EFFECTIVE_LEVELS_JQ='[
.runs[] as $run
| $run.results[]
| . as $result
| ($run.tool.driver.rules // []) as $rules
| ((
$result.level
// (if (($result.ruleIndex | type) == "number") then ($rules[$result.ruleIndex].defaultConfiguration.level // empty) else empty end)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| (.defaultConfiguration.level // empty)
][0] // empty)
// ""
) | ascii_downcase)
]'
echo "Found SARIF file: $SARIF_FILE"
ERROR_COUNT=$(jq '[.runs[].results[] | select(.level == "error")] | length' "$SARIF_FILE")
WARNING_COUNT=$(jq '[.runs[].results[] | select(.level == "warning")] | length' "$SARIF_FILE")
NOTE_COUNT=$(jq '[.runs[].results[] | select(.level == "note")] | length' "$SARIF_FILE")
ERROR_COUNT=$(jq -r "${EFFECTIVE_LEVELS_JQ} | map(select(. == \"error\")) | length" "$SARIF_FILE")
WARNING_COUNT=$(jq -r "${EFFECTIVE_LEVELS_JQ} | map(select(. == \"warning\")) | length" "$SARIF_FILE")
NOTE_COUNT=$(jq -r "${EFFECTIVE_LEVELS_JQ} | map(select(. == \"note\")) | length" "$SARIF_FILE")
{
echo "**Findings:**"
@@ -135,14 +153,32 @@ jobs:
echo ""
if [ "$ERROR_COUNT" -gt 0 ]; then
echo "❌ **CRITICAL:** High-severity security issues found!"
echo "❌ **BLOCKING:** CodeQL error-level security issues found"
echo ""
echo "### Top Issues:"
echo '```'
jq -r '.runs[].results[] | select(.level == "error") | "\(.ruleId): \(.message.text)"' "$SARIF_FILE" | head -5
# shellcheck disable=SC2016
jq -r '
.runs[] as $run
| $run.results[]
| . as $result
| ($run.tool.driver.rules // []) as $rules
| ((
$result.level
// (if (($result.ruleIndex | type) == "number") then ($rules[$result.ruleIndex].defaultConfiguration.level // empty) else empty end)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| (.defaultConfiguration.level // empty)
][0] // empty)
// ""
) | ascii_downcase) as $effectiveLevel
| select($effectiveLevel == "error")
| "\($effectiveLevel): \($result.ruleId // \"<unknown-rule>\"): \($result.message.text)"
' "$SARIF_FILE" | head -5
echo '```'
else
echo "✅ No high-severity issues found"
echo "✅ No blocking CodeQL issues found"
fi
} >> "$GITHUB_STEP_SUMMARY"
@@ -169,9 +205,26 @@ jobs:
exit 1
fi
ERROR_COUNT=$(jq '[.runs[].results[] | select(.level == "error")] | length' "$SARIF_FILE")
# shellcheck disable=SC2016
ERROR_COUNT=$(jq -r '[
.runs[] as $run
| $run.results[]
| . as $result
| ($run.tool.driver.rules // []) as $rules
| ((
$result.level
// (if (($result.ruleIndex | type) == "number") then ($rules[$result.ruleIndex].defaultConfiguration.level // empty) else empty end)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| (.defaultConfiguration.level // empty)
][0] // empty)
// ""
) | ascii_downcase) as $effectiveLevel
| select($effectiveLevel == "error")
] | length' "$SARIF_FILE")
if [ "$ERROR_COUNT" -gt 0 ]; then
echo "::error::CodeQL found $ERROR_COUNT high-severity security issues. Fix before merging."
echo "::error::CodeQL found $ERROR_COUNT blocking findings (effective-level=error). Fix before merging. Policy: .github/security-severity-policy.yml"
exit 1
fi
+150 -34
View File
@@ -5,10 +5,6 @@ on:
- cron: '0 3 * * 0' # Weekly: Sundays at 03:00 UTC
workflow_dispatch:
inputs:
registries:
description: 'Comma-separated registries to prune (ghcr,dockerhub)'
required: false
default: 'ghcr,dockerhub'
keep_days:
description: 'Number of days to retain images (unprotected)'
required: false
@@ -27,16 +23,17 @@ permissions:
contents: read
jobs:
prune:
prune-ghcr:
runs-on: ubuntu-latest
env:
OWNER: ${{ github.repository_owner }}
IMAGE_NAME: charon
REGISTRIES: ${{ github.event.inputs.registries || 'ghcr,dockerhub' }}
KEEP_DAYS: ${{ github.event.inputs.keep_days || '30' }}
KEEP_LAST_N: ${{ github.event.inputs.keep_last_n || '30' }}
DRY_RUN: ${{ github.event.inputs.dry_run || 'false' }}
PROTECTED_REGEX: '["^v","^latest$","^main$","^develop$"]'
DRY_RUN: ${{ github.event_name == 'pull_request' && 'true' || github.event.inputs.dry_run || 'false' }}
PROTECTED_REGEX: '["^v?[0-9]+\\.[0-9]+\\.[0-9]+$","^latest$","^main$","^develop$"]'
PRUNE_UNTAGGED: 'true'
PRUNE_SBOM_TAGS: 'true'
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
@@ -45,21 +42,19 @@ jobs:
run: |
sudo apt-get update && sudo apt-get install -y jq curl
- name: Run container prune
- name: Run GHCR prune
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
run: |
chmod +x scripts/prune-container-images.sh
./scripts/prune-container-images.sh 2>&1 | tee prune-${{ github.run_id }}.log
chmod +x scripts/prune-ghcr.sh
./scripts/prune-ghcr.sh 2>&1 | tee prune-ghcr-${{ github.run_id }}.log
- name: Summarize prune results (space reclaimed)
if: ${{ always() }}
- name: Summarize GHCR results
if: always()
run: |
set -euo pipefail
SUMMARY_FILE=prune-summary.env
LOG_FILE=prune-${{ github.run_id }}.log
SUMMARY_FILE=prune-summary-ghcr.env
LOG_FILE=prune-ghcr-${{ github.run_id }}.log
human() {
local bytes=${1:-0}
@@ -67,7 +62,7 @@ jobs:
echo "0 B"
return
fi
awk -v b="$bytes" 'function human(x){ split("B KiB MiB GiB TiB",u," "); i=0; while(x>1024){x/=1024;i++} printf "%0.2f %s", x, u[i+1]} END{human(b)}'
awk -v b="$bytes" 'BEGIN { split("B KiB MiB GiB TiB",u," "); i=0; x=b; while(x>1024){x/=1024;i++} printf "%0.2f %s", x, u[i+1] }'
}
if [ -f "$SUMMARY_FILE" ]; then
@@ -77,34 +72,155 @@ jobs:
TOTAL_DELETED_BYTES=$(grep -E '^TOTAL_DELETED_BYTES=' "$SUMMARY_FILE" | cut -d= -f2 || echo 0)
{
echo "## Container prune summary"
echo "## GHCR prune summary"
echo "- candidates: ${TOTAL_CANDIDATES} (≈ $(human "${TOTAL_CANDIDATES_BYTES}"))"
echo "- deleted: ${TOTAL_DELETED} (≈ $(human "${TOTAL_DELETED_BYTES}"))"
} >> "$GITHUB_STEP_SUMMARY"
printf 'PRUNE_SUMMARY: candidates=%s candidates_bytes=%s deleted=%s deleted_bytes=%s\n' \
"${TOTAL_CANDIDATES}" "${TOTAL_CANDIDATES_BYTES}" "${TOTAL_DELETED}" "${TOTAL_DELETED_BYTES}"
echo "Deleted approximately: $(human "${TOTAL_DELETED_BYTES}")"
echo "space_saved=$(human "${TOTAL_DELETED_BYTES}")" >> "$GITHUB_OUTPUT"
else
deleted_bytes=$(grep -oE '\( *approx +[0-9]+ bytes\)' "$LOG_FILE" | sed -E 's/.*approx +([0-9]+) bytes.*/\1/' | awk '{s+=$1} END {print s+0}' || true)
deleted_count=$(grep -cE 'deleting |DRY RUN: would delete' "$LOG_FILE" || true)
{
echo "## Container prune summary"
echo "## GHCR prune summary"
echo "- deleted (approx): ${deleted_count} (≈ $(human "${deleted_bytes}"))"
} >> "$GITHUB_STEP_SUMMARY"
printf 'PRUNE_SUMMARY: deleted_approx=%s deleted_bytes=%s\n' "${deleted_count}" "${deleted_bytes}"
echo "Deleted approximately: $(human "${deleted_bytes}")"
echo "space_saved=$(human "${deleted_bytes}")" >> "$GITHUB_OUTPUT"
fi
- name: Upload prune artifacts
if: ${{ always() }}
- name: Upload GHCR prune artifacts
if: always()
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
with:
name: prune-log-${{ github.run_id }}
name: prune-ghcr-log-${{ github.run_id }}
path: |
prune-${{ github.run_id }}.log
prune-summary.env
prune-ghcr-${{ github.run_id }}.log
prune-summary-ghcr.env
prune-dockerhub:
runs-on: ubuntu-latest
env:
OWNER: ${{ github.repository_owner }}
IMAGE_NAME: charon
KEEP_DAYS: ${{ github.event.inputs.keep_days || '30' }}
KEEP_LAST_N: ${{ github.event.inputs.keep_last_n || '30' }}
DRY_RUN: ${{ github.event_name == 'pull_request' && 'true' || github.event.inputs.dry_run || 'false' }}
PROTECTED_REGEX: '["^v?[0-9]+\\.[0-9]+\\.[0-9]+$","^latest$","^main$","^develop$"]'
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Install tools
run: |
sudo apt-get update && sudo apt-get install -y jq curl
- name: Run Docker Hub prune
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
run: |
chmod +x scripts/prune-dockerhub.sh
./scripts/prune-dockerhub.sh 2>&1 | tee prune-dockerhub-${{ github.run_id }}.log
- name: Summarize Docker Hub results
if: always()
run: |
set -euo pipefail
SUMMARY_FILE=prune-summary-dockerhub.env
LOG_FILE=prune-dockerhub-${{ github.run_id }}.log
human() {
local bytes=${1:-0}
if [ -z "$bytes" ] || [ "$bytes" -eq 0 ]; then
echo "0 B"
return
fi
awk -v b="$bytes" 'BEGIN { split("B KiB MiB GiB TiB",u," "); i=0; x=b; while(x>1024){x/=1024;i++} printf "%0.2f %s", x, u[i+1] }'
}
if [ -f "$SUMMARY_FILE" ]; then
TOTAL_CANDIDATES=$(grep -E '^TOTAL_CANDIDATES=' "$SUMMARY_FILE" | cut -d= -f2 || echo 0)
TOTAL_CANDIDATES_BYTES=$(grep -E '^TOTAL_CANDIDATES_BYTES=' "$SUMMARY_FILE" | cut -d= -f2 || echo 0)
TOTAL_DELETED=$(grep -E '^TOTAL_DELETED=' "$SUMMARY_FILE" | cut -d= -f2 || echo 0)
TOTAL_DELETED_BYTES=$(grep -E '^TOTAL_DELETED_BYTES=' "$SUMMARY_FILE" | cut -d= -f2 || echo 0)
{
echo "## Docker Hub prune summary"
echo "- candidates: ${TOTAL_CANDIDATES} (≈ $(human "${TOTAL_CANDIDATES_BYTES}"))"
echo "- deleted: ${TOTAL_DELETED} (≈ $(human "${TOTAL_DELETED_BYTES}"))"
} >> "$GITHUB_STEP_SUMMARY"
else
deleted_bytes=$(grep -oE '\( *approx +[0-9]+ bytes\)' "$LOG_FILE" | sed -E 's/.*approx +([0-9]+) bytes.*/\1/' | awk '{s+=$1} END {print s+0}' || true)
deleted_count=$(grep -cE 'deleting |DRY RUN: would delete' "$LOG_FILE" || true)
{
echo "## Docker Hub prune summary"
echo "- deleted (approx): ${deleted_count} (≈ $(human "${deleted_bytes}"))"
} >> "$GITHUB_STEP_SUMMARY"
fi
- name: Upload Docker Hub prune artifacts
if: always()
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
with:
name: prune-dockerhub-log-${{ github.run_id }}
path: |
prune-dockerhub-${{ github.run_id }}.log
prune-summary-dockerhub.env
summarize:
runs-on: ubuntu-latest
needs: [prune-ghcr, prune-dockerhub]
if: always()
steps:
- name: Download all artifacts
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
pattern: prune-*-log-${{ github.run_id }}
merge-multiple: true
- name: Combined summary
run: |
set -euo pipefail
human() {
local bytes=${1:-0}
if [ -z "$bytes" ] || [ "$bytes" -eq 0 ]; then
echo "0 B"
return
fi
awk -v b="$bytes" 'BEGIN { split("B KiB MiB GiB TiB",u," "); i=0; x=b; while(x>1024){x/=1024;i++} printf "%0.2f %s", x, u[i+1] }'
}
GHCR_CANDIDATES=0 GHCR_CANDIDATES_BYTES=0 GHCR_DELETED=0 GHCR_DELETED_BYTES=0
if [ -f prune-summary-ghcr.env ]; then
GHCR_CANDIDATES=$(grep -E '^TOTAL_CANDIDATES=' prune-summary-ghcr.env | cut -d= -f2 || echo 0)
GHCR_CANDIDATES_BYTES=$(grep -E '^TOTAL_CANDIDATES_BYTES=' prune-summary-ghcr.env | cut -d= -f2 || echo 0)
GHCR_DELETED=$(grep -E '^TOTAL_DELETED=' prune-summary-ghcr.env | cut -d= -f2 || echo 0)
GHCR_DELETED_BYTES=$(grep -E '^TOTAL_DELETED_BYTES=' prune-summary-ghcr.env | cut -d= -f2 || echo 0)
fi
HUB_CANDIDATES=0 HUB_CANDIDATES_BYTES=0 HUB_DELETED=0 HUB_DELETED_BYTES=0
if [ -f prune-summary-dockerhub.env ]; then
HUB_CANDIDATES=$(grep -E '^TOTAL_CANDIDATES=' prune-summary-dockerhub.env | cut -d= -f2 || echo 0)
HUB_CANDIDATES_BYTES=$(grep -E '^TOTAL_CANDIDATES_BYTES=' prune-summary-dockerhub.env | cut -d= -f2 || echo 0)
HUB_DELETED=$(grep -E '^TOTAL_DELETED=' prune-summary-dockerhub.env | cut -d= -f2 || echo 0)
HUB_DELETED_BYTES=$(grep -E '^TOTAL_DELETED_BYTES=' prune-summary-dockerhub.env | cut -d= -f2 || echo 0)
fi
TOTAL_CANDIDATES=$((GHCR_CANDIDATES + HUB_CANDIDATES))
TOTAL_CANDIDATES_BYTES=$((GHCR_CANDIDATES_BYTES + HUB_CANDIDATES_BYTES))
TOTAL_DELETED=$((GHCR_DELETED + HUB_DELETED))
TOTAL_DELETED_BYTES=$((GHCR_DELETED_BYTES + HUB_DELETED_BYTES))
{
echo "## Combined container prune summary"
echo ""
echo "| Registry | Candidates | Deleted | Space Reclaimed |"
echo "|----------|------------|---------|-----------------|"
echo "| GHCR | ${GHCR_CANDIDATES} | ${GHCR_DELETED} | $(human "${GHCR_DELETED_BYTES}") |"
echo "| Docker Hub | ${HUB_CANDIDATES} | ${HUB_DELETED} | $(human "${HUB_DELETED_BYTES}") |"
echo "| **Total** | **${TOTAL_CANDIDATES}** | **${TOTAL_DELETED}** | **$(human "${TOTAL_DELETED_BYTES}")** |"
} >> "$GITHUB_STEP_SUMMARY"
printf 'PRUNE_SUMMARY: candidates=%s candidates_bytes=%s deleted=%s deleted_bytes=%s\n' \
"${TOTAL_CANDIDATES}" "${TOTAL_CANDIDATES_BYTES}" "${TOTAL_DELETED}" "${TOTAL_DELETED_BYTES}"
echo "Total space reclaimed: $(human "${TOTAL_DELETED_BYTES}")"
+43 -4
View File
@@ -23,7 +23,11 @@ name: Docker Build, Publish & Test
on:
pull_request:
push:
branches: [main]
workflow_dispatch:
workflow_run:
workflows: ["Docker Lint"]
types: [completed]
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.head_ref || github.ref_name }}
@@ -38,7 +42,7 @@ env:
TRIGGER_HEAD_SHA: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_sha || github.sha }}
TRIGGER_REF: ${{ github.event_name == 'workflow_run' && format('refs/heads/{0}', github.event.workflow_run.head_branch) || github.ref }}
TRIGGER_HEAD_REF: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.head_ref }}
TRIGGER_PR_NUMBER: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.pull_requests[0].number || github.event.pull_request.number }}
TRIGGER_PR_NUMBER: ${{ github.event_name == 'workflow_run' && join(github.event.workflow_run.pull_requests.*.number, '') || github.event.pull_request.number }}
TRIGGER_ACTOR: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.actor.login || github.actor }}
jobs:
@@ -561,12 +565,13 @@ jobs:
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: 'trivy-results.sarif'
category: '.github/workflows/docker-build.yml:build-and-push'
token: ${{ secrets.GITHUB_TOKEN }}
# Generate SBOM (Software Bill of Materials) for supply chain security
# Only for production builds (main/development) - feature branches use downstream supply-chain-pr.yml
- name: Generate SBOM
uses: anchore/sbom-action@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2
uses: anchore/sbom-action@17ae1740179002c89186b61233e0f892c3118b11 # v0.23.0
if: env.TRIGGER_EVENT != 'pull_request' && steps.skip.outputs.skip_build != 'true' && steps.skip.outputs.is_feature_push != 'true'
with:
image: ${{ env.GHCR_REGISTRY }}/${{ env.IMAGE_NAME }}@${{ steps.build-and-push.outputs.digest }}
@@ -575,7 +580,7 @@ jobs:
# Create verifiable attestation for the SBOM
- name: Attest SBOM
uses: actions/attest-sbom@4651f806c01d8637787e274ac3bdf724ef169f34 # v3.0.0
uses: actions/attest-sbom@07e74fc4e78d1aad915e867f9a094073a9f71527 # v4.0.0
if: env.TRIGGER_EVENT != 'pull_request' && steps.skip.outputs.skip_build != 'true' && steps.skip.outputs.is_feature_push != 'true'
with:
subject-name: ${{ env.GHCR_REGISTRY }}/${{ env.IMAGE_NAME }}
@@ -702,13 +707,47 @@ jobs:
exit-code: '1' # Intended to block, but continued on error for now
continue-on-error: true
- name: Upload Trivy scan results
- name: Check Trivy PR SARIF exists
if: always()
id: trivy-pr-check
run: |
if [ -f trivy-pr-results.sarif ]; then
echo "exists=true" >> "$GITHUB_OUTPUT"
else
echo "exists=false" >> "$GITHUB_OUTPUT"
fi
- name: Upload Trivy scan results
if: always() && steps.trivy-pr-check.outputs.exists == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: 'trivy-pr-results.sarif'
category: 'docker-pr-image'
- name: Upload Trivy compatibility results (docker-build category)
if: always() && steps.trivy-pr-check.outputs.exists == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: 'trivy-pr-results.sarif'
category: '.github/workflows/docker-build.yml:build-and-push'
continue-on-error: true
- name: Upload Trivy compatibility results (docker-publish alias)
if: always() && steps.trivy-pr-check.outputs.exists == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: 'trivy-pr-results.sarif'
category: '.github/workflows/docker-publish.yml:build-and-push'
continue-on-error: true
- name: Upload Trivy compatibility results (nightly alias)
if: always() && steps.trivy-pr-check.outputs.exists == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: 'trivy-pr-results.sarif'
category: 'trivy-nightly'
continue-on-error: true
- name: Create scan summary
if: always()
run: |
+8 -9
View File
@@ -80,7 +80,6 @@ on:
default: false
type: boolean
pull_request:
push:
env:
NODE_VERSION: '20'
@@ -96,7 +95,7 @@ env:
CI_LOG_LEVEL: 'verbose'
concurrency:
group: e2e-split-${{ github.workflow }}-${{ github.ref }}-${{ github.event.pull_request.head.sha || github.sha }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
@@ -143,7 +142,7 @@ jobs:
- name: Set up Go
if: steps.resolve-image.outputs.image_source == 'build'
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version: ${{ env.GO_VERSION }}
cache: true
@@ -247,7 +246,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
@@ -448,7 +447,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
@@ -657,7 +656,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
@@ -878,7 +877,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
@@ -1082,7 +1081,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
@@ -1294,7 +1293,7 @@ jobs:
- name: Download Docker image artifact
if: needs.build.outputs.image_source == 'build'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8
with:
name: docker-image
+112 -6
View File
@@ -220,7 +220,7 @@ jobs:
echo "- ${{ env.GHCR_REGISTRY }}/${{ env.IMAGE_NAME }}:nightly@${{ steps.build.outputs.digest }}" >> "$GITHUB_STEP_SUMMARY"
- name: Generate SBOM
uses: anchore/sbom-action@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2
uses: anchore/sbom-action@17ae1740179002c89186b61233e0f892c3118b11 # v0.23.0
with:
image: ${{ env.GHCR_REGISTRY }}/${{ env.IMAGE_NAME }}:nightly@${{ steps.build.outputs.digest }}
format: cyclonedx-json
@@ -331,7 +331,7 @@ jobs:
run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> "$GITHUB_ENV"
- name: Download SBOM
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
name: sbom-nightly
@@ -355,10 +355,116 @@ jobs:
sarif_file: 'trivy-nightly.sarif'
category: 'trivy-nightly'
- name: Check for critical CVEs
- name: Security severity policy summary
run: |
if grep -q "CRITICAL" trivy-nightly.sarif; then
echo "❌ Critical vulnerabilities found in nightly build"
{
echo "## 🔐 Nightly Supply Chain Severity Policy"
echo ""
echo "- Blocking: Critical, High"
echo "- Medium: non-blocking by default (report + triage SLA)"
echo "- Policy file: .github/security-severity-policy.yml"
} >> "$GITHUB_STEP_SUMMARY"
- name: Check for Critical/High CVEs
run: |
set -euo pipefail
jq -e . trivy-nightly.sarif >/dev/null
CRITICAL_COUNT=$(jq -r '
[
.runs[] as $run
| ($run.tool.driver.rules // []) as $rules
| $run.results[]?
| . as $result
| (
(
if (($result.ruleIndex | type) == "number") then
($rules[$result.ruleIndex].properties["security-severity"] // empty)
else
empty
end
)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| .properties["security-severity"]
][0] // empty)
// empty
) as $securitySeverity
| (try ($securitySeverity | tonumber) catch empty) as $score
| select($score != null and $score >= 9.0)
] | length
' trivy-nightly.sarif)
HIGH_COUNT=$(jq -r '
[
.runs[] as $run
| ($run.tool.driver.rules // []) as $rules
| $run.results[]?
| . as $result
| (
(
if (($result.ruleIndex | type) == "number") then
($rules[$result.ruleIndex].properties["security-severity"] // empty)
else
empty
end
)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| .properties["security-severity"]
][0] // empty)
// empty
) as $securitySeverity
| (try ($securitySeverity | tonumber) catch empty) as $score
| select($score != null and $score >= 7.0 and $score < 9.0)
] | length
' trivy-nightly.sarif)
MEDIUM_COUNT=$(jq -r '
[
.runs[] as $run
| ($run.tool.driver.rules // []) as $rules
| $run.results[]?
| . as $result
| (
(
if (($result.ruleIndex | type) == "number") then
($rules[$result.ruleIndex].properties["security-severity"] // empty)
else
empty
end
)
// ([
$rules[]?
| select((.id // "") == ($result.ruleId // ""))
| .properties["security-severity"]
][0] // empty)
// empty
) as $securitySeverity
| (try ($securitySeverity | tonumber) catch empty) as $score
| select($score != null and $score >= 4.0 and $score < 7.0)
] | length
' trivy-nightly.sarif)
{
echo "- Structured SARIF counts: CRITICAL=${CRITICAL_COUNT}, HIGH=${HIGH_COUNT}, MEDIUM=${MEDIUM_COUNT}"
} >> "$GITHUB_STEP_SUMMARY"
if [ "$CRITICAL_COUNT" -gt 0 ]; then
echo "❌ Critical vulnerabilities found in nightly build (${CRITICAL_COUNT})"
exit 1
fi
echo "✅ No critical vulnerabilities found"
if [ "$HIGH_COUNT" -gt 0 ]; then
echo "❌ High vulnerabilities found in nightly build (${HIGH_COUNT})"
exit 1
fi
if [ "$MEDIUM_COUNT" -gt 0 ]; then
echo "::warning::Medium vulnerabilities found in nightly build (${MEDIUM_COUNT}). Non-blocking by policy; triage with SLA per .github/security-severity-policy.yml"
fi
echo "✅ No Critical/High vulnerabilities found"
+24 -1
View File
@@ -3,6 +3,8 @@ name: Quality Checks
on:
pull_request:
push:
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -18,6 +20,27 @@ env:
GOTOOLCHAIN: auto
jobs:
auth-route-protection-contract:
name: Auth Route Protection Contract
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
with:
fetch-depth: 0
ref: ${{ github.sha }}
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
- name: Run auth protection contract tests
run: |
set -euo pipefail
cd backend
go test ./internal/api/routes -run 'TestRegister_StateChangingRoutesRequireAuthentication|TestRegister_StateChangingRoutesDenyByDefaultWithExplicitAllowlist|TestRegister_AuthenticatedRoutes' -count=1 -v
codecov-trigger-parity-guard:
name: Codecov Trigger/Comment Parity Guard
runs-on: ubuntu-latest
@@ -113,7 +136,7 @@ jobs:
} >> "$GITHUB_ENV"
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
+1 -1
View File
@@ -45,7 +45,7 @@ jobs:
fi
- name: Set up Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: backend/go.sum
+222 -117
View File
@@ -4,18 +4,22 @@
name: Security Scan (PR)
on:
workflow_run:
workflows: ["Docker Build, Publish & Test"]
types: [completed]
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to scan (optional)'
required: false
description: 'PR number to scan'
required: true
type: string
pull_request:
push:
branches: [main]
concurrency:
group: security-pr-${{ github.event.workflow_run.event || github.event_name }}-${{ github.event.workflow_run.head_branch || github.ref }}
group: security-pr-${{ github.event_name == 'workflow_run' && github.event.workflow_run.event || github.event_name }}-${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.ref }}
cancel-in-progress: true
jobs:
@@ -23,16 +27,18 @@ jobs:
name: Trivy Binary Scan
runs-on: ubuntu-latest
timeout-minutes: 10
# Run for: manual dispatch, PR builds, or any push builds from docker-build
# Run for manual dispatch, direct PR/push, or successful upstream workflow_run
if: >-
github.event_name == 'workflow_dispatch' ||
github.event_name == 'pull_request' ||
((github.event.workflow_run.event == 'push' || github.event.workflow_run.pull_requests[0].number != null) &&
(github.event.workflow_run.status != 'completed' || github.event.workflow_run.conclusion == 'success'))
github.event_name == 'push' ||
(github.event_name == 'workflow_run' &&
github.event.workflow_run.event == 'pull_request' &&
github.event.workflow_run.status == 'completed' &&
github.event.workflow_run.conclusion == 'success')
permissions:
contents: read
pull-requests: write
security-events: write
actions: read
@@ -41,27 +47,65 @@ jobs:
# actions/checkout v4.2.2
uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98
with:
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
ref: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_sha || github.sha }}
- name: Extract PR number from workflow_run
id: pr-info
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
# Manual dispatch - use input or fail gracefully
if [[ -n "${{ inputs.pr_number }}" ]]; then
echo "pr_number=${{ inputs.pr_number }}" >> "$GITHUB_OUTPUT"
echo "✅ Using manually provided PR number: ${{ inputs.pr_number }}"
else
echo "⚠️ No PR number provided for manual dispatch"
echo "pr_number=" >> "$GITHUB_OUTPUT"
fi
if [[ "${{ github.event_name }}" == "push" ]]; then
echo "pr_number=" >> "$GITHUB_OUTPUT"
echo "is_push=true" >> "$GITHUB_OUTPUT"
echo "✅ Push event detected; using local image path"
exit 0
fi
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "pr_number=${{ github.event.pull_request.number }}" >> "$GITHUB_OUTPUT"
echo "is_push=false" >> "$GITHUB_OUTPUT"
echo "✅ Pull request event detected: PR #${{ github.event.pull_request.number }}"
exit 0
fi
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
INPUT_PR_NUMBER="${{ inputs.pr_number }}"
if [[ -z "${INPUT_PR_NUMBER}" ]]; then
echo "❌ workflow_dispatch requires inputs.pr_number"
exit 1
fi
if [[ ! "${INPUT_PR_NUMBER}" =~ ^[0-9]+$ ]]; then
echo "❌ reason_category=invalid_input"
echo "reason=workflow_dispatch pr_number must be digits-only"
exit 1
fi
PR_NUMBER="${INPUT_PR_NUMBER}"
echo "pr_number=${PR_NUMBER}" >> "$GITHUB_OUTPUT"
echo "is_push=false" >> "$GITHUB_OUTPUT"
echo "✅ Using manually provided PR number: ${PR_NUMBER}"
exit 0
fi
if [[ "${{ github.event_name }}" == "workflow_run" ]]; then
if [[ "${{ github.event.workflow_run.event }}" != "pull_request" ]]; then
# Explicit contract validation happens in the dedicated guard step.
echo "pr_number=" >> "$GITHUB_OUTPUT"
echo "is_push=false" >> "$GITHUB_OUTPUT"
exit 0
fi
if [[ -n "${{ github.event.workflow_run.pull_requests[0].number || '' }}" ]]; then
echo "pr_number=${{ github.event.workflow_run.pull_requests[0].number }}" >> "$GITHUB_OUTPUT"
echo "is_push=false" >> "$GITHUB_OUTPUT"
echo "✅ Found PR number from workflow_run payload: ${{ github.event.workflow_run.pull_requests[0].number }}"
exit 0
fi
fi
# Extract PR number from context
HEAD_SHA="${{ github.event.workflow_run.head_sha || github.event.pull_request.head.sha || github.sha }}"
HEAD_SHA="${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_sha || github.event.pull_request.head.sha || github.sha }}"
echo "🔍 Looking for PR with head SHA: ${HEAD_SHA}"
# Query GitHub API for PR associated with this commit
@@ -73,21 +117,38 @@ jobs:
if [[ -n "${PR_NUMBER}" ]]; then
echo "pr_number=${PR_NUMBER}" >> "$GITHUB_OUTPUT"
echo "is_push=false" >> "$GITHUB_OUTPUT"
echo "✅ Found PR number: ${PR_NUMBER}"
else
echo "⚠️ Could not find PR number for SHA: ${HEAD_SHA}"
echo "pr_number=" >> "$GITHUB_OUTPUT"
echo " Could not determine PR number for workflow_run SHA: ${HEAD_SHA}"
exit 1
fi
# Check if this is a push event (not a PR)
if [[ "${{ github.event_name }}" == "push" || "${{ github.event.workflow_run.event }}" == "push" || -z "${PR_NUMBER}" ]]; then
HEAD_BRANCH="${{ github.event.workflow_run.head_branch || github.ref_name }}"
echo "is_push=true" >> "$GITHUB_OUTPUT"
echo "✅ Detected push build from branch: ${HEAD_BRANCH}"
else
echo "is_push=false" >> "$GITHUB_OUTPUT"
- name: Validate workflow_run trust boundary and event contract
if: github.event_name == 'workflow_run'
run: |
if [[ "${{ github.event.workflow_run.name }}" != "Docker Build, Publish & Test" ]]; then
echo "❌ reason_category=unexpected_upstream_workflow"
echo "workflow_name=${{ github.event.workflow_run.name }}"
exit 1
fi
if [[ "${{ github.event.workflow_run.event }}" != "pull_request" ]]; then
echo "❌ reason_category=unsupported_upstream_event"
echo "upstream_event=${{ github.event.workflow_run.event }}"
echo "run_id=${{ github.event.workflow_run.id }}"
exit 1
fi
if [[ "${{ github.event.workflow_run.head_repository.full_name }}" != "${{ github.repository }}" ]]; then
echo "❌ reason_category=untrusted_upstream_repository"
echo "upstream_head_repository=${{ github.event.workflow_run.head_repository.full_name }}"
echo "expected_repository=${{ github.repository }}"
exit 1
fi
echo "✅ workflow_run trust boundary and event contract validated"
- name: Build Docker image (Local)
if: github.event_name == 'push' || github.event_name == 'pull_request'
run: |
@@ -97,95 +158,149 @@ jobs:
- name: Check for PR image artifact
id: check-artifact
if: (steps.pr-info.outputs.pr_number != '' || steps.pr-info.outputs.is_push == 'true') && github.event_name != 'push' && github.event_name != 'pull_request'
if: github.event_name == 'workflow_run' || github.event_name == 'workflow_dispatch'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Determine artifact name based on event type
if [[ "${{ steps.pr-info.outputs.is_push }}" == "true" ]]; then
ARTIFACT_NAME="push-image"
else
PR_NUMBER="${{ steps.pr-info.outputs.pr_number }}"
ARTIFACT_NAME="pr-image-${PR_NUMBER}"
PR_NUMBER="${{ steps.pr-info.outputs.pr_number }}"
if [[ ! "${PR_NUMBER}" =~ ^[0-9]+$ ]]; then
echo "❌ reason_category=invalid_input"
echo "reason=Resolved PR number must be digits-only"
exit 1
fi
RUN_ID="${{ github.event.workflow_run.id }}"
ARTIFACT_NAME="pr-image-${PR_NUMBER}"
RUN_ID="${{ github.event_name == 'workflow_run' && github.event.workflow_run.id || '' }}"
echo "🔍 Checking for artifact: ${ARTIFACT_NAME}"
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
# For manual dispatch, find the most recent workflow run with this artifact
RUN_ID=$(gh api \
# Manual replay path: find latest successful docker-build pull_request run for this PR.
RUNS_JSON=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/${{ github.repository }}/actions/workflows/docker-build.yml/runs?status=success&per_page=10" \
--jq '.workflow_runs[0].id // empty' 2>/dev/null || echo "")
"/repos/${{ github.repository }}/actions/workflows/docker-build.yml/runs?event=pull_request&status=success&per_page=100" 2>&1)
RUNS_STATUS=$?
if [[ ${RUNS_STATUS} -ne 0 ]]; then
echo "❌ reason_category=api_error"
echo "reason=Failed to query workflow runs for PR lookup"
echo "upstream_run_id=unknown"
echo "artifact_name=${ARTIFACT_NAME}"
echo "api_output=${RUNS_JSON}"
exit 1
fi
RUN_ID=$(printf '%s' "${RUNS_JSON}" | jq -r --argjson pr "${PR_NUMBER}" '.workflow_runs[] | select((.pull_requests // []) | any(.number == $pr)) | .id' | head -n 1)
if [[ -z "${RUN_ID}" ]]; then
echo "⚠️ No successful workflow runs found"
echo "artifact_exists=false" >> "$GITHUB_OUTPUT"
exit 0
echo "❌ reason_category=not_found"
echo "reason=No successful docker-build pull_request run found for PR #${PR_NUMBER}"
echo "upstream_run_id=unknown"
echo "artifact_name=${ARTIFACT_NAME}"
exit 1
fi
elif [[ -z "${RUN_ID}" ]]; then
# If triggered by push/pull_request, RUN_ID is empty. Find recent run for this commit.
HEAD_SHA="${{ github.event.workflow_run.head_sha || github.event.pull_request.head.sha || github.sha }}"
echo "🔍 Searching for workflow run for SHA: ${HEAD_SHA}"
# Retry a few times as the run might be just starting or finishing
for i in {1..3}; do
RUN_ID=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/${{ github.repository }}/actions/workflows/docker-build.yml/runs?head_sha=${HEAD_SHA}&status=success&per_page=1" \
--jq '.workflow_runs[0].id // empty' 2>/dev/null || echo "")
if [[ -n "${RUN_ID}" ]]; then break; fi
echo "⏳ Waiting for workflow run to appear/complete... ($i/3)"
sleep 5
done
fi
echo "run_id=${RUN_ID}" >> "$GITHUB_OUTPUT"
# Check if the artifact exists in the workflow run
ARTIFACT_ID=$(gh api \
ARTIFACTS_JSON=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/${{ github.repository }}/actions/runs/${RUN_ID}/artifacts" \
--jq ".artifacts[] | select(.name == \"${ARTIFACT_NAME}\") | .id" 2>/dev/null || echo "")
"/repos/${{ github.repository }}/actions/runs/${RUN_ID}/artifacts" 2>&1)
ARTIFACTS_STATUS=$?
if [[ -n "${ARTIFACT_ID}" ]]; then
echo "artifact_exists=true" >> "$GITHUB_OUTPUT"
echo "artifact_id=${ARTIFACT_ID}" >> "$GITHUB_OUTPUT"
echo "✅ Found artifact: ${ARTIFACT_NAME} (ID: ${ARTIFACT_ID})"
else
echo "artifact_exists=false" >> "$GITHUB_OUTPUT"
echo "⚠️ Artifact not found: ${ARTIFACT_NAME}"
echo "️ This is expected for non-PR builds or if the image was not uploaded"
if [[ ${ARTIFACTS_STATUS} -ne 0 ]]; then
echo "❌ reason_category=api_error"
echo "reason=Failed to query artifacts for upstream run"
echo "upstream_run_id=${RUN_ID}"
echo "artifact_name=${ARTIFACT_NAME}"
echo "api_output=${ARTIFACTS_JSON}"
exit 1
fi
- name: Skip if no artifact
if: ((steps.pr-info.outputs.pr_number == '' && steps.pr-info.outputs.is_push != 'true') || steps.check-artifact.outputs.artifact_exists != 'true') && github.event_name != 'push' && github.event_name != 'pull_request'
run: |
echo "️ Skipping security scan - no PR image artifact available"
echo "This is expected for:"
echo " - Pushes to main/release branches"
echo " - PRs where Docker build failed"
echo " - Manual dispatch without PR number"
exit 0
ARTIFACT_ID=$(printf '%s' "${ARTIFACTS_JSON}" | jq -r --arg name "${ARTIFACT_NAME}" '.artifacts[] | select(.name == $name) | .id' | head -n 1)
if [[ -z "${ARTIFACT_ID}" ]]; then
echo "❌ reason_category=not_found"
echo "reason=Required artifact was not found"
echo "upstream_run_id=${RUN_ID}"
echo "artifact_name=${ARTIFACT_NAME}"
exit 1
fi
{
echo "artifact_exists=true"
echo "artifact_id=${ARTIFACT_ID}"
echo "artifact_name=${ARTIFACT_NAME}"
} >> "$GITHUB_OUTPUT"
echo "✅ Found artifact: ${ARTIFACT_NAME} (ID: ${ARTIFACT_ID})"
- name: Download PR image artifact
if: steps.check-artifact.outputs.artifact_exists == 'true'
if: github.event_name == 'workflow_run' || github.event_name == 'workflow_dispatch'
# actions/download-artifact v4.1.8
uses: actions/download-artifact@ac21fcf45e0aaee541c0f7030558bdad38d77d6c
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3
with:
name: ${{ steps.pr-info.outputs.is_push == 'true' && 'push-image' || format('pr-image-{0}', steps.pr-info.outputs.pr_number) }}
name: ${{ steps.check-artifact.outputs.artifact_name }}
run-id: ${{ steps.check-artifact.outputs.run_id }}
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Load Docker image
if: steps.check-artifact.outputs.artifact_exists == 'true'
if: github.event_name == 'workflow_run' || github.event_name == 'workflow_dispatch'
id: load-image
run: |
echo "📦 Loading Docker image..."
docker load < charon-pr-image.tar
echo "✅ Docker image loaded"
if [[ ! -r "charon-pr-image.tar" ]]; then
echo "❌ ERROR: Artifact image tar is missing or unreadable"
exit 1
fi
MANIFEST_TAGS=""
if tar -tf charon-pr-image.tar | grep -qx "manifest.json"; then
MANIFEST_TAGS=$(tar -xOf charon-pr-image.tar manifest.json 2>/dev/null | jq -r '.[]?.RepoTags[]?' 2>/dev/null | sed '/^$/d' || true)
else
echo "⚠️ manifest.json not found in artifact tar; will try docker-load-image-id fallback"
fi
LOAD_OUTPUT=$(docker load < charon-pr-image.tar 2>&1)
echo "${LOAD_OUTPUT}"
SOURCE_IMAGE_REF=""
SOURCE_RESOLUTION_MODE=""
while IFS= read -r tag; do
[[ -z "${tag}" ]] && continue
if docker image inspect "${tag}" >/dev/null 2>&1; then
SOURCE_IMAGE_REF="${tag}"
SOURCE_RESOLUTION_MODE="manifest_tag"
break
fi
done <<< "${MANIFEST_TAGS}"
if [[ -z "${SOURCE_IMAGE_REF}" ]]; then
LOAD_IMAGE_ID=$(printf '%s\n' "${LOAD_OUTPUT}" | sed -nE 's/^Loaded image ID: (sha256:[0-9a-f]+)$/\1/p' | head -n1)
if [[ -n "${LOAD_IMAGE_ID}" ]] && docker image inspect "${LOAD_IMAGE_ID}" >/dev/null 2>&1; then
SOURCE_IMAGE_REF="${LOAD_IMAGE_ID}"
SOURCE_RESOLUTION_MODE="load_image_id"
fi
fi
if [[ -z "${SOURCE_IMAGE_REF}" ]]; then
echo "❌ ERROR: Could not resolve a valid image reference from manifest tags or docker load image ID"
exit 1
fi
docker tag "${SOURCE_IMAGE_REF}" "charon:artifact"
{
echo "source_image_ref=${SOURCE_IMAGE_REF}"
echo "source_resolution_mode=${SOURCE_RESOLUTION_MODE}"
echo "image_ref=charon:artifact"
} >> "$GITHUB_OUTPUT"
echo "✅ Docker image resolved via ${SOURCE_RESOLUTION_MODE} and tagged as charon:artifact"
docker images | grep charon
- name: Extract charon binary from container
@@ -214,31 +329,10 @@ jobs:
exit 0
fi
# Normalize image name for reference
IMAGE_NAME=$(echo "${{ github.repository_owner }}/charon" | tr '[:upper:]' '[:lower:]')
if [[ "${{ steps.pr-info.outputs.is_push }}" == "true" ]]; then
BRANCH_NAME="${{ github.event.workflow_run.head_branch }}"
if [[ -z "${BRANCH_NAME}" ]]; then
echo "❌ ERROR: Branch name is empty for push build"
exit 1
fi
# Normalize branch name for Docker tag (replace / and other special chars with -)
# This matches docker/metadata-action behavior: type=ref,event=branch
TAG_SAFE_BRANCH="${BRANCH_NAME//\//-}"
IMAGE_REF="ghcr.io/${IMAGE_NAME}:${TAG_SAFE_BRANCH}"
elif [[ -n "${{ steps.pr-info.outputs.pr_number }}" ]]; then
IMAGE_REF="ghcr.io/${IMAGE_NAME}:pr-${{ steps.pr-info.outputs.pr_number }}"
else
echo "❌ ERROR: Cannot determine image reference"
echo " - is_push: ${{ steps.pr-info.outputs.is_push }}"
echo " - pr_number: ${{ steps.pr-info.outputs.pr_number }}"
echo " - branch: ${{ github.event.workflow_run.head_branch }}"
exit 1
fi
# Validate the image reference format
if [[ ! "${IMAGE_REF}" =~ ^ghcr\.io/[a-z0-9_-]+/[a-z0-9_-]+:[a-zA-Z0-9._-]+$ ]]; then
echo "❌ ERROR: Invalid image reference format: ${IMAGE_REF}"
# For workflow_run artifact path, always use locally tagged image from loaded artifact.
IMAGE_REF="${{ steps.load-image.outputs.image_ref }}"
if [[ -z "${IMAGE_REF}" ]]; then
echo "❌ ERROR: Loaded artifact image reference is empty"
exit 1
fi
@@ -268,7 +362,7 @@ jobs:
- name: Run Trivy filesystem scan (SARIF output)
if: steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request'
# aquasecurity/trivy-action v0.33.1
uses: aquasecurity/trivy-action@e368e328979b113139d6f9068e03accaed98a518
uses: aquasecurity/trivy-action@1bd062560b422f5944df1de50abd05162bea079e
with:
scan-type: 'fs'
scan-ref: ${{ steps.extract.outputs.binary_path }}
@@ -277,19 +371,30 @@ jobs:
severity: 'CRITICAL,HIGH,MEDIUM'
continue-on-error: true
- name: Check Trivy SARIF output exists
if: always() && (steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request')
id: trivy-sarif-check
run: |
if [[ -f trivy-binary-results.sarif ]]; then
echo "exists=true" >> "$GITHUB_OUTPUT"
else
echo "exists=false" >> "$GITHUB_OUTPUT"
echo "️ No Trivy SARIF output found; skipping SARIF/artifact upload steps"
fi
- name: Upload Trivy SARIF to GitHub Security
if: steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request'
if: always() && steps.trivy-sarif-check.outputs.exists == 'true'
# github/codeql-action v4
uses: github/codeql-action/upload-sarif@cb4e075f119f8bccbc942d49655b2cd4dc6e615a
uses: github/codeql-action/upload-sarif@b0ed4dedcb6dac75e55f599c0ac323404c92645a
with:
sarif_file: 'trivy-binary-results.sarif'
category: ${{ steps.pr-info.outputs.is_push == 'true' && format('security-scan-{0}', github.event.workflow_run.head_branch) || format('security-scan-pr-{0}', steps.pr-info.outputs.pr_number) }}
category: ${{ steps.pr-info.outputs.is_push == 'true' && format('security-scan-{0}', github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.ref_name) || format('security-scan-pr-{0}', steps.pr-info.outputs.pr_number) }}
continue-on-error: true
- name: Run Trivy filesystem scan (fail on CRITICAL/HIGH)
if: steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request'
# aquasecurity/trivy-action v0.33.1
uses: aquasecurity/trivy-action@e368e328979b113139d6f9068e03accaed98a518
uses: aquasecurity/trivy-action@1bd062560b422f5944df1de50abd05162bea079e
with:
scan-type: 'fs'
scan-ref: ${{ steps.extract.outputs.binary_path }}
@@ -298,11 +403,11 @@ jobs:
exit-code: '1'
- name: Upload scan artifacts
if: always() && (steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request')
if: always() && steps.trivy-sarif-check.outputs.exists == 'true'
# actions/upload-artifact v4.4.3
uses: actions/upload-artifact@47309c993abb98030a35d55ef7ff34b7fa1074b5
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f
with:
name: ${{ steps.pr-info.outputs.is_push == 'true' && format('security-scan-{0}', github.event.workflow_run.head_branch) || format('security-scan-pr-{0}', steps.pr-info.outputs.pr_number) }}
name: ${{ steps.pr-info.outputs.is_push == 'true' && format('security-scan-{0}', github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.ref_name) || format('security-scan-pr-{0}', steps.pr-info.outputs.pr_number) }}
path: |
trivy-binary-results.sarif
retention-days: 14
@@ -312,7 +417,7 @@ jobs:
run: |
{
if [[ "${{ steps.pr-info.outputs.is_push }}" == "true" ]]; then
echo "## 🔒 Security Scan Results - Branch: ${{ github.event.workflow_run.head_branch }}"
echo "## 🔒 Security Scan Results - Branch: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.ref_name }}"
else
echo "## 🔒 Security Scan Results - PR #${{ steps.pr-info.outputs.pr_number }}"
fi
+34 -4
View File
@@ -11,6 +11,8 @@ on:
type: string
pull_request:
push:
branches:
- main
concurrency:
group: supply-chain-pr-${{ github.event.workflow_run.event || github.event_name }}-${{ github.event.workflow_run.head_branch || github.ref }}
@@ -264,7 +266,7 @@ jobs:
# Generate SBOM using official Anchore action (auto-updated by Renovate)
- name: Generate SBOM
if: steps.set-target.outputs.image_name != ''
uses: anchore/sbom-action@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2
uses: anchore/sbom-action@17ae1740179002c89186b61233e0f892c3118b11 # v0.23.0
id: sbom
with:
image: ${{ steps.set-target.outputs.image_name }}
@@ -337,6 +339,27 @@ jobs:
echo " Low: ${LOW_COUNT}"
echo " Total: ${TOTAL_COUNT}"
- name: Security severity policy summary
if: steps.set-target.outputs.image_name != ''
run: |
CRITICAL_COUNT="${{ steps.vuln-summary.outputs.critical_count }}"
HIGH_COUNT="${{ steps.vuln-summary.outputs.high_count }}"
MEDIUM_COUNT="${{ steps.vuln-summary.outputs.medium_count }}"
{
echo "## 🔐 Supply Chain Severity Policy"
echo ""
echo "- Blocking: Critical, High"
echo "- Medium: non-blocking by default (report + triage SLA)"
echo "- Policy file: .github/security-severity-policy.yml"
echo ""
echo "Current scan counts: Critical=${CRITICAL_COUNT}, High=${HIGH_COUNT}, Medium=${MEDIUM_COUNT}"
} >> "$GITHUB_STEP_SUMMARY"
if [[ "${MEDIUM_COUNT}" -gt 0 ]]; then
echo "::warning::${MEDIUM_COUNT} medium vulnerabilities found. Non-blocking by policy; create/maintain triage issue with SLA per .github/security-severity-policy.yml"
fi
- name: Upload SARIF to GitHub Security
if: steps.check-artifact.outputs.artifact_found == 'true'
uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4
@@ -348,7 +371,7 @@ jobs:
- name: Upload supply chain artifacts
if: steps.set-target.outputs.image_name != ''
# actions/upload-artifact v4.6.0
uses: actions/upload-artifact@47309c993abb98030a35d55ef7ff34b7fa1074b5
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f
with:
name: ${{ steps.pr-number.outputs.is_push == 'true' && format('supply-chain-{0}', steps.sanitize.outputs.branch) || format('supply-chain-pr-{0}', steps.pr-number.outputs.pr_number) }}
path: |
@@ -433,10 +456,11 @@ jobs:
echo "✅ PR comment posted"
- name: Fail on critical vulnerabilities
- name: Fail on Critical/High vulnerabilities
if: steps.set-target.outputs.image_name != ''
run: |
CRITICAL_COUNT="${{ steps.vuln-summary.outputs.critical_count }}"
HIGH_COUNT="${{ steps.vuln-summary.outputs.high_count }}"
if [[ "${CRITICAL_COUNT}" -gt 0 ]]; then
echo "🚨 Found ${CRITICAL_COUNT} CRITICAL vulnerabilities!"
@@ -444,4 +468,10 @@ jobs:
exit 1
fi
echo "✅ No critical vulnerabilities found"
if [[ "${HIGH_COUNT}" -gt 0 ]]; then
echo "🚨 Found ${HIGH_COUNT} HIGH vulnerabilities!"
echo "Please review the vulnerability report and address high severity issues before merging."
exit 1
fi
echo "✅ No Critical/High vulnerabilities found"
+1 -1
View File
@@ -119,7 +119,7 @@ jobs:
# Generate SBOM using official Anchore action (auto-updated by Renovate)
- name: Generate and Verify SBOM
if: steps.image-check.outputs.exists == 'true'
uses: anchore/sbom-action@28d71544de8eaf1b958d335707167c5f783590ad # v0.22.2
uses: anchore/sbom-action@17ae1740179002c89186b61233e0f892c3118b11 # v0.23.0
with:
image: ghcr.io/${{ github.repository_owner }}/charon:${{ steps.tag.outputs.tag }}
format: cyclonedx-json
+156
View File
@@ -815,6 +815,162 @@
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Non-Security Shards 1/4-4/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=1 npx playwright test --project=chromium --shard=1/4 --output=playwright-output/chromium-shard-1 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=2 npx playwright test --project=chromium --shard=2/4 --output=playwright-output/chromium-shard-2 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=3 npx playwright test --project=chromium --shard=3/4 --output=playwright-output/chromium-shard-3 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=4 npx playwright test --project=chromium --shard=4/4 --output=playwright-output/chromium-shard-4 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Non-Security Shard 1/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=1 npx playwright test --project=chromium --shard=1/4 --output=playwright-output/chromium-shard-1 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Non-Security Shard 2/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=2 npx playwright test --project=chromium --shard=2/4 --output=playwright-output/chromium-shard-2 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Non-Security Shard 3/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=3 npx playwright test --project=chromium --shard=3/4 --output=playwright-output/chromium-shard-3 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Non-Security Shard 4/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=4 npx playwright test --project=chromium --shard=4/4 --output=playwright-output/chromium-shard-4 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Non-Security Shards 1/4-4/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=1 npx playwright test --project=webkit --shard=1/4 --output=playwright-output/webkit-shard-1 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=2 npx playwright test --project=webkit --shard=2/4 --output=playwright-output/webkit-shard-2 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=3 npx playwright test --project=webkit --shard=3/4 --output=playwright-output/webkit-shard-3 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks && cd /projects/Charon && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=4 npx playwright test --project=webkit --shard=4/4 --output=playwright-output/webkit-shard-4 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Non-Security Shard 1/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=1 npx playwright test --project=webkit --shard=1/4 --output=playwright-output/webkit-shard-1 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Non-Security Shard 2/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=2 npx playwright test --project=webkit --shard=2/4 --output=playwright-output/webkit-shard-2 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Non-Security Shard 3/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=3 npx playwright test --project=webkit --shard=3/4 --output=playwright-output/webkit-shard-3 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Non-Security Shard 4/4",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=false PLAYWRIGHT_SKIP_SECURITY_DEPS=1 TEST_WORKER_INDEX=4 npx playwright test --project=webkit --shard=4/4 --output=playwright-output/webkit-shard-4 tests/core tests/dns-provider-crud.spec.ts tests/dns-provider-types.spec.ts tests/integration tests/manual-dns-provider.spec.ts tests/monitoring tests/settings tests/tasks",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Chromium) - Security Suite",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=true PLAYWRIGHT_SKIP_SECURITY_DEPS=0 npx playwright test --project=security-tests --output=playwright-output/chromium-security tests/security",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (Firefox) - Security Suite",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=true PLAYWRIGHT_SKIP_SECURITY_DEPS=0 npx playwright test --project=firefox --output=playwright-output/firefox-security tests/security",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright (WebKit) - Security Suite",
"type": "shell",
"command": "cd /projects/Charon && if [ -f .env ]; then set -a; . ./.env; set +a; fi && : \"${CHARON_EMERGENCY_TOKEN:?CHARON_EMERGENCY_TOKEN is required (set it in /projects/Charon/.env)}\" && CI=true PLAYWRIGHT_BASE_URL=http://127.0.0.1:8080 CHARON_SECURITY_TESTS_ENABLED=true PLAYWRIGHT_SKIP_SECURITY_DEPS=0 npx playwright test --project=webkit --output=playwright-output/webkit-security tests/security",
"group": "test",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated",
"close": false
}
},
{
"label": "Test: E2E Playwright with Coverage",
"type": "shell",
+1 -1
View File
@@ -68,7 +68,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
# ---- Frontend Builder ----
# Build the frontend using the BUILDPLATFORM to avoid arm64 musl Rollup native issues
# renovate: datasource=docker depName=node
FROM --platform=$BUILDPLATFORM node:24.13.1-alpine AS frontend-builder
FROM --platform=$BUILDPLATFORM node:24.14.0-alpine AS frontend-builder
WORKDIR /app/frontend
# Copy frontend package files
+13
View File
@@ -94,6 +94,19 @@ services:
retries: 3
start_period: 40s
```
> **Docker Socket Access:** Charon runs as a non-root user. If you mount the Docker socket for container discovery, the container needs permission to read it. Find your socket's group ID and add it to the compose file:
>
> ```bash
> stat -c '%g' /var/run/docker.sock
> ```
>
> Then add `group_add: ["<gid>"]` under your service (replace `<gid>` with the number from the command above). For example, if the result is `998`:
>
> ```yaml
> group_add:
> - "998"
> ```
### 2️⃣ Generate encryption key:
```bash
openssl rand -base64 32
+4 -4
View File
@@ -25,11 +25,10 @@ We take security seriously. If you discover a security vulnerability in Charon,
- Impact assessment
- Suggested fix (if applicable)
**Alternative Method**: Email
**Alternative Method**: GitHub Issues (Public)
- Send to: `security@charon.dev` (if configured)
- Use PGP encryption (key available below, if applicable)
- Include same information as GitHub advisory
1. Go to <https://github.com/Wikid82/Charon/issues>
2. Create a new issue with the same information as above
### What to Include
@@ -125,6 +124,7 @@ For complete technical details, see:
### Infrastructure Security
- **Non-root by default**: Charon runs as an unprivileged user (`charon`, uid 1000) inside the container. Docker socket access is granted via a minimal supplemental group matching the host socket's GID—never by running as root. If the socket GID is `0` (root group), Charon requires explicit opt-in before granting access.
- **Container isolation**: Docker-based deployment
- **Minimal attack surface**: Alpine Linux base image
- **Dependency scanning**: Regular Trivy and govulncheck scans
+1 -1
View File
@@ -12,7 +12,7 @@ linters:
- ineffassign # Ineffectual assignments
- unused # Unused code detection
- gosec # Security checks (critical issues only)
linters-settings:
settings:
govet:
enable:
- shadow
+2 -2
View File
@@ -1,5 +1,5 @@
# golangci-lint configuration
version: 2
version: "2"
run:
timeout: 5m
tests: true
@@ -14,7 +14,7 @@ linters:
- staticcheck
- unused
- errcheck
linters-settings:
settings:
gocritic:
enabled-tags:
- diagnostic
+1 -1
View File
@@ -260,7 +260,7 @@ func main() {
}
// Register import handler with config dependencies
routes.RegisterImportHandler(router, db, cfg.CaddyBinary, cfg.ImportDir, cfg.ImportCaddyfile)
routes.RegisterImportHandler(router, db, cfg, cfg.CaddyBinary, cfg.ImportDir, cfg.ImportCaddyfile)
// Check for mounted Caddyfile on startup
if err := handlers.CheckMountedImport(db, cfg.ImportCaddyfile, cfg.CaddyBinary, cfg.ImportDir); err != nil {
+2 -1
View File
@@ -311,7 +311,8 @@ func TestMain_DefaultStartupGracefulShutdown_Subprocess(t *testing.T) {
if err != nil {
t.Fatalf("find free http port: %v", err)
}
if err := os.MkdirAll(filepath.Dir(dbPath), 0o750); err != nil {
err = os.MkdirAll(filepath.Dir(dbPath), 0o750)
if err != nil {
t.Fatalf("mkdir db dir: %v", err)
}
+4 -2
View File
@@ -64,11 +64,13 @@ func main() {
jsonOutPath := resolvePath(repoRoot, *jsonOutFlag)
mdOutPath := resolvePath(repoRoot, *mdOutFlag)
if err := assertFileExists(backendCoveragePath, "backend coverage file"); err != nil {
err = assertFileExists(backendCoveragePath, "backend coverage file")
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := assertFileExists(frontendCoveragePath, "frontend coverage file"); err != nil {
err = assertFileExists(frontendCoveragePath, "frontend coverage file")
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
+6 -3
View File
@@ -235,7 +235,8 @@ func TestGitDiffAndWriters(t *testing.T) {
t.Fatalf("expected empty diff for HEAD...HEAD, got: %q", diffContent)
}
if _, err := gitDiff(repoRoot, "bad-baseline"); err == nil {
_, err = gitDiff(repoRoot, "bad-baseline")
if err == nil {
t.Fatal("expected gitDiff failure for invalid baseline")
}
@@ -263,7 +264,8 @@ func TestGitDiffAndWriters(t *testing.T) {
}
jsonPath := filepath.Join(t.TempDir(), "report.json")
if err := writeJSON(jsonPath, report); err != nil {
err = writeJSON(jsonPath, report)
if err != nil {
t.Fatalf("writeJSON should succeed: %v", err)
}
// #nosec G304 -- Test reads artifact path created by this test.
@@ -276,7 +278,8 @@ func TestGitDiffAndWriters(t *testing.T) {
}
markdownPath := filepath.Join(t.TempDir(), "report.md")
if err := writeMarkdown(markdownPath, report, "backend/coverage.txt", "frontend/coverage/lcov.info"); err != nil {
err = writeMarkdown(markdownPath, report, "backend/coverage.txt", "frontend/coverage/lcov.info")
if err != nil {
t.Fatalf("writeMarkdown should succeed: %v", err)
}
// #nosec G304 -- Test reads artifact path created by this test.
+1 -1
View File
@@ -17,7 +17,7 @@ require (
github.com/sirupsen/logrus v1.9.4
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.48.0
golang.org/x/net v0.50.0
golang.org/x/net v0.51.0
golang.org/x/text v0.34.0
golang.org/x/time v0.14.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
+2
View File
@@ -200,6 +200,8 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
@@ -0,0 +1,124 @@
//go:build integration
// +build integration
package integration
import (
"context"
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"github.com/Wikid82/charon/backend/internal/notifications"
)
// TestNotificationHTTPWrapperIntegration_RetriesOn429AndSucceeds verifies that a
// 429 (Too Many Requests) response is treated as retryable: the wrapper retries,
// succeeds once the server answers 200, and Attempts reflects both calls.
func TestNotificationHTTPWrapperIntegration_RetriesOn429AndSucceeds(t *testing.T) {
	t.Parallel()

	var requestCount int32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		// First request is throttled; every subsequent request succeeds.
		if atomic.AddInt32(&requestCount, 1) > 1 {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write([]byte(`{"ok":true}`))
			return
		}
		w.WriteHeader(http.StatusTooManyRequests)
	}))
	defer srv.Close()

	client := notifications.NewNotifyHTTPWrapper()
	result, err := client.Send(context.Background(), notifications.HTTPWrapperRequest{
		URL:  srv.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err != nil {
		t.Fatalf("expected retry success, got error: %v", err)
	}
	if got := result.Attempts; got != 2 {
		t.Fatalf("expected 2 attempts, got %d", got)
	}
}
// TestNotificationHTTPWrapperIntegration_DoesNotRetryOn400 verifies that a 400
// (Bad Request) response is non-retryable: Send returns an error after exactly
// one HTTP attempt.
func TestNotificationHTTPWrapperIntegration_DoesNotRetryOn400(t *testing.T) {
	t.Parallel()

	var requestCount int32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		atomic.AddInt32(&requestCount, 1)
		w.WriteHeader(http.StatusBadRequest)
	}))
	defer srv.Close()

	client := notifications.NewNotifyHTTPWrapper()
	_, err := client.Send(context.Background(), notifications.HTTPWrapperRequest{
		URL:  srv.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil {
		t.Fatalf("expected non-retryable 400 error")
	}
	if n := atomic.LoadInt32(&requestCount); n != 1 {
		t.Fatalf("expected one request attempt, got %d", n)
	}
}
// TestNotificationHTTPWrapperIntegration_RejectsTokenizedQueryWithoutEcho verifies
// that a URL carrying credentials in its query string is rejected up front, and
// that the returned error message never echoes the secret value.
func TestNotificationHTTPWrapperIntegration_RejectsTokenizedQueryWithoutEcho(t *testing.T) {
	t.Parallel()

	const secret = "pr1-secret-token-value"
	client := notifications.NewNotifyHTTPWrapper()
	_, err := client.Send(context.Background(), notifications.HTTPWrapperRequest{
		URL:  "http://example.com/hook?token=" + secret,
		Body: []byte(`{"message":"hello"}`),
	})

	switch {
	case err == nil:
		t.Fatalf("expected tokenized query rejection")
	case !strings.Contains(err.Error(), "query authentication is not allowed"):
		t.Fatalf("expected sanitized query-auth rejection, got: %v", err)
	case strings.Contains(err.Error(), secret):
		t.Fatalf("error must not echo secret token")
	}
}
// TestNotificationHTTPWrapperIntegration_HeaderAllowlistSafety verifies the
// outbound-header allowlist: caller-supplied sensitive headers (Authorization,
// Cookie) must be stripped before the request leaves the wrapper, while
// allowlisted service headers such as X-Gotify-Key must pass through unchanged.
func TestNotificationHTTPWrapperIntegration_HeaderAllowlistSafety(t *testing.T) {
	t.Parallel()
	// Written by the handler goroutine; read only after server.Close() below,
	// which provides the synchronization needed to avoid a data race under -race.
	var seenAuthHeader string
	var seenCookieHeader string
	var seenGotifyKey string
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		seenAuthHeader = r.Header.Get("Authorization")
		seenCookieHeader = r.Header.Get("Cookie")
		seenGotifyKey = r.Header.Get("X-Gotify-Key")
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()
	wrapper := notifications.NewNotifyHTTPWrapper()
	_, err := wrapper.Send(context.Background(), notifications.HTTPWrapperRequest{
		URL: server.URL,
		Headers: map[string]string{
			"Authorization": "Bearer should-not-leak",
			"Cookie":        "session=should-not-leak",
			"X-Gotify-Key":  "allowed-token",
		},
		Body: []byte(`{"message":"hello"}`),
	})
	if err != nil {
		t.Fatalf("expected success, got error: %v", err)
	}
	// Close blocks until all in-flight handlers return, establishing the
	// happens-before edge that makes the seen* reads below race-free.
	// (Close is safe to call again via the deferred call.)
	server.Close()
	if seenAuthHeader != "" {
		t.Fatalf("authorization header must be stripped")
	}
	if seenCookieHeader != "" {
		t.Fatalf("cookie header must be stripped")
	}
	if seenGotifyKey != "allowed-token" {
		t.Fatalf("expected X-Gotify-Key to pass through")
	}
}
@@ -170,6 +170,7 @@ func TestSecurityHandler_UpdateConfig_ApplyCaddyError(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
setAdminContext(c)
c.Request = httptest.NewRequest("PUT", "/security/config", bytes.NewBuffer(body))
c.Request.Header.Set("Content-Type", "application/json")
@@ -190,6 +191,7 @@ func TestSecurityHandler_GenerateBreakGlass_Error(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
setAdminContext(c)
c.Request = httptest.NewRequest("POST", "/security/breakglass", http.NoBody)
h.GenerateBreakGlass(c)
@@ -252,6 +254,7 @@ func TestSecurityHandler_UpsertRuleSet_Error(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
setAdminContext(c)
c.Request = httptest.NewRequest("POST", "/security/rulesets", bytes.NewBuffer(body))
c.Request.Header.Set("Content-Type", "application/json")
@@ -277,6 +280,7 @@ func TestSecurityHandler_CreateDecision_LogError(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
setAdminContext(c)
c.Request = httptest.NewRequest("POST", "/security/decisions", bytes.NewBuffer(body))
c.Request.Header.Set("Content-Type", "application/json")
@@ -297,6 +301,7 @@ func TestSecurityHandler_DeleteRuleSet_Error(t *testing.T) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
setAdminContext(c)
c.Params = gin.Params{{Key: "id", Value: "999"}}
h.DeleteRuleSet(c)
@@ -127,18 +127,20 @@ func isLocalRequest(c *gin.Context) bool {
// setSecureCookie sets an auth cookie with security best practices
// - HttpOnly: prevents JavaScript access (XSS protection)
// - Secure: derived from request scheme to allow HTTP/IP logins when needed
// - Secure: true for HTTPS; false only for local non-HTTPS loopback flows
// - SameSite: Strict for HTTPS, Lax for HTTP/IP to allow forward-auth redirects
func setSecureCookie(c *gin.Context, name, value string, maxAge int) {
scheme := requestScheme(c)
secure := scheme == "https"
secure := true
sameSite := http.SameSiteStrictMode
if scheme != "https" {
sameSite = http.SameSiteLaxMode
if isLocalRequest(c) {
secure = false
}
}
if isLocalRequest(c) {
secure = false
sameSite = http.SameSiteLaxMode
}
@@ -152,7 +154,7 @@ func setSecureCookie(c *gin.Context, name, value string, maxAge int) {
maxAge, // maxAge in seconds
"/", // path
domain, // domain (empty = current host)
secure, // secure (HTTPS only in production)
secure, // secure (always true)
true, // httpOnly (no JS access)
)
}
@@ -94,10 +94,28 @@ func TestSetSecureCookie_HTTP_Lax(t *testing.T) {
cookies := recorder.Result().Cookies()
require.Len(t, cookies, 1)
c := cookies[0]
assert.False(t, c.Secure)
assert.True(t, c.Secure)
assert.Equal(t, http.SameSiteLaxMode, c.SameSite)
}
// TestSetSecureCookie_HTTP_Loopback_Insecure verifies the local-development
// escape hatch: a plain-HTTP request from a loopback host receives a cookie
// with Secure=false (so the browser will store it) and SameSite=Lax.
func TestSetSecureCookie_HTTP_Loopback_Insecure(t *testing.T) {
	t.Parallel()
	gin.SetMode(gin.TestMode)

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	loopbackReq := httptest.NewRequest("POST", "http://127.0.0.1:8080/login", http.NoBody)
	loopbackReq.Host = "127.0.0.1:8080"
	loopbackReq.Header.Set("X-Forwarded-Proto", "http")
	ctx.Request = loopbackReq

	setSecureCookie(ctx, "auth_token", "abc", 60)

	issued := rec.Result().Cookies()
	require.Len(t, issued, 1)
	got := issued[0]
	assert.False(t, got.Secure)
	assert.Equal(t, http.SameSiteLaxMode, got.SameSite)
}
func TestSetSecureCookie_ForwardedHTTPS_LocalhostForcesInsecure(t *testing.T) {
t.Parallel()
gin.SetMode(gin.TestMode)
@@ -115,7 +133,7 @@ func TestSetSecureCookie_ForwardedHTTPS_LocalhostForcesInsecure(t *testing.T) {
cookies := recorder.Result().Cookies()
require.Len(t, cookies, 1)
cookie := cookies[0]
assert.False(t, cookie.Secure)
assert.True(t, cookie.Secure)
assert.Equal(t, http.SameSiteLaxMode, cookie.SameSite)
}
@@ -136,7 +154,7 @@ func TestSetSecureCookie_ForwardedHTTPS_LoopbackForcesInsecure(t *testing.T) {
cookies := recorder.Result().Cookies()
require.Len(t, cookies, 1)
cookie := cookies[0]
assert.False(t, cookie.Secure)
assert.True(t, cookie.Secure)
assert.Equal(t, http.SameSiteLaxMode, cookie.SameSite)
}
@@ -158,7 +176,7 @@ func TestSetSecureCookie_ForwardedHostLocalhostForcesInsecure(t *testing.T) {
cookies := recorder.Result().Cookies()
require.Len(t, cookies, 1)
cookie := cookies[0]
assert.False(t, cookie.Secure)
assert.True(t, cookie.Secure)
assert.Equal(t, http.SameSiteLaxMode, cookie.SameSite)
}
@@ -180,7 +198,7 @@ func TestSetSecureCookie_OriginLoopbackForcesInsecure(t *testing.T) {
cookies := recorder.Result().Cookies()
require.Len(t, cookies, 1)
cookie := cookies[0]
assert.False(t, cookie.Secure)
assert.True(t, cookie.Secure)
assert.Equal(t, http.SameSiteLaxMode, cookie.SameSite)
}
@@ -71,10 +71,14 @@ func (h *DockerHandler) ListContainers(c *gin.Context) {
if err != nil {
var unavailableErr *services.DockerUnavailableError
if errors.As(err, &unavailableErr) {
details := unavailableErr.Details()
if details == "" {
details = "Cannot connect to Docker. Please ensure Docker is running and the socket is accessible (e.g., /var/run/docker.sock is mounted)."
}
log.WithFields(map[string]any{"server_id": util.SanitizeForLog(serverID), "host": util.SanitizeForLog(host), "error": util.SanitizeForLog(err.Error())}).Warn("docker unavailable")
c.JSON(http.StatusServiceUnavailable, gin.H{
"error": "Docker daemon unavailable",
"details": "Cannot connect to Docker. Please ensure Docker is running and the socket is accessible (e.g., /var/run/docker.sock is mounted).",
"details": details,
})
return
}
@@ -63,7 +63,7 @@ func TestDockerHandler_ListContainers_DockerUnavailableMappedTo503(t *testing.T)
gin.SetMode(gin.TestMode)
router := gin.New()
dockerSvc := &fakeDockerService{err: services.NewDockerUnavailableError(errors.New("no docker socket"))}
dockerSvc := &fakeDockerService{err: services.NewDockerUnavailableError(errors.New("no docker socket"), "Local Docker socket is mounted but not accessible by current process")}
remoteSvc := &fakeRemoteServerService{}
h := NewDockerHandler(dockerSvc, remoteSvc)
@@ -78,7 +78,7 @@ func TestDockerHandler_ListContainers_DockerUnavailableMappedTo503(t *testing.T)
assert.Contains(t, w.Body.String(), "Docker daemon unavailable")
// Verify the new details field is included in the response
assert.Contains(t, w.Body.String(), "details")
assert.Contains(t, w.Body.String(), "Docker is running")
assert.Contains(t, w.Body.String(), "not accessible by current process")
}
func TestDockerHandler_ListContainers_ServerIDResolvesToTCPHost(t *testing.T) {
@@ -360,3 +360,47 @@ func TestDockerHandler_ListContainers_GenericError(t *testing.T) {
})
}
}
// TestDockerHandler_ListContainers_503FallbackDetailsWhenEmpty verifies that when
// the Docker service reports unavailability with no detail text, the 503 response
// falls back to the generic "is the socket mounted?" guidance message.
func TestDockerHandler_ListContainers_503FallbackDetailsWhenEmpty(t *testing.T) {
	gin.SetMode(gin.TestMode)
	router := gin.New()

	h := NewDockerHandler(
		&fakeDockerService{err: services.NewDockerUnavailableError(errors.New("socket error"))},
		&fakeRemoteServerService{},
	)
	h.RegisterRoutes(router.Group("/api/v1"))

	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/docker/containers", http.NoBody))

	assert.Equal(t, http.StatusServiceUnavailable, rec.Code)
	body := rec.Body.String()
	assert.Contains(t, body, "Docker daemon unavailable")
	assert.Contains(t, body, "docker.sock is mounted")
}
// TestDockerHandler_ListContainers_503DetailsWithGroupGuidance verifies that
// service-supplied detail text — here the supplemental-group remediation hint
// (--group-add / compose group_add) — is surfaced verbatim in the 503 response.
func TestDockerHandler_ListContainers_503DetailsWithGroupGuidance(t *testing.T) {
	gin.SetMode(gin.TestMode)
	router := gin.New()

	groupDetails := `Local Docker socket is mounted but not accessible by current process (uid=1000 gid=1000). Process groups (1000) do not include socket gid 988; run container with matching supplemental group (e.g., --group-add 988 or compose group_add: ["988"]).`
	h := NewDockerHandler(
		&fakeDockerService{err: services.NewDockerUnavailableError(errors.New("EACCES"), groupDetails)},
		&fakeRemoteServerService{},
	)
	h.RegisterRoutes(router.Group("/api/v1"))

	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/docker/containers?host=local", http.NoBody))

	assert.Equal(t, http.StatusServiceUnavailable, rec.Code)
	body := rec.Body.String()
	assert.Contains(t, body, "Docker daemon unavailable")
	assert.Contains(t, body, "--group-add 988")
	assert.Contains(t, body, "group_add")
}
@@ -31,6 +31,7 @@ var defaultFlags = []string{
"feature.notifications.engine.notify_v1.enabled",
"feature.notifications.service.discord.enabled",
"feature.notifications.service.gotify.enabled",
"feature.notifications.service.webhook.enabled",
"feature.notifications.legacy.fallback_enabled",
"feature.notifications.security_provider_events.enabled", // Blocker 3: Add security_provider_events gate
}
@@ -42,6 +43,7 @@ var defaultFlagValues = map[string]bool{
"feature.notifications.engine.notify_v1.enabled": false,
"feature.notifications.service.discord.enabled": false,
"feature.notifications.service.gotify.enabled": false,
"feature.notifications.service.webhook.enabled": false,
"feature.notifications.legacy.fallback_enabled": false,
"feature.notifications.security_provider_events.enabled": false, // Blocker 3: Default disabled for this stage
}
@@ -93,6 +93,10 @@ func (h *ImportHandler) RegisterRoutes(router *gin.RouterGroup) {
// GetStatus returns current import session status.
func (h *ImportHandler) GetStatus(c *gin.Context) {
if !requireAuthenticatedAdmin(c) {
return
}
var session models.ImportSession
err := h.db.Where("status IN ?", []string{"pending", "reviewing"}).
Order("created_at DESC").
@@ -155,6 +159,10 @@ func (h *ImportHandler) GetStatus(c *gin.Context) {
// GetPreview returns parsed hosts and conflicts for review.
func (h *ImportHandler) GetPreview(c *gin.Context) {
if !requireAuthenticatedAdmin(c) {
return
}
var session models.ImportSession
err := h.db.Where("status IN ?", []string{"pending", "reviewing"}).
Order("created_at DESC").
@@ -3,6 +3,7 @@ package handlers
import (
"bytes"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
@@ -14,6 +15,7 @@ import (
"github.com/Wikid82/charon/backend/internal/models"
"github.com/Wikid82/charon/backend/internal/services"
"github.com/Wikid82/charon/backend/internal/trace"
)
func setupNotificationCoverageDB(t *testing.T) *gorm.DB {
@@ -319,6 +321,159 @@ func TestNotificationProviderHandler_Test_InvalidJSON(t *testing.T) {
assert.Equal(t, 400, w.Code)
}
// POST /providers/test must refuse an inline Gotify token: the token field is
// write-only and is accepted only on provider create/update. The secret must
// also never be echoed back in the response body.
func TestNotificationProviderHandler_Test_RejectsClientSuppliedGotifyToken(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler := NewNotificationProviderHandler(services.NewNotificationService(setupNotificationCoverageDB(t)))

	raw, _ := json.Marshal(map[string]any{
		"type":  "gotify",
		"url":   "https://gotify.example/message",
		"token": "super-secret-client-token",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Set(string(trace.RequestIDKey), "req-token-reject-1")
	ctx.Request = httptest.NewRequest(http.MethodPost, "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, http.StatusBadRequest, rec.Code)

	var resp map[string]any
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
	assert.Equal(t, "TOKEN_WRITE_ONLY", resp["code"])
	assert.Equal(t, "validation", resp["category"])
	assert.Equal(t, "Gotify token is accepted only on provider create/update", resp["error"])
	assert.Equal(t, "req-token-reject-1", resp["request_id"])
	// The secret must never leak into the response.
	assert.NotContains(t, rec.Body.String(), "super-secret-client-token")
}
// A token that is only padded with whitespace is still a client-supplied
// token and must be rejected (and never reflected) on test calls.
func TestNotificationProviderHandler_Test_RejectsGotifyTokenWithWhitespace(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler := NewNotificationProviderHandler(services.NewNotificationService(setupNotificationCoverageDB(t)))

	raw, _ := json.Marshal(map[string]any{
		"type":  "gotify",
		"token": " secret-with-space ",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest(http.MethodPost, "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, http.StatusBadRequest, rec.Code)
	assert.Contains(t, rec.Body.String(), "TOKEN_WRITE_ONLY")
	assert.NotContains(t, rec.Body.String(), "secret-with-space")
}
// A nil error must fall back to the generic dispatch failure triple.
func TestClassifyProviderTestFailure_NilError(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(nil)
	assert.Equal(t, "PROVIDER_TEST_FAILED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Equal(t, "Provider test failed", gotMessage)
}
// Any HTTP status other than the special-cased ones (401/403/404) maps to the
// generic remote-rejected code and surfaces the status in the message.
func TestClassifyProviderTestFailure_DefaultStatusCode(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("provider returned status 500"))
	assert.Equal(t, "PROVIDER_TEST_REMOTE_REJECTED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "HTTP 500")
}
// Unrecognized error text degrades to the generic failure triple.
func TestClassifyProviderTestFailure_GenericError(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("something completely unexpected"))
	assert.Equal(t, "PROVIDER_TEST_FAILED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Equal(t, "Provider test failed", gotMessage)
}
// Discord webhook URL validation failures classify as URL-invalid.
func TestClassifyProviderTestFailure_InvalidDiscordWebhookURL(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("invalid discord webhook url"))
	assert.Equal(t, "PROVIDER_TEST_URL_INVALID", gotCode)
	assert.Equal(t, "validation", gotCategory)
	assert.Contains(t, gotMessage, "Provider URL")
}
// Generic destination-URL validation failures classify as URL-invalid.
func TestClassifyProviderTestFailure_URLValidation(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("destination URL validation failed"))
	assert.Equal(t, "PROVIDER_TEST_URL_INVALID", gotCode)
	assert.Equal(t, "validation", gotCategory)
	assert.Contains(t, gotMessage, "Provider URL")
}
// A remote 401 classifies as an authentication rejection.
func TestClassifyProviderTestFailure_AuthRejected(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: provider returned status 401"))
	assert.Equal(t, "PROVIDER_TEST_AUTH_REJECTED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "rejected authentication")
}
// A remote 404 classifies as endpoint-not-found.
func TestClassifyProviderTestFailure_EndpointNotFound(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: provider returned status 404"))
	assert.Equal(t, "PROVIDER_TEST_ENDPOINT_NOT_FOUND", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "endpoint was not found")
}
// An outbound failure with no recognized transport detail classifies
// as generically unreachable.
func TestClassifyProviderTestFailure_UnreachableEndpoint(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: outbound request failed"))
	assert.Equal(t, "PROVIDER_TEST_UNREACHABLE", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "Could not reach provider endpoint")
}
// DNS failures inside an outbound-request error get their own code.
func TestClassifyProviderTestFailure_DNSLookupFailed(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: outbound request failed: dns lookup failed"))
	assert.Equal(t, "PROVIDER_TEST_DNS_FAILED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "DNS lookup failed")
}
// Connection-refused failures get their own code.
func TestClassifyProviderTestFailure_ConnectionRefused(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: outbound request failed: connection refused"))
	assert.Equal(t, "PROVIDER_TEST_CONNECTION_REFUSED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "refused the connection")
}
// Timeouts get their own code.
func TestClassifyProviderTestFailure_Timeout(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: outbound request failed: request timed out"))
	assert.Equal(t, "PROVIDER_TEST_TIMEOUT", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "timed out")
}
// TLS handshake failures get their own code.
func TestClassifyProviderTestFailure_TLSHandshakeFailed(t *testing.T) {
	gotCode, gotCategory, gotMessage := classifyProviderTestFailure(errors.New("failed to send webhook: outbound request failed: tls handshake failed"))
	assert.Equal(t, "PROVIDER_TEST_TLS_FAILED", gotCode)
	assert.Equal(t, "dispatch", gotCategory)
	assert.Contains(t, gotMessage, "TLS handshake failed")
}
func TestNotificationProviderHandler_Templates(t *testing.T) {
gin.SetMode(gin.TestMode)
db := setupNotificationCoverageDB(t)
@@ -625,3 +780,258 @@ func TestNotificationTemplateHandler_Preview_InvalidTemplate(t *testing.T) {
assert.Equal(t, 400, w.Code)
}
// Preview must also refuse a client-supplied token: the field is write-only
// everywhere except create/update.
func TestNotificationProviderHandler_Preview_TokenWriteOnly(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler := NewNotificationProviderHandler(services.NewNotificationService(setupNotificationCoverageDB(t)))

	raw, _ := json.Marshal(map[string]any{
		"template": "minimal",
		"token":    "secret-token-value",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest("POST", "/providers/preview", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Preview(ctx)

	assert.Equal(t, 400, rec.Code)
	assert.Contains(t, rec.Body.String(), "TOKEN_WRITE_ONLY")
}
// Updating a provider with a different "type" must fail with
// PROVIDER_TYPE_IMMUTABLE rather than silently switching provider kinds.
func TestNotificationProviderHandler_Update_TypeChangeRejected(t *testing.T) {
	gin.SetMode(gin.TestMode)
	database := setupNotificationCoverageDB(t)
	handler := NewNotificationProviderHandler(services.NewNotificationService(database))

	// Seed a discord provider, then attempt to flip it to gotify.
	require.NoError(t, database.Create(&models.NotificationProvider{
		ID:   "update-type-test",
		Name: "Discord Provider",
		Type: "discord",
		URL:  "https://discord.com/api/webhooks/123/abc",
	}).Error)

	raw, _ := json.Marshal(map[string]any{
		"name": "Changed Type Provider",
		"type": "gotify",
		"url":  "https://gotify.example.com",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Params = gin.Params{{Key: "id", Value: "update-type-test"}}
	ctx.Request = httptest.NewRequest("PUT", "/providers/update-type-test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Update(ctx)

	assert.Equal(t, 400, rec.Code)
	assert.Contains(t, rec.Body.String(), "PROVIDER_TYPE_IMMUTABLE")
}
// A test request without a provider id must fail with MISSING_PROVIDER_ID.
func TestNotificationProviderHandler_Test_MissingProviderID(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler := NewNotificationProviderHandler(services.NewNotificationService(setupNotificationCoverageDB(t)))

	raw, _ := json.Marshal(map[string]any{"type": "discord"})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest("POST", "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, 400, rec.Code)
	assert.Contains(t, rec.Body.String(), "MISSING_PROVIDER_ID")
}
// Testing an id that does not exist in the database must return 404 with
// PROVIDER_NOT_FOUND.
func TestNotificationProviderHandler_Test_ProviderNotFound(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler := NewNotificationProviderHandler(services.NewNotificationService(setupNotificationCoverageDB(t)))

	raw, _ := json.Marshal(map[string]any{
		"type": "discord",
		"id":   "nonexistent-provider",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest("POST", "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, 404, rec.Code)
	assert.Contains(t, rec.Body.String(), "PROVIDER_NOT_FOUND")
}
// A stored provider whose URL is empty cannot be tested; the handler must
// report PROVIDER_CONFIG_MISSING.
func TestNotificationProviderHandler_Test_EmptyProviderURL(t *testing.T) {
	gin.SetMode(gin.TestMode)
	database := setupNotificationCoverageDB(t)
	handler := NewNotificationProviderHandler(services.NewNotificationService(database))

	require.NoError(t, database.Create(&models.NotificationProvider{
		ID:   "empty-url-test",
		Name: "Empty URL Provider",
		Type: "discord",
		URL:  "",
	}).Error)

	raw, _ := json.Marshal(map[string]any{
		"type": "discord",
		"id":   "empty-url-test",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest("POST", "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, 400, rec.Code)
	assert.Contains(t, rec.Body.String(), "PROVIDER_CONFIG_MISSING")
}
// Table test over isProviderValidationError: template/webhook validation
// failures must classify as validation errors; nil and unrelated errors
// must not.
func TestIsProviderValidationError_Comprehensive(t *testing.T) {
	type testCase struct {
		label string
		input error
		want  bool
	}
	cases := []testCase{
		{"nil", nil, false},
		{"invalid_custom_template", errors.New("invalid custom template: missing field"), true},
		{"rendered_template", errors.New("rendered template exceeds maximum"), true},
		{"failed_to_parse", errors.New("failed to parse template: unexpected end"), true},
		{"failed_to_render", errors.New("failed to render template: missing key"), true},
		{"invalid_discord_webhook", errors.New("invalid Discord webhook URL"), true},
		{"unrelated_error", errors.New("database connection failed"), false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.label, func(t *testing.T) {
			assert.Equal(t, tc.want, isProviderValidationError(tc.input))
		})
	}
}
// An existing provider whose stored type is not supported (here "slack")
// cannot be updated; the handler must report UNSUPPORTED_PROVIDER_TYPE.
func TestNotificationProviderHandler_Update_UnsupportedType(t *testing.T) {
	gin.SetMode(gin.TestMode)
	database := setupNotificationCoverageDB(t)
	handler := NewNotificationProviderHandler(services.NewNotificationService(database))

	require.NoError(t, database.Create(&models.NotificationProvider{
		ID:   "unsupported-type",
		Name: "Custom Provider",
		Type: "slack",
		URL:  "https://hooks.slack.com/test",
	}).Error)

	raw, _ := json.Marshal(map[string]any{
		"name": "Updated Slack Provider",
		"url":  "https://hooks.slack.com/updated",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Params = gin.Params{{Key: "id", Value: "unsupported-type"}}
	ctx.Request = httptest.NewRequest("PUT", "/providers/unsupported-type", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Update(ctx)

	assert.Equal(t, 400, rec.Code)
	assert.Contains(t, rec.Body.String(), "UNSUPPORTED_PROVIDER_TYPE")
}
// Updating a Gotify provider without supplying a token must preserve the
// token already stored in the database (token is write-only and optional
// on update).
func TestNotificationProviderHandler_Update_GotifyKeepsExistingToken(t *testing.T) {
	gin.SetMode(gin.TestMode)
	database := setupNotificationCoverageDB(t)
	handler := NewNotificationProviderHandler(services.NewNotificationService(database))

	require.NoError(t, database.Create(&models.NotificationProvider{
		ID:    "gotify-keep-token",
		Name:  "Gotify Provider",
		Type:  "gotify",
		URL:   "https://gotify.example.com",
		Token: "existing-secret-token",
	}).Error)

	// Update payload deliberately omits "token".
	raw, _ := json.Marshal(map[string]any{
		"name":     "Updated Gotify",
		"url":      "https://gotify.example.com/new",
		"template": "minimal",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Params = gin.Params{{Key: "id", Value: "gotify-keep-token"}}
	ctx.Request = httptest.NewRequest("PUT", "/providers/gotify-keep-token", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Update(ctx)

	assert.Equal(t, 200, rec.Code)

	var stored models.NotificationProvider
	require.NoError(t, database.Where("id = ?", "gotify-keep-token").First(&stored).Error)
	assert.Equal(t, "existing-secret-token", stored.Token)
}
// Dropping the providers table forces the lookup to fail, which must surface
// as a 500 with PROVIDER_READ_FAILED (not a leaked database error).
func TestNotificationProviderHandler_Test_ReadDBError(t *testing.T) {
	gin.SetMode(gin.TestMode)
	database := setupNotificationCoverageDB(t)
	handler := NewNotificationProviderHandler(services.NewNotificationService(database))

	// Sabotage the schema so the provider read errors out.
	_ = database.Migrator().DropTable(&models.NotificationProvider{})

	raw, _ := json.Marshal(map[string]any{
		"type": "discord",
		"id":   "some-provider",
	})

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	setAdminContext(ctx)
	ctx.Request = httptest.NewRequest("POST", "/providers/test", bytes.NewBuffer(raw))
	ctx.Request.Header.Set("Content-Type", "application/json")

	handler.Test(ctx)

	assert.Equal(t, 500, rec.Code)
	assert.Contains(t, rec.Body.String(), "PROVIDER_READ_FAILED")
}
@@ -15,7 +15,7 @@ import (
"gorm.io/gorm"
)
// TestBlocker3_CreateProviderRejectsNonDiscordWithSecurityEvents tests that create rejects non-Discord providers with security events.
// TestBlocker3_CreateProviderValidationWithSecurityEvents verifies supported/unsupported provider handling with security events enabled.
func TestBlocker3_CreateProviderRejectsNonDiscordWithSecurityEvents(t *testing.T) {
gin.SetMode(gin.TestMode)
@@ -31,15 +31,16 @@ func TestBlocker3_CreateProviderRejectsNonDiscordWithSecurityEvents(t *testing.T
service := services.NewNotificationService(db)
handler := NewNotificationProviderHandler(service)
// Test cases: non-Discord provider types with security events enabled
// Test cases: provider types with security events enabled
testCases := []struct {
name string
providerType string
wantStatus int
}{
{"webhook", "webhook"},
{"slack", "slack"},
{"gotify", "gotify"},
{"email", "email"},
{"webhook", "webhook", http.StatusCreated},
{"gotify", "gotify", http.StatusCreated},
{"slack", "slack", http.StatusBadRequest},
{"email", "email", http.StatusBadRequest},
}
for _, tc := range testCases {
@@ -69,14 +70,15 @@ func TestBlocker3_CreateProviderRejectsNonDiscordWithSecurityEvents(t *testing.T
// Call Create
handler.Create(c)
// Blocker 3: Should reject with 400
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject non-Discord provider with security events")
assert.Equal(t, tc.wantStatus, w.Code)
// Verify error message
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Contains(t, response["error"], "discord", "Error should mention Discord")
if tc.wantStatus == http.StatusBadRequest {
assert.Contains(t, response["code"], "UNSUPPORTED_PROVIDER_TYPE")
}
})
}
}
@@ -129,8 +131,7 @@ func TestBlocker3_CreateProviderAcceptsDiscordWithSecurityEvents(t *testing.T) {
assert.Equal(t, http.StatusCreated, w.Code, "Should accept Discord provider with security events")
}
// TestBlocker3_CreateProviderAcceptsNonDiscordWithoutSecurityEvents tests that create NOW REJECTS non-Discord providers even without security events.
// NOTE: This test was updated for Discord-only rollout (current_spec.md) - now globally rejects all non-Discord.
// TestBlocker3_CreateProviderAcceptsNonDiscordWithoutSecurityEvents verifies webhook create without security events remains accepted.
func TestBlocker3_CreateProviderAcceptsNonDiscordWithoutSecurityEvents(t *testing.T) {
gin.SetMode(gin.TestMode)
@@ -172,17 +173,10 @@ func TestBlocker3_CreateProviderAcceptsNonDiscordWithoutSecurityEvents(t *testin
// Call Create
handler.Create(c)
// Discord-only rollout: Now REJECTS with 400
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject non-Discord provider (Discord-only rollout)")
// Verify error message
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Contains(t, response["error"], "discord", "Error should mention Discord")
assert.Equal(t, http.StatusCreated, w.Code)
}
// TestBlocker3_UpdateProviderRejectsNonDiscordWithSecurityEvents tests that update rejects non-Discord providers with security events.
// TestBlocker3_UpdateProviderRejectsNonDiscordWithSecurityEvents verifies webhook update with security events is allowed in PR-1 scope.
func TestBlocker3_UpdateProviderRejectsNonDiscordWithSecurityEvents(t *testing.T) {
gin.SetMode(gin.TestMode)
@@ -235,14 +229,7 @@ func TestBlocker3_UpdateProviderRejectsNonDiscordWithSecurityEvents(t *testing.T
// Call Update
handler.Update(c)
// Blocker 3: Should reject with 400
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject non-Discord provider update with security events")
// Verify error message
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Contains(t, response["error"], "discord", "Error should mention Discord")
assert.Equal(t, http.StatusOK, w.Code)
}
// TestBlocker3_UpdateProviderAcceptsDiscordWithSecurityEvents tests that update accepts Discord providers with security events.
@@ -302,7 +289,7 @@ func TestBlocker3_UpdateProviderAcceptsDiscordWithSecurityEvents(t *testing.T) {
assert.Equal(t, http.StatusOK, w.Code, "Should accept Discord provider update with security events")
}
// TestBlocker3_MultipleSecurityEventsEnforcesDiscordOnly tests that having any security event enabled enforces Discord-only.
// TestBlocker3_MultipleSecurityEventsEnforcesDiscordOnly tests webhook remains accepted with security flags in PR-1 scope.
func TestBlocker3_MultipleSecurityEventsEnforcesDiscordOnly(t *testing.T) {
gin.SetMode(gin.TestMode)
@@ -353,9 +340,8 @@ func TestBlocker3_MultipleSecurityEventsEnforcesDiscordOnly(t *testing.T) {
// Call Create
handler.Create(c)
// Blocker 3: Should reject with 400
assert.Equal(t, http.StatusBadRequest, w.Code,
"Should reject webhook provider with %s enabled", field)
assert.Equal(t, http.StatusCreated, w.Code,
"Should accept webhook provider with %s enabled", field)
})
}
}
@@ -407,5 +393,5 @@ func TestBlocker3_UpdateProvider_DatabaseError(t *testing.T) {
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Equal(t, "provider not found", response["error"])
assert.Equal(t, "Provider not found", response["error"])
}
@@ -16,7 +16,7 @@ import (
"gorm.io/gorm"
)
// TestDiscordOnly_CreateRejectsNonDiscord tests that create globally rejects non-Discord providers.
// TestDiscordOnly_CreateRejectsNonDiscord verifies unsupported provider types are rejected while supported types are accepted.
func TestDiscordOnly_CreateRejectsNonDiscord(t *testing.T) {
gin.SetMode(gin.TestMode)
@@ -30,13 +30,15 @@ func TestDiscordOnly_CreateRejectsNonDiscord(t *testing.T) {
testCases := []struct {
name string
providerType string
wantStatus int
wantCode string
}{
{"webhook", "webhook"},
{"slack", "slack"},
{"gotify", "gotify"},
{"telegram", "telegram"},
{"generic", "generic"},
{"email", "email"},
{"webhook", "webhook", http.StatusCreated, ""},
{"gotify", "gotify", http.StatusCreated, ""},
{"slack", "slack", http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE"},
{"telegram", "telegram", http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE"},
{"generic", "generic", http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE"},
{"email", "email", http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE"},
}
for _, tc := range testCases {
@@ -61,13 +63,14 @@ func TestDiscordOnly_CreateRejectsNonDiscord(t *testing.T) {
handler.Create(c)
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject non-Discord provider")
assert.Equal(t, tc.wantStatus, w.Code)
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "PROVIDER_TYPE_DISCORD_ONLY", response["code"])
assert.Contains(t, response["error"], "discord")
if tc.wantCode != "" {
assert.Equal(t, tc.wantCode, response["code"])
}
})
}
}
@@ -156,8 +159,8 @@ func TestDiscordOnly_UpdateRejectsTypeMutation(t *testing.T) {
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "DEPRECATED_PROVIDER_TYPE_IMMUTABLE", response["code"])
assert.Contains(t, response["error"], "cannot change provider type")
assert.Equal(t, "PROVIDER_TYPE_IMMUTABLE", response["code"])
assert.Contains(t, response["error"], "cannot be changed")
}
// TestDiscordOnly_UpdateRejectsEnable tests that update blocks enabling deprecated providers.
@@ -205,13 +208,7 @@ func TestDiscordOnly_UpdateRejectsEnable(t *testing.T) {
handler.Update(c)
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject enabling deprecated provider")
var response map[string]interface{}
err = json.Unmarshal(w.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "DEPRECATED_PROVIDER_CANNOT_ENABLE", response["code"])
assert.Contains(t, response["error"], "cannot enable deprecated")
assert.Equal(t, http.StatusOK, w.Code)
}
// TestDiscordOnly_UpdateAllowsDisabledDeprecated tests that update allows updating disabled deprecated providers (except type/enable).
@@ -259,8 +256,7 @@ func TestDiscordOnly_UpdateAllowsDisabledDeprecated(t *testing.T) {
handler.Update(c)
// Should still reject because type must be discord
assert.Equal(t, http.StatusBadRequest, w.Code, "Should reject non-Discord type even for read-only fields")
assert.Equal(t, http.StatusOK, w.Code)
}
// TestDiscordOnly_UpdateAcceptsDiscord tests that update accepts Discord provider updates.
@@ -360,21 +356,21 @@ func TestDiscordOnly_ErrorCodes(t *testing.T) {
expectedCode string
}{
{
name: "create_non_discord",
name: "create_unsupported",
setupFunc: func(db *gorm.DB) string {
return ""
},
requestFunc: func(id string) (*http.Request, gin.Params) {
payload := map[string]interface{}{
"name": "Test",
"type": "webhook",
"type": "slack",
"url": "https://example.com",
}
body, _ := json.Marshal(payload)
req, _ := http.NewRequest("POST", "/api/v1/notifications/providers", bytes.NewBuffer(body))
return req, nil
},
expectedCode: "PROVIDER_TYPE_DISCORD_ONLY",
expectedCode: "UNSUPPORTED_PROVIDER_TYPE",
},
{
name: "update_type_mutation",
@@ -399,34 +395,7 @@ func TestDiscordOnly_ErrorCodes(t *testing.T) {
req, _ := http.NewRequest("PUT", "/api/v1/notifications/providers/"+id, bytes.NewBuffer(body))
return req, []gin.Param{{Key: "id", Value: id}}
},
expectedCode: "DEPRECATED_PROVIDER_TYPE_IMMUTABLE",
},
{
name: "update_enable_deprecated",
setupFunc: func(db *gorm.DB) string {
provider := models.NotificationProvider{
ID: "test-id",
Name: "Test",
Type: "webhook",
URL: "https://example.com",
Enabled: false,
MigrationState: "deprecated",
}
db.Create(&provider)
return "test-id"
},
requestFunc: func(id string) (*http.Request, gin.Params) {
payload := map[string]interface{}{
"name": "Test",
"type": "webhook",
"url": "https://example.com",
"enabled": true,
}
body, _ := json.Marshal(payload)
req, _ := http.NewRequest("PUT", "/api/v1/notifications/providers/"+id, bytes.NewBuffer(body))
return req, []gin.Param{{Key: "id", Value: id}}
},
expectedCode: "DEPRECATED_PROVIDER_CANNOT_ENABLE",
expectedCode: "PROVIDER_TYPE_IMMUTABLE",
},
}
@@ -4,11 +4,13 @@ import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"strings"
"time"
"github.com/Wikid82/charon/backend/internal/models"
"github.com/Wikid82/charon/backend/internal/services"
"github.com/Wikid82/charon/backend/internal/trace"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
)
@@ -25,6 +27,7 @@ type notificationProviderUpsertRequest struct {
URL string `json:"url"`
Config string `json:"config"`
Template string `json:"template"`
Token string `json:"token,omitempty"`
Enabled bool `json:"enabled"`
NotifyProxyHosts bool `json:"notify_proxy_hosts"`
NotifyRemoteServers bool `json:"notify_remote_servers"`
@@ -37,6 +40,16 @@ type notificationProviderUpsertRequest struct {
NotifySecurityCrowdSecDecisions bool `json:"notify_security_crowdsec_decisions"`
}
// notificationProviderTestRequest is the JSON payload accepted by the
// provider test endpoint. Token is declared here so binding succeeds, but
// the handler treats it as write-only and rejects any client-supplied value
// on test calls (TOKEN_WRITE_ONLY) — see the coverage tests in this change.
type notificationProviderTestRequest struct {
	ID       string `json:"id"`
	Name     string `json:"name"`
	Type     string `json:"type"`
	URL      string `json:"url"`
	Config   string `json:"config"`
	Template string `json:"template"`
	Token    string `json:"token,omitempty"`
}
func (r notificationProviderUpsertRequest) toModel() models.NotificationProvider {
return models.NotificationProvider{
Name: r.Name,
@@ -44,6 +57,7 @@ func (r notificationProviderUpsertRequest) toModel() models.NotificationProvider
URL: r.URL,
Config: r.Config,
Template: r.Template,
Token: strings.TrimSpace(r.Token),
Enabled: r.Enabled,
NotifyProxyHosts: r.NotifyProxyHosts,
NotifyRemoteServers: r.NotifyRemoteServers,
@@ -57,6 +71,70 @@ func (r notificationProviderUpsertRequest) toModel() models.NotificationProvider
}
}
// providerRequestID returns the request ID previously stashed on the gin
// context under trace.RequestIDKey, or "" when the value is absent or is
// not a string.
func providerRequestID(c *gin.Context) string {
	raw, exists := c.Get(string(trace.RequestIDKey))
	if !exists {
		return ""
	}
	id, isString := raw.(string)
	if !isString {
		return ""
	}
	return id
}
// respondSanitizedProviderError writes a uniform JSON error envelope
// (error/code/category, plus request_id when one is on the context) so
// provider endpoints never leak raw internal error text to clients.
func respondSanitizedProviderError(c *gin.Context, status int, code, category, message string) {
	body := gin.H{
		"code":     code,
		"category": category,
		"error":    message,
	}
	if id := providerRequestID(c); id != "" {
		body["request_id"] = id
	}
	c.JSON(status, body)
}
var providerStatusCodePattern = regexp.MustCompile(`provider returned status\s+(\d{3})`)
func classifyProviderTestFailure(err error) (code string, category string, message string) {
if err == nil {
return "PROVIDER_TEST_FAILED", "dispatch", "Provider test failed"
}
errText := strings.ToLower(strings.TrimSpace(err.Error()))
if strings.Contains(errText, "destination url validation failed") ||
strings.Contains(errText, "invalid webhook url") ||
strings.Contains(errText, "invalid discord webhook url") {
return "PROVIDER_TEST_URL_INVALID", "validation", "Provider URL is invalid or blocked. Verify the URL and try again"
}
if statusMatch := providerStatusCodePattern.FindStringSubmatch(errText); len(statusMatch) == 2 {
switch statusMatch[1] {
case "401", "403":
return "PROVIDER_TEST_AUTH_REJECTED", "dispatch", "Provider rejected authentication. Verify your Gotify token"
case "404":
return "PROVIDER_TEST_ENDPOINT_NOT_FOUND", "dispatch", "Provider endpoint was not found. Verify the provider URL path"
default:
return "PROVIDER_TEST_REMOTE_REJECTED", "dispatch", fmt.Sprintf("Provider rejected the test request (HTTP %s)", statusMatch[1])
}
}
if strings.Contains(errText, "outbound request failed") || strings.Contains(errText, "failed to send webhook") {
switch {
case strings.Contains(errText, "dns lookup failed"):
return "PROVIDER_TEST_DNS_FAILED", "dispatch", "DNS lookup failed for provider host. Verify the hostname in the provider URL"
case strings.Contains(errText, "connection refused"):
return "PROVIDER_TEST_CONNECTION_REFUSED", "dispatch", "Provider host refused the connection. Verify port and service availability"
case strings.Contains(errText, "request timed out"):
return "PROVIDER_TEST_TIMEOUT", "dispatch", "Provider request timed out. Verify network route and provider responsiveness"
case strings.Contains(errText, "tls handshake failed"):
return "PROVIDER_TEST_TLS_FAILED", "dispatch", "TLS handshake failed. Verify HTTPS certificate and URL scheme"
}
return "PROVIDER_TEST_UNREACHABLE", "dispatch", "Could not reach provider endpoint. Verify URL, DNS, and network connectivity"
}
return "PROVIDER_TEST_FAILED", "dispatch", "Provider test failed"
}
// NewNotificationProviderHandler builds a provider handler wired to the
// given notification service, with no security service and an empty data
// root (delegates to NewNotificationProviderHandlerWithDeps).
func NewNotificationProviderHandler(service *services.NotificationService) *NotificationProviderHandler {
	return NewNotificationProviderHandlerWithDeps(service, nil, "")
}
@@ -71,6 +149,10 @@ func (h *NotificationProviderHandler) List(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list providers"})
return
}
for i := range providers {
providers[i].HasToken = providers[i].Token != ""
providers[i].Token = ""
}
c.JSON(http.StatusOK, providers)
}
@@ -81,16 +163,13 @@ func (h *NotificationProviderHandler) Create(c *gin.Context) {
var req notificationProviderUpsertRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
respondSanitizedProviderError(c, http.StatusBadRequest, "INVALID_REQUEST", "validation", "Invalid notification provider payload")
return
}
// Discord-only enforcement for this rollout
if req.Type != "discord" {
c.JSON(http.StatusBadRequest, gin.H{
"error": "only discord provider type is supported in this release; additional providers will be enabled in future releases after validation",
"code": "PROVIDER_TYPE_DISCORD_ONLY",
})
providerType := strings.ToLower(strings.TrimSpace(req.Type))
if providerType != "discord" && providerType != "gotify" && providerType != "webhook" {
respondSanitizedProviderError(c, http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE", "validation", "Unsupported notification provider type")
return
}
@@ -106,15 +185,17 @@ func (h *NotificationProviderHandler) Create(c *gin.Context) {
if err := h.service.CreateProvider(&provider); err != nil {
// If it's a validation error from template parsing, return 400
if isProviderValidationError(err) {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
respondSanitizedProviderError(c, http.StatusBadRequest, "PROVIDER_VALIDATION_FAILED", "validation", "Notification provider validation failed")
return
}
if respondPermissionError(c, h.securityService, "notification_provider_save_failed", err, h.dataRoot) {
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create provider"})
respondSanitizedProviderError(c, http.StatusInternalServerError, "PROVIDER_CREATE_FAILED", "internal", "Failed to create provider")
return
}
provider.HasToken = provider.Token != ""
provider.Token = ""
c.JSON(http.StatusCreated, provider)
}
@@ -126,7 +207,7 @@ func (h *NotificationProviderHandler) Update(c *gin.Context) {
id := c.Param("id")
var req notificationProviderUpsertRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
respondSanitizedProviderError(c, http.StatusBadRequest, "INVALID_REQUEST", "validation", "Invalid notification provider payload")
return
}
@@ -134,39 +215,29 @@ func (h *NotificationProviderHandler) Update(c *gin.Context) {
var existing models.NotificationProvider
if err := h.service.DB.Where("id = ?", id).First(&existing).Error; err != nil {
if err == gorm.ErrRecordNotFound {
c.JSON(http.StatusNotFound, gin.H{"error": "provider not found"})
respondSanitizedProviderError(c, http.StatusNotFound, "PROVIDER_NOT_FOUND", "validation", "Provider not found")
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to fetch provider"})
respondSanitizedProviderError(c, http.StatusInternalServerError, "PROVIDER_READ_FAILED", "internal", "Failed to read provider")
return
}
// Block type mutation for existing non-Discord providers
if existing.Type != "discord" && req.Type != existing.Type {
c.JSON(http.StatusBadRequest, gin.H{
"error": "cannot change provider type for deprecated non-discord providers; delete and recreate as discord provider instead",
"code": "DEPRECATED_PROVIDER_TYPE_IMMUTABLE",
})
if strings.TrimSpace(req.Type) != "" && strings.TrimSpace(req.Type) != existing.Type {
respondSanitizedProviderError(c, http.StatusBadRequest, "PROVIDER_TYPE_IMMUTABLE", "validation", "Provider type cannot be changed")
return
}
// Block enable mutation for existing non-Discord providers
if existing.Type != "discord" && req.Enabled && !existing.Enabled {
c.JSON(http.StatusBadRequest, gin.H{
"error": "cannot enable deprecated non-discord providers; only discord providers can be enabled",
"code": "DEPRECATED_PROVIDER_CANNOT_ENABLE",
})
providerType := strings.ToLower(strings.TrimSpace(existing.Type))
if providerType != "discord" && providerType != "gotify" && providerType != "webhook" {
respondSanitizedProviderError(c, http.StatusBadRequest, "UNSUPPORTED_PROVIDER_TYPE", "validation", "Unsupported notification provider type")
return
}
// Discord-only enforcement for this rollout (new providers or type changes)
if req.Type != "discord" {
c.JSON(http.StatusBadRequest, gin.H{
"error": "only discord provider type is supported in this release; additional providers will be enabled in future releases after validation",
"code": "PROVIDER_TYPE_DISCORD_ONLY",
})
return
if providerType == "gotify" && strings.TrimSpace(req.Token) == "" {
// Keep existing token if update payload omits token
req.Token = existing.Token
}
req.Type = existing.Type
provider := req.toModel()
provider.ID = id
@@ -179,15 +250,17 @@ func (h *NotificationProviderHandler) Update(c *gin.Context) {
if err := h.service.UpdateProvider(&provider); err != nil {
if isProviderValidationError(err) {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
respondSanitizedProviderError(c, http.StatusBadRequest, "PROVIDER_VALIDATION_FAILED", "validation", "Notification provider validation failed")
return
}
if respondPermissionError(c, h.securityService, "notification_provider_save_failed", err, h.dataRoot) {
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update provider"})
respondSanitizedProviderError(c, http.StatusInternalServerError, "PROVIDER_UPDATE_FAILED", "internal", "Failed to update provider")
return
}
provider.HasToken = provider.Token != ""
provider.Token = ""
c.JSON(http.StatusOK, provider)
}
@@ -221,16 +294,44 @@ func (h *NotificationProviderHandler) Delete(c *gin.Context) {
}
func (h *NotificationProviderHandler) Test(c *gin.Context) {
var req notificationProviderTestRequest
if err := c.ShouldBindJSON(&req); err != nil {
respondSanitizedProviderError(c, http.StatusBadRequest, "INVALID_REQUEST", "validation", "Invalid test payload")
return
}
providerType := strings.ToLower(strings.TrimSpace(req.Type))
if providerType == "gotify" && strings.TrimSpace(req.Token) != "" {
respondSanitizedProviderError(c, http.StatusBadRequest, "TOKEN_WRITE_ONLY", "validation", "Gotify token is accepted only on provider create/update")
return
}
providerID := strings.TrimSpace(req.ID)
if providerID == "" {
respondSanitizedProviderError(c, http.StatusBadRequest, "MISSING_PROVIDER_ID", "validation", "Trusted provider ID is required for test dispatch")
return
}
var provider models.NotificationProvider
if err := c.ShouldBindJSON(&provider); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
if err := h.service.DB.Where("id = ?", providerID).First(&provider).Error; err != nil {
if err == gorm.ErrRecordNotFound {
respondSanitizedProviderError(c, http.StatusNotFound, "PROVIDER_NOT_FOUND", "validation", "Provider not found")
return
}
respondSanitizedProviderError(c, http.StatusInternalServerError, "PROVIDER_READ_FAILED", "internal", "Failed to read provider")
return
}
if strings.TrimSpace(provider.URL) == "" {
respondSanitizedProviderError(c, http.StatusBadRequest, "PROVIDER_CONFIG_MISSING", "validation", "Trusted provider configuration is incomplete")
return
}
if err := h.service.TestProvider(provider); err != nil {
// Create internal notification for the failure
_, _ = h.service.Create(models.NotificationTypeError, "Test Failed", fmt.Sprintf("Provider %s test failed: %v", provider.Name, err))
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
_, _ = h.service.Create(models.NotificationTypeError, "Test Failed", fmt.Sprintf("Provider %s test failed", provider.Name))
code, category, message := classifyProviderTestFailure(err)
respondSanitizedProviderError(c, http.StatusBadRequest, code, category, message)
return
}
c.JSON(http.StatusOK, gin.H{"message": "Test notification sent"})
@@ -249,9 +350,15 @@ func (h *NotificationProviderHandler) Templates(c *gin.Context) {
func (h *NotificationProviderHandler) Preview(c *gin.Context) {
var raw map[string]any
if err := c.ShouldBindJSON(&raw); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
respondSanitizedProviderError(c, http.StatusBadRequest, "INVALID_REQUEST", "validation", "Invalid preview payload")
return
}
if tokenValue, ok := raw["token"]; ok {
if tokenText, isString := tokenValue.(string); isString && strings.TrimSpace(tokenText) != "" {
respondSanitizedProviderError(c, http.StatusBadRequest, "TOKEN_WRITE_ONLY", "validation", "Gotify token is accepted only on provider create/update")
return
}
}
var provider models.NotificationProvider
// Marshal raw into provider to get proper types
@@ -279,7 +386,8 @@ func (h *NotificationProviderHandler) Preview(c *gin.Context) {
rendered, parsed, err := h.service.RenderTemplate(provider, payload)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error(), "rendered": rendered})
_ = rendered
respondSanitizedProviderError(c, http.StatusBadRequest, "TEMPLATE_PREVIEW_FAILED", "validation", "Template preview failed")
return
}
c.JSON(http.StatusOK, gin.H{"rendered": rendered, "parsed": parsed})
@@ -120,25 +120,60 @@ func TestNotificationProviderHandler_Templates(t *testing.T) {
}
func TestNotificationProviderHandler_Test(t *testing.T) {
r, _ := setupNotificationProviderTest(t)
r, db := setupNotificationProviderTest(t)
// Test with invalid provider (should fail validation or service check)
// Since we don't have notification dispatch mocked easily here,
// we expect it might fail or pass depending on service implementation.
// Looking at service code, TestProvider should validate and dispatch.
// If URL is invalid, it should error.
provider := models.NotificationProvider{
Type: "discord",
URL: "invalid-url",
stored := models.NotificationProvider{
ID: "trusted-provider-id",
Name: "Stored Provider",
Type: "discord",
URL: "invalid-url",
Enabled: true,
}
body, _ := json.Marshal(provider)
require.NoError(t, db.Create(&stored).Error)
payload := map[string]any{
"id": stored.ID,
"type": "discord",
"url": "https://discord.com/api/webhooks/123/override",
}
body, _ := json.Marshal(payload)
req, _ := http.NewRequest("POST", "/api/v1/notifications/providers/test", bytes.NewBuffer(body))
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
// It should probably fail with 400
assert.Equal(t, http.StatusBadRequest, w.Code)
assert.Contains(t, w.Body.String(), "PROVIDER_TEST_URL_INVALID")
}
// A test-dispatch payload without an "id" must be rejected with
// MISSING_PROVIDER_ID before any provider lookup or dispatch happens.
func TestNotificationProviderHandler_Test_RequiresTrustedProviderID(t *testing.T) {
	router, _ := setupNotificationProviderTest(t)
	encoded, _ := json.Marshal(map[string]any{
		"type": "discord",
		"url":  "https://discord.com/api/webhooks/123/abc",
	})
	request, _ := http.NewRequest("POST", "/api/v1/notifications/providers/test", bytes.NewBuffer(encoded))
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusBadRequest, recorder.Code)
	assert.Contains(t, recorder.Body.String(), "MISSING_PROVIDER_ID")
}
// An ID that matches no stored provider must yield 404 / PROVIDER_NOT_FOUND.
func TestNotificationProviderHandler_Test_ReturnsNotFoundForUnknownProvider(t *testing.T) {
	router, _ := setupNotificationProviderTest(t)
	encoded, _ := json.Marshal(map[string]any{"id": "missing-provider-id"})
	request, _ := http.NewRequest("POST", "/api/v1/notifications/providers/test", bytes.NewBuffer(encoded))
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusNotFound, recorder.Code)
	assert.Contains(t, recorder.Body.String(), "PROVIDER_NOT_FOUND")
}
func TestNotificationProviderHandler_Errors(t *testing.T) {
@@ -248,8 +283,8 @@ func TestNotificationProviderHandler_CreateRejectsDiscordIPHost(t *testing.T) {
r.ServeHTTP(w, req)
assert.Equal(t, http.StatusBadRequest, w.Code)
assert.Contains(t, w.Body.String(), "invalid Discord webhook URL")
assert.Contains(t, w.Body.String(), "IP address hosts are not allowed")
assert.Contains(t, w.Body.String(), "PROVIDER_VALIDATION_FAILED")
assert.Contains(t, w.Body.String(), "validation")
}
func TestNotificationProviderHandler_CreateAcceptsDiscordHostname(t *testing.T) {
@@ -378,3 +413,100 @@ func TestNotificationProviderHandler_UpdatePreservesServerManagedMigrationFields
require.NotNil(t, dbProvider.LastMigratedAt)
assert.Equal(t, now, dbProvider.LastMigratedAt.UTC().Round(time.Second))
}
// A provider stored with a token must be listed with has_token == true.
func TestNotificationProviderHandler_List_ReturnsHasTokenTrue(t *testing.T) {
	router, db := setupNotificationProviderTest(t)
	seeded := models.NotificationProvider{
		ID:    "tok-true",
		Name:  "Gotify With Token",
		Type:  "gotify",
		URL:   "https://gotify.example.com",
		Token: "secret-app-token",
	}
	require.NoError(t, db.Create(&seeded).Error)
	request, _ := http.NewRequest("GET", "/api/v1/notifications/providers", http.NoBody)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusOK, recorder.Code)
	var listed []map[string]interface{}
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &listed))
	require.Len(t, listed, 1)
	assert.Equal(t, true, listed[0]["has_token"])
}
// A provider stored without a token must be listed with has_token == false.
func TestNotificationProviderHandler_List_ReturnsHasTokenFalse(t *testing.T) {
	router, db := setupNotificationProviderTest(t)
	seeded := models.NotificationProvider{
		ID:   "tok-false",
		Name: "Discord No Token",
		Type: "discord",
		URL:  "https://discord.com/api/webhooks/123/abc",
	}
	require.NoError(t, db.Create(&seeded).Error)
	request, _ := http.NewRequest("GET", "/api/v1/notifications/providers", http.NoBody)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusOK, recorder.Code)
	var listed []map[string]interface{}
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &listed))
	require.Len(t, listed, 1)
	assert.Equal(t, false, listed[0]["has_token"])
}
// The raw token value must never leak through the list endpoint — neither as
// a "token" field nor anywhere else in the serialized response body.
func TestNotificationProviderHandler_List_NeverExposesRawToken(t *testing.T) {
	router, db := setupNotificationProviderTest(t)
	seeded := models.NotificationProvider{
		ID:    "tok-hidden",
		Name:  "Secret Gotify",
		Type:  "gotify",
		URL:   "https://gotify.example.com",
		Token: "super-secret-value",
	}
	require.NoError(t, db.Create(&seeded).Error)
	request, _ := http.NewRequest("GET", "/api/v1/notifications/providers", http.NoBody)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusOK, recorder.Code)
	assert.NotContains(t, recorder.Body.String(), "super-secret-value")
	var listed []map[string]interface{}
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &listed))
	require.Len(t, listed, 1)
	_, exposed := listed[0]["token"]
	assert.False(t, exposed, "raw token field must not appear in JSON response")
}
// Creating a gotify provider must return 201 with has_token == true while
// never echoing the submitted token back in the response body.
func TestNotificationProviderHandler_Create_ResponseHasHasToken(t *testing.T) {
	router, _ := setupNotificationProviderTest(t)
	encoded, _ := json.Marshal(map[string]interface{}{
		"name":     "New Gotify",
		"type":     "gotify",
		"url":      "https://gotify.example.com",
		"token":    "app-token-123",
		"template": "minimal",
	})
	request, _ := http.NewRequest("POST", "/api/v1/notifications/providers", bytes.NewBuffer(encoded))
	request.Header.Set("Content-Type", "application/json")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	assert.Equal(t, http.StatusCreated, recorder.Code)
	var created map[string]interface{}
	require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &created))
	assert.Equal(t, true, created["has_token"])
	assert.NotContains(t, recorder.Body.String(), "app-token-123")
}
@@ -65,7 +65,7 @@ func TestUpdate_BlockTypeMutationForNonDiscord(t *testing.T) {
err = json.Unmarshal(w.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "DEPRECATED_PROVIDER_TYPE_IMMUTABLE", response["code"])
assert.Equal(t, "PROVIDER_TYPE_IMMUTABLE", response["code"])
}
// TestUpdate_AllowTypeMutationForDiscord verifies Discord can be updated
@@ -24,6 +24,17 @@ func requireAdmin(c *gin.Context) bool {
return false
}
// requireAuthenticatedAdmin denies requests that carry no authenticated user
// (no "userID" in the context) with a 401, and otherwise delegates the
// admin-role decision to requireAdmin. Returns true only when the caller may
// proceed; a false return means a response has already been written.
func requireAuthenticatedAdmin(c *gin.Context) bool {
	_, authenticated := c.Get("userID")
	if authenticated {
		return requireAdmin(c)
	}
	c.JSON(http.StatusUnauthorized, gin.H{
		"error": "Authorization header required",
	})
	return false
}
func isAdmin(c *gin.Context) bool {
role, _ := c.Get("role")
roleStr, _ := role.(string)
@@ -168,3 +168,34 @@ func TestLogPermissionAudit_ActorFallback(t *testing.T) {
assert.Equal(t, "permissions", audit.EventCategory)
assert.Contains(t, audit.Details, fmt.Sprintf("\"admin\":%v", false))
}
// Without a userID in the context the guard must deny and respond 401.
func TestRequireAuthenticatedAdmin_NoUserID(t *testing.T) {
	t.Parallel()
	ctx, recorder := newTestContextWithRequest()
	assert.False(t, requireAuthenticatedAdmin(ctx))
	assert.Equal(t, http.StatusUnauthorized, recorder.Code)
	assert.Contains(t, recorder.Body.String(), "Authorization header required")
}
// An authenticated caller with the admin role must pass the guard.
func TestRequireAuthenticatedAdmin_UserIDPresentAndAdmin(t *testing.T) {
	t.Parallel()
	ctx, _ := newTestContextWithRequest()
	ctx.Set("userID", uint(1))
	ctx.Set("role", "admin")
	assert.True(t, requireAuthenticatedAdmin(ctx))
}
// An authenticated caller without the admin role must be denied with 403.
func TestRequireAuthenticatedAdmin_UserIDPresentButNotAdmin(t *testing.T) {
	t.Parallel()
	ctx, recorder := newTestContextWithRequest()
	ctx.Set("userID", uint(1))
	ctx.Set("role", "user")
	assert.False(t, requireAuthenticatedAdmin(ctx))
	assert.Equal(t, http.StatusForbidden, recorder.Code)
}
@@ -59,6 +59,10 @@ func TestSecurityHandler_ReloadGeoIP_NotInitialized(t *testing.T) {
h := NewSecurityHandler(config.SecurityConfig{}, nil, nil)
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
r.POST("/security/geoip/reload", h.ReloadGeoIP)
w := httptest.NewRecorder()
@@ -75,6 +79,10 @@ func TestSecurityHandler_ReloadGeoIP_LoadError(t *testing.T) {
h.SetGeoIPService(&services.GeoIPService{}) // dbPath empty => Load() will error
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
r.POST("/security/geoip/reload", h.ReloadGeoIP)
w := httptest.NewRecorder()
@@ -90,6 +98,10 @@ func TestSecurityHandler_LookupGeoIP_MissingIPAddress(t *testing.T) {
h := NewSecurityHandler(config.SecurityConfig{}, nil, nil)
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
r.POST("/security/geoip/lookup", h.LookupGeoIP)
payload := []byte(`{}`)
@@ -109,6 +121,10 @@ func TestSecurityHandler_LookupGeoIP_ServiceUnavailable(t *testing.T) {
h.SetGeoIPService(&services.GeoIPService{}) // present but not loaded
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
r.POST("/security/geoip/lookup", h.LookupGeoIP)
payload, _ := json.Marshal(map[string]string{"ip_address": "8.8.8.8"})
@@ -261,6 +261,10 @@ func (h *SecurityHandler) GetConfig(c *gin.Context) {
// UpdateConfig creates or updates the SecurityConfig in DB
func (h *SecurityHandler) UpdateConfig(c *gin.Context) {
if !requireAdmin(c) {
return
}
var payload models.SecurityConfig
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid payload"})
@@ -290,6 +294,10 @@ func (h *SecurityHandler) UpdateConfig(c *gin.Context) {
// GenerateBreakGlass generates a break-glass token and returns the plaintext token once
func (h *SecurityHandler) GenerateBreakGlass(c *gin.Context) {
if !requireAdmin(c) {
return
}
token, err := h.svc.GenerateBreakGlassToken("default")
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate break-glass token"})
@@ -316,6 +324,10 @@ func (h *SecurityHandler) ListDecisions(c *gin.Context) {
// CreateDecision creates a manual decision (override) - for now no checks besides payload
func (h *SecurityHandler) CreateDecision(c *gin.Context) {
if !requireAdmin(c) {
return
}
var payload models.SecurityDecision
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid payload"})
@@ -371,6 +383,10 @@ func (h *SecurityHandler) ListRuleSets(c *gin.Context) {
// UpsertRuleSet uploads or updates a ruleset
func (h *SecurityHandler) UpsertRuleSet(c *gin.Context) {
if !requireAdmin(c) {
return
}
var payload models.SecurityRuleSet
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid payload"})
@@ -401,6 +417,10 @@ func (h *SecurityHandler) UpsertRuleSet(c *gin.Context) {
// DeleteRuleSet removes a ruleset by id
func (h *SecurityHandler) DeleteRuleSet(c *gin.Context) {
if !requireAdmin(c) {
return
}
idParam := c.Param("id")
if idParam == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "id is required"})
@@ -610,6 +630,10 @@ func (h *SecurityHandler) GetGeoIPStatus(c *gin.Context) {
// ReloadGeoIP reloads the GeoIP database from disk.
func (h *SecurityHandler) ReloadGeoIP(c *gin.Context) {
if !requireAdmin(c) {
return
}
if h.geoipSvc == nil {
c.JSON(http.StatusServiceUnavailable, gin.H{
"error": "GeoIP service not initialized",
@@ -641,6 +665,10 @@ func (h *SecurityHandler) ReloadGeoIP(c *gin.Context) {
// LookupGeoIP performs a GeoIP lookup for a given IP address.
func (h *SecurityHandler) LookupGeoIP(c *gin.Context) {
if !requireAdmin(c) {
return
}
var req struct {
IPAddress string `json:"ip_address" binding:"required"`
}
@@ -707,6 +735,10 @@ func (h *SecurityHandler) GetWAFExclusions(c *gin.Context) {
// AddWAFExclusion adds a rule exclusion to the WAF configuration
func (h *SecurityHandler) AddWAFExclusion(c *gin.Context) {
if !requireAdmin(c) {
return
}
var req WAFExclusionRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "rule_id is required"})
@@ -786,6 +818,10 @@ func (h *SecurityHandler) AddWAFExclusion(c *gin.Context) {
// DeleteWAFExclusion removes a rule exclusion by rule_id
func (h *SecurityHandler) DeleteWAFExclusion(c *gin.Context) {
if !requireAdmin(c) {
return
}
ruleIDParam := c.Param("rule_id")
if ruleIDParam == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "rule_id is required"})
@@ -100,6 +100,10 @@ func TestSecurityHandler_CreateDecision_SQLInjection(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/api/v1/security/decisions", h.CreateDecision)
// Attempt SQL injection via payload fields
@@ -143,6 +147,10 @@ func TestSecurityHandler_UpsertRuleSet_MassivePayload(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/api/v1/security/rulesets", h.UpsertRuleSet)
// Try to submit a 3MB payload (should be rejected by service)
@@ -175,6 +183,10 @@ func TestSecurityHandler_UpsertRuleSet_EmptyName(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/api/v1/security/rulesets", h.UpsertRuleSet)
payload := map[string]any{
@@ -203,6 +215,10 @@ func TestSecurityHandler_CreateDecision_EmptyFields(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/api/v1/security/decisions", h.CreateDecision)
testCases := []struct {
@@ -347,6 +363,10 @@ func TestSecurityAudit_DeleteRuleSet_InvalidID(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/api/v1/security/rulesets/:id", h.DeleteRuleSet)
testCases := []struct {
@@ -388,6 +408,10 @@ func TestSecurityHandler_UpsertRuleSet_XSSInContent(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/api/v1/security/rulesets", h.UpsertRuleSet)
router.GET("/api/v1/security/rulesets", h.ListRuleSets)
@@ -433,6 +457,10 @@ func TestSecurityHandler_UpdateConfig_RateLimitBounds(t *testing.T) {
h := NewSecurityHandler(cfg, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.PUT("/api/v1/security/config", h.UpdateConfig)
testCases := []struct {
@@ -0,0 +1,58 @@
package handlers
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/Wikid82/charon/backend/internal/config"
"github.com/Wikid82/charon/backend/internal/models"
)
// Every security mutator endpoint must return 403 when the caller is
// authenticated (userID present) but does not hold the admin role.
func TestSecurityHandler_MutatorsRequireAdmin(t *testing.T) {
	gin.SetMode(gin.TestMode)
	db := setupTestDB(t)
	require.NoError(t, db.AutoMigrate(&models.SecurityConfig{}, &models.SecurityRuleSet{}, &models.SecurityDecision{}, &models.SecurityAudit{}))
	handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)

	router := gin.New()
	// Simulate a logged-in, non-admin caller on every request.
	router.Use(func(c *gin.Context) {
		c.Set("userID", uint(123))
		c.Set("role", "user")
		c.Next()
	})
	router.POST("/security/config", handler.UpdateConfig)
	router.POST("/security/breakglass/generate", handler.GenerateBreakGlass)
	router.POST("/security/decisions", handler.CreateDecision)
	router.POST("/security/rulesets", handler.UpsertRuleSet)
	router.DELETE("/security/rulesets/:id", handler.DeleteRuleSet)

	cases := []struct {
		name   string
		method string
		url    string
		body   string
	}{
		{name: "update-config", method: http.MethodPost, url: "/security/config", body: `{"name":"default"}`},
		{name: "generate-breakglass", method: http.MethodPost, url: "/security/breakglass/generate", body: `{}`},
		{name: "create-decision", method: http.MethodPost, url: "/security/decisions", body: `{"ip":"1.2.3.4","action":"block"}`},
		{name: "upsert-ruleset", method: http.MethodPost, url: "/security/rulesets", body: `{"name":"owasp-crs","mode":"block","content":"x"}`},
		{name: "delete-ruleset", method: http.MethodDelete, url: "/security/rulesets/1", body: ""},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			request := httptest.NewRequest(tt.method, tt.url, bytes.NewBufferString(tt.body))
			request.Header.Set("Content-Type", "application/json")
			recorder := httptest.NewRecorder()
			router.ServeHTTP(recorder, request)
			assert.Equal(t, http.StatusForbidden, recorder.Code)
		})
	}
}
@@ -120,6 +120,10 @@ func TestSecurityHandler_GenerateBreakGlass_ReturnsToken(t *testing.T) {
db := setupTestDB(t)
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/breakglass/generate", handler.GenerateBreakGlass)
w := httptest.NewRecorder()
@@ -251,6 +255,10 @@ func TestSecurityHandler_Enable_Disable_WithAdminWhitelistAndToken(t *testing.T)
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
api := router.Group("/api/v1")
api.POST("/security/enable", handler.Enable)
api.POST("/security/disable", handler.Disable)
@@ -27,6 +27,10 @@ func TestSecurityHandler_UpdateConfig_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/config", handler.UpdateConfig)
payload := map[string]any{
@@ -55,6 +59,10 @@ func TestSecurityHandler_UpdateConfig_DefaultName(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/config", handler.UpdateConfig)
// Payload without name - should default to "default"
@@ -78,6 +86,10 @@ func TestSecurityHandler_UpdateConfig_InvalidPayload(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/config", handler.UpdateConfig)
w := httptest.NewRecorder()
@@ -193,6 +205,10 @@ func TestSecurityHandler_CreateDecision_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/decisions", handler.CreateDecision)
payload := map[string]any{
@@ -218,6 +234,10 @@ func TestSecurityHandler_CreateDecision_MissingIP(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/decisions", handler.CreateDecision)
payload := map[string]any{
@@ -240,6 +260,10 @@ func TestSecurityHandler_CreateDecision_MissingAction(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/decisions", handler.CreateDecision)
payload := map[string]any{
@@ -262,6 +286,10 @@ func TestSecurityHandler_CreateDecision_InvalidPayload(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/decisions", handler.CreateDecision)
w := httptest.NewRecorder()
@@ -306,6 +334,10 @@ func TestSecurityHandler_UpsertRuleSet_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/rulesets", handler.UpsertRuleSet)
payload := map[string]any{
@@ -330,6 +362,10 @@ func TestSecurityHandler_UpsertRuleSet_MissingName(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/rulesets", handler.UpsertRuleSet)
payload := map[string]any{
@@ -353,6 +389,10 @@ func TestSecurityHandler_UpsertRuleSet_InvalidPayload(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/rulesets", handler.UpsertRuleSet)
w := httptest.NewRecorder()
@@ -375,6 +415,10 @@ func TestSecurityHandler_DeleteRuleSet_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/rulesets/:id", handler.DeleteRuleSet)
w := httptest.NewRecorder()
@@ -395,6 +439,10 @@ func TestSecurityHandler_DeleteRuleSet_NotFound(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/rulesets/:id", handler.DeleteRuleSet)
w := httptest.NewRecorder()
@@ -411,6 +459,10 @@ func TestSecurityHandler_DeleteRuleSet_InvalidID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/rulesets/:id", handler.DeleteRuleSet)
w := httptest.NewRecorder()
@@ -427,6 +479,10 @@ func TestSecurityHandler_DeleteRuleSet_EmptyID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
// Note: This route pattern won't match empty ID, but testing the handler directly
router.DELETE("/security/rulesets/:id", handler.DeleteRuleSet)
@@ -509,6 +565,10 @@ func TestSecurityHandler_Enable_WithValidBreakGlassToken(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/breakglass/generate", handler.GenerateBreakGlass)
router.POST("/security/enable", handler.Enable)
@@ -600,6 +660,10 @@ func TestSecurityHandler_Disable_FromRemoteWithToken(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/breakglass/generate", handler.GenerateBreakGlass)
router.POST("/security/disable", func(c *gin.Context) {
c.Request.RemoteAddr = "192.168.1.100:12345" // Remote IP
@@ -689,6 +753,10 @@ func TestSecurityHandler_GenerateBreakGlass_NoConfig(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/breakglass/generate", handler.GenerateBreakGlass)
w := httptest.NewRecorder()
@@ -30,6 +30,10 @@ func setupSecurityTestRouterWithExtras(t *testing.T) (*gin.Engine, *gorm.DB) {
require.NoError(t, db.AutoMigrate(&models.ProxyHost{}, &models.Location{}, &models.Setting{}, &models.CaddyConfig{}, &models.SSLCertificate{}, &models.AccessList{}, &models.SecurityConfig{}, &models.SecurityDecision{}, &models.SecurityAudit{}, &models.SecurityRuleSet{}))
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
api := r.Group("/api/v1")
cfg := config.SecurityConfig{}
h := NewSecurityHandler(cfg, db, nil)
@@ -148,6 +152,10 @@ func TestSecurityHandler_UpsertDeleteTriggersApplyConfig(t *testing.T) {
m := caddy.NewManager(client, db, tmp, "", false, config.SecurityConfig{CerberusEnabled: true, WAFMode: "block"})
r := gin.New()
r.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
api := r.Group("/api/v1")
cfg := config.SecurityConfig{}
h := NewSecurityHandler(cfg, db, m)
@@ -110,6 +110,10 @@ func TestSecurityHandler_AddWAFExclusion_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
payload := map[string]any{
@@ -140,6 +144,10 @@ func TestSecurityHandler_AddWAFExclusion_WithTarget(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
payload := map[string]any{
@@ -175,6 +183,10 @@ func TestSecurityHandler_AddWAFExclusion_ToExistingConfig(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
router.GET("/security/waf/exclusions", handler.GetWAFExclusions)
@@ -215,6 +227,10 @@ func TestSecurityHandler_AddWAFExclusion_Duplicate(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
// Try to add duplicate
@@ -244,6 +260,10 @@ func TestSecurityHandler_AddWAFExclusion_DuplicateWithDifferentTarget(t *testing
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
// Add same rule_id with different target - should succeed
@@ -268,6 +288,10 @@ func TestSecurityHandler_AddWAFExclusion_MissingRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
payload := map[string]any{
@@ -290,6 +314,10 @@ func TestSecurityHandler_AddWAFExclusion_InvalidRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
// Zero rule_id
@@ -313,6 +341,10 @@ func TestSecurityHandler_AddWAFExclusion_NegativeRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
payload := map[string]any{
@@ -335,6 +367,10 @@ func TestSecurityHandler_AddWAFExclusion_InvalidPayload(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
w := httptest.NewRecorder()
@@ -358,6 +394,10 @@ func TestSecurityHandler_DeleteWAFExclusion_Success(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
router.GET("/security/waf/exclusions", handler.GetWAFExclusions)
@@ -394,6 +434,10 @@ func TestSecurityHandler_DeleteWAFExclusion_WithTarget(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
router.GET("/security/waf/exclusions", handler.GetWAFExclusions)
@@ -430,6 +474,10 @@ func TestSecurityHandler_DeleteWAFExclusion_NotFound(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
w := httptest.NewRecorder()
@@ -446,6 +494,10 @@ func TestSecurityHandler_DeleteWAFExclusion_NoConfig(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
w := httptest.NewRecorder()
@@ -462,6 +514,10 @@ func TestSecurityHandler_DeleteWAFExclusion_InvalidRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
w := httptest.NewRecorder()
@@ -478,6 +534,10 @@ func TestSecurityHandler_DeleteWAFExclusion_ZeroRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
w := httptest.NewRecorder()
@@ -494,6 +554,10 @@ func TestSecurityHandler_DeleteWAFExclusion_NegativeRuleID(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
w := httptest.NewRecorder()
@@ -533,6 +597,10 @@ func TestSecurityHandler_WAFExclusion_FullWorkflow(t *testing.T) {
handler := NewSecurityHandler(config.SecurityConfig{}, db, nil)
router := gin.New()
router.Use(func(c *gin.Context) {
c.Set("role", "admin")
c.Next()
})
router.GET("/security/waf/exclusions", handler.GetWAFExclusions)
router.POST("/security/waf/exclusions", handler.AddWAFExclusion)
router.DELETE("/security/waf/exclusions/:rule_id", handler.DeleteWAFExclusion)
@@ -75,14 +75,43 @@ func (h *SettingsHandler) GetSettings(c *gin.Context) {
}
// Convert to map for easier frontend consumption
settingsMap := make(map[string]string)
settingsMap := make(map[string]any)
for _, s := range settings {
if isSensitiveSettingKey(s.Key) {
hasSecret := strings.TrimSpace(s.Value) != ""
settingsMap[s.Key] = "********"
settingsMap[s.Key+".has_secret"] = hasSecret
settingsMap[s.Key+".last_updated"] = s.UpdatedAt.UTC().Format(time.RFC3339)
continue
}
settingsMap[s.Key] = s.Value
}
c.JSON(http.StatusOK, settingsMap)
}
// isSensitiveSettingKey reports whether a settings key stores secret
// material (passwords, tokens, API keys, webhook URLs) whose value must be
// masked before being returned to the frontend. Matching is done on the
// trimmed, lower-cased key so "SMTP_Password " and "smtp_password" behave
// the same.
func isSensitiveSettingKey(key string) bool {
	k := strings.ToLower(strings.TrimSpace(key))
	return strings.Contains(k, "password") ||
		strings.Contains(k, "secret") ||
		strings.Contains(k, "token") ||
		strings.Contains(k, "api_key") ||
		strings.Contains(k, "apikey") ||
		strings.Contains(k, "webhook")
}
type UpdateSettingRequest struct {
Key string `json:"key" binding:"required"`
Value string `json:"value" binding:"required"`
@@ -503,6 +532,10 @@ type SMTPConfigRequest struct {
// GetSMTPConfig returns the current SMTP configuration.
func (h *SettingsHandler) GetSMTPConfig(c *gin.Context) {
if !requireAdmin(c) {
return
}
config, err := h.MailService.GetSMTPConfig()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to fetch SMTP configuration"})
@@ -182,6 +182,31 @@ func TestSettingsHandler_GetSettings(t *testing.T) {
assert.Equal(t, "test_value", response["test_key"])
}
// TestSettingsHandler_GetSettings_MasksSensitiveValues verifies that
// GetSettings never echoes a stored secret: a sensitive key (here an SMTP
// password) must come back as the fixed "********" mask plus a
// "<key>.has_secret" presence marker, and the plaintext must not appear
// in the response.
func TestSettingsHandler_GetSettings_MasksSensitiveValues(t *testing.T) {
	gin.SetMode(gin.TestMode)
	db := setupSettingsTestDB(t)

	// Seed a secret-bearing setting that the handler must mask.
	db.Create(&models.Setting{Key: "smtp_password", Value: "super-secret-password", Category: "smtp", Type: "string"})

	handler := handlers.NewSettingsHandler(db)
	router := newAdminRouter()
	router.GET("/settings", handler.GetSettings)

	w := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/settings", http.NoBody)
	router.ServeHTTP(w, req)

	assert.Equal(t, http.StatusOK, w.Code)

	var response map[string]any
	err := json.Unmarshal(w.Body.Bytes(), &response)
	assert.NoError(t, err)

	// Masked value and presence marker instead of the raw secret.
	assert.Equal(t, "********", response["smtp_password"])
	assert.Equal(t, true, response["smtp_password.has_secret"])

	// The plaintext secret must not show up as a response key either.
	_, hasRaw := response["super-secret-password"]
	assert.False(t, hasRaw)
}
func TestSettingsHandler_GetSettings_DatabaseError(t *testing.T) {
gin.SetMode(gin.TestMode)
db := setupSettingsTestDB(t)
@@ -974,6 +999,25 @@ func TestSettingsHandler_GetSMTPConfig_DatabaseError(t *testing.T) {
assert.Equal(t, http.StatusInternalServerError, w.Code)
}
// TestSettingsHandler_GetSMTPConfig_NonAdminForbidden verifies that the SMTP
// configuration endpoint rejects an authenticated user whose role is not
// "admin" with HTTP 403.
func TestSettingsHandler_GetSMTPConfig_NonAdminForbidden(t *testing.T) {
	gin.SetMode(gin.TestMode)
	handler, _ := setupSettingsHandlerWithMail(t)

	router := gin.New()
	// Simulate an authenticated session carrying a non-admin role.
	router.Use(func(c *gin.Context) {
		c.Set("role", "user")
		c.Set("userID", uint(2))
		c.Next()
	})
	router.GET("/api/v1/settings/smtp", handler.GetSMTPConfig)

	w := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/api/v1/settings/smtp", http.NoBody)
	router.ServeHTTP(w, req)

	assert.Equal(t, http.StatusForbidden, w.Code)
}
func TestSettingsHandler_UpdateSMTPConfig_NonAdmin(t *testing.T) {
gin.SetMode(gin.TestMode)
handler, _ := setupSettingsHandlerWithMail(t)
+66 -21
View File
@@ -103,6 +103,18 @@ type SetupRequest struct {
Password string `json:"password" binding:"required,min=8"`
}
func isSetupConflictError(err error) bool {
if err == nil {
return false
}
errText := strings.ToLower(err.Error())
return strings.Contains(errText, "unique constraint failed") ||
strings.Contains(errText, "duplicate key") ||
strings.Contains(errText, "database is locked") ||
strings.Contains(errText, "database table is locked")
}
// Setup creates the initial admin user and configures the ACME email.
func (h *UserHandler) Setup(c *gin.Context) {
// 1. Check if setup is allowed
@@ -160,6 +172,17 @@ func (h *UserHandler) Setup(c *gin.Context) {
})
if err != nil {
var postTxCount int64
if countErr := h.DB.Model(&models.User{}).Count(&postTxCount).Error; countErr == nil && postTxCount > 0 {
c.JSON(http.StatusForbidden, gin.H{"error": "Setup already completed"})
return
}
if isSetupConflictError(err) {
c.JSON(http.StatusConflict, gin.H{"error": "Setup conflict: setup already in progress or completed"})
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to complete setup: " + err.Error()})
return
}
@@ -189,7 +212,12 @@ func (h *UserHandler) RegenerateAPIKey(c *gin.Context) {
return
}
c.JSON(http.StatusOK, gin.H{"api_key": apiKey})
c.JSON(http.StatusOK, gin.H{
"message": "API key regenerated successfully",
"has_api_key": true,
"api_key_masked": maskSecretForResponse(apiKey),
"api_key_updated": time.Now().UTC().Format(time.RFC3339),
})
}
// GetProfile returns the current user's profile including API key.
@@ -207,11 +235,12 @@ func (h *UserHandler) GetProfile(c *gin.Context) {
}
c.JSON(http.StatusOK, gin.H{
"id": user.ID,
"email": user.Email,
"name": user.Name,
"role": user.Role,
"api_key": user.APIKey,
"id": user.ID,
"email": user.Email,
"name": user.Name,
"role": user.Role,
"has_api_key": strings.TrimSpace(user.APIKey) != "",
"api_key_masked": maskSecretForResponse(user.APIKey),
})
}
@@ -548,14 +577,14 @@ func (h *UserHandler) InviteUser(c *gin.Context) {
}
c.JSON(http.StatusCreated, gin.H{
"id": user.ID,
"uuid": user.UUID,
"email": user.Email,
"role": user.Role,
"invite_token": inviteToken, // Return token in case email fails
"invite_url": inviteURL,
"email_sent": emailSent,
"expires_at": inviteExpires,
"id": user.ID,
"uuid": user.UUID,
"email": user.Email,
"role": user.Role,
"invite_token_masked": maskSecretForResponse(inviteToken),
"invite_url": redactInviteURL(inviteURL),
"email_sent": emailSent,
"expires_at": inviteExpires,
})
}
@@ -862,16 +891,32 @@ func (h *UserHandler) ResendInvite(c *gin.Context) {
}
c.JSON(http.StatusOK, gin.H{
"id": user.ID,
"uuid": user.UUID,
"email": user.Email,
"role": user.Role,
"invite_token": inviteToken,
"email_sent": emailSent,
"expires_at": inviteExpires,
"id": user.ID,
"uuid": user.UUID,
"email": user.Email,
"role": user.Role,
"invite_token_masked": maskSecretForResponse(inviteToken),
"email_sent": emailSent,
"expires_at": inviteExpires,
})
}
// maskSecretForResponse returns a fixed placeholder for a non-blank secret
// so API responses never echo raw secret material. Empty or whitespace-only
// values yield "" so clients can distinguish "no secret set" from "secret
// present but hidden".
func maskSecretForResponse(value string) string {
	const mask = "********"
	if len(strings.TrimSpace(value)) == 0 {
		return ""
	}
	return mask
}
// redactInviteURL hides an invite URL (which embeds the invite token as a
// query parameter) from API responses. Blank input maps to "" so callers can
// tell "no URL generated" apart from "URL generated but redacted".
func redactInviteURL(inviteURL string) string {
	if len(strings.TrimSpace(inviteURL)) == 0 {
		return ""
	}
	return "[REDACTED]"
}
// UpdateUserPermissions updates a user's permission mode and host exceptions (admin only).
func (h *UserHandler) UpdateUserPermissions(c *gin.Context) {
role, _ := c.Get("role")
@@ -3,9 +3,11 @@ package handlers
import (
"bytes"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"strconv"
"sync"
"testing"
"time"
@@ -15,15 +17,11 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
func setupUserHandler(t *testing.T) (*UserHandler, *gorm.DB) {
// Use unique DB for each test to avoid pollution
dbName := "file:" + t.Name() + "?mode=memory&cache=shared"
db, err := gorm.Open(sqlite.Open(dbName), &gorm.Config{})
require.NoError(t, err)
db := OpenTestDB(t)
_ = db.AutoMigrate(&models.User{}, &models.Setting{}, &models.SecurityAudit{})
return NewUserHandler(db), db
}
@@ -131,6 +129,224 @@ func TestUserHandler_Setup(t *testing.T) {
assert.Equal(t, http.StatusForbidden, w.Code)
}
// TestUserHandler_Setup_OneWayInvariant_ReentryRejectedAndSingleUser checks
// the one-way nature of initial setup: the first POST /setup succeeds (201),
// a later attempt with different credentials is rejected with 403, and
// exactly one user row exists afterwards.
func TestUserHandler_Setup_OneWayInvariant_ReentryRejectedAndSingleUser(t *testing.T) {
	handler, db := setupUserHandler(t)
	gin.SetMode(gin.TestMode)
	r := gin.New()
	r.POST("/setup", handler.Setup)

	// First setup request: must create the admin account.
	initialBody := map[string]string{
		"name":     "Admin",
		"email":    "admin@example.com",
		"password": "password123",
	}
	initialJSON, _ := json.Marshal(initialBody)
	firstReq := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBuffer(initialJSON))
	firstReq.Header.Set("Content-Type", "application/json")
	firstResp := httptest.NewRecorder()
	r.ServeHTTP(firstResp, firstReq)
	require.Equal(t, http.StatusCreated, firstResp.Code)

	// Second setup request with different credentials: must be refused.
	secondBody := map[string]string{
		"name":     "Different Admin",
		"email":    "different@example.com",
		"password": "password123",
	}
	secondJSON, _ := json.Marshal(secondBody)
	secondReq := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBuffer(secondJSON))
	secondReq.Header.Set("Content-Type", "application/json")
	secondResp := httptest.NewRecorder()
	r.ServeHTTP(secondResp, secondReq)
	require.Equal(t, http.StatusForbidden, secondResp.Code)

	// Invariant: setup created exactly one user.
	var userCount int64
	require.NoError(t, db.Model(&models.User{}).Count(&userCount).Error)
	assert.Equal(t, int64(1), userCount)
}
// TestUserHandler_Setup_ConcurrentAttemptInvariant fires several identical
// POST /setup requests simultaneously and asserts the one-way invariant
// holds under races: exactly one 201, every other response 403 or 409, and
// exactly one user row in the database afterwards.
func TestUserHandler_Setup_ConcurrentAttemptInvariant(t *testing.T) {
	handler, db := setupUserHandler(t)
	gin.SetMode(gin.TestMode)
	r := gin.New()
	r.POST("/setup", handler.Setup)

	concurrency := 6
	start := make(chan struct{}) // closed once to release all goroutines together
	statuses := make(chan int, concurrency)
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-start // block until close(start) to maximize request overlap
			body := map[string]string{
				"name":     "Admin",
				"email":    "admin@example.com",
				"password": "password123",
			}
			jsonBody, _ := json.Marshal(body)
			req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBuffer(jsonBody))
			req.Header.Set("Content-Type", "application/json")
			resp := httptest.NewRecorder()
			r.ServeHTTP(resp, req)
			statuses <- resp.Code
		}()
	}
	close(start)
	wg.Wait()
	close(statuses)

	// Tally outcomes; anything other than 201/403/409 fails the test.
	createdCount := 0
	forbiddenOrConflictCount := 0
	for status := range statuses {
		if status == http.StatusCreated {
			createdCount++
			continue
		}
		if status == http.StatusForbidden || status == http.StatusConflict {
			forbiddenOrConflictCount++
			continue
		}
		t.Fatalf("unexpected setup concurrency status: %d", status)
	}
	assert.Equal(t, 1, createdCount)
	assert.Equal(t, concurrency-1, forbiddenOrConflictCount)

	// The database must hold exactly one user regardless of race outcome.
	var userCount int64
	require.NoError(t, db.Model(&models.User{}).Count(&userCount).Error)
	assert.Equal(t, int64(1), userCount)
}
// TestUserHandler_Setup_ResponseSecretEchoContract asserts that the 201
// response from POST /setup never echoes secret material: the embedded
// "user" object must not contain api_key, password, password_hash, or
// invite_token fields.
func TestUserHandler_Setup_ResponseSecretEchoContract(t *testing.T) {
	handler, _ := setupUserHandler(t)
	gin.SetMode(gin.TestMode)
	r := gin.New()
	r.POST("/setup", handler.Setup)

	body := map[string]string{
		"name":     "Admin",
		"email":    "admin@example.com",
		"password": "password123",
	}
	jsonBody, _ := json.Marshal(body)
	req := httptest.NewRequest(http.MethodPost, "/setup", bytes.NewBuffer(jsonBody))
	req.Header.Set("Content-Type", "application/json")
	resp := httptest.NewRecorder()
	r.ServeHTTP(resp, req)
	require.Equal(t, http.StatusCreated, resp.Code)

	var payload map[string]any
	require.NoError(t, json.Unmarshal(resp.Body.Bytes(), &payload))
	// The response must embed a "user" object...
	userValue, ok := payload["user"]
	require.True(t, ok)
	userMap, ok := userValue.(map[string]any)
	require.True(t, ok)

	// ...and none of the secret-bearing fields may be present at all.
	_, hasAPIKey := userMap["api_key"]
	_, hasPassword := userMap["password"]
	_, hasPasswordHash := userMap["password_hash"]
	_, hasInviteToken := userMap["invite_token"]
	assert.False(t, hasAPIKey)
	assert.False(t, hasPassword)
	assert.False(t, hasPasswordHash)
	assert.False(t, hasInviteToken)
}
// TestUserHandler_GetProfile_SecretEchoContract seeds a user whose record
// holds real secret values, then asserts GET /profile omits the raw
// api_key/password/password_hash/invite_token fields and exposes only the
// fixed "********" mask via api_key_masked.
func TestUserHandler_GetProfile_SecretEchoContract(t *testing.T) {
	handler, db := setupUserHandler(t)
	user := &models.User{
		UUID:         uuid.NewString(),
		Email:        "profile@example.com",
		Name:         "Profile User",
		APIKey:       "real-secret-api-key",
		InviteToken:  "invite-secret-token",
		PasswordHash: "hashed-password-value",
	}
	require.NoError(t, db.Create(user).Error)

	gin.SetMode(gin.TestMode)
	r := gin.New()
	// Inject the authenticated user ID the handler reads from context.
	r.Use(func(c *gin.Context) {
		c.Set("userID", user.ID)
		c.Next()
	})
	r.GET("/profile", handler.GetProfile)

	req := httptest.NewRequest(http.MethodGet, "/profile", http.NoBody)
	resp := httptest.NewRecorder()
	r.ServeHTTP(resp, req)
	require.Equal(t, http.StatusOK, resp.Code)

	var payload map[string]any
	require.NoError(t, json.Unmarshal(resp.Body.Bytes(), &payload))
	// Raw secret fields must be absent entirely, not merely empty.
	_, hasAPIKey := payload["api_key"]
	_, hasPassword := payload["password"]
	_, hasPasswordHash := payload["password_hash"]
	_, hasInviteToken := payload["invite_token"]
	assert.False(t, hasAPIKey)
	assert.False(t, hasPassword)
	assert.False(t, hasPasswordHash)
	assert.False(t, hasInviteToken)
	// Presence of the API key is conveyed only via the fixed mask.
	assert.Equal(t, "********", payload["api_key_masked"])
}
// TestUserHandler_ListUsers_SecretEchoContract seeds a user with raw secret
// values and asserts the admin user-list endpoint never serializes
// api_key/password/password_hash/invite_token for any listed user.
func TestUserHandler_ListUsers_SecretEchoContract(t *testing.T) {
	handler, db := setupUserHandlerWithProxyHosts(t)
	user := &models.User{
		UUID:         uuid.NewString(),
		Email:        "user@example.com",
		Name:         "User",
		Role:         "user",
		APIKey:       "raw-api-key",
		InviteToken:  "raw-invite-token",
		PasswordHash: "raw-password-hash",
	}
	require.NoError(t, db.Create(user).Error)

	gin.SetMode(gin.TestMode)
	r := gin.New()
	// ListUsers is admin-only; simulate an admin session.
	r.Use(func(c *gin.Context) {
		c.Set("role", "admin")
		c.Next()
	})
	r.GET("/users", handler.ListUsers)

	req := httptest.NewRequest(http.MethodGet, "/users", http.NoBody)
	resp := httptest.NewRecorder()
	r.ServeHTTP(resp, req)
	require.Equal(t, http.StatusOK, resp.Code)

	var users []map[string]any
	require.NoError(t, json.Unmarshal(resp.Body.Bytes(), &users))
	require.Len(t, users, 1)

	// Secret-bearing fields must be absent from the listed user entirely.
	_, hasAPIKey := users[0]["api_key"]
	_, hasPassword := users[0]["password"]
	_, hasPasswordHash := users[0]["password_hash"]
	_, hasInviteToken := users[0]["invite_token"]
	assert.False(t, hasAPIKey)
	assert.False(t, hasPassword)
	assert.False(t, hasPasswordHash)
	assert.False(t, hasInviteToken)
}
func TestUserHandler_Setup_DBError(t *testing.T) {
// Can't easily mock DB error with sqlite memory unless we close it or something.
// But we can try to insert duplicate email if we had a unique constraint and pre-seeded data,
@@ -162,15 +378,16 @@ func TestUserHandler_RegenerateAPIKey(t *testing.T) {
r.ServeHTTP(w, req)
assert.Equal(t, http.StatusOK, w.Code)
var resp map[string]string
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["api_key"])
assert.Equal(t, "API key regenerated successfully", resp["message"])
assert.Equal(t, "********", resp["api_key_masked"])
// Verify DB
var updatedUser models.User
db.First(&updatedUser, user.ID)
assert.Equal(t, resp["api_key"], updatedUser.APIKey)
assert.NotEmpty(t, updatedUser.APIKey)
}
func TestUserHandler_GetProfile(t *testing.T) {
@@ -442,9 +659,7 @@ func TestUserHandler_UpdateProfile_Errors(t *testing.T) {
// ============= User Management Tests (Admin functions) =============
func setupUserHandlerWithProxyHosts(t *testing.T) (*UserHandler, *gorm.DB) {
dbName := "file:" + t.Name() + "?mode=memory&cache=shared"
db, err := gorm.Open(sqlite.Open(dbName), &gorm.Config{})
require.NoError(t, err)
db := OpenTestDB(t)
_ = db.AutoMigrate(&models.User{}, &models.Setting{}, &models.ProxyHost{}, &models.SecurityAudit{})
return NewUserHandler(db), db
}
@@ -1376,7 +1591,7 @@ func TestUserHandler_InviteUser_Success(t *testing.T) {
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
assert.Equal(t, "", resp["invite_url"])
// email_sent is false because no SMTP is configured
assert.Equal(t, false, resp["email_sent"].(bool))
@@ -1500,7 +1715,7 @@ func TestUserHandler_InviteUser_WithSMTPConfigured(t *testing.T) {
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
assert.Equal(t, "", resp["invite_url"])
assert.Equal(t, false, resp["email_sent"].(bool))
}
@@ -1553,8 +1768,8 @@ func TestUserHandler_InviteUser_WithSMTPAndConfiguredPublicURL_IncludesInviteURL
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
token := resp["invite_token"].(string)
assert.Equal(t, "https://charon.example.com/accept-invite?token="+token, resp["invite_url"])
assert.Equal(t, "********", resp["invite_token_masked"])
assert.Equal(t, "[REDACTED]", resp["invite_url"])
assert.Equal(t, true, resp["email_sent"].(bool))
}
@@ -1606,7 +1821,7 @@ func TestUserHandler_InviteUser_WithSMTPAndMalformedPublicURL_DoesNotExposeInvit
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
assert.Equal(t, "", resp["invite_url"])
assert.Equal(t, false, resp["email_sent"].(bool))
}
@@ -1668,7 +1883,7 @@ func TestUserHandler_InviteUser_WithSMTPConfigured_DefaultAppName(t *testing.T)
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
}
// Note: TestGetBaseURL and TestGetAppName have been removed as these internal helper
@@ -2372,8 +2587,7 @@ func TestResendInvite_Success(t *testing.T) {
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.NotEqual(t, "oldtoken123", resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
assert.Equal(t, "pending-user@example.com", resp["email"])
assert.Equal(t, false, resp["email_sent"].(bool)) // No SMTP configured
@@ -2381,7 +2595,7 @@ func TestResendInvite_Success(t *testing.T) {
var updatedUser models.User
db.First(&updatedUser, user.ID)
assert.NotEqual(t, "oldtoken123", updatedUser.InviteToken)
assert.Equal(t, resp["invite_token"], updatedUser.InviteToken)
assert.NotEmpty(t, updatedUser.InviteToken)
}
func TestResendInvite_WithExpiredInvite(t *testing.T) {
@@ -2419,11 +2633,75 @@ func TestResendInvite_WithExpiredInvite(t *testing.T) {
var resp map[string]any
err := json.Unmarshal(w.Body.Bytes(), &resp)
require.NoError(t, err, "Failed to unmarshal response")
assert.NotEmpty(t, resp["invite_token"])
assert.NotEqual(t, "expiredtoken", resp["invite_token"])
assert.Equal(t, "********", resp["invite_token_masked"])
// Verify new expiration is in the future
var updatedUser models.User
db.First(&updatedUser, user.ID)
assert.True(t, updatedUser.InviteExpires.After(time.Now()))
}
// ===== Additional coverage for uncovered utility functions =====
// TestIsSetupConflictError exercises the error-classification helper with a
// table of representative database errors: nil, SQLite and Postgres conflict
// phrasings, case-insensitivity, and unrelated/empty errors.
func TestIsSetupConflictError(t *testing.T) {
	tests := []struct {
		name     string
		err      error
		expected bool
	}{
		{"nil error", nil, false},
		{"unique constraint failed", errors.New("UNIQUE constraint failed: users.email"), true},
		{"duplicate key", errors.New("duplicate key value violates unique constraint"), true},
		{"database is locked", errors.New("database is locked"), true},
		{"database table is locked", errors.New("database table is locked"), true},
		{"case insensitive", errors.New("UNIQUE CONSTRAINT FAILED"), true},
		{"unrelated error", errors.New("connection refused"), false},
		{"empty error", errors.New(""), false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := isSetupConflictError(tt.err)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestMaskSecretForResponse checks that non-blank secrets collapse to the
// fixed "********" mask while empty or whitespace-only values map to "".
func TestMaskSecretForResponse(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{"non-empty secret", "my-secret-key", "********"},
		{"empty string", "", ""},
		{"whitespace only", " ", ""},
		{"single char", "x", "********"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := maskSecretForResponse(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestRedactInviteURL checks that any non-blank invite URL is replaced by
// the "[REDACTED]" placeholder while empty/whitespace-only input maps to "".
func TestRedactInviteURL(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{"non-empty url", "https://example.com/invite/abc123", "[REDACTED]"},
		{"empty string", "", ""},
		{"whitespace only", " ", ""},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := redactInviteURL(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}
+37 -31
View File
@@ -277,7 +277,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM
protected.PATCH("/config", settingsHandler.PatchConfig) // Bulk configuration update
// SMTP Configuration
protected.GET("/settings/smtp", settingsHandler.GetSMTPConfig)
protected.GET("/settings/smtp", middleware.RequireRole("admin"), settingsHandler.GetSMTPConfig)
protected.POST("/settings/smtp", settingsHandler.UpdateSMTPConfig)
protected.POST("/settings/smtp/test", settingsHandler.TestSMTPConfig)
protected.POST("/settings/smtp/test-email", settingsHandler.SendTestEmail)
@@ -520,40 +520,43 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM
protected.GET("/security/status", securityHandler.GetStatus)
// Security Config management
protected.GET("/security/config", securityHandler.GetConfig)
protected.POST("/security/config", securityHandler.UpdateConfig)
protected.POST("/security/enable", securityHandler.Enable)
protected.POST("/security/disable", securityHandler.Disable)
protected.POST("/security/breakglass/generate", securityHandler.GenerateBreakGlass)
protected.GET("/security/decisions", securityHandler.ListDecisions)
protected.POST("/security/decisions", securityHandler.CreateDecision)
protected.GET("/security/rulesets", securityHandler.ListRuleSets)
protected.POST("/security/rulesets", securityHandler.UpsertRuleSet)
protected.DELETE("/security/rulesets/:id", securityHandler.DeleteRuleSet)
protected.GET("/security/rate-limit/presets", securityHandler.GetRateLimitPresets)
// GeoIP endpoints
protected.GET("/security/geoip/status", securityHandler.GetGeoIPStatus)
protected.POST("/security/geoip/reload", securityHandler.ReloadGeoIP)
protected.POST("/security/geoip/lookup", securityHandler.LookupGeoIP)
// WAF exclusion endpoints
protected.GET("/security/waf/exclusions", securityHandler.GetWAFExclusions)
protected.POST("/security/waf/exclusions", securityHandler.AddWAFExclusion)
protected.DELETE("/security/waf/exclusions/:rule_id", securityHandler.DeleteWAFExclusion)
securityAdmin := protected.Group("/security")
securityAdmin.Use(middleware.RequireRole("admin"))
securityAdmin.POST("/config", securityHandler.UpdateConfig)
securityAdmin.POST("/enable", securityHandler.Enable)
securityAdmin.POST("/disable", securityHandler.Disable)
securityAdmin.POST("/breakglass/generate", securityHandler.GenerateBreakGlass)
securityAdmin.POST("/decisions", securityHandler.CreateDecision)
securityAdmin.POST("/rulesets", securityHandler.UpsertRuleSet)
securityAdmin.DELETE("/rulesets/:id", securityHandler.DeleteRuleSet)
securityAdmin.POST("/geoip/reload", securityHandler.ReloadGeoIP)
securityAdmin.POST("/geoip/lookup", securityHandler.LookupGeoIP)
securityAdmin.POST("/waf/exclusions", securityHandler.AddWAFExclusion)
securityAdmin.DELETE("/waf/exclusions/:rule_id", securityHandler.DeleteWAFExclusion)
// Security module enable/disable endpoints (granular control)
protected.POST("/security/acl/enable", securityHandler.EnableACL)
protected.POST("/security/acl/disable", securityHandler.DisableACL)
protected.PATCH("/security/acl", securityHandler.PatchACL) // E2E tests use PATCH
protected.POST("/security/waf/enable", securityHandler.EnableWAF)
protected.POST("/security/waf/disable", securityHandler.DisableWAF)
protected.PATCH("/security/waf", securityHandler.PatchWAF) // E2E tests use PATCH
protected.POST("/security/cerberus/enable", securityHandler.EnableCerberus)
protected.POST("/security/cerberus/disable", securityHandler.DisableCerberus)
protected.POST("/security/crowdsec/enable", securityHandler.EnableCrowdSec)
protected.POST("/security/crowdsec/disable", securityHandler.DisableCrowdSec)
protected.PATCH("/security/crowdsec", securityHandler.PatchCrowdSec) // E2E tests use PATCH
protected.POST("/security/rate-limit/enable", securityHandler.EnableRateLimit)
protected.POST("/security/rate-limit/disable", securityHandler.DisableRateLimit)
protected.PATCH("/security/rate-limit", securityHandler.PatchRateLimit) // E2E tests use PATCH
securityAdmin.POST("/acl/enable", securityHandler.EnableACL)
securityAdmin.POST("/acl/disable", securityHandler.DisableACL)
securityAdmin.PATCH("/acl", securityHandler.PatchACL) // E2E tests use PATCH
securityAdmin.POST("/waf/enable", securityHandler.EnableWAF)
securityAdmin.POST("/waf/disable", securityHandler.DisableWAF)
securityAdmin.PATCH("/waf", securityHandler.PatchWAF) // E2E tests use PATCH
securityAdmin.POST("/cerberus/enable", securityHandler.EnableCerberus)
securityAdmin.POST("/cerberus/disable", securityHandler.DisableCerberus)
securityAdmin.POST("/crowdsec/enable", securityHandler.EnableCrowdSec)
securityAdmin.POST("/crowdsec/disable", securityHandler.DisableCrowdSec)
securityAdmin.PATCH("/crowdsec", securityHandler.PatchCrowdSec) // E2E tests use PATCH
securityAdmin.POST("/rate-limit/enable", securityHandler.EnableRateLimit)
securityAdmin.POST("/rate-limit/disable", securityHandler.DisableRateLimit)
securityAdmin.PATCH("/rate-limit", securityHandler.PatchRateLimit) // E2E tests use PATCH
// CrowdSec process management and import
// Data dir for crowdsec (persisted on host via volumes)
@@ -635,7 +638,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM
proxyHostHandler.RegisterRoutes(protected)
remoteServerHandler := handlers.NewRemoteServerHandler(remoteServerService, notificationService)
remoteServerHandler.RegisterRoutes(api)
remoteServerHandler.RegisterRoutes(protected)
// Initial Caddy Config Sync
go func() {
@@ -674,17 +677,20 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM
}
// RegisterImportHandler wires up import routes with config dependencies.
func RegisterImportHandler(router *gin.Engine, db *gorm.DB, caddyBinary, importDir, mountPath string) {
func RegisterImportHandler(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyBinary, importDir, mountPath string) {
securityService := services.NewSecurityService(db)
importHandler := handlers.NewImportHandlerWithDeps(db, caddyBinary, importDir, mountPath, securityService)
api := router.Group("/api/v1")
importHandler.RegisterRoutes(api)
authService := services.NewAuthService(db, cfg)
authenticatedAdmin := api.Group("/")
authenticatedAdmin.Use(middleware.AuthMiddleware(authService), middleware.RequireRole("admin"))
importHandler.RegisterRoutes(authenticatedAdmin)
// NPM Import Handler - supports Nginx Proxy Manager export format
npmImportHandler := handlers.NewNPMImportHandler(db)
npmImportHandler.RegisterRoutes(api)
npmImportHandler.RegisterRoutes(authenticatedAdmin)
// JSON Import Handler - supports both Charon and NPM export formats
jsonImportHandler := handlers.NewJSONImportHandler(db)
jsonImportHandler.RegisterRoutes(api)
jsonImportHandler.RegisterRoutes(authenticatedAdmin)
}
@@ -1,15 +1,20 @@
package routes_test
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"github.com/Wikid82/charon/backend/internal/api/routes"
"github.com/Wikid82/charon/backend/internal/config"
"github.com/Wikid82/charon/backend/internal/models"
"github.com/Wikid82/charon/backend/internal/services"
)
func setupTestImportDB(t *testing.T) *gorm.DB {
@@ -27,7 +32,7 @@ func TestRegisterImportHandler(t *testing.T) {
db := setupTestImportDB(t)
router := gin.New()
routes.RegisterImportHandler(router, db, "echo", "/tmp", "/import/Caddyfile")
routes.RegisterImportHandler(router, db, config.Config{JWTSecret: "test-secret"}, "echo", "/tmp", "/import/Caddyfile")
// Verify routes are registered by checking the routes list
routeInfo := router.Routes()
@@ -53,3 +58,30 @@ func TestRegisterImportHandler(t *testing.T) {
assert.True(t, found, "route %s should be registered", route)
}
}
// TestRegisterImportHandler_AuthzGuards verifies the import routes are wired
// behind both guards: an unauthenticated request receives 401 from the auth
// middleware, and an authenticated non-admin user receives 403 from the
// admin role guard.
func TestRegisterImportHandler_AuthzGuards(t *testing.T) {
	gin.SetMode(gin.TestMode)
	db := setupTestImportDB(t)
	require.NoError(t, db.AutoMigrate(&models.User{}))
	cfg := config.Config{JWTSecret: "test-secret"}
	router := gin.New()
	routes.RegisterImportHandler(router, db, cfg, "echo", "/tmp", "/import/Caddyfile")

	// No Authorization header: auth middleware must reject outright.
	unauthReq := httptest.NewRequest(http.MethodGet, "/api/v1/import/status", http.NoBody)
	unauthW := httptest.NewRecorder()
	router.ServeHTTP(unauthW, unauthReq)
	assert.Equal(t, http.StatusUnauthorized, unauthW.Code)

	// Valid token but role "user": the admin guard must deny with 403.
	nonAdmin := &models.User{Email: "user@example.com", Role: "user", Enabled: true}
	require.NoError(t, db.Create(nonAdmin).Error)
	authSvc := services.NewAuthService(db, cfg)
	token, err := authSvc.GenerateToken(nonAdmin)
	require.NoError(t, err)
	nonAdminReq := httptest.NewRequest(http.MethodGet, "/api/v1/import/preview", http.NoBody)
	nonAdminReq.Header.Set("Authorization", "Bearer "+token)
	nonAdminW := httptest.NewRecorder()
	router.ServeHTTP(nonAdminW, nonAdminReq)
	assert.Equal(t, http.StatusForbidden, nonAdminW.Code)
}
+117 -2
View File
@@ -1,6 +1,7 @@
package routes
import (
"io"
"net/http"
"net/http/httptest"
"os"
@@ -16,6 +17,16 @@ import (
"gorm.io/gorm"
)
// materializeRoutePath converts a Gin route pattern into a concrete,
// requestable path by substituting "1" for every parameter segment.
// Gin uses ":name" for named parameters and "*name" for catch-alls; both
// must be replaced, otherwise wildcard routes would yield paths containing
// a literal "*name" segment that never matches the registered route.
func materializeRoutePath(path string) string {
	segments := strings.Split(path, "/")
	for i, segment := range segments {
		if strings.HasPrefix(segment, ":") || strings.HasPrefix(segment, "*") {
			segments[i] = "1"
		}
	}
	return strings.Join(segments, "/")
}
func TestRegister(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
@@ -103,11 +114,13 @@ func TestRegisterImportHandler(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
cfg := config.Config{JWTSecret: "test-secret"}
db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared&_test_import"), &gorm.Config{})
require.NoError(t, err)
// RegisterImportHandler should not panic
RegisterImportHandler(router, db, "/usr/bin/caddy", "/tmp/imports", "/tmp/mount")
RegisterImportHandler(router, db, cfg, "/usr/bin/caddy", "/tmp/imports", "/tmp/mount")
// Verify import routes exist
routes := router.Routes()
@@ -177,6 +190,70 @@ func TestRegister_ProxyHostsRequireAuth(t *testing.T) {
assert.Contains(t, w.Body.String(), "Authorization header required")
}
// TestRegister_StateChangingRoutesDenyByDefaultWithExplicitAllowlist sweeps
// every registered mutating /api/v1 route and asserts it denies
// unauthenticated access (401 or 403) unless it appears in an explicit
// allowlist of intentionally-public mutations. Any newly added mutating
// endpoint therefore fails this test until it is either authenticated or
// consciously allowlisted here.
func TestRegister_StateChangingRoutesDenyByDefaultWithExplicitAllowlist(t *testing.T) {
	gin.SetMode(gin.TestMode)
	router := gin.New()
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared&_test_mutation_auth_guard"), &gorm.Config{})
	require.NoError(t, err)
	cfg := config.Config{JWTSecret: "test-secret"}
	require.NoError(t, Register(router, db, cfg))
	// Only these HTTP methods are considered state-changing.
	mutatingMethods := map[string]bool{
		http.MethodPost: true,
		http.MethodPut: true,
		http.MethodPatch: true,
		http.MethodDelete: true,
	}
	// Endpoints that must remain reachable without credentials
	// (login, first-run setup, invite acceptance, break-glass reset, …).
	publicMutationAllowlist := map[string]bool{
		http.MethodPost + " /api/v1/auth/login": true,
		http.MethodPost + " /api/v1/auth/register": true,
		http.MethodPost + " /api/v1/setup": true,
		http.MethodPost + " /api/v1/invite/accept": true,
		http.MethodPost + " /api/v1/security/events": true,
		http.MethodPost + " /api/v1/emergency/security-reset": true,
	}
	for _, route := range router.Routes() {
		if !strings.HasPrefix(route.Path, "/api/v1/") {
			continue
		}
		if !mutatingMethods[route.Method] {
			continue
		}
		key := route.Method + " " + route.Path
		if publicMutationAllowlist[key] {
			continue
		}
		// Replace ":param" segments so the request actually hits the route.
		requestPath := materializeRoutePath(route.Path)
		var body io.Reader = http.NoBody
		if route.Method == http.MethodPost || route.Method == http.MethodPut || route.Method == http.MethodPatch {
			body = strings.NewReader("{}")
		}
		req := httptest.NewRequest(route.Method, requestPath, body)
		if route.Method == http.MethodPost || route.Method == http.MethodPut || route.Method == http.MethodPatch {
			req.Header.Set("Content-Type", "application/json")
		}
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		// 401 (no credentials) or 403 (blocked) both count as denial.
		assert.Contains(
			t,
			[]int{http.StatusUnauthorized, http.StatusForbidden},
			w.Code,
			"state-changing endpoint must deny unauthenticated access unless explicitly allowlisted: %s (materialized path: %s)",
			key,
			requestPath,
		)
	}
}
func TestRegister_DNSProviders_NotRegisteredWhenEncryptionKeyMissing(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
@@ -362,6 +439,42 @@ func TestRegister_AuthenticatedRoutes(t *testing.T) {
}
}
// TestRegister_StateChangingRoutesRequireAuthentication spot-checks a fixed
// list of known mutating endpoints and asserts each returns 401 when called
// without credentials. Complements the allowlist sweep test by pinning
// specific high-value routes explicitly.
func TestRegister_StateChangingRoutesRequireAuthentication(t *testing.T) {
	gin.SetMode(gin.TestMode)
	router := gin.New()
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared&_test_mutating_auth_routes"), &gorm.Config{})
	require.NoError(t, err)
	cfg := config.Config{JWTSecret: "test-secret"}
	require.NoError(t, Register(router, db, cfg))
	stateChangingPaths := []struct {
		method string
		path string
	}{
		{http.MethodPost, "/api/v1/backups"},
		{http.MethodPost, "/api/v1/settings"},
		{http.MethodPatch, "/api/v1/settings"},
		{http.MethodPatch, "/api/v1/config"},
		{http.MethodPost, "/api/v1/user/profile"},
		{http.MethodPost, "/api/v1/remote-servers"},
		{http.MethodPost, "/api/v1/remote-servers/test"},
		{http.MethodPut, "/api/v1/remote-servers/1"},
		{http.MethodDelete, "/api/v1/remote-servers/1"},
		{http.MethodPost, "/api/v1/remote-servers/1/test"},
	}
	for _, tc := range stateChangingPaths {
		t.Run(tc.method+"_"+tc.path, func(t *testing.T) {
			w := httptest.NewRecorder()
			req := httptest.NewRequest(tc.method, tc.path, nil)
			router.ServeHTTP(w, req)
			assert.Equal(t, http.StatusUnauthorized, w.Code, "State-changing route %s %s should require auth", tc.method, tc.path)
		})
	}
}
func TestRegister_AdminRoutes(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
@@ -915,10 +1028,12 @@ func TestRegisterImportHandler_RoutesExist(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
cfg := config.Config{JWTSecret: "test-secret"}
db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared&_test_import_routes"), &gorm.Config{})
require.NoError(t, err)
RegisterImportHandler(router, db, "/usr/bin/caddy", "/tmp/imports", "/tmp/mount")
RegisterImportHandler(router, db, cfg, "/usr/bin/caddy", "/tmp/imports", "/tmp/mount")
routes := router.Routes()
routeMap := make(map[string]bool)
@@ -100,7 +100,10 @@ func TestInviteToken_MustBeUnguessable(t *testing.T) {
var resp map[string]any
require.NoError(t, json.Unmarshal(w.Body.Bytes(), &resp))
token := resp["invite_token"].(string)
var invitedUser models.User
require.NoError(t, db.Where("email = ?", "user@test.com").First(&invitedUser).Error)
token := invitedUser.InviteToken
require.NotEmpty(t, token)
// Token MUST be at least 32 chars (64 hex = 32 bytes = 256 bits)
assert.GreaterOrEqual(t, len(token), 64, "Invite token must be at least 64 hex chars (256 bits)")
@@ -14,6 +14,7 @@ type NotificationProvider struct {
Type string `json:"type" gorm:"index"` // Provider type: discord, gotify, or webhook (see feature flags)
URL string `json:"url"` // Provider endpoint URL (HTTPS required outside dev/test overrides)
Token string `json:"-"` // Auth token for providers (e.g., Gotify) - never exposed in API
HasToken bool `json:"has_token" gorm:"-"` // Computed: indicates whether a token is set (never exposes raw value)
Engine string `json:"engine,omitempty" gorm:"index"` // notify_v1 (notify-only runtime)
Config string `json:"config"` // JSON payload template for custom webhooks
ServiceConfig string `json:"service_config,omitempty" gorm:"type:text"` // JSON blob for typed service config
@@ -4,5 +4,6 @@ const (
FlagNotifyEngineEnabled = "feature.notifications.engine.notify_v1.enabled"
FlagDiscordServiceEnabled = "feature.notifications.service.discord.enabled"
FlagGotifyServiceEnabled = "feature.notifications.service.gotify.enabled"
FlagWebhookServiceEnabled = "feature.notifications.service.webhook.enabled"
FlagSecurityProviderEventsEnabled = "feature.notifications.security_provider_events.enabled"
)
@@ -0,0 +1,7 @@
package notifications
import "net/http"
// executeNotifyRequest issues req via client.Do. It is a package-level
// indirection around the actual HTTP call — presumably so tests can observe
// or stub the exact outbound invocation (TODO confirm against callers).
func executeNotifyRequest(client *http.Client, req *http.Request) (*http.Response, error) {
	return client.Do(req)
}
@@ -0,0 +1,507 @@
package notifications
import (
"bytes"
"context"
crand "crypto/rand"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
neturl "net/url"
"os"
"strconv"
"strings"
"time"
"github.com/Wikid82/charon/backend/internal/network"
"github.com/Wikid82/charon/backend/internal/security"
)
const (
MaxNotifyRequestBodyBytes = 256 * 1024
MaxNotifyResponseBodyBytes = 1024 * 1024
)
type RetryPolicy struct {
MaxAttempts int
BaseDelay time.Duration
MaxDelay time.Duration
}
type HTTPWrapperRequest struct {
URL string
Headers map[string]string
Body []byte
}
type HTTPWrapperResult struct {
StatusCode int
ResponseBody []byte
Attempts int
}
type HTTPWrapper struct {
retryPolicy RetryPolicy
allowHTTP bool
maxRedirects int
httpClientFactory func(allowHTTP bool, maxRedirects int) *http.Client
sleep func(time.Duration)
jitterNanos func(int64) int64
}
// NewNotifyHTTPWrapper builds an HTTPWrapper with production defaults:
// up to 3 attempts with 200ms base / 2s max exponential backoff, HTTP
// (non-TLS) destinations only when the dev/test override is active, and a
// redirect budget read from CHARON_NOTIFY_MAX_REDIRECTS. The client factory
// produces an SSRF-hardened client from the internal network package.
func NewNotifyHTTPWrapper() *HTTPWrapper {
	return &HTTPWrapper{
		retryPolicy: RetryPolicy{
			MaxAttempts: 3,
			BaseDelay: 200 * time.Millisecond,
			MaxDelay: 2 * time.Second,
		},
		allowHTTP: allowNotifyHTTPOverride(),
		maxRedirects: notifyMaxRedirects(),
		httpClientFactory: func(allowHTTP bool, maxRedirects int) *http.Client {
			opts := []network.Option{network.WithTimeout(10 * time.Second), network.WithMaxRedirects(maxRedirects)}
			if allowHTTP {
				// Localhost dialing is only permitted under the dev/test override.
				opts = append(opts, network.WithAllowLocalhost())
			}
			return network.NewSafeHTTPClient(opts...)
		},
		sleep: time.Sleep,
	}
}
// Send POSTs request.Body to request.URL with retries, after layered
// SSRF/abuse validation:
//  1. request body size cap,
//  2. URL validation (scheme/host rules plus query-auth-key rejection),
//  3. destination guard (no userinfo/fragment, resolved IPs allowed),
//  4. header allowlisting.
// Retries use exponential backoff with jitter for transport errors, 429 and
// 5xx responses. Responses >= 400 after the retry loop become errors; error
// text is sanitized so provider/host details do not leak to callers.
func (w *HTTPWrapper) Send(ctx context.Context, request HTTPWrapperRequest) (*HTTPWrapperResult, error) {
	if len(request.Body) > MaxNotifyRequestBodyBytes {
		return nil, fmt.Errorf("request payload exceeds maximum size")
	}
	validatedURL, err := w.validateURL(request.URL)
	if err != nil {
		return nil, err
	}
	parsedValidatedURL, err := neturl.Parse(validatedURL)
	if err != nil {
		return nil, fmt.Errorf("destination URL validation failed")
	}
	// Re-validate the parsed form as defense-in-depth; errors are
	// deliberately generic to avoid echoing attacker-controlled URLs.
	validationOptions := []security.ValidationOption{}
	if w.allowHTTP {
		validationOptions = append(validationOptions, security.WithAllowHTTP(), security.WithAllowLocalhost())
	}
	safeURL, safeURLErr := security.ValidateExternalURL(parsedValidatedURL.String(), validationOptions...)
	if safeURLErr != nil {
		return nil, fmt.Errorf("destination URL validation failed")
	}
	safeParsedURL, safeParseErr := neturl.Parse(safeURL)
	if safeParseErr != nil {
		return nil, fmt.Errorf("destination URL validation failed")
	}
	if err := w.guardDestination(safeParsedURL); err != nil {
		return nil, err
	}
	safeRequestURL, hostHeader, safeRequestErr := w.buildSafeRequestURL(safeParsedURL)
	if safeRequestErr != nil {
		return nil, safeRequestErr
	}
	// Drop all headers except the small outbound allowlist.
	headers := sanitizeOutboundHeaders(request.Headers)
	client := w.httpClientFactory(w.allowHTTP, w.maxRedirects)
	w.applyRedirectGuard(client)
	var lastErr error
	for attempt := 1; attempt <= w.retryPolicy.MaxAttempts; attempt++ {
		// A fresh request per attempt: the body reader cannot be reused.
		httpReq, reqErr := http.NewRequestWithContext(ctx, http.MethodPost, safeRequestURL.String(), bytes.NewReader(request.Body))
		if reqErr != nil {
			return nil, fmt.Errorf("create outbound request: %w", reqErr)
		}
		httpReq.Host = hostHeader
		for key, value := range headers {
			httpReq.Header.Set(key, value)
		}
		if httpReq.Header.Get("Content-Type") == "" {
			httpReq.Header.Set("Content-Type", "application/json")
		}
		resp, doErr := executeNotifyRequest(client, httpReq)
		if doErr != nil {
			lastErr = doErr
			if attempt < w.retryPolicy.MaxAttempts && shouldRetry(nil, doErr) {
				w.waitBeforeRetry(attempt)
				continue
			}
			// Non-retryable or out of attempts: surface a sanitized reason.
			return nil, fmt.Errorf("outbound request failed: %s", sanitizeTransportErrorReason(doErr))
		}
		// Read (capped) and close the body before deciding whether to retry.
		body, bodyErr := readCappedResponseBody(resp.Body)
		closeErr := resp.Body.Close()
		if bodyErr != nil {
			return nil, bodyErr
		}
		if closeErr != nil {
			return nil, fmt.Errorf("close response body: %w", closeErr)
		}
		if shouldRetry(resp, nil) && attempt < w.retryPolicy.MaxAttempts {
			w.waitBeforeRetry(attempt)
			continue
		}
		if resp.StatusCode >= http.StatusBadRequest {
			return nil, fmt.Errorf("provider returned status %d", resp.StatusCode)
		}
		return &HTTPWrapperResult{
			StatusCode: resp.StatusCode,
			ResponseBody: body,
			Attempts: attempt,
		}, nil
	}
	if lastErr != nil {
		return nil, fmt.Errorf("provider request failed after retries: %s", sanitizeTransportErrorReason(lastErr))
	}
	return nil, fmt.Errorf("provider request failed")
}
func sanitizeTransportErrorReason(err error) string {
if err == nil {
return "connection failed"
}
errText := strings.ToLower(strings.TrimSpace(err.Error()))
switch {
case strings.Contains(errText, "no such host"):
return "dns lookup failed"
case strings.Contains(errText, "connection refused"):
return "connection refused"
case strings.Contains(errText, "no route to host") || strings.Contains(errText, "network is unreachable"):
return "network unreachable"
case strings.Contains(errText, "timeout") || strings.Contains(errText, "deadline exceeded"):
return "request timed out"
case strings.Contains(errText, "tls") || strings.Contains(errText, "certificate") || strings.Contains(errText, "x509"):
return "tls handshake failed"
default:
return "connection failed"
}
}
// applyRedirectGuard wraps the client's existing CheckRedirect policy so
// that every redirect target is re-validated against the same URL/SSRF rules
// as the original destination. The original policy (e.g. a redirect-count
// cap) runs first and short-circuits the guard on error.
func (w *HTTPWrapper) applyRedirectGuard(client *http.Client) {
	if client == nil {
		return
	}
	originalCheckRedirect := client.CheckRedirect
	client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		if originalCheckRedirect != nil {
			if err := originalCheckRedirect(req, via); err != nil {
				return err
			}
		}
		// Validate the redirect target exactly like the initial URL.
		return w.guardOutboundRequestURL(req)
	}
}
// validateURL parses rawURL, rejects query-string authentication keys
// (token/auth/apikey/api_key — secrets must travel in headers, not URLs),
// and then defers to security.ValidateExternalURL for scheme/host policy.
// HTTP and localhost are only permitted when the dev/test override is on.
// Returns the validated URL string, or a deliberately generic error.
func (w *HTTPWrapper) validateURL(rawURL string) (string, error) {
	parsedURL, err := neturl.Parse(rawURL)
	if err != nil {
		return "", fmt.Errorf("invalid destination URL")
	}
	if hasDisallowedQueryAuthKey(parsedURL.Query()) {
		return "", fmt.Errorf("destination URL query authentication is not allowed")
	}
	options := []security.ValidationOption{}
	if w.allowHTTP {
		options = append(options, security.WithAllowHTTP(), security.WithAllowLocalhost())
	}
	validatedURL, err := security.ValidateExternalURL(rawURL, options...)
	if err != nil {
		return "", fmt.Errorf("destination URL validation failed")
	}
	return validatedURL, nil
}
func hasDisallowedQueryAuthKey(query neturl.Values) bool {
for key := range query {
normalizedKey := strings.ToLower(strings.TrimSpace(key))
switch normalizedKey {
case "token", "auth", "apikey", "api_key":
return true
}
}
return false
}
// guardOutboundRequestURL re-runs the full destination validation pipeline
// (validateURL + guardDestination) on an in-flight request's URL. Used from
// the redirect guard so that a redirect cannot steer the client toward a
// destination the original URL checks would have rejected.
func (w *HTTPWrapper) guardOutboundRequestURL(httpReq *http.Request) error {
	if httpReq == nil || httpReq.URL == nil {
		return fmt.Errorf("destination URL validation failed")
	}
	reqURL := httpReq.URL.String()
	validatedURL, err := w.validateURL(reqURL)
	if err != nil {
		return err
	}
	parsedValidatedURL, err := neturl.Parse(validatedURL)
	if err != nil {
		return fmt.Errorf("destination URL validation failed")
	}
	return w.guardDestination(parsedValidatedURL)
}
// guardDestination enforces destination-level SSRF rules: no userinfo, no
// fragment, a non-empty hostname, and — for hostnames — every DNS-resolved
// IP must pass isAllowedDestinationIP (an IP-literal host is checked
// directly). All failures return the same generic error on purpose.
// NOTE(review): resolution here and at dial time are separate lookups, so a
// rebinding DNS answer could differ between them; per buildSafeRequestURL's
// comment the safeDialer re-validates at dial time — confirm that holds.
func (w *HTTPWrapper) guardDestination(destinationURL *neturl.URL) error {
	if destinationURL == nil {
		return fmt.Errorf("destination URL validation failed")
	}
	if destinationURL.User != nil || destinationURL.Fragment != "" {
		return fmt.Errorf("destination URL validation failed")
	}
	hostname := strings.TrimSpace(destinationURL.Hostname())
	if hostname == "" {
		return fmt.Errorf("destination URL validation failed")
	}
	if parsedIP := net.ParseIP(hostname); parsedIP != nil {
		if !w.isAllowedDestinationIP(hostname, parsedIP) {
			return fmt.Errorf("destination URL validation failed")
		}
		return nil
	}
	resolvedIPs, err := net.LookupIP(hostname)
	if err != nil || len(resolvedIPs) == 0 {
		return fmt.Errorf("destination URL validation failed")
	}
	// Strict policy: if ANY resolved address is disallowed, reject the host.
	for _, resolvedIP := range resolvedIPs {
		if !w.isAllowedDestinationIP(hostname, resolvedIP) {
			return fmt.Errorf("destination URL validation failed")
		}
	}
	return nil
}
// isAllowedDestinationIP decides whether an individual destination address
// is acceptable: unspecified, multicast, link-local and private ranges are
// always rejected; loopback is allowed only when the dev/test HTTP override
// is active AND the hostname itself is a local host (localhost / loopback
// literal), preventing public names that resolve to 127.0.0.1.
func (w *HTTPWrapper) isAllowedDestinationIP(hostname string, ip net.IP) bool {
	if ip == nil {
		return false
	}
	if ip.IsUnspecified() || ip.IsMulticast() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
		return false
	}
	if ip.IsLoopback() {
		return w.allowHTTP && isLocalDestinationHost(hostname)
	}
	if network.IsPrivateIP(ip) {
		return false
	}
	return true
}
// buildSafeRequestURL produces the URL actually used for the outbound
// request plus the Host header value. It re-checks that the hostname
// resolves to at least one allowed IP, then rebuilds the URL from validated
// components (escaped path, original query) with a defaulted "/" path.
// Returns a generic validation error on any failure.
func (w *HTTPWrapper) buildSafeRequestURL(destinationURL *neturl.URL) (*neturl.URL, string, error) {
	if destinationURL == nil {
		return nil, "", fmt.Errorf("destination URL validation failed")
	}
	hostname := strings.TrimSpace(destinationURL.Hostname())
	if hostname == "" {
		return nil, "", fmt.Errorf("destination URL validation failed")
	}
	// Validate destination IPs are allowed (defense-in-depth alongside safeDialer).
	_, err := w.resolveAllowedDestinationIP(hostname)
	if err != nil {
		return nil, "", err
	}
	// Preserve the original hostname in the URL so Go's TLS layer derives the
	// correct ServerName for SNI and certificate verification. The safeDialer
	// resolves DNS, validates IPs against SSRF rules, and connects to a
	// validated IP at dial time, so protection is maintained without
	// IP-pinning in the URL.
	safeRequestURL := &neturl.URL{
		Scheme: destinationURL.Scheme,
		Host: destinationURL.Host,
		Path: destinationURL.EscapedPath(),
		RawQuery: destinationURL.RawQuery,
	}
	if safeRequestURL.Path == "" {
		safeRequestURL.Path = "/"
	}
	return safeRequestURL, destinationURL.Host, nil
}
// resolveAllowedDestinationIP returns the first resolved IP for hostname
// that passes isAllowedDestinationIP, or a generic validation error if the
// host is an IP literal that is disallowed, fails to resolve, or resolves to
// no allowed address.
// NOTE(review): this accepts a host if ANY resolved IP is allowed, whereas
// guardDestination rejects if ANY resolved IP is disallowed — the stricter
// guard runs first in Send, but the asymmetry is worth confirming as
// intentional.
func (w *HTTPWrapper) resolveAllowedDestinationIP(hostname string) (net.IP, error) {
	if parsedIP := net.ParseIP(hostname); parsedIP != nil {
		if !w.isAllowedDestinationIP(hostname, parsedIP) {
			return nil, fmt.Errorf("destination URL validation failed")
		}
		return parsedIP, nil
	}
	resolvedIPs, err := net.LookupIP(hostname)
	if err != nil || len(resolvedIPs) == 0 {
		return nil, fmt.Errorf("destination URL validation failed")
	}
	for _, resolvedIP := range resolvedIPs {
		if w.isAllowedDestinationIP(hostname, resolvedIP) {
			return resolvedIP, nil
		}
	}
	return nil, fmt.Errorf("destination URL validation failed")
}
// isLocalDestinationHost reports whether host names the local machine:
// either the literal "localhost" (case-insensitive, whitespace-trimmed) or
// an IP literal in a loopback range (127.0.0.0/8, ::1).
func isLocalDestinationHost(host string) bool {
	candidate := strings.TrimSpace(host)
	if strings.EqualFold(candidate, "localhost") {
		return true
	}
	if ip := net.ParseIP(candidate); ip != nil {
		return ip.IsLoopback()
	}
	return false
}
func shouldRetry(resp *http.Response, err error) bool {
if err != nil {
var netErr net.Error
if isNetErr := strings.Contains(strings.ToLower(err.Error()), "timeout") || strings.Contains(strings.ToLower(err.Error()), "connection"); isNetErr {
return true
}
return errors.As(err, &netErr)
}
if resp == nil {
return false
}
if resp.StatusCode == http.StatusTooManyRequests {
return true
}
return resp.StatusCode >= http.StatusInternalServerError
}
// readCappedResponseBody reads at most MaxNotifyResponseBodyBytes from body.
// It copies up to one byte past the cap so that an over-limit payload can be
// detected and rejected with a size error rather than silently truncated.
func readCappedResponseBody(body io.Reader) ([]byte, error) {
	var buffered bytes.Buffer
	copied, copyErr := io.CopyN(&buffered, body, MaxNotifyResponseBodyBytes+1)
	if copyErr != nil && copyErr != io.EOF {
		return nil, fmt.Errorf("read response body: %w", copyErr)
	}
	if copied > MaxNotifyResponseBodyBytes {
		return nil, fmt.Errorf("response payload exceeds maximum size")
	}
	return buffered.Bytes(), nil
}
// sanitizeOutboundHeaders filters an arbitrary header map down to the fixed
// outbound allowlist (Content-Type, User-Agent, X-Request-Id, X-Gotify-Key),
// matching names case-insensitively after trimming. Allowed entries are
// returned under their canonical header form with trimmed values; everything
// else — notably Authorization and Cookie — is dropped.
func sanitizeOutboundHeaders(headers map[string]string) map[string]string {
	filtered := make(map[string]string)
	for rawKey, rawValue := range headers {
		normalized := strings.ToLower(strings.TrimSpace(rawKey))
		switch normalized {
		case "content-type", "user-agent", "x-request-id", "x-gotify-key":
			filtered[http.CanonicalHeaderKey(normalized)] = strings.TrimSpace(rawValue)
		}
	}
	return filtered
}
// waitBeforeRetry sleeps before retry attempt `attempt` using exponential
// backoff (BaseDelay doubled per attempt, capped at MaxDelay) plus a random
// jitter of up to half the delay, sourced from crypto/rand. The jitter and
// sleep functions are injectable fields so tests can run without real time.
func (w *HTTPWrapper) waitBeforeRetry(attempt int) {
	// attempt is 1-based, so attempt 1 waits BaseDelay, attempt 2 waits 2x, …
	delay := w.retryPolicy.BaseDelay << (attempt - 1)
	if delay > w.retryPolicy.MaxDelay {
		delay = w.retryPolicy.MaxDelay
	}
	jitterFn := w.jitterNanos
	if jitterFn == nil {
		// Default jitter: uniform random in [0, max) nanoseconds; on any
		// randomness failure fall back to zero jitter rather than erroring.
		jitterFn = func(max int64) int64 {
			if max <= 0 {
				return 0
			}
			n, err := crand.Int(crand.Reader, big.NewInt(max))
			if err != nil {
				return 0
			}
			return n.Int64()
		}
	}
	jitter := time.Duration(jitterFn(int64(delay) / 2))
	sleepFn := w.sleep
	if sleepFn == nil {
		sleepFn = time.Sleep
	}
	sleepFn(delay + jitter)
}
// allowNotifyHTTPOverride reports whether plain-HTTP (and localhost)
// notification destinations are permitted. Always true inside a Go test
// binary (os.Args[0] ends in ".test"); otherwise requires BOTH
// CHARON_NOTIFY_ALLOW_HTTP=true and CHARON_ENV of development/test, so the
// override cannot be enabled in production by the env var alone.
// NOTE(review): the os.Args[0] sniff is a behavioral difference between test
// and production binaries baked into production code — confirm this is an
// accepted trade-off (a renamed binary ending in ".test" would enable it).
func allowNotifyHTTPOverride() bool {
	if strings.HasSuffix(os.Args[0], ".test") {
		return true
	}
	allowHTTP := strings.EqualFold(strings.TrimSpace(os.Getenv("CHARON_NOTIFY_ALLOW_HTTP")), "true")
	if !allowHTTP {
		return false
	}
	environment := strings.ToLower(strings.TrimSpace(os.Getenv("CHARON_ENV")))
	return environment == "development" || environment == "test"
}
func notifyMaxRedirects() int {
raw := strings.TrimSpace(os.Getenv("CHARON_NOTIFY_MAX_REDIRECTS"))
if raw == "" {
return 0
}
value, err := strconv.Atoi(raw)
if err != nil {
return 0
}
if value < 0 {
return 0
}
if value > 5 {
return 5
}
return value
}
@@ -0,0 +1,923 @@
package notifications
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
neturl "net/url"
"strings"
"sync/atomic"
"testing"
"time"
)
// TestHTTPWrapperRejectsOversizedRequestBody: a body one byte over the cap
// is rejected before any network activity.
func TestHTTPWrapperRejectsOversizedRequestBody(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	payload := make([]byte, MaxNotifyRequestBodyBytes+1)
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: "http://example.com/hook",
		Body: payload,
	})
	if err == nil || !strings.Contains(err.Error(), "request payload exceeds") {
		t.Fatalf("expected oversized request body error, got: %v", err)
	}
}
// TestHTTPWrapperRejectsTokenizedQueryURL: a ?token= query parameter is
// rejected as disallowed query authentication.
func TestHTTPWrapperRejectsTokenizedQueryURL(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: "http://example.com/hook?token=secret",
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "query authentication is not allowed") {
		t.Fatalf("expected query token rejection, got: %v", err)
	}
}
// TestHTTPWrapperRejectsQueryAuthCaseVariants: query-auth key matching is
// case-insensitive (Token, AUTH, apiKey all rejected).
func TestHTTPWrapperRejectsQueryAuthCaseVariants(t *testing.T) {
	testCases := []string{
		"http://example.com/hook?Token=secret",
		"http://example.com/hook?AUTH=secret",
		"http://example.com/hook?apiKey=secret",
	}
	for _, testURL := range testCases {
		t.Run(testURL, func(t *testing.T) {
			wrapper := NewNotifyHTTPWrapper()
			wrapper.allowHTTP = true
			_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
				URL: testURL,
				Body: []byte(`{"message":"hello"}`),
			})
			if err == nil || !strings.Contains(err.Error(), "query authentication is not allowed") {
				t.Fatalf("expected query auth rejection for %q, got: %v", testURL, err)
			}
		})
	}
}
// TestHTTPWrapperSendRejectsRedirectTargetWithDisallowedScheme: a redirect
// to an ftp:// URL is blocked by the redirect guard; only the initial
// request reaches the server and the redirect is never followed.
func TestHTTPWrapperSendRejectsRedirectTargetWithDisallowedScheme(t *testing.T) {
	var attempts int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		http.Redirect(w, r, "ftp://example.com/redirected", http.StatusFound)
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.maxRedirects = 3
	wrapper.retryPolicy.MaxAttempts = 1
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "outbound request failed") {
		t.Fatalf("expected outbound failure due to redirect target validation, got: %v", err)
	}
	if got := atomic.LoadInt32(&attempts); got != 1 {
		t.Fatalf("expected only initial request due to blocked redirect, got %d attempts", got)
	}
}
// TestHTTPWrapperSendRejectsRedirectTargetWithMixedCaseQueryAuth: the
// redirect guard also applies query-auth-key rejection (case-insensitive)
// to redirect targets, so a ?Token= redirect is never followed.
func TestHTTPWrapperSendRejectsRedirectTargetWithMixedCaseQueryAuth(t *testing.T) {
	var attempts int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		http.Redirect(w, r, "https://example.com/redirected?Token=secret", http.StatusFound)
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.maxRedirects = 3
	wrapper.retryPolicy.MaxAttempts = 1
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "outbound request failed") {
		t.Fatalf("expected outbound failure due to redirect query auth validation, got: %v", err)
	}
	if got := atomic.LoadInt32(&attempts); got != 1 {
		t.Fatalf("expected only initial request due to blocked redirect, got %d attempts", got)
	}
}
// TestHTTPWrapperRetriesOn429ThenSucceeds: a 429 response triggers a retry
// (with sleep/jitter stubbed out) and the second attempt succeeds, with the
// attempt count reported in the result.
func TestHTTPWrapperRetriesOn429ThenSucceeds(t *testing.T) {
	var calls int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		current := atomic.AddInt32(&calls, 1)
		if current == 1 {
			w.WriteHeader(http.StatusTooManyRequests)
			return
		}
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	// Stub timing so the test does not actually sleep.
	wrapper.sleep = func(time.Duration) {}
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	result, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err != nil {
		t.Fatalf("expected success after retry, got error: %v", err)
	}
	if result.Attempts != 2 {
		t.Fatalf("expected 2 attempts, got %d", result.Attempts)
	}
}
// TestHTTPWrapperSendSuccessWithValidatedDestination: a TLS destination that
// passes validation is called once, the default application/json
// Content-Type is applied, and the 200 result is returned.
func TestHTTPWrapperSendSuccessWithValidatedDestination(t *testing.T) {
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if got := r.Header.Get("Content-Type"); got != "application/json" {
			t.Fatalf("expected default content-type, got %q", got)
		}
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.retryPolicy.MaxAttempts = 1
	// Use the test server's client so its self-signed certificate is trusted.
	wrapper.httpClientFactory = func(bool, int) *http.Client {
		return server.Client()
	}
	result, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err != nil {
		t.Fatalf("expected successful send, got error: %v", err)
	}
	if result.Attempts != 1 {
		t.Fatalf("expected 1 attempt, got %d", result.Attempts)
	}
	if result.StatusCode != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, result.StatusCode)
	}
}
// TestHTTPWrapperSendRejectsUserInfoInDestinationURL: user:pass@ userinfo in
// the destination URL fails validation.
func TestHTTPWrapperSendRejectsUserInfoInDestinationURL(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: "https://user:pass@example.com/hook",
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected destination validation failure, got: %v", err)
	}
}
// TestHTTPWrapperSendRejectsFragmentInDestinationURL: a #fragment in the
// destination URL fails validation.
func TestHTTPWrapperSendRejectsFragmentInDestinationURL(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: "https://example.com/hook#fragment",
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected destination validation failure, got: %v", err)
	}
}
// TestHTTPWrapperDoesNotRetryOn400: a 400 response is non-retryable — the
// server is hit exactly once and the status is surfaced in the error.
func TestHTTPWrapperDoesNotRetryOn400(t *testing.T) {
	var calls int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&calls, 1)
		w.WriteHeader(http.StatusBadRequest)
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.sleep = func(time.Duration) {}
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "status 400") {
		t.Fatalf("expected non-retryable 400 error, got: %v", err)
	}
	if atomic.LoadInt32(&calls) != 1 {
		t.Fatalf("expected exactly one request attempt, got %d", calls)
	}
}
// TestHTTPWrapperResponseBodyCap: a response body exceeding
// MaxNotifyResponseBodyBytes is rejected with a size error.
func TestHTTPWrapperResponseBodyCap(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = io.WriteString(w, strings.Repeat("x", MaxNotifyResponseBodyBytes+8))
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL: server.URL,
		Body: []byte(`{"message":"hello"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "response payload exceeds") {
		t.Fatalf("expected capped response body error, got: %v", err)
	}
}
// TestSanitizeOutboundHeadersAllowlist: only the four allowlisted headers
// survive sanitization; Authorization and Cookie are stripped.
func TestSanitizeOutboundHeadersAllowlist(t *testing.T) {
	headers := sanitizeOutboundHeaders(map[string]string{
		"Content-Type": "application/json",
		"User-Agent": "Charon",
		"X-Request-ID": "abc",
		"X-Gotify-Key": "secret",
		"Authorization": "Bearer token",
		"Cookie": "sid=1",
	})
	if len(headers) != 4 {
		t.Fatalf("expected 4 allowed headers, got %d", len(headers))
	}
	if _, ok := headers["Authorization"]; ok {
		t.Fatalf("authorization header must be stripped")
	}
	if _, ok := headers["Cookie"]; ok {
		t.Fatalf("cookie header must be stripped")
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsNilRequest: a nil *http.Request
// is rejected with the generic validation error rather than panicking.
func TestHTTPWrapperGuardOutboundRequestURLRejectsNilRequest(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	err := wrapper.guardOutboundRequestURL(nil)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected validation failure for nil request, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsQueryAuth: the redirect-time
// guard rejects a token= query parameter just like initial validation does.
func TestHTTPWrapperGuardOutboundRequestURLRejectsQueryAuth(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "http", Host: "example.com", Path: "/hook", RawQuery: "token=secret"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err == nil || !strings.Contains(err.Error(), "query authentication is not allowed") {
		t.Fatalf("expected query auth rejection, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsMixedCaseQueryAuth: the guard's
// query-auth matching is case-insensitive (apiKey rejected).
func TestHTTPWrapperGuardOutboundRequestURLRejectsMixedCaseQueryAuth(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "http", Host: "example.com", Path: "/hook", RawQuery: "apiKey=secret"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err == nil || !strings.Contains(err.Error(), "query authentication is not allowed") {
		t.Fatalf("expected query auth rejection, got: %v", err)
	}
}
// TestHTTPWrapperApplyRedirectGuardPreservesOriginalBehavior: a pre-existing
// CheckRedirect policy still runs (and short-circuits) after the guard wraps it.
func TestHTTPWrapperApplyRedirectGuardPreservesOriginalBehavior(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	baseErr := fmt.Errorf("base redirect policy")
	client := &http.Client{CheckRedirect: func(*http.Request, []*http.Request) error {
		return baseErr
	}}
	wrapper.applyRedirectGuard(client)
	err := client.CheckRedirect(&http.Request{URL: &neturl.URL{Scheme: "https", Host: "example.com"}}, nil)
	if !errors.Is(err, baseErr) {
		t.Fatalf("expected original redirect policy error, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsUnsafeDestination: with the
// HTTP override disabled, a plain-http destination fails validation.
func TestHTTPWrapperGuardOutboundRequestURLRejectsUnsafeDestination(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "http", Host: "example.com", Path: "/hook"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected destination validation failure, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLAllowsValidatedDestination: a valid
// public HTTPS destination passes the guard.
// NOTE(review): this performs a real DNS lookup of example.com inside
// guardDestination, so it depends on network availability — confirm CI
// tolerates that or stub resolution.
func TestHTTPWrapperGuardOutboundRequestURLAllowsValidatedDestination(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "https", Host: "example.com", Path: "/hook"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err != nil {
		t.Fatalf("expected validated destination to pass guard, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsUserInfo: userinfo on a
// redirect-target URL is rejected even with the HTTP override enabled.
func TestHTTPWrapperGuardOutboundRequestURLRejectsUserInfo(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "http", Host: "127.0.0.1", User: neturl.UserPassword("user", "pass"), Path: "/hook"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected userinfo rejection, got: %v", err)
	}
}
// TestHTTPWrapperGuardOutboundRequestURLRejectsFragment: a fragment on a
// redirect-target URL fails validation.
func TestHTTPWrapperGuardOutboundRequestURLRejectsFragment(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	httpReq := &http.Request{URL: &neturl.URL{Scheme: "https", Host: "example.com", Path: "/hook", Fragment: "frag"}}
	err := wrapper.guardOutboundRequestURL(httpReq)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected fragment rejection, got: %v", err)
	}
}
// TestSanitizeTransportErrorReason: table-driven check that each transport
// error class maps to its sanitized reason, with nil and unknown errors
// falling back to "connection failed".
func TestSanitizeTransportErrorReason(t *testing.T) {
	tests := []struct {
		name string
		err error
		expected string
	}{
		{name: "nil error", err: nil, expected: "connection failed"},
		{name: "dns error", err: errors.New("dial tcp: lookup gotify.example: no such host"), expected: "dns lookup failed"},
		{name: "connection refused", err: errors.New("connect: connection refused"), expected: "connection refused"},
		{name: "network unreachable", err: errors.New("connect: no route to host"), expected: "network unreachable"},
		{name: "timeout", err: errors.New("context deadline exceeded"), expected: "request timed out"},
		{name: "tls failure", err: errors.New("tls: handshake failure"), expected: "tls handshake failed"},
		{name: "fallback", err: errors.New("some unexpected transport error"), expected: "connection failed"},
	}
	for _, testCase := range tests {
		t.Run(testCase.name, func(t *testing.T) {
			actual := sanitizeTransportErrorReason(testCase.err)
			if actual != testCase.expected {
				t.Fatalf("expected %q, got %q", testCase.expected, actual)
			}
		})
	}
}
// TestBuildSafeRequestURLPreservesHostnameForTLS: the hostname stays in the
// outbound URL (for correct SNI / certificate verification) and is also the
// Host header value; scheme and path pass through unchanged.
// NOTE(review): buildSafeRequestURL resolves example.com via DNS, so this
// test requires network access — confirm CI tolerates that.
func TestBuildSafeRequestURLPreservesHostnameForTLS(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	destinationURL := &neturl.URL{
		Scheme: "https",
		Host: "example.com",
		Path: "/webhook",
	}
	safeURL, hostHeader, err := wrapper.buildSafeRequestURL(destinationURL)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if safeURL.Hostname() != "example.com" {
		t.Fatalf("expected hostname 'example.com' preserved in URL for TLS SNI, got %q", safeURL.Hostname())
	}
	if hostHeader != "example.com" {
		t.Fatalf("expected host header 'example.com', got %q", hostHeader)
	}
	if safeURL.Scheme != "https" {
		t.Fatalf("expected scheme 'https', got %q", safeURL.Scheme)
	}
	if safeURL.Path != "/webhook" {
		t.Fatalf("expected path '/webhook', got %q", safeURL.Path)
	}
}
// TestBuildSafeRequestURLDefaultsEmptyPathToSlash checks that a URL with no
// path component is normalized to "/".
func TestBuildSafeRequestURLDefaultsEmptyPathToSlash(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	input := &neturl.URL{Scheme: "http", Host: "localhost"}
	safeURL, _, err := wrapper.buildSafeRequestURL(input)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := safeURL.Path; got != "/" {
		t.Fatalf("expected default path '/', got %q", got)
	}
}
// TestBuildSafeRequestURLPreservesQueryString checks the raw query string
// survives sanitization untouched.
func TestBuildSafeRequestURLPreservesQueryString(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	input := &neturl.URL{Scheme: "https", Host: "example.com", Path: "/hook", RawQuery: "key=value"}
	safeURL, _, err := wrapper.buildSafeRequestURL(input)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := safeURL.RawQuery; got != "key=value" {
		t.Fatalf("expected query 'key=value', got %q", got)
	}
}
// TestBuildSafeRequestURLRejectsNilDestination verifies a nil destination URL
// is refused with the generic validation-failure error message.
func TestBuildSafeRequestURLRejectsNilDestination(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	_, _, err := wrapper.buildSafeRequestURL(nil)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected validation failure for nil URL, got: %v", err)
	}
}

// TestBuildSafeRequestURLRejectsEmptyHostname verifies a URL with an empty
// host is refused with the generic validation-failure error message.
func TestBuildSafeRequestURLRejectsEmptyHostname(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	destinationURL := &neturl.URL{
		Scheme: "https",
		Host:   "",
		Path:   "/hook",
	}
	_, _, err := wrapper.buildSafeRequestURL(destinationURL)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected validation failure for empty hostname, got: %v", err)
	}
}
// TestBuildSafeRequestURLWithTLSServer exercises buildSafeRequestURL against a
// real httptest TLS listener and checks that both the URL host and the
// returned Host header match the server address.
func TestBuildSafeRequestURLWithTLSServer(t *testing.T) {
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()
	serverURL, _ := neturl.Parse(server.URL)
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	safeURL, hostHeader, err := wrapper.buildSafeRequestURL(serverURL)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if safeURL.Host != serverURL.Host {
		t.Fatalf("expected host %q preserved for TLS, got %q", serverURL.Host, safeURL.Host)
	}
	if hostHeader != serverURL.Host {
		t.Fatalf("expected host header %q, got %q", serverURL.Host, hostHeader)
	}
}
// ===== Additional coverage for uncovered paths =====

// errReader is an io.Reader stub whose Read always fails; used to exercise
// body-read error paths without touching real I/O.
type errReader struct{}

// Read always returns a simulated failure and zero bytes.
func (errReader) Read([]byte) (int, error) {
	return 0, errors.New("simulated read error")
}

// roundTripFunc adapts a plain function into an http.RoundTripper so tests can
// stub transport behavior without a real network connection.
type roundTripFunc func(*http.Request) (*http.Response, error)

// RoundTrip delegates to the wrapped function.
func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req)
}
// TestApplyRedirectGuardNilClient verifies applyRedirectGuard tolerates a nil
// client without panicking (no assertion needed — absence of panic passes).
func TestApplyRedirectGuardNilClient(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.applyRedirectGuard(nil)
}

// TestGuardDestinationNilURL verifies a nil URL is rejected.
func TestGuardDestinationNilURL(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	err := wrapper.guardDestination(nil)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected validation failure for nil URL, got: %v", err)
	}
}

// TestGuardDestinationEmptyHostname verifies an empty host is rejected.
func TestGuardDestinationEmptyHostname(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	err := wrapper.guardDestination(&neturl.URL{Scheme: "https", Host: ""})
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected validation failure for empty hostname, got: %v", err)
	}
}

// TestGuardDestinationUserInfoRejection verifies URLs embedding userinfo
// credentials (user@host) are rejected.
func TestGuardDestinationUserInfoRejection(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	u := &neturl.URL{Scheme: "https", Host: "example.com", User: neturl.User("admin")}
	err := wrapper.guardDestination(u)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected userinfo rejection, got: %v", err)
	}
}

// TestGuardDestinationFragmentRejection verifies URLs with a fragment are
// rejected.
func TestGuardDestinationFragmentRejection(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	u := &neturl.URL{Scheme: "https", Host: "example.com", Fragment: "section"}
	err := wrapper.guardDestination(u)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected fragment rejection, got: %v", err)
	}
}

// TestGuardDestinationPrivateIPRejection verifies an RFC1918 address is
// rejected when the HTTP override is off.
func TestGuardDestinationPrivateIPRejection(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	err := wrapper.guardDestination(&neturl.URL{Scheme: "https", Host: "192.168.1.1"})
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected private IP rejection, got: %v", err)
	}
}
// TestIsAllowedDestinationIPEdgeCases verifies that nil, unspecified,
// multicast, link-local, loopback, and all RFC1918 private ranges are rejected
// when the HTTP override is disabled.
func TestIsAllowedDestinationIPEdgeCases(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	cases := []struct {
		name     string
		hostname string
		ip       net.IP
		want     bool
	}{
		{"nil IP", "", nil, false},
		{"unspecified", "0.0.0.0", net.IPv4zero, false},
		{"multicast", "224.0.0.1", net.ParseIP("224.0.0.1"), false},
		{"link-local unicast", "169.254.1.1", net.ParseIP("169.254.1.1"), false},
		{"loopback without allowHTTP", "127.0.0.1", net.ParseIP("127.0.0.1"), false},
		{"private 10.x", "10.0.0.1", net.ParseIP("10.0.0.1"), false},
		{"private 172.16.x", "172.16.0.1", net.ParseIP("172.16.0.1"), false},
		{"private 192.168.x", "192.168.1.1", net.ParseIP("192.168.1.1"), false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := wrapper.isAllowedDestinationIP(tc.hostname, tc.ip)
			if got != tc.want {
				t.Fatalf("isAllowedDestinationIP(%q, %v) = %v, want %v", tc.hostname, tc.ip, got, tc.want)
			}
		})
	}
}
// TestIsAllowedDestinationIPLoopbackAllowHTTP checks that a loopback IP is
// permitted only when allowHTTP is set AND the hostname itself is local.
func TestIsAllowedDestinationIPLoopbackAllowHTTP(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	loopback := net.ParseIP("127.0.0.1")
	if !wrapper.isAllowedDestinationIP("localhost", loopback) {
		t.Fatal("expected loopback allowed for localhost with allowHTTP")
	}
	if wrapper.isAllowedDestinationIP("not-localhost", loopback) {
		t.Fatal("expected loopback rejected for non-localhost hostname")
	}
}
// TestIsLocalDestinationHost covers case-insensitive localhost names, IPv4 and
// IPv6 loopback literals, a public hostname, and the empty string.
func TestIsLocalDestinationHost(t *testing.T) {
	cases := []struct {
		host string
		want bool
	}{
		{"localhost", true},
		{"LOCALHOST", true},
		{"127.0.0.1", true},
		{"::1", true},
		{"example.com", false},
		{"", false},
	}
	for _, tc := range cases {
		t.Run(tc.host, func(t *testing.T) {
			if got := isLocalDestinationHost(tc.host); got != tc.want {
				t.Fatalf("isLocalDestinationHost(%q) = %v, want %v", tc.host, got, tc.want)
			}
		})
	}
}
// TestShouldRetryComprehensive enumerates the retry decision matrix asserted
// here: timeout/connection-flavored transport errors retry; 500/502/503/429
// responses retry; nil, unrelated errors, and 2xx/4xx responses do not.
func TestShouldRetryComprehensive(t *testing.T) {
	tests := []struct {
		name     string
		resp     *http.Response
		err      error
		expected bool
	}{
		{"nil resp nil err", nil, nil, false},
		{"timeout error string", nil, errors.New("operation timeout"), true},
		{"connection error string", nil, errors.New("connection reset"), true},
		{"unrelated error", nil, errors.New("json parse error"), false},
		{"500 response", &http.Response{StatusCode: 500}, nil, true},
		{"502 response", &http.Response{StatusCode: 502}, nil, true},
		{"503 response", &http.Response{StatusCode: 503}, nil, true},
		{"429 response", &http.Response{StatusCode: 429}, nil, true},
		{"200 response", &http.Response{StatusCode: 200}, nil, false},
		{"400 response", &http.Response{StatusCode: 400}, nil, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := shouldRetry(tt.resp, tt.err); got != tt.expected {
				t.Fatalf("shouldRetry = %v, want %v", got, tt.expected)
			}
		})
	}
}
// TestShouldRetryNetError ensures a typed net.Error (here, a DNS failure) is
// treated as retryable via the errors.As fallback path.
func TestShouldRetryNetError(t *testing.T) {
	dnsErr := &net.DNSError{Err: "no such host", Name: "example.invalid"}
	if !shouldRetry(nil, dnsErr) {
		t.Fatal("expected net.Error to trigger retry via errors.As fallback")
	}
}
// TestReadCappedResponseBodyReadError verifies reader failures surface as a
// "read response body" error.
func TestReadCappedResponseBodyReadError(t *testing.T) {
	_, err := readCappedResponseBody(errReader{})
	if err == nil || !strings.Contains(err.Error(), "read response body") {
		t.Fatalf("expected read body error, got: %v", err)
	}
}

// TestReadCappedResponseBodyOversize verifies bodies larger than
// MaxNotifyResponseBodyBytes are rejected with an oversize error.
func TestReadCappedResponseBodyOversize(t *testing.T) {
	oversized := strings.NewReader(strings.Repeat("x", MaxNotifyResponseBodyBytes+10))
	_, err := readCappedResponseBody(oversized)
	if err == nil || !strings.Contains(err.Error(), "response payload exceeds") {
		t.Fatalf("expected oversize error, got: %v", err)
	}
}

// TestReadCappedResponseBodySuccess verifies a small body is returned intact.
func TestReadCappedResponseBodySuccess(t *testing.T) {
	content, err := readCappedResponseBody(strings.NewReader("hello"))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if string(content) != "hello" {
		t.Fatalf("expected 'hello', got %q", string(content))
	}
}
// TestHasDisallowedQueryAuthKeyAllVariants checks that credential-like query
// keys (token/auth/apikey/api_key) are detected case-insensitively while
// unrelated keys pass.
func TestHasDisallowedQueryAuthKeyAllVariants(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		expected bool
	}{
		{"token", "token", true},
		{"auth", "auth", true},
		{"apikey", "apikey", true},
		{"api_key", "api_key", true},
		{"TOKEN uppercase", "TOKEN", true},
		{"Api_Key mixed", "Api_Key", true},
		{"safe key", "callback", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			query := neturl.Values{}
			query.Set(tt.key, "secret")
			if got := hasDisallowedQueryAuthKey(query); got != tt.expected {
				t.Fatalf("hasDisallowedQueryAuthKey with key %q = %v, want %v", tt.key, got, tt.expected)
			}
		})
	}
}

// TestHasDisallowedQueryAuthKeyEmptyQuery verifies an empty query is safe.
func TestHasDisallowedQueryAuthKeyEmptyQuery(t *testing.T) {
	if hasDisallowedQueryAuthKey(neturl.Values{}) {
		t.Fatal("expected empty query to be safe")
	}
}
// TestNotifyMaxRedirects covers parsing of CHARON_NOTIFY_MAX_REDIRECTS:
// empty/invalid/negative values yield 0, values above the cap clamp to 5, and
// surrounding whitespace is tolerated.
func TestNotifyMaxRedirects(t *testing.T) {
	tests := []struct {
		name     string
		envValue string
		expected int
	}{
		{"empty", "", 0},
		{"valid 3", "3", 3},
		{"zero", "0", 0},
		{"negative", "-1", 0},
		{"above max", "10", 5},
		{"exactly 5", "5", 5},
		{"invalid", "abc", 0},
		{"whitespace", " 2 ", 2},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// t.Setenv restores the previous value automatically per subtest.
			t.Setenv("CHARON_NOTIFY_MAX_REDIRECTS", tt.envValue)
			if got := notifyMaxRedirects(); got != tt.expected {
				t.Fatalf("notifyMaxRedirects() = %d, want %d", got, tt.expected)
			}
		})
	}
}
// TestResolveAllowedDestinationIPRejectsPrivateIP ensures RFC1918 literals are
// refused when the HTTP override is off.
func TestResolveAllowedDestinationIPRejectsPrivateIP(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	_, err := wrapper.resolveAllowedDestinationIP("192.168.1.1")
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected private IP rejection, got: %v", err)
	}
}

// TestResolveAllowedDestinationIPRejectsLoopback ensures loopback literals are
// refused when the HTTP override is off.
func TestResolveAllowedDestinationIPRejectsLoopback(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	_, err := wrapper.resolveAllowedDestinationIP("127.0.0.1")
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected loopback rejection, got: %v", err)
	}
}

// TestResolveAllowedDestinationIPAllowsPublic checks a public IP literal
// resolves to itself. NOTE(review): 1.1.1.1 is an IP literal so presumably no
// DNS lookup occurs — confirm resolveAllowedDestinationIP short-circuits
// literals if this test must run offline.
func TestResolveAllowedDestinationIPAllowsPublic(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	ip, err := wrapper.resolveAllowedDestinationIP("1.1.1.1")
	if err != nil {
		t.Fatalf("expected public IP to be allowed, got: %v", err)
	}
	if !ip.Equal(net.ParseIP("1.1.1.1")) {
		t.Fatalf("expected 1.1.1.1, got %v", ip)
	}
}
// TestBuildSafeRequestURLRejectsPrivateHostname ensures RFC1918 destinations
// are refused by buildSafeRequestURL when the HTTP override is off.
func TestBuildSafeRequestURLRejectsPrivateHostname(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = false
	privateURL := &neturl.URL{Scheme: "https", Host: "192.168.1.1", Path: "/hook"}
	_, _, err := wrapper.buildSafeRequestURL(privateURL)
	if err == nil || !strings.Contains(err.Error(), "destination URL validation failed") {
		t.Fatalf("expected private host rejection, got: %v", err)
	}
}
// TestWaitBeforeRetryBasic verifies exponential backoff with jitter zeroed:
// attempt 1 sleeps BaseDelay, attempt 2 sleeps 2×BaseDelay.
func TestWaitBeforeRetryBasic(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	var sleptDuration time.Duration
	// Capture the requested sleep instead of actually blocking the test.
	wrapper.sleep = func(d time.Duration) { sleptDuration = d }
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	wrapper.retryPolicy.BaseDelay = 100 * time.Millisecond
	wrapper.retryPolicy.MaxDelay = 1 * time.Second
	wrapper.waitBeforeRetry(1)
	if sleptDuration != 100*time.Millisecond {
		t.Fatalf("expected 100ms delay for attempt 1, got %v", sleptDuration)
	}
	wrapper.waitBeforeRetry(2)
	if sleptDuration != 200*time.Millisecond {
		t.Fatalf("expected 200ms delay for attempt 2, got %v", sleptDuration)
	}
}
// TestWaitBeforeRetryClampedToMax verifies the exponential backoff never
// exceeds the configured MaxDelay, even at a high attempt count.
func TestWaitBeforeRetryClampedToMax(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	var observed time.Duration
	wrapper.sleep = func(d time.Duration) { observed = d }
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	wrapper.retryPolicy.BaseDelay = 1 * time.Second
	wrapper.retryPolicy.MaxDelay = 2 * time.Second
	wrapper.waitBeforeRetry(5)
	if observed != 2*time.Second {
		t.Fatalf("expected clamped delay of 2s, got %v", observed)
	}
}
// TestWaitBeforeRetryDefaultJitter is a smoke test for the nil-jitterNanos
// path: with jitterNanos nil, waitBeforeRetry presumably falls back to its
// internal jitter source. Only absence of panics is checked, since the jitter
// value is nondeterministic.
func TestWaitBeforeRetryDefaultJitter(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.jitterNanos = nil
	wrapper.sleep = func(time.Duration) {}
	wrapper.retryPolicy.BaseDelay = 100 * time.Millisecond
	wrapper.retryPolicy.MaxDelay = 1 * time.Second
	wrapper.waitBeforeRetry(1)
}
// TestHTTPWrapperSendExhaustsRetriesOnTransportError stubs the transport to
// always fail with a retryable-looking ("timeout") error and asserts Send
// gives up after three attempts with a sanitized "outbound request failed"
// error rather than leaking the raw transport error.
func TestHTTPWrapperSendExhaustsRetriesOnTransportError(t *testing.T) {
	var calls int32
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.sleep = func(time.Duration) {}
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	wrapper.httpClientFactory = func(bool, int) *http.Client {
		return &http.Client{
			Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
				atomic.AddInt32(&calls, 1)
				return nil, errors.New("connection timeout failure")
			}),
		}
	}
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL:  "http://localhost:19999/hook",
		Body: []byte(`{"msg":"test"}`),
	})
	if err == nil {
		t.Fatal("expected error after transport failures")
	}
	if !strings.Contains(err.Error(), "outbound request failed") {
		t.Fatalf("expected outbound request failed message, got: %v", err)
	}
	// Three attempts — presumably the wrapper's default retry budget.
	if got := atomic.LoadInt32(&calls); got != 3 {
		t.Fatalf("expected 3 attempts, got %d", got)
	}
}
// TestHTTPWrapperSendExhaustsRetriesOn500 drives Send against a server that
// always answers 500 and asserts three attempts are made and the final error
// carries the status code.
func TestHTTPWrapperSendExhaustsRetriesOn500(t *testing.T) {
	var calls int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&calls, 1)
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer server.Close()
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	// Neutralize backoff so the test stays fast.
	wrapper.sleep = func(time.Duration) {}
	wrapper.jitterNanos = func(int64) int64 { return 0 }
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL:  server.URL,
		Body: []byte(`{"msg":"test"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "status 500") {
		t.Fatalf("expected 500 status error, got: %v", err)
	}
	if got := atomic.LoadInt32(&calls); got != 3 {
		t.Fatalf("expected 3 attempts for 500 retries, got %d", got)
	}
}
// TestHTTPWrapperSendTransportErrorNoRetry forces MaxAttempts to 1 so a
// transport failure surfaces immediately as the sanitized "outbound request
// failed" error without further attempts.
func TestHTTPWrapperSendTransportErrorNoRetry(t *testing.T) {
	wrapper := NewNotifyHTTPWrapper()
	wrapper.allowHTTP = true
	wrapper.retryPolicy.MaxAttempts = 1
	wrapper.httpClientFactory = func(bool, int) *http.Client {
		return &http.Client{
			Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
				return nil, errors.New("some unretryable error")
			}),
		}
	}
	_, err := wrapper.Send(context.Background(), HTTPWrapperRequest{
		URL:  "http://localhost:19999/hook",
		Body: []byte(`{"msg":"test"}`),
	})
	if err == nil || !strings.Contains(err.Error(), "outbound request failed") {
		t.Fatalf("expected outbound request failed, got: %v", err)
	}
}
// TestSanitizeTransportErrorReasonNetworkUnreachable maps "network is
// unreachable" transport failures to the stable reason string.
func TestSanitizeTransportErrorReasonNetworkUnreachable(t *testing.T) {
	got := sanitizeTransportErrorReason(errors.New("connect: network is unreachable"))
	if got != "network unreachable" {
		t.Fatalf("expected 'network unreachable', got %q", got)
	}
}

// TestSanitizeTransportErrorReasonCertificate maps x509 certificate errors to
// the TLS handshake reason string.
func TestSanitizeTransportErrorReasonCertificate(t *testing.T) {
	got := sanitizeTransportErrorReason(errors.New("x509: certificate signed by unknown authority"))
	if got != "tls handshake failed" {
		t.Fatalf("expected 'tls handshake failed', got %q", got)
	}
}
// TestAllowNotifyHTTPOverride documents that the HTTP override is expected to
// report true when running under the test binary.
func TestAllowNotifyHTTPOverride(t *testing.T) {
	result := allowNotifyHTTPOverride()
	if !result {
		t.Fatal("expected allowHTTP to be true in test binary")
	}
}
+2
View File
@@ -22,6 +22,8 @@ func (r *Router) ShouldUseNotify(providerType, providerEngine string, flags map[
return flags[FlagDiscordServiceEnabled]
case "gotify":
return flags[FlagGotifyServiceEnabled]
case "webhook":
return flags[FlagWebhookServiceEnabled]
default:
return false
}
@@ -90,3 +90,21 @@ func TestRouter_ShouldUseNotify_GotifyServiceFlag(t *testing.T) {
t.Fatalf("expected notify routing disabled for gotify when FlagGotifyServiceEnabled is false")
}
}
// TestRouter_ShouldUseNotify_WebhookServiceFlag checks webhook routing is
// gated on FlagWebhookServiceEnabled (with the notify engine flag enabled).
func TestRouter_ShouldUseNotify_WebhookServiceFlag(t *testing.T) {
	router := NewRouter()
	flags := map[string]bool{
		FlagNotifyEngineEnabled:   true,
		FlagWebhookServiceEnabled: true,
	}
	if !router.ShouldUseNotify("webhook", EngineNotifyV1, flags) {
		t.Fatalf("expected notify routing enabled for webhook when FlagWebhookServiceEnabled is true")
	}
	flags[FlagWebhookServiceEnabled] = false
	if router.ShouldUseNotify("webhook", EngineNotifyV1, flags) {
		t.Fatalf("expected notify routing disabled for webhook when FlagWebhookServiceEnabled is false")
	}
}
+157 -11
View File
@@ -7,6 +7,8 @@ import (
"net"
"net/url"
"os"
"slices"
"strconv"
"strings"
"syscall"
@@ -16,11 +18,17 @@ import (
)
type DockerUnavailableError struct {
err error
err error
details string
}
func NewDockerUnavailableError(err error) *DockerUnavailableError {
return &DockerUnavailableError{err: err}
func NewDockerUnavailableError(err error, details ...string) *DockerUnavailableError {
detailMsg := ""
if len(details) > 0 {
detailMsg = details[0]
}
return &DockerUnavailableError{err: err, details: detailMsg}
}
func (e *DockerUnavailableError) Error() string {
@@ -37,6 +45,13 @@ func (e *DockerUnavailableError) Unwrap() error {
return e.err
}
func (e *DockerUnavailableError) Details() string {
if e == nil {
return ""
}
return e.details
}
type DockerPort struct {
PrivatePort uint16 `json:"private_port"`
PublicPort uint16 `json:"public_port"`
@@ -55,8 +70,9 @@ type DockerContainer struct {
}
type DockerService struct {
client *client.Client
initErr error // Stores initialization error if Docker is unavailable
client *client.Client
initErr error // Stores initialization error if Docker is unavailable
localHost string
}
// NewDockerService creates a new Docker service instance.
@@ -64,21 +80,33 @@ type DockerService struct {
// DockerUnavailableError for all operations. This allows routes to be registered
// and provide helpful error messages to users.
func NewDockerService() *DockerService {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
envHost := strings.TrimSpace(os.Getenv("DOCKER_HOST"))
localHost := resolveLocalDockerHost()
if envHost != "" && !strings.HasPrefix(envHost, "unix://") {
logger.Log().WithFields(map[string]any{"docker_host_env": envHost, "local_host": localHost}).Info("ignoring non-unix DOCKER_HOST for local docker mode")
}
cli, err := client.NewClientWithOpts(client.WithHost(localHost), client.WithAPIVersionNegotiation())
if err != nil {
logger.Log().WithError(err).Warn("Failed to initialize Docker client - Docker features will be unavailable")
unavailableErr := NewDockerUnavailableError(err, buildLocalDockerUnavailableDetails(err, localHost))
return &DockerService{
client: nil,
initErr: err,
client: nil,
initErr: unavailableErr,
localHost: localHost,
}
}
return &DockerService{client: cli, initErr: nil}
return &DockerService{client: cli, initErr: nil, localHost: localHost}
}
func (s *DockerService) ListContainers(ctx context.Context, host string) ([]DockerContainer, error) {
// Check if Docker was available during initialization
if s.initErr != nil {
return nil, &DockerUnavailableError{err: s.initErr}
var unavailableErr *DockerUnavailableError
if errors.As(s.initErr, &unavailableErr) {
return nil, unavailableErr
}
return nil, NewDockerUnavailableError(s.initErr, buildLocalDockerUnavailableDetails(s.initErr, s.localHost))
}
var cli *client.Client
@@ -101,7 +129,10 @@ func (s *DockerService) ListContainers(ctx context.Context, host string) ([]Dock
containers, err := cli.ContainerList(ctx, container.ListOptions{All: false})
if err != nil {
if isDockerConnectivityError(err) {
return nil, &DockerUnavailableError{err: err}
if host == "" || host == "local" {
return nil, NewDockerUnavailableError(err, buildLocalDockerUnavailableDetails(err, s.localHost))
}
return nil, NewDockerUnavailableError(err)
}
return nil, fmt.Errorf("failed to list containers: %w", err)
}
@@ -206,3 +237,118 @@ func isDockerConnectivityError(err error) bool {
return false
}
func resolveLocalDockerHost() string {
envHost := strings.TrimSpace(os.Getenv("DOCKER_HOST"))
if strings.HasPrefix(envHost, "unix://") {
socketPath := socketPathFromDockerHost(envHost)
if socketPath != "" {
if _, err := os.Stat(socketPath); err == nil {
return envHost
}
}
}
defaultSocketPath := "/var/run/docker.sock"
if _, err := os.Stat(defaultSocketPath); err == nil {
return "unix:///var/run/docker.sock"
}
rootlessSocketPath := fmt.Sprintf("/run/user/%d/docker.sock", os.Getuid())
if _, err := os.Stat(rootlessSocketPath); err == nil {
return "unix://" + rootlessSocketPath
}
return "unix:///var/run/docker.sock"
}
// socketPathFromDockerHost extracts the filesystem path from a unix:// Docker
// host value. Surrounding whitespace is ignored; non-unix values (tcp://,
// empty, etc.) yield "".
func socketPathFromDockerHost(host string) string {
	const unixPrefix = "unix://"
	cleaned := strings.TrimSpace(host)
	if !strings.HasPrefix(cleaned, unixPrefix) {
		return ""
	}
	return cleaned[len(unixPrefix):]
}
// buildLocalDockerUnavailableDetails produces a human-readable diagnostic for
// failures to reach the local Docker socket, tailored to the underlying errno
// when one can be extracted from the error chain.
func buildLocalDockerUnavailableDetails(err error, localHost string) string {
	socketPath := socketPathFromDockerHost(localHost)
	if socketPath == "" {
		// Non-unix host value (e.g. tcp://): fall back to the conventional path
		// so the message still names a concrete socket location.
		socketPath = "/var/run/docker.sock"
	}
	uid := os.Getuid()
	gid := os.Getgid()
	groups, _ := os.Getgroups()
	groupsStr := ""
	if len(groups) > 0 {
		groupValues := make([]string, 0, len(groups))
		for _, groupID := range groups {
			groupValues = append(groupValues, strconv.Itoa(groupID))
		}
		groupsStr = strings.Join(groupValues, ",")
	}
	if errno, ok := extractErrno(err); ok {
		switch errno {
		case syscall.ENOENT:
			// Socket file missing entirely — most likely not mounted into the container.
			return fmt.Sprintf("Local Docker socket not found at %s (local host selector uses %s). Mount %s as read-only or read-write.", socketPath, localHost, socketPath)
		case syscall.ECONNREFUSED:
			// Socket exists but nothing is listening on it.
			return fmt.Sprintf("Docker daemon is not accepting connections at %s.", socketPath)
		case syscall.EACCES, syscall.EPERM:
			// Permission problem: report ownership and, when the socket's group
			// is not among our supplemental groups, add a group_add hint.
			infoMsg, socketGID := localSocketStatSummary(socketPath)
			permissionHint := ""
			if socketGID >= 0 && !slices.Contains(groups, socketGID) {
				permissionHint = fmt.Sprintf(" Process groups (%s) do not include socket gid %d; run container with matching supplemental group (e.g., --group-add %d or compose group_add: [\"%d\"]).", groupsStr, socketGID, socketGID, socketGID)
			}
			return fmt.Sprintf("Local Docker socket is mounted but not accessible by current process (uid=%d gid=%d). %s%s", uid, gid, infoMsg, permissionHint)
		}
	}
	if errors.Is(err, os.ErrNotExist) {
		// Some wrappers surface a missing socket as fs.ErrNotExist rather than
		// a raw errno in the chain.
		return fmt.Sprintf("Local Docker socket not found at %s (local host selector uses %s).", socketPath, localHost)
	}
	// Generic fallback when the cause could not be classified.
	return fmt.Sprintf("Cannot connect to local Docker via %s. Ensure Docker is running and the mounted socket permissions allow uid=%d gid=%d access.", localHost, uid, gid)
}
// extractErrno digs a syscall.Errno out of the wrapped error chains produced
// by the Docker client / net stack (url.Error → net.OpError →
// os.SyscallError → Errno). Returns false when no errno is present.
func extractErrno(err error) (syscall.Errno, bool) {
	if err == nil {
		return 0, false
	}
	// Each errors.As below walks the remaining chain, so a strict nesting
	// order is not required; the explicit Unwrap calls just peel layers.
	var urlErr *url.Error
	if errors.As(err, &urlErr) {
		err = urlErr.Unwrap()
	}
	var syscallErr *os.SyscallError
	if errors.As(err, &syscallErr) {
		err = syscallErr.Unwrap()
	}
	var opErr *net.OpError
	if errors.As(err, &opErr) {
		err = opErr.Unwrap()
	}
	var errno syscall.Errno
	if errors.As(err, &errno) {
		return errno, true
	}
	return 0, false
}
// localSocketStatSummary describes the socket file's mode and ownership for
// permission diagnostics. It returns the summary text plus the socket's
// numeric GID, or -1 when ownership could not be determined.
func localSocketStatSummary(socketPath string) (string, int) {
	info, statErr := os.Stat(socketPath)
	if statErr != nil {
		return fmt.Sprintf("Socket path %s could not be stat'ed: %v.", socketPath, statErr), -1
	}
	mode := info.Mode().String()
	sys, ok := info.Sys().(*syscall.Stat_t)
	if !ok || sys == nil {
		return fmt.Sprintf("Socket path %s has mode %s.", socketPath, mode), -1
	}
	summary := fmt.Sprintf("Socket path %s has mode %s owner uid=%d gid=%d.", socketPath, mode, sys.Uid, sys.Gid)
	return summary, int(sys.Gid)
}
@@ -3,13 +3,17 @@ package services
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"os"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDockerService_New(t *testing.T) {
@@ -58,6 +62,10 @@ func TestDockerUnavailableError_ErrorMethods(t *testing.T) {
unwrapped := err.Unwrap()
assert.Equal(t, baseErr, unwrapped)
// Test Details()
errWithDetails := NewDockerUnavailableError(baseErr, "socket permission mismatch")
assert.Equal(t, "socket permission mismatch", errWithDetails.Details())
// Test nil receiver cases
var nilErr *DockerUnavailableError
assert.Equal(t, "docker unavailable", nilErr.Error())
@@ -67,6 +75,7 @@ func TestDockerUnavailableError_ErrorMethods(t *testing.T) {
nilBaseErr := NewDockerUnavailableError(nil)
assert.Equal(t, "docker unavailable", nilBaseErr.Error())
assert.Nil(t, nilBaseErr.Unwrap())
assert.Equal(t, "", nilBaseErr.Details())
}
func TestIsDockerConnectivityError(t *testing.T) {
@@ -165,3 +174,184 @@ func TestIsDockerConnectivityError_NetErrorTimeout(t *testing.T) {
result := isDockerConnectivityError(netErr)
assert.True(t, result, "net.Error with Timeout() should return true")
}
// TestResolveLocalDockerHost_IgnoresRemoteTCPEnv ensures a tcp:// DOCKER_HOST
// is ignored when selecting the local socket.
// NOTE(review): this asserts the default /var/run/docker.sock is chosen, but
// resolveLocalDockerHost can return the rootless /run/user/<uid>/docker.sock
// on hosts where that exists and the default does not — confirm this test is
// stable in rootless environments.
func TestResolveLocalDockerHost_IgnoresRemoteTCPEnv(t *testing.T) {
	t.Setenv("DOCKER_HOST", "tcp://docker-proxy:2375")
	host := resolveLocalDockerHost()
	assert.Equal(t, "unix:///var/run/docker.sock", host)
}

// TestResolveLocalDockerHost_UsesExistingUnixSocketFromEnv verifies that a
// unix:// DOCKER_HOST whose path exists on disk is honored verbatim.
func TestResolveLocalDockerHost_UsesExistingUnixSocketFromEnv(t *testing.T) {
	tmpDir := t.TempDir()
	socketFile := filepath.Join(tmpDir, "docker.sock")
	require.NoError(t, os.WriteFile(socketFile, []byte(""), 0o600))
	t.Setenv("DOCKER_HOST", "unix://"+socketFile)
	host := resolveLocalDockerHost()
	assert.Equal(t, "unix://"+socketFile, host)
}
// TestBuildLocalDockerUnavailableDetails_PermissionDeniedIncludesGroupHint
// checks the EACCES branch mentions accessibility and process identity without
// leaking anything token-like.
func TestBuildLocalDockerUnavailableDetails_PermissionDeniedIncludesGroupHint(t *testing.T) {
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.EACCES}
	details := buildLocalDockerUnavailableDetails(err, "unix:///var/run/docker.sock")
	assert.Contains(t, details, "not accessible")
	assert.Contains(t, details, "uid=")
	assert.Contains(t, details, "gid=")
	assert.NotContains(t, strings.ToLower(details), "token")
	// When docker socket exists with a GID not in process groups, verify both
	// CLI and compose supplemental-group guidance are present.
	if strings.Contains(details, "--group-add") {
		assert.Contains(t, details, "group_add",
			"when supplemental group hint is present, it should include compose group_add syntax")
	}
}

// TestBuildLocalDockerUnavailableDetails_MissingSocket checks the ENOENT
// branch names the missing path and includes mount guidance.
func TestBuildLocalDockerUnavailableDetails_MissingSocket(t *testing.T) {
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.ENOENT}
	host := "unix:///tmp/nonexistent-docker.sock"
	details := buildLocalDockerUnavailableDetails(err, host)
	assert.Contains(t, details, "not found")
	assert.Contains(t, details, "/tmp/nonexistent-docker.sock")
	assert.Contains(t, details, host)
	assert.Contains(t, details, "Mount", "ENOENT path should include mount guidance")
}

// TestBuildLocalDockerUnavailableDetails_PermissionDeniedSocketGIDInGroups
// uses a temp socket owned by the current primary group so no group-add hint
// should appear.
func TestBuildLocalDockerUnavailableDetails_PermissionDeniedSocketGIDInGroups(t *testing.T) {
	// Temp file GID = our primary GID (already in process groups) → no group hint
	tmpDir := t.TempDir()
	socketFile := filepath.Join(tmpDir, "docker.sock")
	require.NoError(t, os.WriteFile(socketFile, []byte(""), 0o660))
	host := "unix://" + socketFile
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.EACCES}
	details := buildLocalDockerUnavailableDetails(err, host)
	assert.Contains(t, details, "not accessible")
	assert.Contains(t, details, "uid=")
	assert.NotContains(t, details, "--group-add",
		"group-add hint should not appear when socket GID is already in process groups")
}

// TestBuildLocalDockerUnavailableDetails_PermissionDeniedStatFails checks that
// an EACCES with an unstat-able path reports the stat failure.
func TestBuildLocalDockerUnavailableDetails_PermissionDeniedStatFails(t *testing.T) {
	// EACCES with a socket path that doesn't exist → stat fails
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.EACCES}
	details := buildLocalDockerUnavailableDetails(err, "unix:///tmp/nonexistent-stat-fail.sock")
	assert.Contains(t, details, "not accessible")
	assert.Contains(t, details, "could not be stat")
}

// TestBuildLocalDockerUnavailableDetails_ConnectionRefused checks the
// ECONNREFUSED branch.
func TestBuildLocalDockerUnavailableDetails_ConnectionRefused(t *testing.T) {
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.ECONNREFUSED}
	details := buildLocalDockerUnavailableDetails(err, "unix:///var/run/docker.sock")
	assert.Contains(t, details, "not accepting connections")
}

// TestBuildLocalDockerUnavailableDetails_GenericError checks the unclassified
// fallback message including process identity.
func TestBuildLocalDockerUnavailableDetails_GenericError(t *testing.T) {
	err := errors.New("some unknown docker error")
	details := buildLocalDockerUnavailableDetails(err, "unix:///var/run/docker.sock")
	assert.Contains(t, details, "Cannot connect")
	assert.Contains(t, details, "uid=")
	assert.Contains(t, details, "gid=")
}
// ===== Additional coverage for uncovered paths =====

// TestDockerUnavailableError_NilDetails verifies Details() on a nil receiver
// returns the empty string instead of panicking.
func TestDockerUnavailableError_NilDetails(t *testing.T) {
	var nilErr *DockerUnavailableError
	assert.Equal(t, "", nilErr.Details())
}

// TestExtractErrno_UrlErrorWrapping: errno wrapped directly in a url.Error.
func TestExtractErrno_UrlErrorWrapping(t *testing.T) {
	urlErr := &url.Error{Op: "dial", URL: "unix:///var/run/docker.sock", Err: syscall.EACCES}
	errno, ok := extractErrno(urlErr)
	assert.True(t, ok)
	assert.Equal(t, syscall.EACCES, errno)
}

// TestExtractErrno_SyscallError: errno wrapped in an os.SyscallError.
func TestExtractErrno_SyscallError(t *testing.T) {
	scErr := &os.SyscallError{Syscall: "connect", Err: syscall.ECONNREFUSED}
	errno, ok := extractErrno(scErr)
	assert.True(t, ok)
	assert.Equal(t, syscall.ECONNREFUSED, errno)
}

// TestExtractErrno_NilError: nil yields no errno.
func TestExtractErrno_NilError(t *testing.T) {
	_, ok := extractErrno(nil)
	assert.False(t, ok)
}

// TestExtractErrno_NonSyscallError: plain errors yield no errno.
func TestExtractErrno_NonSyscallError(t *testing.T) {
	_, ok := extractErrno(errors.New("some generic error"))
	assert.False(t, ok)
}

// TestExtractErrno_OpErrorWrapping: errno wrapped in a net.OpError.
func TestExtractErrno_OpErrorWrapping(t *testing.T) {
	opErr := &net.OpError{Op: "dial", Net: "unix", Err: syscall.EPERM}
	errno, ok := extractErrno(opErr)
	assert.True(t, ok)
	assert.Equal(t, syscall.EPERM, errno)
}

// TestExtractErrno_NestedUrlSyscallOpError: the full real-world nesting
// url.Error → net.OpError → os.SyscallError → errno is unwrapped correctly.
func TestExtractErrno_NestedUrlSyscallOpError(t *testing.T) {
	innerErr := &net.OpError{
		Op:  "dial",
		Net: "unix",
		Err: &os.SyscallError{Syscall: "connect", Err: syscall.EACCES},
	}
	urlErr := &url.Error{Op: "Get", URL: "unix:///var/run/docker.sock", Err: innerErr}
	errno, ok := extractErrno(urlErr)
	assert.True(t, ok)
	assert.Equal(t, syscall.EACCES, errno)
}
func TestSocketPathFromDockerHost(t *testing.T) {
tests := []struct {
name string
host string
expected string
}{
{"unix socket", "unix:///var/run/docker.sock", "/var/run/docker.sock"},
{"tcp host", "tcp://192.168.1.1:2375", ""},
{"empty", "", ""},
{"whitespace unix", " unix:///tmp/docker.sock ", "/tmp/docker.sock"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := socketPathFromDockerHost(tt.host)
assert.Equal(t, tt.expected, result)
})
}
}
// TestBuildLocalDockerUnavailableDetails_OsErrNotExist exercises the
// errors.Is(os.ErrNotExist) fallback when no errno is in the chain.
func TestBuildLocalDockerUnavailableDetails_OsErrNotExist(t *testing.T) {
	err := fmt.Errorf("wrapped: %w", os.ErrNotExist)
	details := buildLocalDockerUnavailableDetails(err, "unix:///var/run/docker.sock")
	assert.Contains(t, details, "not found")
	assert.Contains(t, details, "/var/run/docker.sock")
}

// TestBuildLocalDockerUnavailableDetails_NonUnixHost checks the generic
// message still names the (non-unix) host selector.
func TestBuildLocalDockerUnavailableDetails_NonUnixHost(t *testing.T) {
	err := errors.New("cannot connect")
	details := buildLocalDockerUnavailableDetails(err, "tcp://192.168.1.1:2375")
	assert.Contains(t, details, "Cannot connect")
	assert.Contains(t, details, "tcp://192.168.1.1:2375")
}

// TestBuildLocalDockerUnavailableDetails_EPERMWithStatFail checks EPERM takes
// the same permission branch as EACCES, including the stat-failure summary.
func TestBuildLocalDockerUnavailableDetails_EPERMWithStatFail(t *testing.T) {
	err := &net.OpError{Op: "dial", Net: "unix", Err: syscall.EPERM}
	details := buildLocalDockerUnavailableDetails(err, "unix:///tmp/nonexistent-eperm.sock")
	assert.Contains(t, details, "not accessible")
	assert.Contains(t, details, "could not be stat")
}
@@ -394,8 +394,8 @@ func (s *EnhancedSecurityNotificationService) MigrateFromLegacyConfig() error {
NotifySecurityRateLimitHits: legacyConfig.NotifyRateLimitHits,
URL: legacyConfig.WebhookURL,
}
if err := tx.Create(&provider).Error; err != nil {
return fmt.Errorf("create managed provider: %w", err)
if createErr := tx.Create(&provider).Error; createErr != nil {
return fmt.Errorf("create managed provider: %w", createErr)
}
} else if err != nil {
return fmt.Errorf("query managed provider: %w", err)
@@ -405,8 +405,8 @@ func (s *EnhancedSecurityNotificationService) MigrateFromLegacyConfig() error {
provider.NotifySecurityACLDenies = legacyConfig.NotifyACLDenies
provider.NotifySecurityRateLimitHits = legacyConfig.NotifyRateLimitHits
provider.URL = legacyConfig.WebhookURL
if err := tx.Save(&provider).Error; err != nil {
return fmt.Errorf("update managed provider: %w", err)
if saveErr := tx.Save(&provider).Error; saveErr != nil {
return fmt.Errorf("update managed provider: %w", saveErr)
}
}
@@ -430,7 +430,7 @@ func (s *EnhancedSecurityNotificationService) MigrateFromLegacyConfig() error {
}
// Upsert marker
if err := tx.Where("key = ?", newMarkerSetting.Key).First(&markerSetting).Error; err == gorm.ErrRecordNotFound {
if queryErr := tx.Where("key = ?", newMarkerSetting.Key).First(&markerSetting).Error; queryErr == gorm.ErrRecordNotFound {
return tx.Create(&newMarkerSetting).Error
}
newMarkerSetting.ID = markerSetting.ID
@@ -60,8 +60,8 @@ func TestDiscordOnly_DispatchToProviderAcceptsDiscord(t *testing.T) {
// Verify payload structure
var payload models.SecurityEvent
err := json.NewDecoder(r.Body).Decode(&payload)
assert.NoError(t, err)
decodeErr := json.NewDecoder(r.Body).Decode(&payload)
assert.NoError(t, decodeErr)
assert.Equal(t, "waf_block", payload.EventType)
w.WriteHeader(http.StatusOK)
@@ -1141,7 +1141,7 @@ func newTestTLSConfig(t *testing.T) (*tls.Config, []byte) {
return &tls.Config{Certificates: []tls.Certificate{cert}, MinVersion: tls.VersionTLS12}, caPEM
}
func trustTestCertificate(t *testing.T, certPEM []byte) {
func trustTestCertificate(t *testing.T, _ []byte) {
t.Helper()
// SSL_CERT_FILE is already set globally by TestMain.
// This function kept for API compatibility but no longer needs to set environment.
+115 -64
View File
@@ -16,6 +16,7 @@ import (
"github.com/Wikid82/charon/backend/internal/logger"
"github.com/Wikid82/charon/backend/internal/network"
"github.com/Wikid82/charon/backend/internal/notifications"
"github.com/Wikid82/charon/backend/internal/security"
"github.com/Wikid82/charon/backend/internal/trace"
@@ -25,11 +26,15 @@ import (
)
type NotificationService struct {
DB *gorm.DB
DB *gorm.DB
httpWrapper *notifications.HTTPWrapper
}
func NewNotificationService(db *gorm.DB) *NotificationService {
return &NotificationService{DB: db}
return &NotificationService{
DB: db,
httpWrapper: notifications.NewNotifyHTTPWrapper(),
}
}
var discordWebhookRegex = regexp.MustCompile(`^https://discord(?:app)?\.com/api/webhooks/(\d+)/([a-zA-Z0-9_-]+)`)
@@ -98,15 +103,46 @@ func validateDiscordProviderURL(providerType, rawURL string) error {
// supportsJSONTemplates returns true if the provider type can use JSON templates
func supportsJSONTemplates(providerType string) bool {
switch strings.ToLower(providerType) {
case "webhook", "discord", "slack", "gotify", "generic":
case "webhook", "discord", "gotify", "slack", "generic":
return true
case "telegram":
return false // Telegram uses URL parameters
default:
return false
}
}
// isSupportedNotificationProviderType reports whether the given provider
// type is on this release's allowlist of notification providers
// (Discord, Gotify, and custom webhooks). Matching is case-insensitive
// and ignores surrounding whitespace; anything else is unsupported.
func isSupportedNotificationProviderType(providerType string) bool {
	normalized := strings.ToLower(strings.TrimSpace(providerType))
	return normalized == "discord" || normalized == "gotify" || normalized == "webhook"
}
// isDispatchEnabled reports whether runtime notification dispatch is
// currently allowed for the given provider type. Discord is always
// dispatchable; Gotify and webhook dispatch are gated by feature-flag
// settings, each defaulting to enabled when no flag row exists. Any
// other type is never dispatched.
func (s *NotificationService) isDispatchEnabled(providerType string) bool {
	normalized := strings.ToLower(strings.TrimSpace(providerType))
	if normalized == "discord" {
		return true
	}
	if normalized == "gotify" {
		return s.getFeatureFlagValue(notifications.FlagGotifyServiceEnabled, true)
	}
	if normalized == "webhook" {
		return s.getFeatureFlagValue(notifications.FlagWebhookServiceEnabled, true)
	}
	return false
}
// getFeatureFlagValue reads a boolean feature flag from the settings
// table. If the row is missing or the lookup fails for any reason, the
// provided fallback is returned. Truthy stored values are "1", "true",
// and "yes" (case-insensitive, whitespace-trimmed); everything else is
// treated as false.
func (s *NotificationService) getFeatureFlagValue(key string, fallback bool) bool {
	var setting models.Setting
	if lookupErr := s.DB.Where("key = ?", key).First(&setting).Error; lookupErr != nil {
		return fallback
	}
	switch strings.ToLower(strings.TrimSpace(setting.Value)) {
	case "1", "true", "yes":
		return true
	default:
		return false
	}
}
// Internal Notifications (DB)
func (s *NotificationService) Create(nType models.NotificationType, title, message string) (*models.Notification, error) {
@@ -188,11 +224,10 @@ func (s *NotificationService) SendExternal(ctx context.Context, eventType, title
if !shouldSend {
continue
}
// Non-dispatch policy for deprecated providers
if provider.Type != "discord" {
if !s.isDispatchEnabled(provider.Type) {
logger.Log().WithField("provider", util.SanitizeForLog(provider.Name)).
WithField("type", provider.Type).
Warn("Skipping dispatch to deprecated non-discord provider")
Warn("Skipping dispatch because provider type is disabled for notify dispatch")
continue
}
go func(p models.NotificationProvider) {
@@ -253,31 +288,15 @@ func (s *NotificationService) sendJSONPayload(ctx context.Context, p models.Noti
return fmt.Errorf("template size exceeds maximum limit of %d bytes", maxTemplateSize)
}
// Validate webhook URL using the security package's SSRF-safe validator.
// ValidateExternalURL performs comprehensive validation including:
// - URL format and scheme validation (http/https only)
// - DNS resolution and IP blocking for private/reserved ranges
// - Protection against cloud metadata endpoints (169.254.169.254)
// Using the security package's function helps CodeQL recognize the sanitization.
//
// Additionally, we apply `isValidRedirectURL` as a barrier-guard style predicate.
// CodeQL recognizes this pattern as a sanitizer for untrusted URL values, while
// the real SSRF protection remains `security.ValidateExternalURL`.
if err := validateDiscordProviderURLFunc(p.Type, p.URL); err != nil {
return err
}
providerType := strings.ToLower(strings.TrimSpace(p.Type))
if providerType == "discord" {
if err := validateDiscordProviderURLFunc(p.Type, p.URL); err != nil {
return err
}
webhookURL := p.URL
if !isValidRedirectURL(webhookURL) {
return fmt.Errorf("invalid webhook url")
}
validatedURLStr, err := security.ValidateExternalURL(webhookURL,
security.WithAllowHTTP(), // Allow both http and https for webhooks
security.WithAllowLocalhost(), // Allow localhost for testing
)
if err != nil {
return fmt.Errorf("invalid webhook url: %w", err)
if !isValidRedirectURL(p.URL) {
return fmt.Errorf("invalid webhook url")
}
}
// Parse template and add helper funcs
@@ -348,11 +367,43 @@ func (s *NotificationService) sendJSONPayload(ctx context.Context, p models.Noti
}
}
// Send Request with a safe client (SSRF protection, timeout, no auto-redirect)
// Using network.NewSafeHTTPClient() for defense-in-depth against SSRF attacks.
if providerType == "gotify" || providerType == "webhook" {
headers := map[string]string{
"Content-Type": "application/json",
"User-Agent": "Charon-Notify/1.0",
}
if rid := ctx.Value(trace.RequestIDKey); rid != nil {
if ridStr, ok := rid.(string); ok {
headers["X-Request-ID"] = ridStr
}
}
if providerType == "gotify" {
if strings.TrimSpace(p.Token) != "" {
headers["X-Gotify-Key"] = strings.TrimSpace(p.Token)
}
}
if _, sendErr := s.httpWrapper.Send(ctx, notifications.HTTPWrapperRequest{
URL: p.URL,
Headers: headers,
Body: body.Bytes(),
}); sendErr != nil {
return fmt.Errorf("failed to send webhook: %w", sendErr)
}
return nil
}
validatedURLStr, err := security.ValidateExternalURL(p.URL,
security.WithAllowHTTP(),
security.WithAllowLocalhost(),
)
if err != nil {
return fmt.Errorf("invalid webhook url: %w", err)
}
client := network.NewSafeHTTPClient(
network.WithTimeout(10*time.Second),
network.WithAllowLocalhost(), // Allow localhost for testing
network.WithAllowLocalhost(),
)
req, err := http.NewRequestWithContext(ctx, "POST", validatedURLStr, &body)
@@ -360,20 +411,12 @@ func (s *NotificationService) sendJSONPayload(ctx context.Context, p models.Noti
return fmt.Errorf("failed to create webhook request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
// Propagate request id header if present in context
if rid := ctx.Value(trace.RequestIDKey); rid != nil {
if ridStr, ok := rid.(string); ok {
req.Header.Set("X-Request-ID", ridStr)
}
}
// Safe: URL validated by security.ValidateExternalURL() which validates URL
// format/scheme and blocks private/reserved destinations through DNS+dial-time checks.
// Safe: URL validated by security.ValidateExternalURL() which:
// 1. Validates URL format and scheme (HTTPS required in production)
// 2. Resolves DNS and blocks private/reserved IPs (RFC 1918, loopback, link-local)
// 3. Uses ssrfSafeDialer for connection-time IP revalidation (TOCTOU protection)
// 4. No redirect following allowed
// See: internal/security/url_validator.go
resp, err := webhookDoRequestFunc(client, req)
if err != nil {
return fmt.Errorf("failed to send webhook: %w", err)
@@ -411,17 +454,17 @@ func isValidRedirectURL(rawURL string) bool {
}
func (s *NotificationService) TestProvider(provider models.NotificationProvider) error {
// Discord-only enforcement for this rollout
if provider.Type != "discord" {
return fmt.Errorf("only discord provider type is supported in this release")
providerType := strings.ToLower(strings.TrimSpace(provider.Type))
if !isSupportedNotificationProviderType(providerType) {
return fmt.Errorf("unsupported provider type: %s", providerType)
}
if err := validateDiscordProviderURLFunc(provider.Type, provider.URL); err != nil {
if err := validateDiscordProviderURLFunc(providerType, provider.URL); err != nil {
return err
}
if !supportsJSONTemplates(provider.Type) {
return legacyFallbackInvocationError(provider.Type)
if !supportsJSONTemplates(providerType) {
return legacyFallbackInvocationError(providerType)
}
data := map[string]any{
@@ -523,15 +566,19 @@ func (s *NotificationService) ListProviders() ([]models.NotificationProvider, er
}
func (s *NotificationService) CreateProvider(provider *models.NotificationProvider) error {
// Discord-only enforcement for this rollout
if provider.Type != "discord" {
return fmt.Errorf("only discord provider type is supported in this release")
provider.Type = strings.ToLower(strings.TrimSpace(provider.Type))
if !isSupportedNotificationProviderType(provider.Type) {
return fmt.Errorf("unsupported provider type")
}
if err := validateDiscordProviderURLFunc(provider.Type, provider.URL); err != nil {
return err
}
if provider.Type != "gotify" {
provider.Token = ""
}
// Validate custom template before creating
if strings.ToLower(strings.TrimSpace(provider.Template)) == "custom" && strings.TrimSpace(provider.Config) != "" {
// Provide a minimal preview payload
@@ -550,25 +597,28 @@ func (s *NotificationService) UpdateProvider(provider *models.NotificationProvid
return err
}
// Block type mutation for non-Discord providers
if existing.Type != "discord" && provider.Type != existing.Type {
return fmt.Errorf("cannot change provider type for deprecated non-discord providers")
// Block type mutation for existing providers to avoid cross-provider token/schema confusion
if strings.TrimSpace(provider.Type) != "" && provider.Type != existing.Type {
return fmt.Errorf("cannot change provider type for existing providers")
}
provider.Type = existing.Type
// Block enable mutation for non-Discord providers
if existing.Type != "discord" && provider.Enabled && !existing.Enabled {
return fmt.Errorf("cannot enable deprecated non-discord providers")
}
// Discord-only enforcement for type changes
if provider.Type != "discord" {
return fmt.Errorf("only discord provider type is supported in this release")
if !isSupportedNotificationProviderType(provider.Type) {
return fmt.Errorf("unsupported provider type")
}
if err := validateDiscordProviderURLFunc(provider.Type, provider.URL); err != nil {
return err
}
if provider.Type == "gotify" {
if strings.TrimSpace(provider.Token) == "" {
provider.Token = existing.Token
}
} else {
provider.Token = ""
}
// Validate custom template before saving
if strings.ToLower(strings.TrimSpace(provider.Template)) == "custom" && strings.TrimSpace(provider.Config) != "" {
payload := map[string]any{"Title": "Preview", "Message": "Preview", "Time": time.Now().Format(time.RFC3339), "EventType": "preview"}
@@ -581,6 +631,7 @@ func (s *NotificationService) UpdateProvider(provider *models.NotificationProvid
"name": provider.Name,
"type": provider.Type,
"url": provider.URL,
"token": provider.Token,
"config": provider.Config,
"template": provider.Template,
"enabled": provider.Enabled,
@@ -2,6 +2,8 @@ package services
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"time"
@@ -12,15 +14,15 @@ import (
"gorm.io/gorm"
)
// TestDiscordOnly_CreateProviderRejectsNonDiscord tests service-level Discord-only enforcement for create.
func TestDiscordOnly_CreateProviderRejectsNonDiscord(t *testing.T) {
// TestDiscordOnly_CreateProviderRejectsUnsupported tests service-level provider allowlist for create.
func TestDiscordOnly_CreateProviderRejectsUnsupported(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
service := NewNotificationService(db)
testCases := []string{"webhook", "slack", "gotify", "telegram", "generic"}
testCases := []string{"slack", "telegram", "generic", "email"}
for _, providerType := range testCases {
t.Run(providerType, func(t *testing.T) {
@@ -31,8 +33,8 @@ func TestDiscordOnly_CreateProviderRejectsNonDiscord(t *testing.T) {
}
err := service.CreateProvider(provider)
assert.Error(t, err, "Should reject non-Discord provider")
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.Error(t, err, "Should reject unsupported provider")
assert.Contains(t, err.Error(), "unsupported provider type")
})
}
}
@@ -60,76 +62,81 @@ func TestDiscordOnly_CreateProviderAcceptsDiscord(t *testing.T) {
assert.Equal(t, "discord", created.Type)
}
// TestDiscordOnly_UpdateProviderRejectsNonDiscord tests service-level Discord-only enforcement for update.
func TestDiscordOnly_UpdateProviderRejectsNonDiscord(t *testing.T) {
func TestDiscordOnly_CreateProviderAcceptsWebhook(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
// Create a deprecated webhook provider
deprecatedProvider := models.NotificationProvider{
ID: "test-id",
Name: "Test Webhook",
Type: "webhook",
URL: "https://example.com/webhook",
MigrationState: "deprecated",
}
require.NoError(t, db.Create(&deprecatedProvider).Error)
service := NewNotificationService(db)
// Try to update with webhook type
provider := &models.NotificationProvider{
ID: "test-id",
Name: "Updated",
Name: "Test Webhook",
Type: "webhook",
URL: "https://example.com/webhook",
}
err = service.UpdateProvider(provider)
assert.Error(t, err, "Should reject non-Discord provider update")
assert.Contains(t, err.Error(), "only discord provider type is supported")
err = service.CreateProvider(provider)
assert.NoError(t, err, "Should accept webhook provider")
}
// TestDiscordOnly_UpdateProviderRejectsTypeMutation tests that service blocks type mutation for deprecated providers.
func TestDiscordOnly_CreateProviderAcceptsGotifyWithOrWithoutToken(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
service := NewNotificationService(db)
provider := &models.NotificationProvider{
Name: "Test Gotify",
Type: "gotify",
URL: "https://gotify.example.com/message",
}
err = service.CreateProvider(provider)
assert.NoError(t, err)
provider.ID = ""
provider.Token = "secret"
err = service.CreateProvider(provider)
assert.NoError(t, err)
}
// TestDiscordOnly_UpdateProviderRejectsTypeMutation tests immutable provider type on update.
func TestDiscordOnly_UpdateProviderRejectsTypeMutation(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
// Create a deprecated webhook provider
deprecatedProvider := models.NotificationProvider{
provider := models.NotificationProvider{
ID: "test-id",
Name: "Test Webhook",
Type: "webhook",
URL: "https://example.com/webhook",
MigrationState: "deprecated",
}
require.NoError(t, db.Create(&deprecatedProvider).Error)
require.NoError(t, db.Create(&provider).Error)
service := NewNotificationService(db)
// Try to change type to discord
provider := &models.NotificationProvider{
updatedProvider := &models.NotificationProvider{
ID: "test-id",
Name: "Test Webhook",
Name: "Updated",
Type: "discord",
URL: "https://discord.com/api/webhooks/123/abc",
}
err = service.UpdateProvider(provider)
err = service.UpdateProvider(updatedProvider)
assert.Error(t, err, "Should reject type mutation")
assert.Contains(t, err.Error(), "cannot change provider type")
}
// TestDiscordOnly_UpdateProviderRejectsEnable tests that service blocks enabling deprecated providers.
func TestDiscordOnly_UpdateProviderRejectsEnable(t *testing.T) {
// TestDiscordOnly_UpdateProviderAllowsWebhookUpdates tests supported provider updates.
func TestDiscordOnly_UpdateProviderAllowsWebhookUpdates(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
// Create a deprecated webhook provider (disabled)
deprecatedProvider := models.NotificationProvider{
provider := models.NotificationProvider{
ID: "test-id",
Name: "Test Webhook",
Type: "webhook",
@@ -137,12 +144,11 @@ func TestDiscordOnly_UpdateProviderRejectsEnable(t *testing.T) {
Enabled: false,
MigrationState: "deprecated",
}
require.NoError(t, db.Create(&deprecatedProvider).Error)
require.NoError(t, db.Create(&provider).Error)
service := NewNotificationService(db)
// Try to enable
provider := &models.NotificationProvider{
updatedProvider := &models.NotificationProvider{
ID: "test-id",
Name: "Test Webhook",
Type: "webhook",
@@ -150,28 +156,33 @@ func TestDiscordOnly_UpdateProviderRejectsEnable(t *testing.T) {
Enabled: true,
}
err = service.UpdateProvider(provider)
assert.Error(t, err, "Should reject enabling deprecated provider")
assert.Contains(t, err.Error(), "cannot enable deprecated")
err = service.UpdateProvider(updatedProvider)
assert.NoError(t, err)
}
// TestDiscordOnly_TestProviderRejectsNonDiscord tests that TestProvider enforces Discord-only.
func TestDiscordOnly_TestProviderRejectsNonDiscord(t *testing.T) {
// TestDiscordOnly_TestProviderAllowsWebhookWithoutFeatureFlag tests that webhook TestProvider
// works without explicit feature flag (bypasses dispatch gate).
func TestDiscordOnly_TestProviderAllowsWebhookWithoutFeatureFlag(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}))
require.NoError(t, db.AutoMigrate(&models.NotificationProvider{}, &models.Setting{}))
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
service := NewNotificationService(db)
provider := models.NotificationProvider{
Name: "Test Webhook",
Type: "webhook",
URL: "https://example.com/webhook",
Name: "Test Webhook",
Type: "webhook",
URL: ts.URL + "/webhook",
Template: "minimal",
}
err = service.TestProvider(provider)
assert.Error(t, err, "Should reject non-Discord provider test")
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.NoError(t, err)
}
// TestDiscordOnly_MigrationDeprecatesNonDiscord tests that migration marks non-Discord as deprecated.
@@ -231,6 +231,7 @@ func TestSendJSONPayload_Gotify(t *testing.T) {
provider := models.NotificationProvider{
Type: "gotify",
URL: server.URL,
Token: "test-token",
Template: "custom",
Config: `{"message": {{toJSON .Message}}, "title": {{toJSON .Title}}}`,
}
@@ -262,7 +263,7 @@ func TestSendJSONPayload_TemplateTimeout(t *testing.T) {
Type: "discord",
URL: "http://10.0.0.1:9999",
Template: "custom",
Config: `{"data": {{toJSON .}}}`,
Config: `{"content": {{toJSON .Message}}, "data": {{toJSON .}}}`,
}
// Create data that will be processed
@@ -528,17 +528,7 @@ func TestNotificationService_TestProvider_Errors(t *testing.T) {
}
err := svc.TestProvider(provider)
assert.Error(t, err)
assert.Contains(t, err.Error(), "only discord provider type is supported")
})
t.Run("webhook type not supported", func(t *testing.T) {
provider := models.NotificationProvider{
Type: "webhook",
URL: "https://example.com/webhook",
}
err := svc.TestProvider(provider)
assert.Error(t, err)
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.Contains(t, err.Error(), "unsupported provider type")
})
t.Run("discord with invalid URL format", func(t *testing.T) {
@@ -557,7 +547,7 @@ func TestNotificationService_TestProvider_Errors(t *testing.T) {
}
err := svc.TestProvider(provider)
assert.Error(t, err)
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.Contains(t, err.Error(), "unsupported provider type")
})
t.Run("webhook success", func(t *testing.T) {
@@ -663,7 +653,7 @@ func TestSSRF_WebhookIntegration(t *testing.T) {
data := map[string]any{"Title": "Test", "Message": "Test Message"}
err := svc.sendJSONPayload(context.Background(), provider, data)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid webhook url")
assert.Contains(t, err.Error(), "destination URL validation failed")
})
t.Run("blocks cloud metadata endpoint", func(t *testing.T) {
@@ -674,7 +664,7 @@ func TestSSRF_WebhookIntegration(t *testing.T) {
data := map[string]any{"Title": "Test", "Message": "Test Message"}
err := svc.sendJSONPayload(context.Background(), provider, data)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid webhook url")
assert.Contains(t, err.Error(), "destination URL validation failed")
})
t.Run("allows localhost for testing", func(t *testing.T) {
@@ -1795,13 +1785,13 @@ func TestLegacyFallbackInvocationError(t *testing.T) {
db := setupNotificationTestDB(t)
svc := NewNotificationService(db)
// Test non-discord providers are rejected with discord-only error
// Test non-supported providers are rejected
err := svc.TestProvider(models.NotificationProvider{
Type: "telegram",
URL: "telegram://token@telegram?chats=1",
})
require.Error(t, err)
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.Contains(t, err.Error(), "unsupported provider type")
}
func TestLegacyFallbackInvocationError_DirectHelperAndHook(t *testing.T) {
@@ -1962,16 +1952,14 @@ func TestTestProvider_NotifyOnlyRejectsUnsupportedProvider(t *testing.T) {
db := setupNotificationTestDB(t)
svc := NewNotificationService(db)
// Test non-discord providers are rejected
// Test truly unsupported providers are rejected
tests := []struct {
name string
providerType string
url string
}{
{"telegram", "telegram", "telegram://token@telegram?chats=123"},
{"webhook", "webhook", "https://example.com/webhook"},
{"slack", "slack", "https://hooks.slack.com/services/T/B/X"},
{"gotify", "gotify", "https://gotify.example.com/message"},
{"pushover", "pushover", "pushover://token@user"},
}
@@ -1985,7 +1973,7 @@ func TestTestProvider_NotifyOnlyRejectsUnsupportedProvider(t *testing.T) {
err := svc.TestProvider(provider)
require.Error(t, err)
assert.Contains(t, err.Error(), "only discord provider type is supported")
assert.Contains(t, err.Error(), "unsupported provider type")
})
}
}
@@ -2444,3 +2432,185 @@ func TestNotificationService_EnsureNotifyOnlyProviderMigration_FailsClosed(t *te
// - No log-and-continue pattern present
// - Boot will treat migration incompleteness as failure
}
// TestIsDispatchEnabled_GotifyDefaultTrue verifies that Gotify dispatch
// defaults to enabled when no feature-flag setting row exists.
func TestIsDispatchEnabled_GotifyDefaultTrue(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast on migration errors instead of ignoring them; a missing
	// settings table would exercise the error-fallback path rather than
	// the missing-row path this test is meant to cover.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)
	// No feature flag row exists — should default to true
	assert.True(t, svc.isDispatchEnabled("gotify"))
}
// TestIsDispatchEnabled_WebhookDefaultTrue verifies that custom-webhook
// dispatch defaults to enabled when no feature-flag setting row exists.
func TestIsDispatchEnabled_WebhookDefaultTrue(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast on migration errors instead of ignoring them; a missing
	// settings table would exercise the error-fallback path rather than
	// the missing-row path this test is meant to cover.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)
	// No feature flag row exists — should default to true
	assert.True(t, svc.isDispatchEnabled("webhook"))
}
// TestTestProvider_GotifyWorksWithoutFeatureFlag verifies that sending a
// test notification to a Gotify provider succeeds when no feature-flag
// row exists (TestProvider bypasses the dispatch gate).
func TestTestProvider_GotifyWorksWithoutFeatureFlag(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast if the settings table cannot be created rather than
	// silently ignoring the migration error.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)

	// Stub Gotify endpoint that accepts any payload.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	provider := models.NotificationProvider{
		Type:     "gotify",
		URL:      ts.URL + "/message",
		Template: "minimal",
	}
	err := svc.TestProvider(provider)
	assert.NoError(t, err)
}
// TestTestProvider_WebhookWorksWithoutFeatureFlag verifies that sending a
// test notification to a custom-webhook provider succeeds when no
// feature-flag row exists (TestProvider bypasses the dispatch gate).
func TestTestProvider_WebhookWorksWithoutFeatureFlag(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast if the settings table cannot be created rather than
	// silently ignoring the migration error.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)

	// Stub webhook endpoint that accepts any payload.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	provider := models.NotificationProvider{
		Type:     "webhook",
		URL:      ts.URL + "/webhook",
		Template: "minimal",
	}
	err := svc.TestProvider(provider)
	assert.NoError(t, err)
}
// TestTestProvider_GotifyWorksWhenFlagExplicitlyFalse verifies that
// TestProvider still delivers to a Gotify provider even when the Gotify
// dispatch feature flag is explicitly disabled, because TestProvider
// bypasses the runtime dispatch gate.
func TestTestProvider_GotifyWorksWhenFlagExplicitlyFalse(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast on migration errors instead of ignoring them.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)

	// Explicitly set feature flag to false. If this insert silently
	// failed, the test would no longer exercise the flag=false premise,
	// so check the error.
	require.NoError(t, db.Create(&models.Setting{Key: "feature.notifications.service.gotify.enabled", Value: "false"}).Error)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	provider := models.NotificationProvider{
		Type:     "gotify",
		URL:      ts.URL + "/message",
		Template: "minimal",
	}
	// TestProvider bypasses the dispatch gate, so even with flag=false it should work
	err := svc.TestProvider(provider)
	assert.NoError(t, err)
}
// TestTestProvider_WebhookWorksWhenFlagExplicitlyFalse verifies that
// TestProvider still delivers to a custom-webhook provider even when the
// webhook dispatch feature flag is explicitly disabled, because
// TestProvider bypasses the runtime dispatch gate.
func TestTestProvider_WebhookWorksWhenFlagExplicitlyFalse(t *testing.T) {
	db := setupNotificationTestDB(t)
	// Fail fast on migration errors instead of ignoring them.
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)

	// Explicitly set feature flag to false. If this insert silently
	// failed, the test would no longer exercise the flag=false premise,
	// so check the error.
	require.NoError(t, db.Create(&models.Setting{Key: "feature.notifications.service.webhook.enabled", Value: "false"}).Error)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	provider := models.NotificationProvider{
		Type:     "webhook",
		URL:      ts.URL + "/webhook",
		Template: "minimal",
	}
	// TestProvider bypasses the dispatch gate, so even with flag=false it should work
	err := svc.TestProvider(provider)
	assert.NoError(t, err)
}
// TestUpdateProvider_TypeMutationBlocked verifies that UpdateProvider
// refuses to change an existing provider's type (webhook → discord
// here), returning a "cannot change provider type" error.
func TestUpdateProvider_TypeMutationBlocked(t *testing.T) {
	db := setupNotificationTestDB(t)
	svc := NewNotificationService(db)

	// Seed a webhook provider that the update will try to retype.
	seeded := models.NotificationProvider{
		ID:   "prov-type-mut",
		Type: "webhook",
		Name: "Original",
		URL:  "https://example.com/hook",
	}
	require.NoError(t, db.Create(&seeded).Error)

	// Attempt to flip the type to discord under the same ID.
	mutation := models.NotificationProvider{
		ID:   "prov-type-mut",
		Type: "discord",
		Name: "Changed",
		URL:  "https://discord.com/api/webhooks/123/abc",
	}
	updateErr := svc.UpdateProvider(&mutation)
	require.Error(t, updateErr)
	assert.Contains(t, updateErr.Error(), "cannot change provider type")
}
// TestUpdateProvider_GotifyKeepsExistingToken verifies that updating a
// Gotify provider with an empty token preserves the previously stored
// token instead of clearing it.
func TestUpdateProvider_GotifyKeepsExistingToken(t *testing.T) {
	db := setupNotificationTestDB(t)
	svc := NewNotificationService(db)

	// Seed a Gotify provider that already carries a secret token.
	seeded := models.NotificationProvider{
		ID:    "prov-gotify-token",
		Type:  "gotify",
		Name:  "My Gotify",
		URL:   "https://gotify.example.com",
		Token: "original-secret-token",
	}
	require.NoError(t, db.Create(&seeded).Error)

	// Update with a blank token — the service should backfill the
	// existing secret rather than wipe it.
	mutation := models.NotificationProvider{
		ID:    "prov-gotify-token",
		Type:  "gotify",
		Name:  "My Gotify Updated",
		URL:   "https://gotify.example.com",
		Token: "",
	}
	updateErr := svc.UpdateProvider(&mutation)
	require.NoError(t, updateErr)
	assert.Equal(t, "original-secret-token", mutation.Token)
}
// TestGetFeatureFlagValue_FoundSetting table-tests truthy parsing of a
// stored feature-flag value: "1"/"true"/"yes" (case-insensitive,
// whitespace-trimmed) are true; everything else is false.
func TestGetFeatureFlagValue_FoundSetting(t *testing.T) {
	db := setupNotificationTestDB(t)
	require.NoError(t, db.AutoMigrate(&models.Setting{}))
	svc := NewNotificationService(db)

	tests := []struct {
		name     string
		value    string
		expected bool
	}{
		{"true_string", "true", true},
		{"yes_string", "yes", true},
		{"one_string", "1", true},
		{"false_string", "false", false},
		{"no_string", "no", false},
		{"zero_string", "0", false},
		{"whitespace_true", " True ", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Reset the flag row for each case; check the DB errors so a
			// failed insert can't silently make every subtest read the
			// fallback value instead of tt.value.
			require.NoError(t, db.Where("key = ?", "test.flag").Delete(&models.Setting{}).Error)
			require.NoError(t, db.Create(&models.Setting{Key: "test.flag", Value: tt.value}).Error)

			result := svc.getFeatureFlagValue("test.flag", false)
			assert.Equal(t, tt.expected, result, "value=%q", tt.value)
		})
	}
}
-4
View File
@@ -74,10 +74,6 @@ ignore:
- "backend/*.html"
- "backend/codeql-db/**"
# Docker-only code (not testable in CI)
- "backend/internal/services/docker_service.go"
- "backend/internal/api/handlers/docker_handler.go"
# CodeQL artifacts
- "codeql-db/**"
- "codeql-db-*/**"
+1 -1
View File
@@ -237,7 +237,7 @@ Watch requests flow through your proxy in real-time. Filter by domain, status co
### 🔔 Notifications
Get alerted when it matters. Charon currently sends notifications through Discord webhooks using the Notify engine only. No legacy fallback path is used at runtime. Additional providers will roll out later in staged updates.
Get alerted when it matters. Charon notifications now run through the Notify HTTP wrapper with support for Discord, Gotify, and Custom Webhook providers. Payload-focused test coverage is included to help catch formatting and delivery regressions before release.
→ [Learn More](features/notifications.md)
+14 -8
View File
@@ -11,11 +11,13 @@ Notifications can be triggered by various events:
- **Security Events**: WAF blocks, CrowdSec alerts, ACL violations
- **System Events**: Configuration changes, backup completions
## Supported Service (Current Rollout)
## Supported Services
| Service | JSON Templates | Native API | Rich Formatting |
|---------|----------------|------------|-----------------|
| **Discord** | ✅ Yes | ✅ Webhooks | ✅ Embeds |
| **Gotify** | ✅ Yes | ✅ HTTP API | ✅ Priority + Extras |
| **Custom Webhook** | ✅ Yes | ✅ HTTP API | ✅ Template-Controlled |
Additional providers are planned for later staged releases.
@@ -41,7 +43,7 @@ JSON templates give you complete control over notification formatting, allowing
### JSON Template Support
For the currently supported service (Discord), you can choose from three template options:
For current services (Discord, Gotify, and Custom Webhook), you can choose from three template options.
#### 1. Minimal Template (Default)
@@ -157,9 +159,9 @@ Discord supports rich embeds with colors, fields, and timestamps.
## Planned Provider Expansion
Additional providers (for example Slack, Gotify, Telegram, and generic webhooks)
are planned for later staged releases. This page will be expanded as each
provider is validated and released.
Additional providers (for example Slack and Telegram) are planned for later
staged releases. This page will be expanded as each provider is validated and
released.
## Template Variables
@@ -228,9 +230,13 @@ Template: detailed (or custom)
4. Test the notification
5. Save changes
If you previously used non-Discord provider types, keep those entries as
historical records only. They are not active runtime dispatch paths in the
current rollout.
Gotify and Custom Webhook providers are active runtime paths in the current
rollout and can be used in production.
## Validation Coverage
The current rollout includes payload-focused notification tests to catch
formatting and delivery regressions across provider types before release.
### Testing Your Template
+38
View File
@@ -89,6 +89,44 @@ docker run -d \
**Open <http://localhost:8080>** in your browser!
### Docker Socket Access (Important)
Charon runs as a non-root user inside the container. To discover your other Docker containers, it needs permission to read the Docker socket. Without this, you'll see a "Docker Connection Failed" message in the UI.
**Step 1:** Find your Docker socket's group ID:
```bash
stat -c '%g' /var/run/docker.sock
```
This prints a number (for example, `998` or `999`).
**Step 2:** Add that number to your compose file under `group_add`:
```yaml
services:
charon:
image: wikid82/charon:latest
group_add:
- "998" # <-- replace with your number from Step 1
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# ... rest of your config
```
**Using `docker run` instead?** Add `--group-add <gid>` to your command:
```bash
docker run -d \
--name charon \
--group-add 998 \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
# ... rest of your flags
wikid82/charon:latest
```
**Why is this needed?** The Docker socket is owned by a specific group on your host machine. Adding that group lets Charon read the socket without running as root—keeping your setup secure.
---
## Step 1.5: Database Migrations (If Upgrading)
@@ -159,7 +159,8 @@ A new scheduled workflow and helper script were added to safely prune old contai
- **Files added**:
- `.github/workflows/container-prune.yml` (weekly schedule, manual dispatch)
- `scripts/prune-container-images.sh` (dry-run by default; supports GHCR and Docker Hub)
- `scripts/prune-ghcr.sh` (GHCR cleanup)
- `scripts/prune-dockerhub.sh` (Docker Hub cleanup)
- **Behavior**:
- Default: **dry-run=true** (no destructive changes).
@@ -0,0 +1,69 @@
---
title: Manual Test Tracking Plan - Notify Wrapper (Gotify + Custom Webhook)
status: Open
priority: High
assignee: QA
labels: testing, notifications, backend, frontend, security
---
# Test Goal
Track manual verification for bugs and regressions after the Notify migration that added HTTP wrapper delivery for Gotify and Custom Webhook providers.
# Scope
- Provider creation and editing for Gotify and Custom Webhook
- Send Test and Preview behavior
- Payload rendering and delivery behavior
- Secret handling and error-message safety
- Existing Discord behavior regression checks
# Preconditions
- Charon is running and reachable in a browser.
- Tester can open Settings → Notifications.
- Tester has reachable endpoints for:
- One Gotify instance
- One custom webhook receiver
## 1) Smoke Path - Provider CRUD
- [ ] Create a Gotify provider with valid URL and token, save successfully.
- [ ] Create a Custom Webhook provider with valid URL, save successfully.
- [ ] Refresh and confirm both providers persist with expected non-secret fields.
- [ ] Edit each provider, save changes, refresh, and confirm updates persist.
## 2) Smoke Path - Test and Preview
- [ ] Run Send Test for Gotify provider and confirm successful delivery.
- [ ] Run Send Test for Custom Webhook provider and confirm successful delivery.
- [ ] Run Preview for both providers and confirm payload is rendered as expected.
- [ ] Confirm Discord provider test/preview still works.
## 3) Payload Regression Checks
- [ ] Validate minimal payload template sends correctly.
- [ ] Validate detailed payload template sends correctly.
- [ ] Validate custom payload template sends correctly.
- [ ] Verify special characters and multi-line content render correctly.
- [ ] Verify payload output remains stable after provider edit + save.
## 4) Secret and Error Safety Checks
- [ ] Confirm Gotify token is never shown in list/readback UI.
- [ ] Confirm Gotify token is not exposed in test/preview responses shown in UI.
- [ ] Trigger a failed test (invalid endpoint) and confirm error text is clear but does not expose secrets.
- [ ] Confirm failed requests do not leak sensitive values in user-visible error content.
## 5) Failure-Mode and Recovery Checks
- [ ] Test with unreachable endpoint and confirm failure is reported clearly.
- [ ] Test with malformed URL and confirm validation blocks save.
- [ ] Test with slow endpoint and confirm UI remains responsive and recoverable.
- [ ] Fix endpoint values and confirm retry succeeds without recreating provider.
## 6) Cross-Provider Regression Checks
- [ ] Confirm Gotify changes do not alter Custom Webhook settings.
- [ ] Confirm Custom Webhook changes do not alter Discord settings.
- [ ] Confirm deleting one provider does not corrupt remaining providers.
## Pass/Fail Criteria
- [ ] PASS when all smoke checks pass, payload output is correct, secrets stay hidden, and no cross-provider regressions are found.
- [ ] FAIL when delivery breaks, payload rendering regresses, secrets are exposed, or provider changes affect unrelated providers.
## Defect Tracking Notes
- [ ] For each defect, record provider type, action, expected result, actual result, and severity.
- [ ] Attach screenshot/video where useful.
- [ ] Mark whether defect is release-blocking.
@@ -0,0 +1,142 @@
---
title: Manual Test Plan - Security Scan PR Event Gating and Artifact Resolution
status: Open
priority: High
assignee: DevOps
labels: testing, workflows, security, ci/cd
---
## Goal
Validate that `Security Scan (PR)` in `.github/workflows/security-pr.yml` behaves deterministically for trigger gating, PR artifact resolution, and trust-boundary checks.
## Scope
- Event gating for `workflow_run`, `workflow_dispatch`, `pull_request`, and `push`
- PR artifact lookup and image loading path
- Failure behavior for missing/corrupt artifacts
- Permission and trust-boundary protection paths
## Preconditions
- You can run workflows in this repository.
- You can view workflow logs in GitHub Actions.
- At least one recent PR exists with a successful `Docker Build, Publish & Test` run and a published `pr-image-<PR_NUMBER>` artifact.
- Use a test branch or draft PR for negative testing.
## Evidence to Capture
- Run URL for each scenario
- Job status (`success`, `failure`, `skipped`)
- Exact failure line when expected
- `reason_category` value when present
## Manual Test Checklist
### 1. `workflow_run` from upstream `pull_request` (happy path)
- [ ] Trigger a PR build by pushing a commit to an open PR.
- [ ] Wait for `Docker Build, Publish & Test` to complete successfully.
- [ ] Confirm `Security Scan (PR)` starts from `workflow_run`.
- [ ] Confirm job `Trivy Binary Scan` runs.
- [ ] Confirm logs show trust-boundary validation success.
- [ ] Confirm artifact `pr-image-<PR_NUMBER>` is found and downloaded.
- [ ] Confirm `Load Docker image` resolves to `charon:artifact`.
- [ ] Confirm binary extraction and Trivy scan steps execute.
Expected outcome:
- Workflow succeeds or fails only on real security findings, not on event/artifact resolution.
Failure signals:
- `reason_category=unsupported_upstream_event` on a PR-triggered upstream run.
- Artifact lookup fails for a known valid PR artifact.
- `Load Docker image` cannot resolve image ref despite valid artifact.
### 2. `workflow_run` from upstream `push` (should not run)
- [ ] Push directly to a branch that triggers `Docker Build, Publish & Test` as `push` (for example, `main` in a controlled test window).
- [ ] Open `Security Scan (PR)` run created by `workflow_run`.
- [ ] Verify `Trivy Binary Scan` is skipped by job-level gating.
- [ ] Verify no artifact lookup/download steps were executed.
Expected outcome:
- `Security Scan (PR)` job does not run for upstream `push`.
Failure signals:
- `Trivy Binary Scan` executes for upstream `push`.
- Any artifact resolution step runs under upstream `push`.
### 3. `workflow_dispatch` with valid `pr_number`
- [ ] Open `Security Scan (PR)` and click `Run workflow`.
- [ ] Provide a numeric `pr_number` that has a successful docker-build artifact.
- [ ] Start run and inspect logs.
- [ ] Confirm PR number validation passes.
- [ ] Confirm run lookup resolves a successful `docker-build.yml` run for that PR.
- [ ] Confirm artifact download, image load, extraction, and Trivy steps run.
Expected outcome:
- Workflow executes artifact-only replay path and proceeds to scan.
Failure signals:
- Dispatch falls back to local image build.
- `reason_category=not_found` for a PR known to have valid artifact.
### 4. `workflow_dispatch` without `pr_number` (input validation)
- [ ] Open `Run workflow` for `Security Scan (PR)`.
- [ ] Attempt run with empty `pr_number` (or non-numeric value if UI blocks empty).
- [ ] Inspect early step logs.
Expected outcome:
- Job fails fast before artifact lookup/load.
- Clear validation message indicates missing/invalid `pr_number`.
Failure signals:
- Workflow continues to artifact lookup with invalid input.
- Error message is ambiguous or missing reason category.
### 5. Artifact missing case
- [ ] Run `workflow_dispatch` with a numeric PR that does not have a successful docker-build artifact.
- [ ] Inspect `Check for PR image artifact` logs.
Expected outcome:
- Hard fail with a clear error.
- Log includes `reason_category=not_found`, run context, and artifact name.
Failure signals:
- Step silently skips or succeeds without artifact.
- Workflow proceeds to download/load steps.
### 6. Artifact corrupt/unreadable case
- [ ] Use a controlled test branch to simulate bad artifact content for `charon-pr-image.tar` (for example, tar missing `manifest.json` and no usable load image ID, or unreadable tar).
- [ ] Trigger path through `workflow_run` or `workflow_dispatch`.
- [ ] Inspect `Load Docker image` logs.
Expected outcome:
- Job fails in `Load Docker image` before extraction when image cannot be resolved.
- Error states artifact is missing/unreadable, or valid image reference cannot be resolved.
Failure signals:
- Job continues to extraction with empty/invalid image ref.
- `docker create` fails later due to unresolved image (late failure indicates missed validation).
### 7. Trust-boundary and permission guard failures
- [ ] Verify `permissions` in run metadata are minimal: `contents: read`, `actions: read`, `security-events: write`.
- [ ] For `workflow_run`, inspect guard step output.
- [ ] Confirm guard fails when any of the following are invalid:
- Upstream workflow name mismatch
- Upstream event not `pull_request`
- Upstream head repository not equal to current repository
Expected outcome:
- Guard fails early with explicit `reason_category`.
- No artifact lookup/load/extract occurs after guard failure.
Failure signals:
- Guard passes with mismatched trust-boundary values.
- Workflow attempts artifact operations after trust-boundary failure.
- Unexpected write permissions are present.
## Regression Watchlist
- Event-gating changes accidentally allow `workflow_run` from `push` to execute scan.
- Manual dispatch path silently accepts non-numeric or empty PR input.
- Artifact resolver relies on a single tag and breaks on alternate load output formats.
- Trust-boundary checks are bypassed due to conditional logic drift.
## Exit Criteria
- All scenarios pass with expected behavior.
- Any failure signal is logged as a bug with run URL and exact failing step.
- No ambiguous skip behavior remains for required hard-fail paths.
@@ -0,0 +1,586 @@
---
post_title: "Current Spec: Local Docker Socket Group Access Remediation"
categories:
- planning
- docker
- security
- backend
- frontend
tags:
- docker.sock
- least-privilege
- group-add
- compose
- validation
summary: "Comprehensive plan to resolve local docker socket access failures for non-root process uid=1000 gid=1000 when host socket gid is not in supplemental groups, with phased rollout, PR slicing, and least-privilege validation."
post_date: 2026-02-25
---
## 1) Introduction
### Overview
Charon local Docker discovery currently fails in environments where:
- Socket mount exists: `/var/run/docker.sock:/var/run/docker.sock:ro`
- Charon process runs non-root (typically `uid=1000 gid=1000`)
- Host socket group (example: `gid=988`) is not present in process supplemental groups
Observed user-facing failure class (already emitted by backend details builder):
- `Local Docker socket mounted but not accessible by current process (uid=1000 gid=1000)... Process groups do not include socket gid 988; run container with matching supplemental group (e.g., --group-add 988).`
### Goals
1. Preserve non-root default execution (`USER charon`) while enabling local Docker discovery safely.
2. Standardize supplemental-group strategy across compose variants and launcher scripts.
3. Keep behavior deterministic in backend/API/frontend error surfacing when permissions are wrong.
4. Validate least-privilege posture (non-root, minimal group grant, no broad privilege escalation).
### Non-Goals
- No redesign of remote Docker support (`tcp://...`) beyond compatibility checks.
- No changes to unrelated security modules (WAF, ACL, CrowdSec workflows).
- No broad Docker daemon hardening beyond this socket-access path.
### Scope Labels (Authoritative)
- `repo-deliverable`: changes that must be included in repository PR slices under `/projects/Charon`.
- `operator-local follow-up`: optional local environment changes outside repository scope (for example `/root/docker/...`), not required for repo PR acceptance.
---
## 2) Research Findings
### 2.1 Critical Runtime Files (Confirmed)
- `backend/internal/services/docker_service.go`
- Key functions:
- `NewDockerService()`
- `(*DockerService).ListContainers(...)`
- `resolveLocalDockerHost()`
- `buildLocalDockerUnavailableDetails(...)`
- `isDockerConnectivityError(...)`
- `extractErrno(...)`
- `localSocketStatSummary(...)`
- Contains explicit supplemental-group hint text with `--group-add <gid>` when `EACCES/EPERM` occurs.
- `backend/internal/api/handlers/docker_handler.go`
- Key function: `(*DockerHandler).ListContainers(...)`
- Maps `DockerUnavailableError` to HTTP `503` with `details` string consumed by UI.
- `frontend/src/hooks/useDocker.ts`
- Hook: `useDocker(host?, serverId?)`
- Converts `503` payload details into surfaced `Error(message)`.
- `frontend/src/components/ProxyHostForm.tsx`
- Uses `useDocker`.
- Error panel title: `Docker Connection Failed`.
- Existing troubleshooting text currently mentions socket mount but not explicit supplemental group action.
- `.docker/docker-entrypoint.sh`
- Root path auto-aligns docker socket GID with user group membership via:
- `get_group_by_gid()`
- `create_group_with_gid()`
- `add_user_to_group()`
- Non-root path logs generic `--group-add` guidance but does not include resolved host socket GID.
- `Dockerfile`
- Creates non-root user `charon` (uid/gid 1000) and final `USER charon`.
- This is correct for least privilege and should remain default.
### 2.2 Compose and Script Surface Area
Primary in-repo compose files with docker socket mount:
- `.docker/compose/docker-compose.yml` (`charon` service)
- `.docker/compose/docker-compose.local.yml` (`charon` service)
- `.docker/compose/docker-compose.dev.yml` (`app` service)
- `.docker/compose/docker-compose.playwright-local.yml` (`charon-e2e` service)
- `.docker/compose/docker-compose.playwright-ci.yml` (`charon-app`, `crowdsec` services)
Primary out-of-repo/local-ops file in active workspace:
- `/root/docker/containers/charon/docker-compose.yml` (`charon` service)
- Includes socket mount.
- `user:` is currently commented out.
- No `group_add` entry exists.
Launcher scripts discovered:
- `.github/skills/docker-start-dev-scripts/run.sh`
- Runs: `docker compose -f .docker/compose/docker-compose.dev.yml up -d`
- `/root/docker/containers/charon/docker-compose-up-charon.sh`
- Runs: `docker compose up -d`
### 2.3 Existing Tests Relevant to This Failure
Backend service tests (`backend/internal/services/docker_service_test.go`):
- `TestBuildLocalDockerUnavailableDetails_PermissionDeniedIncludesGroupHint`
- `TestBuildLocalDockerUnavailableDetails_MissingSocket`
- Connectivity classification tests across URL/syscall/network errors.
Backend handler tests (`backend/internal/api/handlers/docker_handler_test.go`):
- `TestDockerHandler_ListContainers_DockerUnavailableMappedTo503`
- Other selector and remote-host mapping tests.
Frontend hook tests (`frontend/src/hooks/__tests__/useDocker.test.tsx`):
- `it('extracts details from 503 service unavailable error', ...)`
### 2.4 Config Review Findings (`.gitignore`, `codecov.yml`, `.dockerignore`, `Dockerfile`)
- `.gitignore`: no blocker for this feature; already excludes local env/artifacts extensively.
- `.dockerignore`: no blocker for this feature; includes docs/tests and build artifacts exclusions.
- `Dockerfile`: non-root default is aligned with least-privilege intent.
- `codecov.yml`: currently excludes the two key Docker logic files:
- `backend/internal/services/docker_service.go`
- `backend/internal/api/handlers/docker_handler.go`
This exclusion undermines regression visibility for this exact problem class and should be revised.
### 2.5 Confidence
Confidence score: **97%**
Reasoning:
- Root cause and symptom path are already explicit in code.
- Required files and control points are concrete and localized.
- Existing tests already cover adjacent behavior and reduce implementation risk.
---
## 3) Requirements (EARS)
- WHEN local Docker source is selected and `/var/run/docker.sock` is mounted, THE SYSTEM SHALL return containers if the process has supplemental membership for socket GID.
- WHEN local Docker source is selected and socket permissions deny access (`EACCES`/`EPERM`), THE SYSTEM SHALL return HTTP `503` with a deterministic, actionable details message including supplemental-group guidance.
- WHEN container runs non-root and socket GID is known, THE SYSTEM SHALL provide explicit startup diagnostics indicating the required `group_add` value.
- WHEN docker-compose-based local/dev startup is used, THE SYSTEM SHALL support local-only `group_add` configuration from host socket GID without requiring root process runtime.
- WHEN remote Docker source is selected (`server_id` path), THE SYSTEM SHALL remain functionally unchanged.
- WHEN least-privilege validation is executed, THE SYSTEM SHALL demonstrate non-root process execution and only necessary supplemental group grant.
- IF resolved socket GID equals `0`, THEN THE SYSTEM SHALL require explicit operator opt-in and risk acknowledgment before any `group_add: ["0"]` path is used.
---
## 4) Technical Specifications
### 4.1 Architecture and Data Flow
User flow:
1. UI `ProxyHostForm` sets source = `Local (Docker Socket)`.
2. `useDocker(...)` calls `dockerApi.listContainers(...)`.
3. Backend `DockerHandler.ListContainers(...)` invokes `DockerService.ListContainers(...)`.
4. If socket access denied, backend emits `DockerUnavailableError` with details.
5. Handler returns `503` JSON `{ error, details }`.
6. Frontend surfaces message in `Docker Connection Failed` block.
No database schema change is required.
### 4.2 API Contract (No endpoint shape change)
Endpoint:
- `GET /api/v1/docker/containers`
- Query params:
- `host` (allowed: empty or `local` only)
- `server_id` (UUID for remote server lookup)
Responses:
- `200 OK`: `DockerContainer[]`
- `503 Service Unavailable`:
- `error: "Docker daemon unavailable"`
- `details: <actionable message>`
- `400`, `404`, `500` unchanged.
### 4.3 Deterministic `group_add` Policy (Chosen)
Chosen policy: **conditional local-only profile/override while keeping CI unaffected**.
Authoritative policy statement:
1. `repo-deliverable`: repository compose paths used for local operator runs (`.docker/compose/docker-compose.local.yml`, `.docker/compose/docker-compose.dev.yml`) may include local-only `group_add` wiring using `DOCKER_SOCK_GID`.
2. `repo-deliverable`: CI compose paths (`.docker/compose/docker-compose.playwright-ci.yml`) remain unaffected by this policy and must not require `DOCKER_SOCK_GID`.
3. `repo-deliverable`: base compose (`.docker/compose/docker-compose.yml`) remains safe by default and must not force a local host-specific GID requirement in CI.
4. `operator-local follow-up`: out-of-repo operator files (for example `/root/docker/containers/charon/docker-compose.yml`) may mirror this policy but are explicitly outside mandatory repo PR scope.
CI compatibility statement:
- CI workflows remain deterministic because they do not depend on local host socket GID export for this remediation.
- No CI job should fail due to missing `DOCKER_SOCK_GID` after this plan.
Security guardrail for `gid==0` (mandatory):
- If `stat -c '%g' /var/run/docker.sock` returns `0`, local profile/override usage must fail closed by default.
- Enabling `group_add: ["0"]` requires explicit opt-in (for example `ALLOW_DOCKER_SOCK_GID_0=true`) and documented risk acknowledgment in operator guidance.
- Silent fallback to GID `0` is prohibited.
### 4.4 Entrypoint Diagnostic Improvements
In `.docker/docker-entrypoint.sh` non-root socket branch:
- Extend current message to include resolved socket GID from `stat -c '%g' /var/run/docker.sock`.
- Emit exact recommendation format:
- `Use docker compose group_add: ["<gid>"] or run with --group-add <gid>`
- If the resolved GID is `0`, emit an explicit warning requiring opt-in/risk acknowledgment instead of the generic recommendation.
No privilege escalation should be introduced.
### 4.5 Frontend UX Message Precision
In `frontend/src/components/ProxyHostForm.tsx` troubleshooting text:
- Retain mount guidance.
- Add supplemental-group guidance for containerized runs.
- Keep language concise and operational.
### 4.6 Coverage and Quality Config Adjustments
`codecov.yml` review outcome:
- Proposed: remove Docker logic file ignores for:
- `backend/internal/services/docker_service.go`
- `backend/internal/api/handlers/docker_handler.go`
- Reason: this issue is rooted in these files; exclusion hides regressions.
`.gitignore` review outcome:
- No change required for core remediation.
`.dockerignore` review outcome:
- No required change for runtime fix.
- Optional follow-up: verify no additional local-only compose/env files are copied in future.
`Dockerfile` review outcome:
- No required behavioral change; preserve non-root default.
---
## 5) Risks, Edge Cases, Mitigations
### Risks
1. Host socket GID differs across environments (`docker` group not stable numeric ID).
2. CI runners may not permit or need explicit `group_add` depending on runner Docker setup.
3. Over-granting groups could violate least-privilege intent.
4. Socket GID can be `0` on some hosts and implies root-group blast radius.
### Edge Cases
- Socket path missing (`ENOENT`) remains handled with existing details path.
- Rootless host Docker sockets (`/run/user/<uid>/docker.sock`) remain selectable by `resolveLocalDockerHost()`.
- Remote server discovery path (`tcp://...`) must remain unaffected.
### Mitigations
- Use environment-substituted `DOCKER_SOCK_GID`, not hardcoded `988` in committed compose files.
- Keep `group_add` scoped only to local operator flows that require socket discovery.
- Fail closed on `DOCKER_SOCK_GID=0` unless explicit opt-in and risk acknowledgment are present.
- Verify `id` output inside container to confirm only necessary supplemental group is present.
---
## 6) Implementation Plan (Phased, minimal request count)
Design principle for phases: maximize delivery per request by grouping strongly-related changes into each phase and minimizing handoffs.
### Phase 1 — Baseline + Diagnostics + Compose Foundations
Scope:
1. Compose updates in local/dev paths to support local-only `group_add` via `DOCKER_SOCK_GID`.
2. Entrypoint diagnostic enhancement for non-root socket path.
`repo-deliverable` files:
- `.docker/compose/docker-compose.local.yml`
- `.docker/compose/docker-compose.dev.yml`
- `.docker/docker-entrypoint.sh`
`operator-local follow-up` files (non-blocking, out of repo PR scope):
- `/root/docker/containers/charon/docker-compose.yml`
- `/root/docker/containers/charon/docker-compose-up-charon.sh`
Deliverables:
- Deterministic startup guidance and immediate local remediation path.
### Phase 2 — API/UI Behavior Tightening + Tests
Scope:
1. Preserve and, if needed, refine backend detail text consistency in `buildLocalDockerUnavailableDetails(...)`.
2. UI troubleshooting copy update in `ProxyHostForm.tsx`.
3. Expand/refresh tests for permission-denied + supplemental-group hint rendering path.
Primary files:
- `backend/internal/services/docker_service.go`
- `backend/internal/services/docker_service_test.go`
- `backend/internal/api/handlers/docker_handler.go`
- `backend/internal/api/handlers/docker_handler_test.go`
- `frontend/src/hooks/useDocker.ts`
- `frontend/src/hooks/__tests__/useDocker.test.tsx`
- `frontend/src/components/ProxyHostForm.tsx`
- `frontend/src/components/__tests__/ProxyHostForm*.test.tsx`
Deliverables:
- User sees precise, actionable guidance when failure occurs.
- Regression tests protect failure classification and surfaced guidance.
### Phase 3 — Coverage Policy + Documentation + CI/Validation Hardening
Scope:
1. Remove Docker logic exclusions in `codecov.yml`.
2. Update docs to include `group_add` guidance where socket mount is described.
3. Validate CI/playwright compose behavior remains unaffected and verify local least-privilege checks.
Primary files:
- `codecov.yml`
- `README.md`
- `docs/getting-started.md`
- `SECURITY.md`
- `.vscode/tasks.json` (only if adding dedicated validation task labels)
Deliverables:
- Documentation and coverage policy match runtime behavior.
- Verified validation playbook for operators and CI.
---
## 7) PR Slicing Strategy
### Decision
**Split into multiple PRs (PR-1 / PR-2 / PR-3).**
### Trigger Reasons
- Cross-domain change set (compose + shell entrypoint + backend + frontend + tests + docs + coverage policy).
- Distinct rollback boundaries needed (runtime config vs behavior vs governance/reporting).
- Faster and safer review with independently verifiable increments.
### Ordered PR Slices
#### PR-1: Runtime Access Foundation (Compose + Entrypoint)
Scope:
- Add local-only `group_add` strategy to local/dev compose flows.
- Improve non-root entrypoint diagnostics to print required GID.
Files (expected):
- `.docker/compose/docker-compose.local.yml`
- `.docker/compose/docker-compose.dev.yml`
- `.docker/docker-entrypoint.sh`
Operator-local follow-up (not part of repo PR gate):
- `/root/docker/containers/charon/docker-compose.yml`
- `/root/docker/containers/charon/docker-compose-up-charon.sh`
Dependencies:
- None.
Acceptance criteria:
1. Container remains non-root (`id -u = 1000`).
2. With local-only config enabled and `DOCKER_SOCK_GID` exported, `id -G` inside container includes socket GID.
3. `GET /api/v1/docker/containers?host=local` no longer fails due to `EACCES` in correctly configured environment.
4. If resolved socket GID is `0`, setup fails by default unless explicit opt-in and risk acknowledgment are provided.
Rollback/contingency:
- Revert compose and entrypoint deltas only.
#### PR-2: Behavior + UX + Tests
Scope:
- Backend details consistency (if required).
- Frontend troubleshooting message update.
- Add/adjust tests around permission-denied + supplemental-group guidance.
Files (expected):
- `backend/internal/services/docker_service.go`
- `backend/internal/services/docker_service_test.go`
- `backend/internal/api/handlers/docker_handler.go`
- `backend/internal/api/handlers/docker_handler_test.go`
- `frontend/src/hooks/useDocker.ts`
- `frontend/src/hooks/__tests__/useDocker.test.tsx`
- `frontend/src/components/ProxyHostForm.tsx`
- `frontend/src/components/__tests__/ProxyHostForm*.test.tsx`
Dependencies:
- PR-1 recommended (runtime setup available for realistic local validation).
Acceptance criteria:
1. `503` details include actionable group guidance for permission-denied scenarios.
2. UI error panel provides mount + supplemental-group troubleshooting.
3. All touched unit/e2e tests pass for local Docker source path.
Rollback/contingency:
- Revert only behavior/UI/test deltas; keep PR-1 foundations.
#### PR-3: Coverage + Docs + Validation Playbook
Scope:
- Update `codecov.yml` exclusions for Docker logic files.
- Update user/operator docs where socket mount guidance appears.
- Optional task additions for socket-permission diagnostics.
Files (expected):
- `codecov.yml`
- `README.md`
- `docs/getting-started.md`
- `SECURITY.md`
- `.vscode/tasks.json` (optional)
Dependencies:
- PR-2 preferred to ensure policy aligns with test coverage additions.
Acceptance criteria:
1. Codecov includes Docker service/handler in coverage accounting.
2. Docs show both socket mount and supplemental-group requirement.
3. Validation command set is documented and reproducible.
Rollback/contingency:
- Revert reporting/docs/task changes only.
---
## 8) Validation Strategy (Protocol-Ordered)
### 8.1 E2E Prerequisite / Rebuild Check (Mandatory First)
Follow project protocol to decide whether E2E container rebuild is required before tests:
1. If application/runtime or Docker build inputs changed, rebuild E2E environment.
2. If only test files changed and environment is healthy, reuse current container.
3. If environment state is suspect, rebuild.
Primary task:
- VS Code task: `Docker: Rebuild E2E Environment` (or clean variant when needed).
### 8.2 E2E First (Mandatory)
Run E2E before unit tests:
- VS Code task: `Test: E2E Playwright (Targeted Suite)` for scoped regression checks.
- VS Code task: `Test: E2E Playwright (Skill)` for broader safety pass as needed.
### 8.3 Local Patch Report (Mandatory Before Unit/Coverage)
Generate patch artifacts immediately after E2E:
```bash
cd /projects/Charon
bash scripts/local-patch-report.sh
```
Required artifacts:
- `test-results/local-patch-report.md`
- `test-results/local-patch-report.json`
### 8.4 Unit + Coverage Validation
Backend and frontend unit coverage gates after patch report:
```bash
cd /projects/Charon/backend && go test ./internal/services ./internal/api/handlers
cd /projects/Charon/frontend && npm run test -- src/hooks/__tests__/useDocker.test.tsx
```
Then run coverage tasks/scripts per project protocol (minimum threshold enforcement remains unchanged).
### 8.5 Least-Privilege + `gid==0` Guardrail Checks
Pass conditions:
1. Container process remains non-root.
2. Supplemental group grant is limited to socket GID only for local operator flow.
3. No privileged mode or unrelated capability additions.
4. Socket remains read-only.
5. If socket GID resolves to `0`, local run fails closed unless explicit opt-in and risk acknowledgment are present.
---
## 9) Suggested File-Level Updates Summary
### `repo-deliverable` Must Update
- `.docker/compose/docker-compose.local.yml`
- `.docker/compose/docker-compose.dev.yml`
- `.docker/docker-entrypoint.sh`
- `frontend/src/components/ProxyHostForm.tsx`
- `codecov.yml`
### `repo-deliverable` Should Update
- `README.md`
- `docs/getting-started.md`
- `SECURITY.md`
### `repo-deliverable` Optional Update
- `.vscode/tasks.json` (dedicated task to precompute/export `DOCKER_SOCK_GID` and start compose)
### `operator-local follow-up` (Out of Mandatory Repo PR Scope)
- `/root/docker/containers/charon/docker-compose.yml`
- `/root/docker/containers/charon/docker-compose-up-charon.sh`
### Reviewed, No Required Change
- `.gitignore`
- `.dockerignore`
- `Dockerfile` (keep non-root default)
---
## 10) Acceptance Criteria / DoD
1. Local Docker source works in non-root container when supplemental socket group is supplied.
2. Failure path remains explicit and actionable when supplemental group is missing.
3. Scope split is explicit and consistent: `repo-deliverable` vs `operator-local follow-up`.
4. Chosen policy is unambiguous: conditional local-only `group_add`; CI remains unaffected.
5. `gid==0` path is guarded by explicit opt-in/risk acknowledgment and never silently defaulted.
6. Validation order is protocol-aligned: E2E prerequisite/rebuild check -> E2E first -> local patch report -> unit/coverage.
7. Coverage policy no longer suppresses Docker service/handler regression visibility.
8. PR-1, PR-2, PR-3 each pass their slice acceptance criteria with independent rollback safety.
9. This file contains one active plan with one frontmatter block and no archived concatenated plan content.
---
## 11) Handoff
This plan is complete and execution-ready for Supervisor review. It includes:
- Root-cause grounded file/function map
- EARS requirements
- Specific multi-phase implementation path
- PR slicing with dependencies and rollback notes
- Validation sequence explicitly aligned to project protocol order and least-privilege guarantees
+318 -792
View File
File diff suppressed because it is too large Load Diff
+114 -39
View File
@@ -1,57 +1,132 @@
## QA Report — PR-2 Security Patch Posture Audit
# QA/Security Audit Report: `security-pr.yml` Workflow Fix
- Date: 2026-02-23
- Scope: PR-2 only (security patch posture, admin API hardening, rollback viability)
- Verdict: **READY (PASS)**
- Date: 2026-02-27
- Auditor: QA Security mode
- Scope: `.github/workflows/security-pr.yml` behavior fix only
- Overall verdict: **PASS (scope-specific)** with one **out-of-scope repository security debt item** noted
## Gate Summary
## Findings (Ordered by Severity)
| Gate | Status | Evidence |
| --- | --- | --- |
| Targeted E2E for PR-2 | PASS | Security settings test for Caddy Admin API URL passed (2/2). |
| Local patch preflight artifacts | PASS | `test-results/local-patch-report.md` and `.json` regenerated. |
| Coverage and type-check | PASS | Backend coverage 87.7% line / 87.4% statement; frontend type-check passed; frontend coverage preflight input passed (88.99% lines). |
| Pre-commit gate | PASS | `pre-commit run --all-files` passed after resolving version and type-check hook issues. |
| Security scans | PASS | CodeQL Go/JS CI-aligned scans passed; findings gate passed with no HIGH/CRITICAL; Trivy passed at configured severities. |
| Runtime posture + rollback | PASS | Default scenario shifted `A -> B` for PR-2 posture; rollback remains explicit via `CADDY_PATCH_SCENARIO=A`; admin API URL now validated and normalized at config load. |
### 🟡 IMPORTANT: Repository secret-scan debt exists (not introduced by scoped workflow change)
- Check: `pre-commit run --hook-stage manual gitleaks-tuned-scan --all-files`
- Result: **FAIL** (`135` findings)
- Scope impact: `touches_security_pr = 0` (no findings in `.github/workflows/security-pr.yml`)
- Evidence source: `test-results/security/gitleaks-tuned-precommit.json`
- Why this matters: Existing credential-like content raises background security risk even if unrelated to this workflow fix.
- Recommended remediation:
1. Triage findings by rule/file and classify true positives vs allowed test fixtures.
2. Add justified allowlist entries for confirmed false positives.
3. Remove or rotate any real secrets immediately.
4. Re-run `gitleaks-tuned-scan` until clean/accepted baseline is documented.
## Resolved Items
### ✅ No blocking defects found in the implemented workflow fix
- Deterministic event handling: validated in workflow logic.
- Artifact/image resolution hardening: validated in workflow logic.
- Security hardening: validated in workflow logic and lint gates.
1. `check-version-match` mismatch fixed by syncing `.version` to `v0.19.1`.
2. `frontend-type-check` hook stabilized to `npx tsc --noEmit` for deterministic pre-commit behavior.
## Requested Validations
## PR-2 Closure Statement
### 1) `actionlint` on security workflow
- Command:
- `pre-commit run actionlint --files .github/workflows/security-pr.yml`
- Result: **PASS**
- Key output:
- `actionlint (GitHub Actions)..............................................Passed`
All PR-2 QA/security gates required for merge are passing. No PR-3 scope is included in this report.
### 2) `pre-commit run --all-files`
- Command:
- `pre-commit run --all-files`
- Result: **PASS**
- Key output:
- YAML/shell/actionlint/dockerfile/go vet/golangci-lint/version/LFS/type-check/frontend lint hooks passed.
---
### 3) Security scans/tasks relevant to workflow change (feasible locally)
- Executed:
1. `pre-commit run --hook-stage manual codeql-parity-check --all-files` -> **PASS**
2. `pre-commit run --hook-stage manual codeql-check-findings --all-files` -> **PASS** (no blocking HIGH/CRITICAL)
3. `pre-commit run --hook-stage manual gitleaks-tuned-scan --all-files` -> **FAIL** (repo baseline debt; not in scoped file)
- Additional QA evidence:
- `bash scripts/local-patch-report.sh` -> artifacts generated:
- `test-results/local-patch-report.md`
- `test-results/local-patch-report.json`
## QA Report — PR-3 Keepalive Controls Closure
## Workflow Behavior Verification
- Date: 2026-02-23
- Scope: PR-3 only (keepalive controls, safe fallback/default behavior, non-exposure constraints)
- Verdict: **READY (PASS)**
## A) Deterministic event handling
Validated in `.github/workflows/security-pr.yml`:
- Manual dispatch input is required and validated as digits-only:
- `.github/workflows/security-pr.yml:10`
- `.github/workflows/security-pr.yml:14`
- `.github/workflows/security-pr.yml:71`
- `.github/workflows/security-pr.yml:78`
- `workflow_run` path constrained to successful upstream PR runs:
- `.github/workflows/security-pr.yml:31`
- `.github/workflows/security-pr.yml:36`
- `.github/workflows/security-pr.yml:38`
- Explicit trust-boundary contract checks for upstream workflow name/event/repository:
- `.github/workflows/security-pr.yml:127`
- `.github/workflows/security-pr.yml:130`
- `.github/workflows/security-pr.yml:136`
- `.github/workflows/security-pr.yml:143`
## Reviewer Gate Summary (PR-3)
Assessment: **PASS** for deterministic triggering and contract enforcement.
| Gate | Status | Reviewer evidence |
| --- | --- | --- |
| Targeted E2E rerun | PASS | Security settings targeted rerun completed: **30 passed, 0 failed**. |
| Local patch preflight | PASS | `frontend/coverage/lcov.info` present; `scripts/local-patch-report.sh` artifacts regenerated with `pass` status. |
| Coverage + type-check | PASS | Frontend coverage gate passed (89% lines vs 85% minimum); type-check passed. |
| Pre-commit + security scans | PASS | `pre-commit --all-files`, CodeQL Go/JS CI-aligned scans, findings gate, and Trivy checks passed (no HIGH/CRITICAL blockers). |
| Final readiness | PASS | All PR-3 closure gates are green. |
## B) Artifact and image resolution hardening
Validated in `.github/workflows/security-pr.yml`:
- Artifact is mandatory in `workflow_run`/`workflow_dispatch` artifact path; failures are explicit (`api_error`/`not_found`):
- `.github/workflows/security-pr.yml:159`
- `.github/workflows/security-pr.yml:185`
- `.github/workflows/security-pr.yml:196`
- `.github/workflows/security-pr.yml:214`
- `.github/workflows/security-pr.yml:225`
- Docker image load hardened with:
- tar readability check
- `manifest.json` multi-tag parsing (`RepoTags[]`)
- fallback to `Loaded image ID`
- deterministic alias `charon:artifact`
- `.github/workflows/security-pr.yml:255`
- `.github/workflows/security-pr.yml:261`
- `.github/workflows/security-pr.yml:267`
- `.github/workflows/security-pr.yml:273`
- `.github/workflows/security-pr.yml:282`
- `.github/workflows/security-pr.yml:295`
- `.github/workflows/security-pr.yml:300`
- Extraction consumes resolved alias output rather than reconstructed tag:
- `.github/workflows/security-pr.yml:333`
- `.github/workflows/security-pr.yml:342`
## Scope Guardrails Verified (PR-3)
Assessment: **PASS** for deterministic artifact/image selection and prior mismatch risk mitigation.
- Keepalive controls are limited to approved PR-3 scope.
- Safe fallback behavior remains intact when keepalive values are missing or invalid.
- Non-exposure constraints remain intact (`trusted_proxies_unix` and certificate lifecycle internals are not exposed).
## C) Security hardening
Validated in `.github/workflows/security-pr.yml`:
- Least-privilege job permissions:
- `.github/workflows/security-pr.yml:40`
- `.github/workflows/security-pr.yml:41`
- `.github/workflows/security-pr.yml:42`
- `.github/workflows/security-pr.yml:43`
- Pinned action SHAs maintained for checkout/download/upload/CodeQL SARIF upload/Trivy action usage:
- `.github/workflows/security-pr.yml:48`
- `.github/workflows/security-pr.yml:243`
- `.github/workflows/security-pr.yml:365`
- `.github/workflows/security-pr.yml:388`
- `.github/workflows/security-pr.yml:397`
- `.github/workflows/security-pr.yml:408`
## Manual Verification Reference
Assessment: **PASS** for workflow-level security hardening within scope.
- PR-3 manual test tracking plan: `docs/issues/manual_test_pr3_keepalive_controls_closure.md`
## DoD Mapping for Workflow-Only Change
## PR-3 Closure Statement
Executed:
- `actionlint` scoped check: **Yes (PASS)**
- Full pre-commit: **Yes (PASS)**
- Workflow-relevant security manual checks (CodeQL parity/findings, gitleaks): **Yes (2 PASS, 1 FAIL out-of-scope debt)**
- Local patch report artifacts: **Yes (generated)**
PR-3 is **ready to merge** with no open QA blockers.
N/A for this scope:
- Playwright E2E feature validation for app behavior: **N/A** (no app/runtime code changes)
- Backend/frontend unit coverage gates: **N/A** (no backend/frontend source modifications in audited fix)
- GORM check-mode gate: **N/A** (no model/database/GORM changes)
- Trivy app binary/image scan execution for changed runtime artifact: **N/A locally for this audit** (workflow logic audited; no image/runtime code delta in this fix)
## Conclusion
The implemented fix in `.github/workflows/security-pr.yml` meets the requested goals for deterministic event handling, robust artifact/image resolution, and workflow security hardening. Required validation commands were executed and passed (`actionlint`, `pre-commit --all-files`), and additional feasible security checks were run. One repository-wide gitleaks debt remains and should be remediated separately from this workflow fix.
+138
View File
@@ -0,0 +1,138 @@
# QA Report — PR #754: Enable and Test Gotify and Custom Webhook Notifications
**Branch:** `feature/beta-release`
**Date:** 2026-02-25
**Auditor:** QA Security Agent
---
## Summary
| # | Check | Result | Details |
|---|-------|--------|---------|
| 1 | Local Patch Coverage Preflight | **WARN** | 79.5% overall (threshold 90%), 78.3% backend (threshold 85%) — advisory only |
| 2 | Backend Coverage ≥ 87% | **PASS** | 87.0% statement / 87.3% line (threshold 87%) |
| 3 | Frontend Coverage ≥ 85% | **PASS** | 88.21% statement / 88.97% line (threshold 85%) |
| 4 | TypeScript Type Check | **PASS** | Zero errors |
| 5 | Pre-commit Hooks | **PASS** | All 15 hooks passed |
| 6a | Trivy Filesystem Scan | **PASS** | 0 CRITICAL/HIGH in project code (findings only in Go module cache) |
| 6b | Docker Image Scan | **WARN** | 1 HIGH in Caddy transitive dep (CVE-2026-25793, nebula v1.9.7 → fixed 1.10.3) |
| 6c | CodeQL (Go + JavaScript) | **PASS** | 0 errors, 0 warnings across both languages |
| 7 | GORM Security Scan | **PASS** | 0 CRITICAL/HIGH (2 INFO suggestions: missing indexes on UserPermittedHost) |
| 8 | Go Vulnerability Check | **PASS** | No vulnerabilities found |
---
## Detailed Findings
### 1. Local Patch Coverage Preflight
- **Status:** WARN (advisory, not blocking per policy)
- Overall patch coverage: **79.5%** (threshold: 90%)
- Backend patch coverage: **78.3%** (threshold: 85%)
- Artifacts generated but `test-results/` directory was not persisted at repo root
- **Action:** Consider adding targeted tests for uncovered changed lines in notification service/handler
### 2. Backend Unit Test Coverage
- **Status:** PASS
- Statement coverage: **87.0%**
- Line coverage: **87.3%**
- All tests passed (0 failures)
### 3. Frontend Unit Test Coverage
- **Status:** PASS
- Statement coverage: **88.21%**
- Branch coverage: **80.58%**
- Function coverage: **85.20%**
- Line coverage: **88.97%**
- All tests passed (0 failures)
- Coverage files generated: `lcov.info`, `coverage-summary.json`, `coverage-final.json`
### 4. TypeScript Type Check
- **Status:** PASS
- `tsc --noEmit` completed with zero errors
### 5. Pre-commit Hooks
- **Status:** PASS
- All hooks passed:
- fix end of files
- trim trailing whitespace
- check yaml
- check for added large files
- shellcheck
- actionlint (GitHub Actions)
- dockerfile validation
- Go Vet
- golangci-lint (Fast Linters - BLOCKING)
- Check .version matches latest Git tag
- Prevent large files not tracked by LFS
- Prevent committing CodeQL DB artifacts
- Prevent committing data/backups files
- Frontend TypeScript Check
- Frontend Lint (Fix)
### 6a. Trivy Filesystem Scan
- **Status:** PASS
- Scanned `backend/` and `frontend/` directories: **0 CRITICAL, 0 HIGH**
- Full workspace scan found 3 CRITICAL + 14 HIGH across Go module cache dependencies (not project code)
- Trivy misconfig scanner crashed (known Trivy bug in ansible parser — nil pointer dereference in `discovery.go:82`). Vuln scanner completed successfully.
### 6b. Docker Image Scan
- **Status:** WARN (not blocking — upstream dependency)
- Image: `charon:local`
- **1 HIGH finding:**
- **CVE-2026-25793**`github.com/slackhq/nebula` v1.9.7 (in `usr/bin/caddy` binary)
- Description: Blocklist evasion via ECDSA Signature Malleability
- Fixed in: v1.10.3
- Impact: Caddy transitive dependency, not Charon code
- **Remediation:** Upgrade Caddy to a version that pulls nebula ≥ 1.10.3 when available
### 6c. CodeQL Scans
- **Status:** PASS
- **Go:** 0 errors, 0 warnings
- **JavaScript:** 0 errors, 0 warnings (347/347 files scanned)
- SARIF outputs: `codeql-results-go.sarif`, `codeql-results-javascript.sarif`
### 7. GORM Security Scan
- **Status:** PASS
- Scanned: 41 Go files (2207 lines), 2 seconds
- **0 CRITICAL, 0 HIGH, 0 MEDIUM**
- 2 INFO suggestions:
- `backend/internal/models/user.go:109``UserPermittedHost.UserID` missing index
- `backend/internal/models/user.go:110``UserPermittedHost.ProxyHostID` missing index
### 8. Go Vulnerability Check
- **Status:** PASS
- `govulncheck ./...` — No vulnerabilities found
---
## Gotify Token Security Review
- No Gotify tokens found in logs, test artifacts, or API examples
- No tokenized URL query parameters exposed in diagnostics or output
- Token handling follows `json:"-"` pattern (verified via `HasToken` computed field approach in PR)
---
## Recommendation
### GO / NO-GO: **GO** (conditional)
All blocking gates pass. Two advisory warnings exist:
1. **Patch coverage** (79.5% overall, 78.3% backend) is below advisory thresholds but not a blocking gate per current policy
2. **Docker image** has 1 HIGH CVE in Caddy's transitive dependency (nebula) — upstream fix required, not actionable in Charon code
**Conditions:**
- Track nebula CVE-2026-25793 remediation as a follow-up issue when a Caddy update incorporates the fix
- Consider adding targeted tests for uncovered changed lines in notification service/handler to improve patch coverage
+1291 -116
View File
File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More