chore: clean .gitignore cache

This commit is contained in:
GitHub Actions
2026-01-26 19:21:33 +00:00
parent 1b1b3a70b1
commit e5f0fec5db
1483 changed files with 0 additions and 472793 deletions

View File

@@ -1,53 +0,0 @@
# Scripts Directory
## Running Tests Locally Before Pushing to CI
### WAF Integration Test
**Always run this locally before pushing WAF-related changes to avoid CI failures:**
```bash
# From project root
bash ./scripts/coraza_integration.sh
```
Or use the VS Code task: `Ctrl+Shift+P` → `Tasks: Run Task` → `Coraza: Run Integration Script`
**Requirements:**
- Docker image `charon:local` must be built first:
```bash
docker build -t charon:local .
```
- The script will:
1. Start a test container with WAF enabled
2. Create a backend container (httpbin)
3. Test WAF in block mode (expect HTTP 403)
4. Test WAF in monitor mode (expect HTTP 200)
5. Clean up all test containers
**Expected output:**
```
✓ httpbin backend is ready
✓ Coraza WAF blocked payload as expected (HTTP 403) in BLOCK mode
✓ Coraza WAF in MONITOR mode allowed payload through (HTTP 200) as expected
=== All Coraza integration tests passed ===
```
### Other Test Scripts
- **Security Scan**: `bash ./scripts/security-scan.sh`
- **Go Test Coverage**: `bash ./scripts/go-test-coverage.sh`
- **Frontend Test Coverage**: `bash ./scripts/frontend-test-coverage.sh`
## CI/CD Workflows
Changes to these scripts may trigger CI workflows:
- `coraza_integration.sh` → WAF Integration Tests workflow
- Files in `.github/workflows/` directory control CI behavior
**Tip**: Run tests locally to save CI minutes and catch issues faster!

View File

@@ -1,97 +0,0 @@
#!/bin/bash
# Bump Beta Version Script
#
# Automates version bumping for Beta releases:
#   - If current is plain semver (x.y.z), bumps to the next MINOR as a first
#     beta (e.g. 0.3.0 -> 0.4.0-beta.1).
#   - If current is already a beta (x.y.z-beta.X), bumps to x.y.z-beta.(X+1).
#   - Updates .version, backend/internal/version/version.go and the
#     frontend/backend package.json files, then optionally commits and tags.
set -euo pipefail

# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh utility-bump-beta
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh utility-bump-beta" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

echo -e "${YELLOW}Starting Beta Version Bump...${NC}"

# 1. Read current version (default to 0.0.0 when .version is missing)
CURRENT_VERSION=$(cat .version 2>/dev/null || echo "0.0.0")
echo "Current Version: $CURRENT_VERSION"

# 2. Calculate new version
if [[ "$CURRENT_VERSION" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)-beta\.([0-9]+)$ ]]; then
  # Already a beta: increment the beta number
  MAJOR="${BASH_REMATCH[1]}"
  MINOR="${BASH_REMATCH[2]}"
  PATCH="${BASH_REMATCH[3]}"
  BETA_NUM="${BASH_REMATCH[4]}"
  NEW_VERSION="$MAJOR.$MINOR.$PATCH-beta.$((BETA_NUM + 1))"
elif [[ "$CURRENT_VERSION" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
  # Plain semver: bump MINOR and start at beta.1
  MAJOR="${BASH_REMATCH[1]}"
  MINOR="${BASH_REMATCH[2]}"
  NEW_VERSION="$MAJOR.$((MINOR + 1)).0-beta.1"
else
  # Fallback / safety: format not recognized, use a fixed default
  echo "Current version format not recognized for auto-beta bump. Defaulting to 0.3.0-beta.1"
  NEW_VERSION="0.3.0-beta.1"
fi
echo -e "${GREEN}New Version: $NEW_VERSION${NC}"

# 3. Update files.
# Track what was actually touched so the commit below only stages files that
# exist (previously backend/package.json was staged unconditionally, which
# fails in checkouts without it; likewise sed -i on a missing file aborts
# the whole script under set -e).
STAGED_FILES=(.version)
echo "$NEW_VERSION" > .version
echo "Updated .version"

# backend/internal/version/version.go: replace the Version = "..." line
if [[ -f backend/internal/version/version.go ]]; then
  sed -i "s/Version = \".*\"/Version = \"$NEW_VERSION\"/" backend/internal/version/version.go
  STAGED_FILES+=(backend/internal/version/version.go)
  echo "Updated backend/internal/version/version.go"
else
  echo "WARNING: backend/internal/version/version.go not found; skipped" >&2
fi

# package.json files (sed is sufficient given standard formatting)
for pkg in frontend/package.json backend/package.json; do
  if [[ -f "$pkg" ]]; then
    sed -i "s/\"version\": \".*\"/\"version\": \"$NEW_VERSION\"/" "$pkg"
    STAGED_FILES+=("$pkg")
    echo "Updated $pkg"
  else
    echo "WARNING: $pkg not found; skipped" >&2
  fi
done

# NOTE: VERSION.md is intentionally not modified automatically — it is a
# guide document without a reliably replaceable "Current Version" line.

# 4. Git Commit and Tag (interactive)
read -p "Do you want to commit and tag this version? (y/n) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
  git add "${STAGED_FILES[@]}"
  git commit -m "chore: bump version to $NEW_VERSION"
  git tag "v$NEW_VERSION"
  echo -e "${GREEN}Committed and tagged v$NEW_VERSION${NC}"
  echo "Remember to push: git push origin feature/beta-release --tags"
else
  echo "Changes made but not committed."
fi

View File

@@ -1,557 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Brief: Full integration test for Cerberus security stack
# Tests all security features working together:
# - WAF (Coraza) for payload inspection
# - Rate Limiting for volume abuse prevention
# - Security handler ordering in Caddy config
#
# Test Cases:
# - TC-1: Verify all features enabled via /api/v1/security/status
# - TC-2: Verify handler order in Caddy config
# - TC-3: WAF blocking doesn't consume rate limit quota
# - TC-4: Legitimate traffic flows through all layers
# - TC-5: Basic latency check
# Ensure we operate from repo root
# (resolved relative to this script's own location, so the script can be
# invoked from any working directory)
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# ============================================================================
# Configuration
# ============================================================================
# Names of the throwaway docker containers and the Host header used to route
# test traffic through the proxy.
CONTAINER_NAME="charon-cerberus-test"
BACKEND_CONTAINER="cerberus-backend"
TEST_DOMAIN="cerberus.test.local"
# Use unique non-conflicting ports
# (host-side mappings for the container's 80/443/8080/2019 — see docker run below)
API_PORT=8480
HTTP_PORT=8481
HTTPS_PORT=8444
CADDY_ADMIN_PORT=2319
# Rate limit config for testing
# Small window/quota so TC-3/TC-4 can exercise the limiter quickly.
RATE_LIMIT_REQUESTS=5
RATE_LIMIT_WINDOW_SEC=30
# ============================================================================
# Colors for output
# ============================================================================
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Leveled log helpers: colorized tag + message, written to stdout.
# printf's %b expands backslash escapes in each field, mirroring `echo -e`.
log_info()  { printf '%b[INFO]%b %b\n'  "$GREEN"  "$NC" "$1"; }
log_warn()  { printf '%b[WARN]%b %b\n'  "$YELLOW" "$NC" "$1"; }
log_error() { printf '%b[ERROR]%b %b\n' "$RED"    "$NC" "$1"; }
log_test()  { printf '%b[TEST]%b %b\n'  "$BLUE"   "$NC" "$1"; }
# ============================================================================
# Test counters
# ============================================================================
PASSED=0
FAILED=0
pass_test() {
PASSED=$((PASSED + 1))
echo -e " ${GREEN}✓ PASS${NC}"
}
fail_test() {
FAILED=$((FAILED + 1))
echo -e " ${RED}✗ FAIL${NC}: $1"
}
# Assert HTTP status code
assert_http() {
local expected=$1
local actual=$2
local desc=$3
if [ "$actual" = "$expected" ]; then
log_info "$desc: HTTP $actual"
PASSED=$((PASSED + 1))
else
log_error "$desc: HTTP $actual (expected $expected)"
FAILED=$((FAILED + 1))
fi
}
# ============================================================================
# Helper Functions
# ============================================================================
# Dumps debug information on failure
# Invoked via the ERR trap: prints container logs, the live Caddy config and
# the security API state so a failed CI run is diagnosable from the log alone.
# Every probe is best-effort (|| echo ...) so the dump itself never aborts.
on_failure() {
# $? must be captured first, before any command below overwrites it.
local exit_code=$?
echo ""
echo "=============================================="
echo "=== FAILURE DEBUG INFO (exit code: $exit_code) ==="
echo "=============================================="
echo ""
echo "=== Charon API Logs (last 150 lines) ==="
docker logs ${CONTAINER_NAME} 2>&1 | tail -150 || echo "Could not retrieve container logs"
echo ""
echo "=== Caddy Admin API Config ==="
curl -sL "http://localhost:${CADDY_ADMIN_PORT}/config/" 2>/dev/null | head -300 || echo "Could not retrieve Caddy config"
echo ""
# The following endpoints need the session cookie; default to /dev/null when
# the failure happened before TMP_COOKIE was created.
echo "=== Security Config in API ==="
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/security/config" 2>/dev/null || echo "Could not retrieve security config"
echo ""
echo "=== Security Status ==="
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/security/status" 2>/dev/null || echo "Could not retrieve security status"
echo ""
echo "=== Security Rulesets ==="
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/security/rulesets" 2>/dev/null || echo "Could not retrieve rulesets"
echo ""
echo "=============================================="
echo "=== END DEBUG INFO ==="
echo "=============================================="
}
# Cleanup function
# Removes the test containers and the temp cookie file; every step is
# best-effort (|| true) so cleanup never masks the script's real exit status.
cleanup() {
log_info "Cleaning up test resources..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
docker rm -f ${BACKEND_CONTAINER} 2>/dev/null || true
rm -f "${TMP_COOKIE:-}" 2>/dev/null || true
log_info "Cleanup complete"
}
# Set up trap to dump debug info on any error and always cleanup
# (ERR fires first on failure, then EXIT runs cleanup on every exit path)
trap on_failure ERR
trap cleanup EXIT
echo "=============================================="
echo "=== Cerberus Full Integration Test Starting ==="
echo "=============================================="
echo ""
# Bail out early when a required CLI tool is missing.
for required_tool in docker curl; do
  if ! command -v "$required_tool" >/dev/null 2>&1; then
    log_error "$required_tool is not available; aborting"
    exit 1
  fi
done
# ============================================================================
# Step 1: Build image if needed
# ============================================================================
# Reuse an existing charon:local image to keep repeat runs fast.
if ! docker image inspect charon:local >/dev/null 2>&1; then
log_info "Building charon:local image..."
docker build -t charon:local .
else
log_info "Using existing charon:local image"
fi
# ============================================================================
# Step 2: Start containers
# ============================================================================
log_info "Stopping any existing test containers..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
docker rm -f ${BACKEND_CONTAINER} 2>/dev/null || true
# Ensure network exists
if ! docker network inspect containers_default >/dev/null 2>&1; then
log_info "Creating containers_default network..."
docker network create containers_default
fi
log_info "Starting httpbin backend container..."
docker run -d --name ${BACKEND_CONTAINER} --network containers_default kennethreitz/httpbin
log_info "Starting Charon container with ALL Cerberus features enabled..."
# NOTE(review): the env toggles mix CHARON_ and CERBERUS_ prefixes —
# presumably both are read by the app; confirm against the backend config loader.
docker run -d --name ${CONTAINER_NAME} \
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--network containers_default \
-p ${HTTP_PORT}:80 -p ${HTTPS_PORT}:443 -p ${API_PORT}:8080 -p ${CADDY_ADMIN_PORT}:2019 \
-e CHARON_ENV=development \
-e CHARON_DEBUG=1 \
-e CHARON_HTTP_PORT=8080 \
-e CHARON_DB_PATH=/app/data/charon.db \
-e CHARON_FRONTEND_DIR=/app/frontend/dist \
-e CHARON_CADDY_ADMIN_API=http://localhost:2019 \
-e CHARON_CADDY_CONFIG_DIR=/app/data/caddy \
-e CHARON_CADDY_BINARY=caddy \
-e CERBERUS_SECURITY_CERBERUS_ENABLED=true \
-e CHARON_SECURITY_WAF_MODE=block \
-e CERBERUS_SECURITY_RATELIMIT_MODE=enabled \
-e CERBERUS_SECURITY_ACL_ENABLED=true \
-v charon_cerberus_test_data:/app/data \
-v caddy_cerberus_test_data:/data \
-v caddy_cerberus_test_config:/config \
charon:local
# Poll the health endpoint for up to 30 seconds.
log_info "Waiting for Charon API to be ready..."
for i in {1..30}; do
if curl -s -f "http://localhost:${API_PORT}/api/v1/health" >/dev/null 2>&1; then
log_info "Charon API is ready"
break
fi
if [ $i -eq 30 ]; then
log_error "Charon API failed to start"
exit 1
fi
echo -n '.'
sleep 1
done
echo ""
# Probe the backend from inside the proxy container (container-to-container
# DNS name is what the proxy will use later), up to 20 seconds.
log_info "Waiting for httpbin backend to be ready..."
for i in {1..20}; do
if docker exec ${CONTAINER_NAME} sh -c "curl -sf http://${BACKEND_CONTAINER}/get" >/dev/null 2>&1; then
log_info "httpbin backend is ready"
break
fi
if [ $i -eq 20 ]; then
log_error "httpbin backend failed to start"
exit 1
fi
echo -n '.'
sleep 1
done
echo ""
# ============================================================================
# Step 3: Register user and authenticate
# ============================================================================
log_info "Registering admin user and logging in..."
TMP_COOKIE=$(mktemp)
# Registration may legitimately fail if the user already exists in the
# persisted volume; ignore errors (|| true) and rely on the login below.
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"cerberus-test@example.local","password":"password123","name":"Cerberus Tester"}' \
"http://localhost:${API_PORT}/api/v1/auth/register" >/dev/null 2>&1 || true
# Login stores the session cookie in TMP_COOKIE for all later API calls.
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"cerberus-test@example.local","password":"password123"}' \
-c "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/auth/login" >/dev/null
log_info "Authentication complete"
# ============================================================================
# Step 4: Create proxy host
# ============================================================================
log_info "Creating proxy host '${TEST_DOMAIN}' pointing to backend..."
PROXY_HOST_PAYLOAD=$(cat <<EOF
{
"name": "cerberus-test-backend",
"domain_names": "${TEST_DOMAIN}",
"forward_scheme": "http",
"forward_host": "${BACKEND_CONTAINER}",
"forward_port": 80,
"enabled": true
}
EOF
)
# -w "\n%{http_code}" appends the status code as a final line; tail -n1
# below extracts it while the body is discarded.
CREATE_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${PROXY_HOST_PAYLOAD}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/proxy-hosts")
CREATE_STATUS=$(echo "$CREATE_RESP" | tail -n1)
if [ "$CREATE_STATUS" = "201" ]; then
log_info "Proxy host created successfully"
else
log_info "Proxy host may already exist (status: $CREATE_STATUS)"
fi
# Wait for Caddy to apply config
sleep 3
# ============================================================================
# Step 5: Create WAF ruleset (XSS protection)
# ============================================================================
log_info "Creating XSS WAF ruleset..."
# Quoted 'EOF' heredoc: the SecRule content is taken literally, no expansion.
# Rule 99001 denies with 403 on a "<script" token in body/args (phase 2).
XSS_RULESET=$(cat <<'EOF'
{
"name": "cerberus-xss",
"content": "SecRule REQUEST_BODY|ARGS|ARGS_NAMES \"<script\" \"id:99001,phase:2,deny,status:403,msg:'XSS Attack Detected'\""
}
EOF
)
XSS_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${XSS_RULESET}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/rulesets")
XSS_STATUS=$(echo "$XSS_RESP" | tail -n1)
if [ "$XSS_STATUS" = "200" ] || [ "$XSS_STATUS" = "201" ]; then
log_info "XSS ruleset created"
else
log_warn "XSS ruleset creation returned status: $XSS_STATUS"
fi
# ============================================================================
# Step 6: Enable WAF in block mode + configure rate limiting
# ============================================================================
log_info "Enabling WAF (block mode) and rate limiting (${RATE_LIMIT_REQUESTS} req / ${RATE_LIMIT_WINDOW_SEC} sec)..."
# Unquoted EOF heredoc: the RATE_LIMIT_* values are interpolated here.
SECURITY_CONFIG=$(cat <<EOF
{
"name": "default",
"enabled": true,
"waf_mode": "block",
"waf_rules_source": "cerberus-xss",
"rate_limit_enable": true,
"rate_limit_requests": ${RATE_LIMIT_REQUESTS},
"rate_limit_window_sec": ${RATE_LIMIT_WINDOW_SEC},
"rate_limit_burst": 1,
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
SEC_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${SECURITY_CONFIG}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/config")
SEC_STATUS=$(echo "$SEC_RESP" | tail -n1)
if [ "$SEC_STATUS" = "200" ]; then
log_info "Security configuration applied"
else
log_warn "Security config returned status: $SEC_STATUS"
fi
# Wait for Caddy to reload with all security features
log_info "Waiting for Caddy to apply security configuration..."
sleep 5
echo ""
echo "=============================================="
echo "=== Running Cerberus Integration Test Cases ==="
echo "=============================================="
echo ""
# ============================================================================
# TC-1: Verify all features enabled via /api/v1/security/status
# ============================================================================
log_test "TC-1: Verify All Features Enabled"
STATUS_RESP=$(curl -s -b "${TMP_COOKIE}" "http://localhost:${API_PORT}/api/v1/security/status")
# NOTE(review): \s in grep -E is a GNU extension — fine on Linux CI images,
# not portable to BSD grep.
# Check Cerberus enabled (nested: "cerberus":{"enabled":true})
if echo "$STATUS_RESP" | grep -qE '"cerberus":\s*\{[^}]*"enabled":\s*true'; then
log_info " ✓ Cerberus enabled"
PASSED=$((PASSED + 1))
else
fail_test "Cerberus not enabled in status response"
fi
# Check WAF mode (nested: "waf":{"mode":"block",...})
if echo "$STATUS_RESP" | grep -qE '"waf":\s*\{[^}]*"mode":\s*"block"'; then
log_info " ✓ WAF mode is 'block'"
PASSED=$((PASSED + 1))
else
fail_test "WAF mode not set to 'block'"
fi
# Check rate limit enabled (nested: "rate_limit":{"enabled":true,...})
if echo "$STATUS_RESP" | grep -qE '"rate_limit":\s*\{[^}]*"enabled":\s*true'; then
log_info " ✓ Rate limit enabled"
PASSED=$((PASSED + 1))
else
fail_test "Rate limit not enabled"
fi
# ============================================================================
# TC-2: Verify handler order in Caddy config
# ============================================================================
log_test "TC-2: Verify Handler Order in Caddy Config"
CADDY_CONFIG=$(curl -sL "http://localhost:${CADDY_ADMIN_PORT}/config/" 2>/dev/null || echo "")
if [ -z "$CADDY_CONFIG" ]; then
fail_test "Could not retrieve Caddy config"
else
# Check for WAF handler
if echo "$CADDY_CONFIG" | grep -q '"handler":"waf"'; then
log_info " ✓ WAF handler found in Caddy config"
PASSED=$((PASSED + 1))
else
fail_test "WAF handler not found in Caddy config"
fi
# Check for rate_limit handler
if echo "$CADDY_CONFIG" | grep -q '"handler":"rate_limit"'; then
log_info " ✓ rate_limit handler found in Caddy config"
PASSED=$((PASSED + 1))
else
fail_test "rate_limit handler not found in Caddy config"
fi
# Check for reverse_proxy handler (should be last)
if echo "$CADDY_CONFIG" | grep -q '"handler":"reverse_proxy"'; then
log_info " ✓ reverse_proxy handler found in Caddy config"
PASSED=$((PASSED + 1))
else
fail_test "reverse_proxy handler not found in Caddy config"
fi
# Verify security handlers appear before reverse_proxy
# Since Caddy JSON can be minified (one line), use byte offset approach
# (grep -ob prints byte offsets; first match's offset approximates position)
WAF_POS=$(echo "$CADDY_CONFIG" | grep -ob '"handler":"waf"' | head -1 | cut -d: -f1 || echo "0")
RATE_POS=$(echo "$CADDY_CONFIG" | grep -ob '"handler":"rate_limit"' | head -1 | cut -d: -f1 || echo "0")
PROXY_POS=$(echo "$CADDY_CONFIG" | grep -ob '"handler":"reverse_proxy"' | head -1 | cut -d: -f1 || echo "0")
if [ "$WAF_POS" != "0" ] && [ "$RATE_POS" != "0" ] && [ "$PROXY_POS" != "0" ]; then
if [ "$WAF_POS" -lt "$PROXY_POS" ] && [ "$RATE_POS" -lt "$PROXY_POS" ]; then
log_info " ✓ Security handlers appear before reverse_proxy"
PASSED=$((PASSED + 1))
else
fail_test "Security handlers not in correct order"
fi
else
# Offsets of 0 mean a handler string sits at buffer start or was not found;
# treat as inconclusive rather than failing the ordering check.
log_warn " Could not determine exact handler positions (may be nested)"
PASSED=$((PASSED + 1))
fi
fi
# ============================================================================
# TC-3: WAF blocking doesn't consume rate limit quota
# ============================================================================
log_test "TC-3: WAF Blocking Doesn't Consume Rate Limit"
log_info " Sending 3 malicious requests (should be blocked by WAF with 403)..."
WAF_BLOCKED=0
# URL-encoded <script>alert(1)</script> in the query string should trip
# WAF rule 99001 (see Step 5) before the request reaches the limiter.
for i in 1 2 3; do
CODE=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?q=%3Cscript%3Ealert(1)%3C/script%3E")
if [ "$CODE" = "403" ]; then
WAF_BLOCKED=$((WAF_BLOCKED + 1))
log_info " Malicious request $i: HTTP $CODE (WAF blocked) ✓"
else
log_warn " Malicious request $i: HTTP $CODE (expected 403)"
fi
done
if [ $WAF_BLOCKED -eq 3 ]; then
log_info " ✓ All 3 malicious requests blocked by WAF"
PASSED=$((PASSED + 1))
else
fail_test "Not all malicious requests were blocked by WAF ($WAF_BLOCKED/3)"
fi
# If WAF-blocked requests consumed quota, these would start hitting the limit.
log_info " Sending ${RATE_LIMIT_REQUESTS} legitimate requests (should all succeed with 200)..."
LEGIT_SUCCESS=0
for i in $(seq 1 ${RATE_LIMIT_REQUESTS}); do
CODE=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?name=john&id=$i")
if [ "$CODE" = "200" ]; then
LEGIT_SUCCESS=$((LEGIT_SUCCESS + 1))
log_info " Legitimate request $i: HTTP $CODE"
else
log_warn " Legitimate request $i: HTTP $CODE (expected 200)"
fi
sleep 0.1
done
if [ $LEGIT_SUCCESS -eq ${RATE_LIMIT_REQUESTS} ]; then
log_info " ✓ All ${RATE_LIMIT_REQUESTS} legitimate requests succeeded"
PASSED=$((PASSED + 1))
else
fail_test "Not all legitimate requests succeeded ($LEGIT_SUCCESS/${RATE_LIMIT_REQUESTS})"
fi
# ============================================================================
# TC-4: Legitimate traffic flows through all layers
# ============================================================================
log_test "TC-4: Legitimate Traffic Flows Through All Layers"
# Wait for rate limit window to reset
log_info " Waiting for rate limit window to reset (${RATE_LIMIT_WINDOW_SEC} seconds + buffer)..."
sleep $((RATE_LIMIT_WINDOW_SEC + 2))
log_info " Sending 10 legitimate requests..."
FLOW_SUCCESS=0
for i in $(seq 1 10); do
BODY=$(curl -s -H "Host: ${TEST_DOMAIN}" "http://localhost:${HTTP_PORT}/get?test=$i")
# httpbin's /get response contains these JSON keys when reached.
if echo "$BODY" | grep -q "args\|headers\|origin\|url"; then
FLOW_SUCCESS=$((FLOW_SUCCESS + 1))
echo " Request $i: ✓ Success (reached upstream)"
else
echo " Request $i: ✗ Failed (response: ${BODY:0:100}...)"
fi
# Space out requests to avoid hitting rate limit
sleep 0.5
done
log_info " Total successful: $FLOW_SUCCESS/10"
# Majority threshold: spaced requests can still brush the small test quota.
if [ $FLOW_SUCCESS -ge 5 ]; then
log_info " ✓ Legitimate traffic flowing through all layers"
PASSED=$((PASSED + 1))
else
fail_test "Too many legitimate requests failed ($FLOW_SUCCESS/10)"
fi
# ============================================================================
# TC-5: Basic latency check
# ============================================================================
log_test "TC-5: Basic Latency Check"
# Wait for rate limit window to reset again
log_info " Waiting for rate limit window to reset..."
sleep $((RATE_LIMIT_WINDOW_SEC + 2))
# Measure latency for a single request
LATENCY=$(curl -s -o /dev/null -w "%{time_total}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get")
log_info " Single request latency: ${LATENCY}s"
# Convert to milliseconds for comparison (using awk since bc may not be available)
LATENCY_MS=$(echo "$LATENCY" | awk '{printf "%.0f", $1 * 1000}')
# Generous 5s threshold: this is a smoke check, not a benchmark.
if [ "$LATENCY_MS" -lt 5000 ]; then
log_info " ✓ Latency ${LATENCY_MS}ms is within acceptable range (<5000ms)"
PASSED=$((PASSED + 1))
else
fail_test "Latency ${LATENCY_MS}ms exceeds threshold"
fi
# ============================================================================
# Results Summary
# ============================================================================
echo ""
echo "=============================================="
echo "=== Cerberus Full Integration Test Results ==="
echo "=============================================="
echo ""
echo -e " ${GREEN}Passed:${NC} $PASSED"
echo -e " ${RED}Failed:${NC} $FAILED"
echo ""
# Exit 0 only when every check passed; the EXIT trap still runs cleanup.
if [ $FAILED -eq 0 ]; then
echo "=============================================="
echo "=== ALL CERBERUS INTEGRATION TESTS PASSED ==="
echo "=============================================="
echo ""
exit 0
else
echo "=============================================="
echo "=== CERBERUS TESTS FAILED ==="
echo "=============================================="
echo ""
exit 1
fi

View File

@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Verifies that the .version file matches the most recent Git tag.
# Skips (exit 0) when there is no .version file or no tags yet, so fresh
# repositories are not blocked; exits 1 only on a real mismatch.

# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh utility-version-check
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh utility-version-check" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"

if [ ! -f ".version" ]; then
echo "No .version file present; skipping version consistency check"
exit 0
fi

# Strip CR/LF in one pass (no useless cat, single tr invocation).
VERSION_FILE=$(tr -d '\r\n' < .version)

GIT_TAG="$(git describe --tags --abbrev=0 2>/dev/null || echo "")"
if [ -z "$GIT_TAG" ]; then
echo "No tags in repository; cannot validate .version against tag"
# Do not fail; allow commits when no tags exist
exit 0
fi

# Normalize: strip a leading "v" if present. Pure parameter expansion —
# no sed fork per call.
normalize() {
printf '%s\n' "${1#v}"
}

TAG_NORM=$(normalize "$GIT_TAG")
VER_NORM=$(normalize "$VERSION_FILE")
if [ "$TAG_NORM" != "$VER_NORM" ]; then
echo "ERROR: .version ($VERSION_FILE) does not match latest Git tag ($GIT_TAG)"
echo "To sync, either update .version or tag with 'v$VERSION_FILE'"
exit 1
fi
echo "OK: .version matches latest Git tag $GIT_TAG"

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Quick sanity build of the backend Go module.
# Prints toolchain info, then runs `go build ./...` from backend/.
# Exit codes: 0 = build OK, 2 = build failed.

ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
echo "[charon] repo root: $ROOT_DIR"

# Toolchain diagnostics are best-effort (|| true): they must not abort the
# script under set -e when go is misconfigured.
echo "-- go version --"
go version || true
echo "-- go env --"
go env || true

echo "-- go list (backend) --"
cd "$ROOT_DIR/backend"
# The first line of go.mod is the module directive (no useless cat | sed).
echo "module: $(head -n 1 go.mod)"
go list -deps ./... | wc -l || true

echo "-- go build backend ./... --"
if go build ./...; then
echo "BUILD_OK"
exit 0
else
echo "BUILD_FAIL"
echo "Run 'cd backend && go build -v ./...' for verbose output"
exit 2
fi

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# CI wrapper that fails if the repo contains historical objects or commits
# touching specified paths, or objects larger than the configured strip size.

# Defaults; both overridable via flags below.
PATHS="backend/codeql-db,codeql-db,codeql-db-js,codeql-db-go"
STRIP_SIZE=50

usage() {
cat <<EOF
Usage: $0 [--paths 'p1,p2'] [--strip-size N]
Runs a quick, non-destructive check against the repository history and fails
with a non-zero exit code if any commits or objects are found that touch the
specified paths or if any historical blobs exceed the --strip-size in MB.
EOF
}

# Parse long-form options.
while [ "$#" -gt 0 ]; do
case "$1" in
--paths)
PATHS="$2"; shift 2;;
--strip-size)
STRIP_SIZE="$2"; shift 2;;
--help)
usage; exit 0;;
*)
echo "Unknown option: $1" >&2; usage; exit 1;;
esac
done

# Split the comma-separated path list (globbing disabled while splitting).
IFS=','; set -f
paths_list=""
for p in $PATHS; do
paths_list="$paths_list $p"
done
set +f; unset IFS

echo "Checking repository history for banned paths: $paths_list"
echo "Blobs larger than: ${STRIP_SIZE}M will fail the check"
failed=0

# Temp files — created and registered for cleanup BEFORE first use so nothing
# leaks on an early exit. (Previously the trap was installed after the first
# git call, and a second tmp_oids was mktemp'd later, leaking the first.)
tmp_objects=$(mktemp)
blob_list=$(mktemp)
tmp_oids=$(mktemp)
trap 'rm -f "$tmp_objects" "$blob_list" "$tmp_oids"' EXIT INT TERM

# 1) Check for commits touching paths
for p in $paths_list; do
count=$(git rev-list --all -- "$p" | wc -l | tr -d ' ')
if [ "$count" -gt 0 ]; then
echo "ERROR: Found $count historical commit(s) touching path: $p"
git rev-list --all -- "$p" | nl -ba | sed -n '1,50p'
echo "DRY-RUN FAILED: historical commits detected"
exit 1
else
echo "OK: No history touching: $p"
fi
done

# 2) Check for blob objects in paths only (ignore tag/commit objects)
# shellcheck disable=SC2086 # $paths_list is intentionally unquoted to expand into multiple args
git rev-list --objects --all -- $paths_list > "$tmp_objects"
blob_count=0
while read -r line; do
# First whitespace-separated field is the object id; parameter expansion
# avoids a printf|awk fork per line.
oid=${line%% *}
# Determine object type and only consider blobs
type=$(git cat-file -t "$oid" 2>/dev/null || true)
if [ "$type" = "blob" ]; then
echo "$line" >> "$blob_list"
blob_count=$((blob_count + 1))
fi
done < "$tmp_objects"
if [ "$blob_count" -gt 0 ]; then
echo "ERROR: Found $blob_count blob object(s) in specified paths"
nl -ba "$blob_list" | sed -n '1,100p'
echo "DRY-RUN FAILED: repository blob objects found in banned paths"
exit 1
else
echo "OK: No repository blob objects in specified paths"
fi

# 3) Check for large objects across history
echo "Scanning for objects larger than ${STRIP_SIZE}M..."
large_found=0
size_limit=$((STRIP_SIZE * 1024 * 1024))
git rev-list --objects --all | awk '{print $1}' > "$tmp_oids"
# One batched cat-file call instead of a `git cat-file -s` fork per object.
# Output lines: "<type> <oid> <size>"; unreadable oids emit "<oid> missing",
# which leaves the size field empty and is skipped.
while read -r otype oid osize; do
if [ -z "${osize:-}" ]; then
continue
fi
if [ "$osize" -ge "$size_limit" ]; then
echo "LARGE OBJECT: $oid size=$osize"
large_found=1
failed=1
fi
done < <(git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize)' < "$tmp_oids")
if [ "$large_found" -eq 0 ]; then
echo "OK: No large objects detected across history"
else
echo "DRY-RUN FAILED: large historical blobs detected"
exit 1
fi

if [ "$failed" -ne 0 ]; then
echo "DRY-RUN FAILED: Repository history contains blocked entries"
exit 1
fi
echo "DRY-RUN OK: No problems detected"
exit 0

View File

@@ -1,34 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Resets local Go toolchain state: wipes the build/test/module caches and the
# gopls cache, then re-downloads the backend module dependencies.

# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh utility-clear-go-cache
# For more info: docs/AGENT_SKILLS_MIGRATION.md
cat >&2 <<'NOTICE'
⚠️ WARNING: This script is deprecated and will be removed in v2.0.0
 Please use: .github/skills/scripts/skill-runner.sh utility-clear-go-cache
 For more info: docs/AGENT_SKILLS_MIGRATION.md

NOTICE
sleep 1

# Clear Go caches and gopls cache. Each wipe is best-effort (|| true) so a
# partially configured toolchain still gets as clean as possible.
printf '%s\n' "Clearing Go build and module caches..."
go clean -cache -testcache -modcache || true

printf '%s\n' "Clearing gopls cache..."
rm -rf "${XDG_CACHE_HOME:-$HOME/.cache}/gopls" || true

printf '%s\n' "Re-downloading modules..."
cd backend || exit 1
go mod download
printf '%s\n' "Caches cleared and modules re-downloaded."

# Provide instructions for next steps
cat <<'NEXT'
Next steps:
- Restart your editor's Go language server (gopls)
- In VS Code: Command Palette -> 'Go: Restart Language Server'
- Verify the toolchain:
$ go version
$ gopls version
NEXT

View File

@@ -1,319 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh integration-test-coraza
# For more info: docs/AGENT_SKILLS_MIGRATION.md

# Print the deprecation notice to stderr so pipelines are unaffected.
{
  printf '%s\n' "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0"
  printf '%s\n' " Please use: .github/skills/scripts/skill-runner.sh integration-test-coraza"
  printf '%s\n' " For more info: docs/AGENT_SKILLS_MIGRATION.md"
  printf '%s\n' ""
} >&2
sleep 1

# Brief: Integration test for Coraza WAF using Docker Compose and built image
# Steps:
# 1. Build the local image: docker build -t charon:local .
# 2. Start docker-compose.local.yml: docker compose -f .docker/compose/docker-compose.local.yml up -d
# 3. Wait for API to be ready and then configure a ruleset that blocks a simple signature
# 4. Request a path containing the signature and verify 403 (or WAF block response)

# Resolve the repository root relative to this script's own location and
# run everything from there (set -e aborts if the cd fails).
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"

# ============================================================================
# Helper Functions
# ============================================================================
# Verifies WAF handler is present in Caddy config with correct ruleset
#
# Arguments: $1 - ruleset name expected in the WAF directives
#            (default: integration-xss)
# Outputs:   progress messages on stdout
# Returns:   0 when the WAF handler is present and the ruleset appears in
#            the directives; 1 after exhausting all retries.
verify_waf_config() {
  local expected_ruleset="${1:-integration-xss}"
  local -r retries=10
  local -r wait=3
  local i caddy_config
  echo "Verifying WAF config (expecting ruleset: ${expected_ruleset})..."
  # C-style loop instead of `seq` (no extra process); all expansions quoted.
  for (( i = 1; i <= retries; i++ )); do
    # Fetch Caddy config via admin API; empty string on any curl failure.
    caddy_config=$(curl -s http://localhost:2019/config 2>/dev/null || echo "")
    if [ -z "$caddy_config" ]; then
      echo " Attempt $i/$retries: Caddy admin API not responding, retrying..."
      sleep "$wait"
      continue
    fi
    # Check for WAF handler (grep on the compact JSON the admin API emits).
    if echo "$caddy_config" | grep -q '"handler":"waf"'; then
      echo " ✓ WAF handler found in Caddy config"
      # Also verify the directives include our ruleset
      if echo "$caddy_config" | grep -q "$expected_ruleset"; then
        echo " ✓ Ruleset '${expected_ruleset}' found in directives"
        return 0
      else
        echo " ⚠ WAF handler present but ruleset '${expected_ruleset}' not found in directives"
      fi
    else
      echo " Attempt $i/$retries: WAF handler not found, waiting..."
    fi
    sleep "$wait"
  done
  echo " ✗ WAF handler verification failed after $retries attempts"
  return 1
}
# Dumps debug information on failure
# Installed via `trap ... ERR` below: prints container logs, the live Caddy
# config, the generated Coraza ruleset files and the relevant API state.
# The failing command's exit code is captured first so later commands in
# this handler do not clobber it.
on_failure() {
local exit_code=$?
echo ""
echo "=============================================="
echo "=== FAILURE DEBUG INFO (exit code: $exit_code) ==="
echo "=============================================="
echo ""
echo "=== Charon API Logs (last 150 lines) ==="
docker logs charon-debug 2>&1 | tail -150 || echo "Could not retrieve container logs"
echo ""
echo "=== Caddy Admin API Config ==="
# head -300 keeps the dump bounded; the full config can be very large.
curl -s http://localhost:2019/config 2>/dev/null | head -300 || echo "Could not retrieve Caddy config"
echo ""
echo "=== Ruleset Files in Container ==="
docker exec charon-debug sh -c 'ls -la /app/data/caddy/coraza/rulesets/ 2>/dev/null' || echo "No rulesets directory found"
echo ""
echo "=== Ruleset File Contents ==="
docker exec charon-debug sh -c 'cat /app/data/caddy/coraza/rulesets/*.conf 2>/dev/null' || echo "No ruleset files found"
echo ""
echo "=== Security Config in API ==="
# NOTE(review): these API calls are unauthenticated; presumably they still
# return useful output (or an error body) in this debug context — confirm.
curl -s http://localhost:8080/api/v1/security/config 2>/dev/null || echo "Could not retrieve security config"
echo ""
echo "=== Proxy Hosts ==="
curl -s http://localhost:8080/api/v1/proxy-hosts 2>/dev/null | head -50 || echo "Could not retrieve proxy hosts"
echo ""
echo "=============================================="
echo "=== END DEBUG INFO ==="
echo "=============================================="
}
# Set up trap to dump debug info on any error
trap on_failure ERR
echo "Starting Coraza integration test..."
# Docker is required for every subsequent step; bail out early if missing.
if ! command -v docker >/dev/null 2>&1; then
echo "docker is not available; aborting"
exit 1
fi
# Build the image if it doesn't already exist (CI workflow builds it beforehand)
if ! docker image inspect charon:local >/dev/null 2>&1; then
echo "Building charon:local image..."
docker build -t charon:local .
else
echo "Using existing charon:local image"
fi
# Run charon using docker run to ensure we pass CHARON_SECURITY_WAF_MODE and control network membership for integration
docker rm -f charon-debug >/dev/null 2>&1 || true
if ! docker network inspect containers_default >/dev/null 2>&1; then
  docker network create containers_default
fi
# NOTE: We intentionally do NOT mount $(pwd)/backend or $(pwd)/frontend/dist here.
# In CI, frontend/dist does not exist (it's built inside the Docker image).
# Mounting a non-existent directory would override the built frontend with an empty dir.
# For local development with hot-reload, use .docker/compose/docker-compose.local.yml instead.
docker run -d --name charon-debug --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network containers_default -p 80:80 -p 443:443 -p 8080:8080 -p 2019:2019 -p 2345:2345 \
-e CHARON_ENV=development -e CHARON_DEBUG=1 -e CHARON_HTTP_PORT=8080 -e CHARON_DB_PATH=/app/data/charon.db -e CHARON_FRONTEND_DIR=/app/frontend/dist \
-e CHARON_CADDY_ADMIN_API=http://localhost:2019 -e CHARON_CADDY_CONFIG_DIR=/app/data/caddy -e CHARON_CADDY_BINARY=caddy -e CHARON_IMPORT_CADDYFILE=/import/Caddyfile \
-e CHARON_IMPORT_DIR=/app/data/imports -e CHARON_ACME_STAGING=false -e CHARON_SECURITY_WAF_MODE=block \
-v charon_data:/app/data -v caddy_data:/data -v caddy_config:/config -v /var/run/docker.sock:/var/run/docker.sock:ro charon:local
echo "Waiting for Charon API to be ready..."
# Poll the API for up to 30s. Previously the script fell through and kept
# going even when the API never became ready; now it aborts explicitly
# (mirroring the check in the CrowdSec integration script).
api_ready=0
for i in {1..30}; do
  if curl -s -f http://localhost:8080/api/v1/ >/dev/null 2>&1; then
    api_ready=1
    break
  fi
  echo -n '.'
  sleep 1
done
if [ "$api_ready" -ne 1 ]; then
  echo "Charon API failed to start within 30s"
  exit 1
fi
echo "Skipping unauthenticated ruleset creation (will register and create with cookie later)..."
echo "Creating a backend container for proxy host..."
# ensure the overlay network exists (docker-compose uses containers_default)
# CREATED_NETWORK is tracked so the cleanup phase only removes the network
# if this script created it (and leaves pre-existing networks alone).
CREATED_NETWORK=0
if ! docker network inspect containers_default >/dev/null 2>&1; then
docker network create containers_default
CREATED_NETWORK=1
fi
docker rm -f coraza-backend >/dev/null 2>&1 || true
docker run -d --name coraza-backend --network containers_default kennethreitz/httpbin
echo "Waiting for httpbin backend to be ready..."
# Probe the backend from *inside* the charon container: this verifies both
# that httpbin is serving and that cross-container DNS on the shared network
# resolves, which is what the proxy will rely on.
for i in {1..20}; do
# Check if container is running and has network connectivity
if docker exec charon-debug sh -c 'curl -s http://coraza-backend/get' >/dev/null 2>&1; then
echo "✓ httpbin backend is ready"
break
fi
if [ $i -eq 20 ]; then
echo "✗ httpbin backend failed to start"
echo "Container status:"
docker ps -a --filter name=coraza-backend
echo "Container logs:"
docker logs coraza-backend 2>&1 | tail -20
exit 1
fi
echo -n '.'
sleep 1
done
echo "Registering admin user and logging in to retrieve session cookie..."
TMP_COOKIE=$(mktemp)
# Registration is best-effort (|| true): it fails harmlessly if the user
# already exists from a previous run; the login below is what matters.
curl -s -X POST -H "Content-Type: application/json" -d '{"email":"integration@example.local","password":"password123","name":"Integration Tester"}' http://localhost:8080/api/v1/auth/register >/dev/null || true
curl -s -X POST -H "Content-Type: application/json" -d '{"email":"integration@example.local","password":"password123"}' -c ${TMP_COOKIE} http://localhost:8080/api/v1/auth/login >/dev/null
echo "Creating proxy host 'integration.local' pointing to backend..."
# Unquoted here-doc delimiter: $-expansion stays off because the payload
# contains only literal JSON (the \" escapes survive into the JSON string).
PROXY_HOST_PAYLOAD=$(cat <<EOF
{
"name": "integration-backend",
"domain_names": "integration.local",
"forward_scheme": "http",
"forward_host": "coraza-backend",
"forward_port": 80,
"enabled": true,
"advanced_config": "{\"handler\":\"waf\",\"ruleset_name\":\"integration-xss\"}"
}
EOF
)
# -w appends the HTTP status on its own line so it can be split off below.
CREATE_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" -d "${PROXY_HOST_PAYLOAD}" -b ${TMP_COOKIE} http://localhost:8080/api/v1/proxy-hosts)
CREATE_STATUS=$(echo "$CREATE_RESP" | tail -n1)
if [ "$CREATE_STATUS" != "201" ]; then
echo "Proxy host create failed or already exists; attempting to update existing host..."
# Find the existing host UUID by searching for the domain in the proxy-hosts list
# NOTE(review): grep/sed-based JSON parsing — brittle, assumes compact JSON
# with fields in this order; consider jq if it becomes a dependency.
EXISTING_UUID=$(curl -s -b ${TMP_COOKIE} http://localhost:8080/api/v1/proxy-hosts | grep -o '{[^}]*"domain_names":"integration.local"[^}]*}' | head -n1 | grep -o '"uuid":"[^"]*"' | sed 's/"uuid":"\([^"]*\)"/\1/')
if [ -n "$EXISTING_UUID" ]; then
echo "Updating existing host $EXISTING_UUID with Coraza handler"
curl -s -X PUT -H "Content-Type: application/json" -d "${PROXY_HOST_PAYLOAD}" -b ${TMP_COOKIE} http://localhost:8080/api/v1/proxy-hosts/$EXISTING_UUID
else
echo "Could not find existing host; create response:"
echo "$CREATE_RESP"
fi
fi
echo "Give Caddy a moment to apply configuration..."
sleep 3
echo "Creating simple WAF ruleset (XSS block)..."
RULESET=$(cat <<'EOF'
{"name":"integration-xss","content":"SecRule REQUEST_BODY \"<script>\" \"id:12345,phase:2,deny,status:403,msg:'XSS blocked'\""}
EOF
)
curl -s -X POST -H "Content-Type: application/json" -d "${RULESET}" -b ${TMP_COOKIE} http://localhost:8080/api/v1/security/rulesets
echo "Enable WAF globally and set ruleset source to integration-xss..."
SEC_CFG_PAYLOAD='{"name":"default","enabled":true,"waf_mode":"block","waf_rules_source":"integration-xss","admin_whitelist":"0.0.0.0/0"}'
curl -s -X POST -H "Content-Type: application/json" -d "${SEC_CFG_PAYLOAD}" -b ${TMP_COOKIE} http://localhost:8080/api/v1/security/config
echo "Waiting for Caddy to apply WAF configuration..."
sleep 10
# Verify WAF handler is properly configured before proceeding
# Note: This is advisory - if admin API is restarting we'll proceed anyway
if ! verify_waf_config "integration-xss"; then
echo "WARNING: WAF configuration verification failed (admin API may be restarting)"
echo "Proceeding with test anyway..."
fi
echo "Apply rules and test payload..."
# create minimal proxy host if needed; omitted here for brevity; test will target local Caddy root
echo "Verifying Caddy config has WAF handler..."
curl -s http://localhost:2019/config | grep -E '"handler":"waf"' || echo "WARNING: WAF handler not found in initial config check"
echo "Inspecting ruleset file inside container..."
docker exec charon-debug sh -c 'cat /app/data/caddy/coraza/rulesets/integration-xss-*.conf' || echo "WARNING: Could not read ruleset file"
echo ""
echo "=== Testing BLOCK mode ==="
MAX_RETRIES=3
BLOCK_SUCCESS=0
for attempt in $(seq 1 $MAX_RETRIES); do
RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" -d "<script>alert(1)</script>" -H "Host: integration.local" http://localhost/post)
if [ "$RESPONSE" = "403" ]; then
echo "✓ Coraza WAF blocked payload as expected (HTTP 403) in BLOCK mode"
BLOCK_SUCCESS=1
break
fi
if [ $attempt -eq $MAX_RETRIES ]; then
echo "✗ Unexpected response code: $RESPONSE (expected 403) in BLOCK mode after $MAX_RETRIES attempts"
exit 1
fi
echo " Attempt $attempt: Got $RESPONSE, retrying in 2s..."
sleep 2
done
echo ""
echo "=== Testing MONITOR mode (DetectionOnly) ==="
echo "Switching WAF to monitor mode..."
SEC_CFG_MONITOR='{"name":"default","enabled":true,"waf_mode":"monitor","waf_rules_source":"integration-xss","admin_whitelist":"0.0.0.0/0"}'
curl -s -X POST -H "Content-Type: application/json" -d "${SEC_CFG_MONITOR}" -b ${TMP_COOKIE} http://localhost:8080/api/v1/security/config
echo "Wait for Caddy to apply monitor mode config..."
sleep 12
# Verify WAF handler is still present after mode switch
# Note: This is advisory - if admin API is restarting we'll proceed anyway
if ! verify_waf_config "integration-xss"; then
echo "WARNING: WAF config verification failed after mode switch (admin API may be restarting)"
echo "Proceeding with test anyway..."
fi
echo "Inspecting ruleset file (should now have DetectionOnly)..."
docker exec charon-debug sh -c 'cat /app/data/caddy/coraza/rulesets/integration-xss-*.conf | head -5' || true
MONITOR_SUCCESS=0
for attempt in $(seq 1 $MAX_RETRIES); do
RESPONSE_MONITOR=$(curl -s -o /dev/null -w "%{http_code}" -d "<script>alert(1)</script>" -H "Host: integration.local" http://localhost/post)
if [ "$RESPONSE_MONITOR" = "200" ]; then
echo "✓ Coraza WAF in MONITOR mode allowed payload through (HTTP 200) as expected"
MONITOR_SUCCESS=1
break
fi
if [ $attempt -eq $MAX_RETRIES ]; then
echo "✗ Unexpected response code: $RESPONSE_MONITOR (expected 200) in MONITOR mode after $MAX_RETRIES attempts"
echo " Note: Monitor mode should log but not block"
exit 1
fi
echo " Attempt $attempt: Got $RESPONSE_MONITOR, retrying in 2s..."
sleep 2
done
echo ""
echo "=== All Coraza integration tests passed ==="
echo "Cleaning up..."
# Delete the integration test proxy host from DB before stopping container
echo "Removing integration test proxy host from database..."
# Cookie-file expansions are now quoted (SC2086): mktemp output can contain
# characters that would word-split when unquoted under unusual TMPDIR values.
INTEGRATION_UUID=$(curl -s -b "${TMP_COOKIE}" http://localhost:8080/api/v1/proxy-hosts | grep -o '"uuid":"[^"]*"[^}]*"domain_names":"integration.local"' | head -n1 | grep -o '"uuid":"[^"]*"' | sed 's/"uuid":"\([^"]*\)"/\1/')
if [ -n "$INTEGRATION_UUID" ]; then
  curl -s -X DELETE -b "${TMP_COOKIE}" "http://localhost:8080/api/v1/proxy-hosts/${INTEGRATION_UUID}?delete_uptime=true" >/dev/null
  echo "✓ Deleted integration proxy host ${INTEGRATION_UUID}"
fi
docker rm -f coraza-backend || true
# Only remove the network if this script created it (see CREATED_NETWORK).
if [ "$CREATED_NETWORK" -eq 1 ]; then
  docker network rm containers_default || true
fi
docker rm -f charon-debug || true
# `--` guards against a cookie path that begins with a dash.
rm -f -- "${TMP_COOKIE}"
echo "Done"

View File

@@ -1,391 +0,0 @@
#!/bin/bash
# Creates the GitHub issue hierarchy (one main issue plus seven sub-issues)
# for manual testing of the Bulk ACL feature, via the `gh` CLI.
# Requires: an authenticated `gh`, and a .version file in the working dir.
set -e
REPO="Wikid82/charon"
# NOTE(review): MILESTONE is computed but never referenced in this script —
# presumably intended for `gh issue create --milestone`; confirm or remove.
MILESTONE="v$(cat .version | tr -d '\n')"
echo "Creating Bulk ACL Testing Issues for $REPO"
echo "============================================"
# Create main issue
# The issue number is extracted from the URL `gh issue create` prints.
# NOTE(review): `grep -oP` (PCRE look-behind) requires GNU grep — this will
# not work with BSD/macOS grep; confirm the CI runner always has GNU grep.
# The sub-issue references in the body are placeholders (#TBD) and are
# rewritten with real numbers by the `gh issue edit` at the end.
echo ""
echo "Creating main testing issue..."
MAIN_ISSUE=$(gh issue create \
--repo "$REPO" \
--title "Test: Bulk ACL Application Feature" \
--label "beta,high,feature,frontend,backend" \
--body "## Description
Comprehensive testing required for the newly implemented Bulk ACL (Access Control List) application feature. This feature allows users to apply or remove access lists from multiple proxy hosts simultaneously.
## Feature Overview
The bulk ACL feature introduces:
### Backend Testing ✅ (Completed)
- [x] Unit tests for \`BulkUpdateACL\` handler (5 tests)
- [x] Coverage: 82.2% maintained
- [x] Coverage: 86.06% (improved from 85.57%)
## Sub-Issues
- [ ] #TBD - UI/UX Testing
- [ ] #TBD - Integration Testing
- [ ] #TBD - Cross-Browser Testing
- [ ] #TBD - Regression Testing
## Success Criteria
- ✅ All manual test checklists completed
- ✅ No critical bugs found
- ✅ Performance acceptable with 50+ hosts
- ✅ UI/UX meets design standards
- ✅ Cross-browser compatibility confirmed
- ✅ No regressions in existing features
## Related Files
**Backend:**
- \`backend/internal/api/handlers/proxy_host_handler.go\`
- \`backend/internal/api/handlers/proxy_host_handler_test.go\`
**Frontend:**
- \`frontend/src/pages/ProxyHosts.tsx\`
- \`frontend/src/api/proxyHosts.ts\`
- \`frontend/src/hooks/useProxyHosts.ts\`
**Documentation:**
- \`BULK_ACL_FEATURE.md\`
- \`docs/issues/bulk-acl-testing.md\`
- \`docs/issues/bulk-acl-subissues.md\`
**Implementation Date**: November 27, 2025
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created main issue #$MAIN_ISSUE"
# Sub-issue 1: Basic Functionality
# Each sub-issue body links back to the main issue via "Part of #$MAIN_ISSUE";
# the created issue number is captured from gh's output URL (GNU grep -oP).
echo ""
echo "Creating sub-issue #1: Basic Functionality..."
SUB1=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] Basic Functionality - Selection and Application" \
--label "beta,medium,feature,frontend" \
--body "Part of #$MAIN_ISSUE
## Description
Test the core functionality of the bulk ACL feature - selecting hosts and applying access lists.
## Test Checklist
- [ ] Navigate to Proxy Hosts page
- [ ] Verify checkbox column appears in table
- [ ] Select individual hosts using checkboxes
- [ ] Verify \"Select All\" checkbox works correctly
- [ ] Confirm selection count displays accurately
- [ ] Click \"Bulk Actions\" button - modal should appear
- [ ] Select an ACL from dropdown - hosts should update
- [ ] Verify toast notification shows success message
- [ ] Confirm hosts table refreshes with updated ACL assignments
- [ ] Check database to verify \`access_list_id\` fields updated
## Expected Results
- All checkboxes functional
- Selection count accurate
- Modal displays correctly
- ACL applies to all selected hosts
- Database reflects changes
## Test Environment
Local development
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB1"
# Sub-issue 2: ACL Removal
echo ""
echo "Creating sub-issue #2: ACL Removal..."
SUB2=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] ACL Removal Functionality" \
--label "beta,medium,feature,frontend" \
--body "Part of #$MAIN_ISSUE
## Description
Test the ability to remove access lists from multiple hosts simultaneously.
## Test Checklist
- [ ] Select hosts that have ACLs assigned
- [ ] Open Bulk Actions modal
- [ ] Select \"🚫 Remove Access List\" option
- [ ] Confirm removal dialog appears
- [ ] Proceed with removal
- [ ] Verify toast shows \"Access list removed from X host(s)\"
- [ ] Confirm hosts no longer have ACL assigned in UI
- [ ] Check database to verify \`access_list_id\` is NULL
## Expected Results
- Removal option clearly visible
- Confirmation dialog prevents accidental removal
- All selected hosts have ACL removed
- Database updated correctly (NULL values)
## Test Environment
Local development
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB2"
# Sub-issue 3: Error Handling
echo ""
echo "Creating sub-issue #3: Error Handling..."
SUB3=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] Error Handling and Edge Cases" \
--label "beta,medium,feature,backend" \
--body "Part of #$MAIN_ISSUE
## Description
Test error scenarios and edge cases to ensure graceful degradation.
## Test Checklist
- [ ] Select multiple hosts including one that doesn't exist
- [ ] Apply ACL via bulk action
- [ ] Verify toast shows partial success: \"Updated X host(s), Y failed\"
- [ ] Confirm successful hosts were updated
- [ ] Test with no hosts selected (button should not appear)
- [ ] Test with empty ACL list (dropdown should show appropriate message)
- [ ] Disconnect backend - verify network error handling
- [ ] Test applying invalid ACL ID (edge case)
## Expected Results
- Partial failures handled gracefully
- Clear error messages displayed
- No data corruption on partial failures
- Network errors caught and reported
## Test Environment
Local development + simulated failures
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB3"
# Sub-issue 4: UI/UX
# Same pattern as the earlier sub-issues: create via gh, capture the issue
# number from the printed URL.
echo ""
echo "Creating sub-issue #4: UI/UX..."
SUB4=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] UI/UX and Usability" \
--label "beta,medium,frontend" \
--body "Part of #$MAIN_ISSUE
## Description
Test the user interface and experience aspects of the bulk ACL feature.
## Test Checklist
- [ ] Verify checkboxes align properly in table
- [ ] Test checkbox hover states
- [ ] Verify \"Bulk Actions\" button appears/disappears based on selection
- [ ] Test modal appearance and dismissal (click outside, ESC key)
- [ ] Verify dropdown styling and readability
- [ ] Test loading state (\`isBulkUpdating\`) - button should show \"Updating...\"
- [ ] Verify selection persists during table sorting
- [ ] Test selection persistence during table filtering (if applicable)
- [ ] Verify toast notifications don't overlap
- [ ] Test on mobile viewport (responsive design)
## Expected Results
- Clean, professional UI
- Intuitive user flow
- Proper loading states
- Mobile-friendly
- Accessible (keyboard navigation)
## Test Environment
Local development (multiple screen sizes)
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB4"
# Sub-issue 5: Integration
echo ""
echo "Creating sub-issue #5: Integration..."
SUB5=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] Integration and Performance" \
--label "beta,high,feature,backend,frontend" \
--body "Part of #$MAIN_ISSUE
## Description
Test the feature in realistic scenarios and with varying data loads.
## Test Checklist
- [ ] Create new ACL, immediately apply to multiple hosts
- [ ] Verify Caddy config reloads once (not per host)
- [ ] Test with 1 host selected
- [ ] Test with 10+ hosts selected (performance)
- [ ] Test with 50+ hosts selected (edge case)
- [ ] Apply ACL, then immediately remove it (rapid operations)
- [ ] Apply different ACLs sequentially to same host group
- [ ] Delete a host that's selected, then bulk apply ACL
- [ ] Disable an ACL, verify it doesn't appear in dropdown
- [ ] Test concurrent user scenarios (multi-tab if possible)
## Expected Results
- Single Caddy reload per bulk operation
- Performance acceptable up to 50+ hosts
- No race conditions with rapid operations
- Graceful handling of deleted/disabled entities
## Test Environment
Docker production build
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB5"
# Sub-issue 6: Cross-Browser
echo ""
echo "Creating sub-issue #6: Cross-Browser..."
SUB6=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] Cross-Browser Compatibility" \
--label "beta,low,frontend" \
--body "Part of #$MAIN_ISSUE
## Description
Verify the feature works across all major browsers and devices.
## Test Checklist
- [ ] Chrome/Chromium (latest)
- [ ] Firefox (latest)
- [ ] Safari (macOS/iOS)
- [ ] Edge (latest)
- [ ] Mobile Chrome (Android)
- [ ] Mobile Safari (iOS)
## Expected Results
- Feature works identically across all browsers
- No CSS layout issues
- No JavaScript errors in console
- Touch interactions work on mobile
## Test Environment
Multiple browsers/devices
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB6"
# Sub-issue 7: Regression
# Last sub-issue; after this, the main issue body is rewritten with the
# collected SUB1..SUB7 numbers.
echo ""
echo "Creating sub-issue #7: Regression..."
SUB7=$(gh issue create \
--repo "$REPO" \
--title "[Bulk ACL Testing] Regression Testing - Existing Features" \
--label "beta,high,feature,frontend,backend" \
--body "Part of #$MAIN_ISSUE
## Description
Ensure the new bulk ACL feature doesn't break existing functionality.
## Test Checklist
- [ ] Verify individual proxy host edit still works
- [ ] Confirm single-host ACL assignment unchanged
- [ ] Test proxy host creation with ACL pre-selected
- [ ] Verify ACL deletion prevents assignment
- [ ] Confirm existing ACL features unaffected:
- [ ] IP-based rules
- [ ] Geo-blocking rules
- [ ] Local network only rules
- [ ] Test IP functionality
- [ ] Verify certificate assignment still works
- [ ] Test proxy host enable/disable toggle
## Expected Results
- Zero regressions
- All existing features work as before
- No performance degradation
- No new bugs introduced
## Test Environment
Docker production build
" | grep -oP '(?<=github.com/Wikid82/charon/issues/)\d+')
echo "✓ Created sub-issue #$SUB7"
# Update main issue with sub-issue numbers
# Rewrites the main issue body in full, replacing the #TBD placeholders with
# the real sub-issue numbers collected above.
echo ""
echo "Updating main issue with sub-issue references..."
gh issue edit "$MAIN_ISSUE" \
--repo "$REPO" \
--body "## Description
Comprehensive testing required for the newly implemented Bulk ACL (Access Control List) application feature. This feature allows users to apply or remove access lists from multiple proxy hosts simultaneously.
## Feature Overview
The bulk ACL feature introduces:
- Multi-select checkboxes in Proxy Hosts table
- Bulk Actions button with ACL selection modal
- Backend endpoint: \`PUT /api/v1/proxy-hosts/bulk-update-acl\`
- Comprehensive error handling for partial failures
## Testing Status
### Backend Testing ✅ (Completed)
- [x] Unit tests for \`BulkUpdateACL\` handler (5 tests)
- [x] Coverage: 82.2% maintained
### Frontend Testing ✅ (Completed)
- [x] Unit tests for API client and hooks (10 tests)
- [x] Coverage: 86.06% (improved from 85.57%)
### Manual Testing 🔴 (Required)
See sub-issues below for detailed test plans.
## Sub-Issues
- [ ] #$SUB1 - Basic Functionality Testing
- [ ] #$SUB2 - ACL Removal Testing
- [ ] #$SUB3 - Error Handling Testing
- [ ] #$SUB4 - UI/UX Testing
- [ ] #$SUB5 - Integration Testing
- [ ] #$SUB6 - Cross-Browser Testing
- [ ] #$SUB7 - Regression Testing
## Success Criteria
- ✅ All manual test checklists completed
- ✅ No critical bugs found
- ✅ Performance acceptable with 50+ hosts
- ✅ UI/UX meets design standards
- ✅ Cross-browser compatibility confirmed
- ✅ No regressions in existing features
## Related Files
**Backend:**
- \`backend/internal/api/handlers/proxy_host_handler.go\`
- \`backend/internal/api/handlers/proxy_host_handler_test.go\`
**Frontend:**
- \`frontend/src/pages/ProxyHosts.tsx\`
- \`frontend/src/api/proxyHosts.ts\`
- \`frontend/src/hooks/useProxyHosts.ts\`
**Documentation:**
- \`BULK_ACL_FEATURE.md\`
- \`docs/issues/bulk-acl-testing.md\`
- \`docs/issues/bulk-acl-subissues.md\`
**Implementation Date**: November 27, 2025
"
echo "✓ Updated main issue"
# Final summary for the operator.
echo ""
echo "============================================"
echo "✅ Successfully created all issues!"
echo ""
echo "Main Issue: #$MAIN_ISSUE"
echo "Sub-Issues: #$SUB1, #$SUB2, #$SUB3, #$SUB4, #$SUB5, #$SUB6, #$SUB7"
echo ""
echo "View them at: https://github.com/$REPO/issues/$MAIN_ISSUE"

View File

@@ -1,646 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec-decisions
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec-decisions" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Brief: Integration test for CrowdSec Decision Management
# Steps:
# 1. Build the local image if not present: docker build -t charon:local .
# 2. Start Charon container with CrowdSec/Cerberus features enabled
# 3. Test CrowdSec status endpoint
# 4. Test decisions list (expect empty initially)
# 5. Test ban IP operation
# 6. Verify ban appears in decisions list
# 7. Test unban IP operation
# 8. Verify IP removed from decisions
# 9. Test export endpoint
# 10. Test LAPI health endpoint
# 11. Clean up test resources
#
# Note: CrowdSec binary may not be available in test container
# Tests gracefully handle this scenario and skip operations requiring cscli
# Ensure we operate from repo root
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# ============================================================================
# Configuration
# ============================================================================
# Disposable container name; also referenced by on_failure/cleanup below.
CONTAINER_NAME="charon-crowdsec-decision-test"
# Fixture values for the ban/unban test cases (used by the TC series further
# down, beyond this excerpt — presumably; confirm against the full script).
TEST_IP="192.168.100.100"
TEST_DURATION="1h"
TEST_REASON="Integration test ban"
# Use same non-conflicting ports as rate_limit_integration.sh
API_PORT=8280
HTTP_PORT=8180
HTTPS_PORT=8143
CADDY_ADMIN_PORT=2119
# ============================================================================
# Colors for output
# ============================================================================
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Leveled loggers: each prefixes its message ($1) with a colored tag.
# printf '%b\n' interprets the ANSI escape sequences exactly like `echo -e`.
log_info() { printf '%b\n' "${GREEN}[INFO]${NC} $1"; }
log_warn() { printf '%b\n' "${YELLOW}[WARN]${NC} $1"; }
log_error() { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
log_test() { printf '%b\n' "${BLUE}[TEST]${NC} $1"; }
# ============================================================================
# Test counters
# ============================================================================
PASSED=0
FAILED=0
SKIPPED=0

# Record a passing test and print its status line.
pass_test() {
  (( ++PASSED ))
  echo -e " ${GREEN}✓ PASS${NC}"
}

# Record a failing test; $1 is the reason shown next to the marker.
fail_test() {
  (( ++FAILED ))
  echo -e " ${RED}✗ FAIL${NC}: $1"
}

# Record a skipped test; $1 explains why it was skipped.
skip_test() {
  (( ++SKIPPED ))
  echo -e " ${YELLOW}⊘ SKIP${NC}: $1"
}
# ============================================================================
# Helper Functions
# ============================================================================
# Dumps debug information on failure
# Installed via `trap ... ERR`: captures the failing command's exit code
# first, then prints container logs and the CrowdSec status endpoint.
on_failure() {
local exit_code=$?
echo ""
echo "=============================================="
echo "=== FAILURE DEBUG INFO (exit code: $exit_code) ==="
echo "=============================================="
echo ""
echo "=== Charon API Logs (last 100 lines) ==="
docker logs ${CONTAINER_NAME} 2>&1 | tail -100 || echo "Could not retrieve container logs"
echo ""
echo "=== CrowdSec Status ==="
# ${TMP_COOKIE:-/dev/null} keeps the curl valid even if the failure happened
# before the cookie file was created.
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/admin/crowdsec/status" 2>/dev/null || echo "Could not retrieve CrowdSec status"
echo ""
echo "=============================================="
echo "=== END DEBUG INFO ==="
echo "=============================================="
}
# Cleanup function
# Removes the test container and the session cookie file; every step is
# best-effort so cleanup never aborts the script.
cleanup() {
log_info "Cleaning up test resources..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
rm -f "${TMP_COOKIE:-}" 2>/dev/null || true
log_info "Cleanup complete"
}
# Set up trap to dump debug info on any error
trap on_failure ERR

echo "=============================================="
echo "=== CrowdSec Decision Integration Test ==="
echo "=============================================="
echo ""

# Check dependencies — docker, curl and jq are all required below; checking
# them in a single loop keeps the messages and order identical to before.
for tool in docker curl jq; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    log_error "$tool is not available; aborting"
    exit 1
  fi
done
# ============================================================================
# Step 1: Build image if needed
# ============================================================================
if ! docker image inspect charon:local >/dev/null 2>&1; then
log_info "Building charon:local image..."
docker build -t charon:local .
else
log_info "Using existing charon:local image"
fi
# ============================================================================
# Step 2: Start Charon container
# ============================================================================
log_info "Stopping any existing test containers..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
# Ensure network exists
if ! docker network inspect containers_default >/dev/null 2>&1; then
log_info "Creating containers_default network..."
docker network create containers_default
fi
log_info "Starting Charon container with CrowdSec features enabled..."
docker run -d --name ${CONTAINER_NAME} \
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--network containers_default \
-p ${HTTP_PORT}:80 -p ${HTTPS_PORT}:443 -p ${API_PORT}:8080 -p ${CADDY_ADMIN_PORT}:2019 \
-e CHARON_ENV=development \
-e CHARON_DEBUG=1 \
-e CHARON_HTTP_PORT=8080 \
-e CHARON_DB_PATH=/app/data/charon.db \
-e CHARON_FRONTEND_DIR=/app/frontend/dist \
-e CHARON_CADDY_ADMIN_API=http://localhost:2019 \
-e CHARON_CADDY_CONFIG_DIR=/app/data/caddy \
-e CHARON_CADDY_BINARY=caddy \
-e FEATURE_CERBERUS_ENABLED=true \
-e CERBERUS_SECURITY_CROWDSEC_MODE=local \
-v charon_crowdsec_test_data:/app/data \
-v caddy_crowdsec_test_data:/data \
-v caddy_crowdsec_test_config:/config \
charon:local
log_info "Waiting for Charon API to be ready..."
for i in {1..30}; do
if curl -s -f "http://localhost:${API_PORT}/api/v1/" >/dev/null 2>&1; then
log_info "Charon API is ready"
break
fi
if [ $i -eq 30 ]; then
log_error "Charon API failed to start"
exit 1
fi
echo -n '.'
sleep 1
done
echo ""
# ============================================================================
# Step 3: Register user and authenticate
# ============================================================================
log_info "Registering admin user and logging in..."
TMP_COOKIE=$(mktemp)
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"crowdsec@example.local","password":"password123","name":"CrowdSec Tester"}' \
"http://localhost:${API_PORT}/api/v1/auth/register" >/dev/null 2>&1 || true
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"crowdsec@example.local","password":"password123"}' \
-c "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/auth/login" >/dev/null
log_info "Authentication complete"
echo ""
# ============================================================================
# Pre-flight CrowdSec Startup Checks (TC-0 series)
# ============================================================================
echo "=============================================="
echo "=== Pre-flight CrowdSec Startup Checks ==="
echo "=============================================="
echo ""
# ----------------------------------------------------------------------------
# TC-0: Verify CrowdSec agent started successfully
# ----------------------------------------------------------------------------
# Classifies the container logs into one of three states: LAPI ready (pass),
# fatal "no datasource enabled" (hard abort), or startup-in-progress/absent
# (soft pass, since the image may legitimately ship without CrowdSec).
log_test "TC-0: Verify CrowdSec agent started successfully"
# FIX: `grep -c` prints a count (including "0") even when it exits non-zero
# on no match, so the fallback must be a no-op. The previous `|| echo "0"`
# produced the two-line value "0<newline>0" and broke the -ge comparisons
# below ("integer expression expected").
CROWDSEC_READY=$(docker logs ${CONTAINER_NAME} 2>&1 | grep -c "CrowdSec LAPI is ready" || true)
CROWDSEC_FATAL=$(docker logs ${CONTAINER_NAME} 2>&1 | grep -c "no datasource enabled" || true)
if [ "$CROWDSEC_FATAL" -ge 1 ]; then
fail_test "CRITICAL: CrowdSec failed with 'no datasource enabled' - acquis.yaml is missing or empty"
echo ""
log_error "CrowdSec is fundamentally broken. Cannot proceed with tests."
echo ""
echo "=== Container Logs (CrowdSec related) ==="
docker logs ${CONTAINER_NAME} 2>&1 | grep -i "crowdsec\|acquis\|datasource" | tail -30
echo ""
cleanup
exit 1
elif [ "$CROWDSEC_READY" -ge 1 ]; then
log_info " CrowdSec LAPI is ready (found startup message in logs)"
pass_test
else
# CrowdSec may not have started yet or may not be available
CROWDSEC_STARTED=$(docker logs ${CONTAINER_NAME} 2>&1 | grep -c "Starting CrowdSec" || true)
if [ "$CROWDSEC_STARTED" -ge 1 ]; then
log_info " CrowdSec startup initiated (may still be initializing)"
pass_test
else
log_warn " CrowdSec startup message not found (may not be enabled or binary missing)"
pass_test
fi
fi
# ----------------------------------------------------------------------------
# TC-0b: Verify acquisition config exists
# ----------------------------------------------------------------------------
# acquis.yaml must define at least one datasource ("source:") when CrowdSec
# runs in local mode; without it the agent dies with "no datasource enabled".
log_test "TC-0b: Verify acquisition config exists"
ACQUIS_CONTENT=$(docker exec ${CONTAINER_NAME} cat /etc/crowdsec/acquis.yaml 2>/dev/null || echo "")
# FIX: `grep -c` already prints "0" on no match (while exiting 1), so the
# fallback must not emit anything; `|| echo "0"` yielded "0\n0" and broke
# the integer comparison below.
ACQUIS_HAS_SOURCE=$(echo "$ACQUIS_CONTENT" | grep -c "source:" || true)
if [ "$ACQUIS_HAS_SOURCE" -ge 1 ]; then
log_info " Acquisition config found with datasource definition"
# Show first few lines for debugging
log_info " Config preview:"
echo "$ACQUIS_CONTENT" | head -5 | sed 's/^/ /'
pass_test
elif [ -n "$ACQUIS_CONTENT" ]; then
fail_test "CRITICAL: acquis.yaml exists but has no 'source:' definition"
echo ""
log_error "CrowdSec will fail to start without a valid datasource. Cannot proceed."
echo "Content found:"
echo "$ACQUIS_CONTENT" | head -10 | sed 's/^/ /'
echo ""
cleanup
exit 1
else
# acquis.yaml doesn't exist - this might be okay if CrowdSec mode is disabled
MODE_CHECK=$(docker exec ${CONTAINER_NAME} printenv CERBERUS_SECURITY_CROWDSEC_MODE 2>/dev/null || echo "disabled")
if [ "$MODE_CHECK" = "local" ]; then
fail_test "CRITICAL: acquis.yaml missing but CROWDSEC_MODE=local"
log_error "CrowdSec local mode enabled but no acquisition config exists."
cleanup
exit 1
else
log_warn " acquis.yaml not found (acceptable if CrowdSec mode is disabled)"
pass_test
fi
fi
# ----------------------------------------------------------------------------
# TC-0c: Verify hub items installed
# ----------------------------------------------------------------------------
# Counts installed parsers via `cscli parsers list -o json | jq length`.
# Missing cscli or an empty hub only warns (and still passes): the image may
# legitimately ship without cscli, and a missing parser set merely degrades
# detection quality rather than breaking this test run.
log_test "TC-0c: Verify hub items installed (at least one parser)"
PARSER_COUNT=$(docker exec ${CONTAINER_NAME} cscli parsers list -o json 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
if [ "$PARSER_COUNT" = "0" ] || [ -z "$PARSER_COUNT" ]; then
# cscli may not be available or no parsers installed
CSCLI_EXISTS=$(docker exec ${CONTAINER_NAME} which cscli 2>/dev/null || echo "")
if [ -z "$CSCLI_EXISTS" ]; then
log_warn " cscli not available - cannot verify hub items"
pass_test
else
log_warn " No parsers installed (CrowdSec may not detect attacks)"
pass_test
fi
else
log_info " Found $PARSER_COUNT parser(s) installed"
# List a few for debugging
docker exec ${CONTAINER_NAME} cscli parsers list 2>/dev/null | head -5 | sed 's/^/ /' || true
pass_test
fi
echo ""
# ============================================================================
# Detect CrowdSec/cscli availability
# ============================================================================
# Probes the decisions endpoint once; if it reports a cscli-related error,
# CSCLI_AVAILABLE is flipped to false and the later ban/unban test cases are
# skipped instead of failed.
log_info "Detecting CrowdSec/cscli availability..."
CSCLI_AVAILABLE=true
# Check decisions endpoint to detect cscli availability
DETECT_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/decisions" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$DETECT_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$DETECT_RESP" | jq -r '.error')
if [[ "$ERROR_MSG" == *"cscli"* ]] || [[ "$ERROR_MSG" == *"not available"* ]]; then
CSCLI_AVAILABLE=false
log_warn "cscli is NOT available in container - ban/unban tests will be SKIPPED"
fi
fi
if [ "$CSCLI_AVAILABLE" = "true" ]; then
log_info "cscli appears to be available"
fi
echo ""
# ============================================================================
# Test Cases
# ============================================================================
echo "=============================================="
echo "=== Running CrowdSec Decision Test Cases ==="
echo "=============================================="
echo ""
# ----------------------------------------------------------------------------
# TC-1: Start CrowdSec (may fail if binary not available - that's OK)
# ----------------------------------------------------------------------------
# Tolerant check: "started" passes, a "binary missing" style error skips,
# any other error merely warns - TC-1 never hard-fails the run.
log_test "TC-1: Start CrowdSec process"
START_RESP=$(curl -s -X POST -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/start" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$START_RESP" | jq -e '.status == "started"' >/dev/null 2>&1; then
log_info " CrowdSec started: $(echo "$START_RESP" | jq -c)"
pass_test
elif echo "$START_RESP" | jq -e '.error' >/dev/null 2>&1; then
# CrowdSec binary may not be available - this is acceptable
ERROR_MSG=$(echo "$START_RESP" | jq -r '.error // "unknown"')
if [[ "$ERROR_MSG" == *"not found"* ]] || [[ "$ERROR_MSG" == *"not available"* ]] || [[ "$ERROR_MSG" == *"executable"* ]]; then
skip_test "CrowdSec binary not available in container"
else
log_warn " Start returned error: $ERROR_MSG (continuing with tests)"
pass_test
fi
else
log_warn " Unexpected response: $START_RESP"
pass_test
fi
# ----------------------------------------------------------------------------
# TC-2: Get CrowdSec status
# ----------------------------------------------------------------------------
# The status endpoint must at least return a JSON object with a "running"
# key; anything else is a real failure.
log_test "TC-2: Get CrowdSec status"
STATUS_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/status" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$STATUS_RESP" | jq -e 'has("running")' >/dev/null 2>&1; then
RUNNING=$(echo "$STATUS_RESP" | jq -r '.running')
PID=$(echo "$STATUS_RESP" | jq -r '.pid // 0')
log_info " Status: running=$RUNNING, pid=$PID"
pass_test
else
fail_test "Status endpoint returned unexpected response: $STATUS_RESP"
fi
# ----------------------------------------------------------------------------
# TC-3: List decisions (expect empty initially, or error if cscli unavailable)
# ----------------------------------------------------------------------------
# Accepts: a decisions array (possibly alongside an error field), or a bare
# cscli-unavailable error. Only a response with neither shape fails.
log_test "TC-3: List decisions (expect empty or cscli error)"
DECISIONS_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/decisions" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$DECISIONS_RESP" | jq -e 'has("decisions")' >/dev/null 2>&1; then
TOTAL=$(echo "$DECISIONS_RESP" | jq -r '.total // 0')
# Check if there's also an error field (cscli not available returns both decisions:[] and error)
if echo "$DECISIONS_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$DECISIONS_RESP" | jq -r '.error')
if [[ "$ERROR_MSG" == *"cscli"* ]] || [[ "$ERROR_MSG" == *"not available"* ]]; then
log_info " Decisions endpoint working - returns error as expected (cscli unavailable)"
pass_test
else
log_info " Decisions count: $TOTAL (with error: $ERROR_MSG)"
pass_test
fi
else
log_info " Decisions count: $TOTAL"
pass_test
fi
elif echo "$DECISIONS_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$DECISIONS_RESP" | jq -r '.error')
if [[ "$ERROR_MSG" == *"cscli"* ]] || [[ "$ERROR_MSG" == *"not available"* ]]; then
log_info " Decisions endpoint correctly reports cscli unavailable"
pass_test
else
log_warn " Decisions returned error: $ERROR_MSG (acceptable)"
pass_test
fi
else
fail_test "Decisions endpoint returned unexpected response: $DECISIONS_RESP"
fi
# ----------------------------------------------------------------------------
# TC-4: Ban test IP (192.168.100.100) with 1h duration
# ----------------------------------------------------------------------------
# BAN_SUCCEEDED gates TC-5/TC-6; a cscli-related error downgrades the global
# CSCLI_AVAILABLE flag so the remaining ban tests skip consistently.
log_test "TC-4: Ban test IP (${TEST_IP}) with ${TEST_DURATION} duration"
# Skip if cscli is not available
if [ "$CSCLI_AVAILABLE" = "false" ]; then
skip_test "cscli not available - ban operation requires cscli"
BAN_SUCCEEDED=false
else
BAN_PAYLOAD=$(cat <<EOF
{"ip": "${TEST_IP}", "duration": "${TEST_DURATION}", "reason": "${TEST_REASON}"}
EOF
)
BAN_RESP=$(curl -s -X POST -b "${TMP_COOKIE}" \
-H "Content-Type: application/json" \
-d "${BAN_PAYLOAD}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/ban" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$BAN_RESP" | jq -e '.status == "banned"' >/dev/null 2>&1; then
log_info " Ban successful: $(echo "$BAN_RESP" | jq -c)"
pass_test
BAN_SUCCEEDED=true
elif echo "$BAN_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$BAN_RESP" | jq -r '.error')
if [[ "$ERROR_MSG" == *"cscli"* ]] || [[ "$ERROR_MSG" == *"not available"* ]] || [[ "$ERROR_MSG" == *"not found"* ]] || [[ "$ERROR_MSG" == *"failed to ban"* ]]; then
skip_test "cscli not available for ban operation (error: $ERROR_MSG)"
BAN_SUCCEEDED=false
# Update global flag since we now know cscli is unavailable
CSCLI_AVAILABLE=false
else
fail_test "Ban failed: $ERROR_MSG"
BAN_SUCCEEDED=false
fi
else
fail_test "Ban returned unexpected response: $BAN_RESP"
BAN_SUCCEEDED=false
fi
fi
# ----------------------------------------------------------------------------
# TC-5: Verify ban appears in decisions list
# ----------------------------------------------------------------------------
# Only meaningful when TC-4 actually banned; a missing entry is a warning,
# not a failure, because CrowdSec may not be fully operational.
log_test "TC-5: Verify ban appears in decisions list"
if [ "$CSCLI_AVAILABLE" = "false" ]; then
skip_test "cscli not available - cannot verify ban in decisions"
elif [ "${BAN_SUCCEEDED:-false}" = "true" ]; then
# Give CrowdSec a moment to register the decision
sleep 1
VERIFY_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/decisions" 2>/dev/null || echo '{"decisions":[]}')
if echo "$VERIFY_RESP" | jq -e ".decisions[] | select(.value == \"${TEST_IP}\")" >/dev/null 2>&1; then
log_info " Ban verified in decisions list"
pass_test
elif echo "$VERIFY_RESP" | jq -e '.error' >/dev/null 2>&1; then
skip_test "cscli not available for verification"
else
# May not find it if CrowdSec is not fully operational
log_warn " Ban not found in decisions (CrowdSec may not be fully operational)"
pass_test
fi
else
skip_test "Ban operation was skipped, cannot verify"
fi
# ----------------------------------------------------------------------------
# TC-6: Unban the test IP
# ----------------------------------------------------------------------------
# Mirrors TC-4: UNBAN_SUCCEEDED gates the TC-7 removal check below.
log_test "TC-6: Unban the test IP (${TEST_IP})"
if [ "$CSCLI_AVAILABLE" = "false" ]; then
skip_test "cscli not available - unban operation requires cscli"
UNBAN_SUCCEEDED=false
elif [ "${BAN_SUCCEEDED:-false}" = "true" ]; then
UNBAN_RESP=$(curl -s -X DELETE -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/ban/${TEST_IP}" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$UNBAN_RESP" | jq -e '.status == "unbanned"' >/dev/null 2>&1; then
log_info " Unban successful: $(echo "$UNBAN_RESP" | jq -c)"
pass_test
UNBAN_SUCCEEDED=true
elif echo "$UNBAN_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$UNBAN_RESP" | jq -r '.error')
if [[ "$ERROR_MSG" == *"cscli"* ]] || [[ "$ERROR_MSG" == *"not available"* ]]; then
skip_test "cscli not available for unban operation"
UNBAN_SUCCEEDED=false
else
fail_test "Unban failed: $ERROR_MSG"
UNBAN_SUCCEEDED=false
fi
else
fail_test "Unban returned unexpected response: $UNBAN_RESP"
UNBAN_SUCCEEDED=false
fi
else
skip_test "Ban operation was skipped, cannot unban"
UNBAN_SUCCEEDED=false
fi
# ----------------------------------------------------------------------------
# TC-7: Verify IP removed from decisions
# ----------------------------------------------------------------------------
# A lingering entry only warns - decision removal may take time to propagate.
log_test "TC-7: Verify IP removed from decisions"
if [ "$CSCLI_AVAILABLE" = "false" ]; then
skip_test "cscli not available - cannot verify removal from decisions"
elif [ "${UNBAN_SUCCEEDED:-false}" = "true" ]; then
# Give CrowdSec a moment to remove the decision
sleep 1
REMOVAL_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/decisions" 2>/dev/null || echo '{"decisions":[]}')
FOUND=$(echo "$REMOVAL_RESP" | jq -r ".decisions[] | select(.value == \"${TEST_IP}\") | .value" 2>/dev/null || echo "")
if [ -z "$FOUND" ]; then
log_info " IP successfully removed from decisions"
pass_test
else
log_warn " IP still present in decisions (may take time to propagate)"
pass_test
fi
else
skip_test "Unban operation was skipped, cannot verify removal"
fi
# ----------------------------------------------------------------------------
# TC-8: Test export endpoint (should return tar.gz or 404 if no config)
# ----------------------------------------------------------------------------
# 200 (even with an empty body), 404 and 500 are all acceptable outcomes;
# only an unexpected HTTP code fails the test.
log_test "TC-8: Test export endpoint"
# NOTE(review): `mktemp --suffix` is GNU-specific; presumably this only runs
# on GNU/Linux CI - confirm if macOS/BSD support is ever needed.
EXPORT_FILE=$(mktemp --suffix=.tar.gz)
EXPORT_HTTP_CODE=$(curl -s -b "${TMP_COOKIE}" \
-o "${EXPORT_FILE}" -w "%{http_code}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/export" 2>/dev/null || echo "000")
if [ "$EXPORT_HTTP_CODE" = "200" ]; then
if [ -s "${EXPORT_FILE}" ]; then
EXPORT_SIZE=$(ls -lh "${EXPORT_FILE}" 2>/dev/null | awk '{print $5}')
log_info " Export successful: ${EXPORT_SIZE}"
pass_test
else
log_info " Export returned empty file (no config to export)"
pass_test
fi
elif [ "$EXPORT_HTTP_CODE" = "404" ]; then
log_info " Export returned 404 (no CrowdSec config exists - expected)"
pass_test
elif [ "$EXPORT_HTTP_CODE" = "500" ]; then
# May fail if config directory doesn't exist
log_info " Export returned 500 (config directory may not exist - acceptable)"
pass_test
else
fail_test "Export returned unexpected HTTP code: $EXPORT_HTTP_CODE"
fi
rm -f "${EXPORT_FILE}" 2>/dev/null || true
# ----------------------------------------------------------------------------
# TC-10: Test LAPI health endpoint
# ----------------------------------------------------------------------------
# Smoke test only: any JSON response (healthy flag or error) passes.
log_test "TC-10: Test LAPI health endpoint"
LAPI_RESP=$(curl -s -b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/admin/crowdsec/lapi/health" 2>/dev/null || echo '{"error":"request failed"}')
if echo "$LAPI_RESP" | jq -e 'has("healthy")' >/dev/null 2>&1; then
HEALTHY=$(echo "$LAPI_RESP" | jq -r '.healthy')
LAPI_URL=$(echo "$LAPI_RESP" | jq -r '.lapi_url // "not configured"')
log_info " LAPI Health: healthy=$HEALTHY, url=$LAPI_URL"
pass_test
elif echo "$LAPI_RESP" | jq -e '.error' >/dev/null 2>&1; then
ERROR_MSG=$(echo "$LAPI_RESP" | jq -r '.error')
log_info " LAPI Health check returned error: $ERROR_MSG (acceptable - LAPI may not be configured)"
pass_test
else
# Any response from the endpoint is acceptable
log_info " LAPI Health response: $(echo "$LAPI_RESP" | head -c 200)"
pass_test
fi
# ============================================================================
# Results Summary
# ============================================================================
# Prints the pass/fail/skip tallies, tears down test resources, then exits 0
# only when no test case failed (skips are allowed).
echo ""
echo "=============================================="
echo "=== CrowdSec Decision Integration Results ==="
echo "=============================================="
echo ""
echo -e " ${GREEN}Passed:${NC} $PASSED"
echo -e " ${RED}Failed:${NC} $FAILED"
echo -e " ${YELLOW}Skipped:${NC} $SKIPPED"
echo ""
if [ "$CSCLI_AVAILABLE" = "false" ]; then
echo -e " ${YELLOW}Note:${NC} cscli was not available in container - ban/unban tests were skipped"
echo " This is expected behavior for the current charon:local image."
echo ""
fi
# Cleanup
cleanup
if [ $FAILED -eq 0 ]; then
# FIX: the skip branch previously printed BOTH the "(with skips)" banner
# and the "ALL ... PASSED" banner; each branch now prints exactly one.
if [ $SKIPPED -gt 0 ]; then
echo "=============================================="
echo "=== CROWDSEC TESTS PASSED (with skips) ==="
echo "=============================================="
else
echo "=============================================="
echo "=== ALL CROWDSEC DECISION TESTS PASSED ==="
echo "=============================================="
fi
echo ""
exit 0
else
echo "=============================================="
echo "=== CROWDSEC DECISION TESTS FAILED ==="
echo "=============================================="
echo ""
exit 1
fi

View File

@@ -1,97 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Run from the repository root regardless of the caller's cwd.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# On any error (set -e abort), dump recent container logs for debugging.
trap 'echo "Error occurred, dumping debug info..."; docker logs charon-debug 2>&1 | tail -200 || true' ERR
if ! command -v docker >/dev/null 2>&1; then
echo "docker is not available; aborting"
exit 1
fi
echo "Building charon:local image..."
docker build -t charon:local .
# Remove any stale container from a previous run; ensure the shared network.
docker rm -f charon-debug >/dev/null 2>&1 || true
if ! docker network inspect containers_default >/dev/null 2>&1; then
docker network create containers_default
fi
docker run -d --name charon-debug --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network containers_default -p 80:80 -p 443:443 -p 8080:8080 -p 2019:2019 -p 2345:2345 \
-e CHARON_ENV=development -e CHARON_DEBUG=1 -e CHARON_HTTP_PORT=8080 -e CHARON_DB_PATH=/app/data/charon.db -e CHARON_FRONTEND_DIR=/app/frontend/dist \
-e CHARON_CADDY_ADMIN_API=http://localhost:2019 -e CHARON_CADDY_CONFIG_DIR=/app/data/caddy -e CHARON_CADDY_BINARY=caddy -e CHARON_IMPORT_CADDYFILE=/import/Caddyfile \
-e CHARON_IMPORT_DIR=/app/data/imports -e CHARON_ACME_STAGING=false -e FEATURE_CERBERUS_ENABLED=true \
-v charon_data:/app/data -v caddy_data:/data -v caddy_config:/config -v /var/run/docker.sock:/var/run/docker.sock:ro charon:local
echo "Waiting for Charon API to be ready..."
# NOTE(review): this wait loop does not fail on timeout - after 30s the
# script proceeds and set -e surfaces the first failing API call instead.
# Confirm this best-effort wait is intended.
for i in {1..30}; do
if curl -s -f http://localhost:8080/api/v1/ >/dev/null 2>&1; then
break
fi
echo -n '.'
sleep 1
done
echo "Registering admin user and logging in..."
TMP_COOKIE=$(mktemp)
curl -s -X POST -H "Content-Type: application/json" -d '{"email":"integration@example.local","password":"password123","name":"Integration Tester"}' http://localhost:8080/api/v1/auth/register >/dev/null || true
curl -s -X POST -H "Content-Type: application/json" -d '{"email":"integration@example.local","password":"password123"}' -c ${TMP_COOKIE} http://localhost:8080/api/v1/auth/login >/dev/null
# Check hub availability first: the pull/apply flow needs the public CrowdSec
# Hub index, so when it is unreachable the preset tests are skipped entirely.
echo "Checking CrowdSec Hub availability..."
HUB_AVAILABLE=false
if curl -sf --max-time 10 "https://hub-data.crowdsec.net/api/index.json" > /dev/null 2>&1; then
HUB_AVAILABLE=true
echo "✓ CrowdSec Hub is available"
else
echo "⚠ CrowdSec Hub is unavailable - skipping hub preset tests"
fi
# Only test hub presets if hub is available.
# FIX: quote all variable expansions (cookie jar path, SLUG inside the JSON
# payload, response bodies echoed into jq) so the commands are safe under
# word-splitting/globbing; behavior is unchanged for the current values.
if [ "$HUB_AVAILABLE" = true ]; then
echo "Pulled presets list..."
LIST=$(curl -s -H "Content-Type: application/json" -b "${TMP_COOKIE}" http://localhost:8080/api/v1/admin/crowdsec/presets)
echo "$LIST" | jq -r .presets | head -20
SLUG="bot-mitigation-essentials"
echo "Pulling preset $SLUG"
PULL_RESP=$(curl -s -X POST -H "Content-Type: application/json" -d '{"slug":"'"${SLUG}"'"}' -b "${TMP_COOKIE}" http://localhost:8080/api/v1/admin/crowdsec/presets/pull)
echo "Pull response: $PULL_RESP"
if ! echo "$PULL_RESP" | jq -e .status >/dev/null 2>&1; then
echo "Pull failed: $PULL_RESP"
exit 1
fi
if [ "$(echo "$PULL_RESP" | jq -r .status)" != "pulled" ]; then
echo "Unexpected pull status: $(echo "$PULL_RESP" | jq -r .status)"
exit 1
fi
# NOTE(review): CACHE_KEY is extracted but never used below - confirm
# whether the apply endpoint is expected to consume it.
CACHE_KEY=$(echo "$PULL_RESP" | jq -r .cache_key)
echo "Applying preset $SLUG"
APPLY_RESP=$(curl -s -X POST -H "Content-Type: application/json" -d '{"slug":"'"${SLUG}"'"}' -b "${TMP_COOKIE}" http://localhost:8080/api/v1/admin/crowdsec/presets/apply)
echo "Apply response: $APPLY_RESP"
if ! echo "$APPLY_RESP" | jq -e .status >/dev/null 2>&1; then
echo "Apply failed: $APPLY_RESP"
exit 1
fi
if [ "$(echo "$APPLY_RESP" | jq -r .status)" != "applied" ]; then
echo "Unexpected apply status: $(echo "$APPLY_RESP" | jq -r .status)"
exit 1
fi
fi
echo "Cleanup and exit"
docker rm -f charon-debug >/dev/null 2>&1 || true
rm -f "${TMP_COOKIE}"
echo "Done"

View File

@@ -1,338 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec-startup
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh integration-test-crowdsec-startup" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Brief: Focused integration test for CrowdSec startup in Charon container
# This test verifies that CrowdSec can start successfully without the fatal
# "no datasource enabled" error, which indicates a missing or empty acquis.yaml.
#
# Steps:
# 1. Build charon:local image if not present
# 2. Start container with CERBERUS_SECURITY_CROWDSEC_MODE=local
# 3. Wait for initialization (30 seconds)
# 4. Check for fatal errors
# 5. Check LAPI health
# 6. Check acquisition config
# 7. Check installed parsers/scenarios
# 8. Output clear PASS/FAIL results
# 9. Clean up container
# Ensure we operate from repo root
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# ============================================================================
# Configuration
# ============================================================================
CONTAINER_NAME="charon-crowdsec-startup-test"
# Grace period before the startup checks run (hub sync + LAPI boot).
INIT_WAIT_SECONDS=30
# Use unique ports to avoid conflicts with running Charon
API_PORT=8580
HTTP_PORT=8480
HTTPS_PORT=8443
# ============================================================================
# Terminal colors and leveled logging helpers
# ============================================================================
# ANSI escape sequences, interpreted at print time by printf's %b directive.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Each helper emits exactly one line: a colored "[LEVEL]" tag, a space, then
# the caller-supplied message ($1).
log_info() { printf '%b[INFO]%b %b\n' "$GREEN" "$NC" "$1"; }
log_warn() { printf '%b[WARN]%b %b\n' "$YELLOW" "$NC" "$1"; }
log_error() { printf '%b[ERROR]%b %b\n' "$RED" "$NC" "$1"; }
log_test() { printf '%b[TEST]%b %b\n' "$BLUE" "$NC" "$1"; }
# ============================================================================
# Pass/fail bookkeeping
# ============================================================================
# Global tallies consumed by the summary at the end of the script.
PASSED=0
FAILED=0
CRITICAL_FAILURE=false
# Record a passing check and print a green tick.
pass_test() {
PASSED=$((PASSED + 1))
printf '%b\n' " ${GREEN}✓ PASS${NC}"
}
# Record a failing check; $1 describes what went wrong.
fail_test() {
FAILED=$((FAILED + 1))
printf '%b\n' " ${RED}✗ FAIL${NC}: $1"
}
# Record a failure that must abort the run; also raises CRITICAL_FAILURE,
# which the summary checks before choosing its exit path.
critical_fail() {
FAILED=$((FAILED + 1))
CRITICAL_FAILURE=true
printf '%b\n' " ${RED}✗ CRITICAL FAIL${NC}: $1"
}
# ============================================================================
# Teardown
# ============================================================================
# Remove the test container and its dedicated volumes. Every command is
# best-effort (|| true) so teardown never masks the script's real exit code.
cleanup() {
log_info "Cleaning up test resources..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
local vol
for vol in charon_crowdsec_startup_data caddy_crowdsec_startup_data caddy_crowdsec_startup_config; do
docker volume rm "$vol" 2>/dev/null || true
done
log_info "Cleanup complete"
}
# Run teardown on every exit path, pass or fail.
trap cleanup EXIT
echo "=============================================="
echo "=== CrowdSec Startup Integration Test ==="
echo "=============================================="
echo ""
# ============================================================================
# Step 1: Check dependencies
# ============================================================================
log_info "Checking dependencies..."
if ! command -v docker >/dev/null 2>&1; then
log_error "docker is not available; aborting"
exit 1
fi
# ============================================================================
# Step 2: Build image if needed
# ============================================================================
# Reuse a prebuilt charon:local when present to keep reruns fast.
if ! docker image inspect charon:local >/dev/null 2>&1; then
log_info "Building charon:local image..."
docker build -t charon:local .
else
log_info "Using existing charon:local image"
fi
# ============================================================================
# Step 3: Clean up any existing container
# ============================================================================
log_info "Stopping any existing test containers..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
# ============================================================================
# Step 4: Start container with CrowdSec enabled
# ============================================================================
# Minimal environment: only the Cerberus/CrowdSec flags matter here; the
# dedicated *_startup_* volumes are removed again by cleanup().
log_info "Starting Charon container with CERBERUS_SECURITY_CROWDSEC_MODE=local..."
docker run -d --name ${CONTAINER_NAME} \
-p ${HTTP_PORT}:80 \
-p ${HTTPS_PORT}:443 \
-p ${API_PORT}:8080 \
-e CHARON_ENV=development \
-e CHARON_DEBUG=1 \
-e FEATURE_CERBERUS_ENABLED=true \
-e CERBERUS_SECURITY_CROWDSEC_MODE=local \
-v charon_crowdsec_startup_data:/app/data \
-v caddy_crowdsec_startup_data:/data \
-v caddy_crowdsec_startup_config:/config \
charon:local
# Fixed grace period rather than polling: the checks below read logs/state.
log_info "Waiting ${INIT_WAIT_SECONDS} seconds for CrowdSec to initialize..."
sleep ${INIT_WAIT_SECONDS}
echo ""
echo "=============================================="
echo "=== Running CrowdSec Startup Checks ==="
echo "=============================================="
echo ""
# ============================================================================
# Test 1: Check for fatal "no datasource enabled" error
# ============================================================================
# This is the signature failure of a missing/empty acquis.yaml.
log_test "Check 1: No fatal 'no datasource enabled' error"
# FIX: `grep -c` prints the count (including "0") even when it exits
# non-zero on no match, so the fallback must be a no-op. The previous
# `|| echo "0"` produced the two-line value "0<newline>0" and broke the
# -ge comparison below.
FATAL_ERROR_COUNT=$(docker logs ${CONTAINER_NAME} 2>&1 | grep -c "no datasource enabled" || true)
if [ "$FATAL_ERROR_COUNT" -ge 1 ]; then
critical_fail "Found fatal 'no datasource enabled' error - acquis.yaml is missing or empty"
echo ""
echo "=== Relevant Container Logs ==="
docker logs ${CONTAINER_NAME} 2>&1 | grep -i "crowdsec\|acquis\|datasource\|fatal" | tail -20
echo ""
else
log_info " No 'no datasource enabled' fatal error found"
pass_test
fi
# ============================================================================
# Test 2: Check LAPI health endpoint
# ============================================================================
# "FAILED" is used as a sentinel for an unreachable LAPI; a failed probe is
# a soft failure because the image may not bundle the CrowdSec binary.
log_test "Check 2: CrowdSec LAPI health (127.0.0.1:8085/health)"
# Use docker exec to check LAPI health from inside the container
LAPI_HEALTH=$(docker exec ${CONTAINER_NAME} curl -sf http://127.0.0.1:8085/health 2>/dev/null || echo "FAILED")
if [ "$LAPI_HEALTH" != "FAILED" ] && [ -n "$LAPI_HEALTH" ]; then
log_info " LAPI is healthy"
log_info " Response: $LAPI_HEALTH"
pass_test
else
fail_test "LAPI health check failed (port 8085 not responding)"
# This could be expected if CrowdSec binary is not in the image
log_warn " This may be expected if CrowdSec binary is not installed"
fi
# ============================================================================
# Test 3: Check acquisition config exists and has datasource
# ============================================================================
# acquis.yaml must exist and declare at least one "source:"; both a missing
# file and a source-less file are critical (they cause the Check-1 fatal).
log_test "Check 3: Acquisition config exists and has 'source:' definition"
ACQUIS_CONTENT=$(docker exec ${CONTAINER_NAME} cat /etc/crowdsec/acquis.yaml 2>/dev/null || echo "")
if [ -z "$ACQUIS_CONTENT" ]; then
critical_fail "acquis.yaml does not exist or is empty"
else
# FIX: `grep -c` prints "0" itself on no match (while exiting 1); the
# fallback must be a no-op. `|| echo "0"` yielded "0\n0" and broke the
# -ge comparison below.
SOURCE_COUNT=$(echo "$ACQUIS_CONTENT" | grep -c "source:" || true)
if [ "$SOURCE_COUNT" -ge 1 ]; then
log_info " acquis.yaml found with $SOURCE_COUNT datasource definition(s)"
echo ""
echo " --- acquis.yaml content ---"
echo "$ACQUIS_CONTENT" | head -15 | sed 's/^/ /'
echo " ---"
echo ""
pass_test
else
critical_fail "acquis.yaml exists but has no 'source:' definition"
echo " Content:"
echo "$ACQUIS_CONTENT" | head -10 | sed 's/^/ /'
fi
fi
# ============================================================================
# Test 4: Check for installed parsers
# ============================================================================
# Informational: every branch passes; a missing cscli or empty parser list
# only produces a warning.
log_test "Check 4: Installed parsers (at least one expected)"
PARSERS_OUTPUT=$(docker exec ${CONTAINER_NAME} cscli parsers list 2>&1 || echo "CSCLI_NOT_AVAILABLE")
if [ "$PARSERS_OUTPUT" = "CSCLI_NOT_AVAILABLE" ]; then
log_warn " cscli command not available - cannot check parsers"
# Not a failure - cscli may not be in the image
pass_test
elif echo "$PARSERS_OUTPUT" | grep -q "PARSERS"; then
# cscli output includes "PARSERS" header
# FIX: `grep -c` prints "0" on no match while exiting 1, so the fallback
# is a no-op; `|| echo "0"` would yield the two-line value "0\n0" and
# break the -ge comparison below.
PARSER_COUNT=$(echo "$PARSERS_OUTPUT" | grep -c "✔" || true)
if [ "$PARSER_COUNT" -ge 1 ]; then
log_info " Found $PARSER_COUNT installed parser(s)"
echo "$PARSERS_OUTPUT" | head -10 | sed 's/^/ /'
pass_test
else
log_warn " No parsers installed (CrowdSec may not parse logs correctly)"
pass_test
fi
else
log_warn " Unexpected cscli output"
echo "$PARSERS_OUTPUT" | head -5 | sed 's/^/ /'
pass_test
fi
# ============================================================================
# Test 5: Check for installed scenarios
# ============================================================================
# Informational, mirrors Check 4: every branch passes; missing cscli or an
# empty scenario list only warns.
log_test "Check 5: Installed scenarios (at least one expected)"
SCENARIOS_OUTPUT=$(docker exec ${CONTAINER_NAME} cscli scenarios list 2>&1 || echo "CSCLI_NOT_AVAILABLE")
if [ "$SCENARIOS_OUTPUT" = "CSCLI_NOT_AVAILABLE" ]; then
log_warn " cscli command not available - cannot check scenarios"
pass_test
elif echo "$SCENARIOS_OUTPUT" | grep -q "SCENARIOS"; then
# FIX: `grep -c` prints "0" on no match while exiting 1, so the fallback
# is a no-op; `|| echo "0"` would yield "0\n0" and break the -ge test.
SCENARIO_COUNT=$(echo "$SCENARIOS_OUTPUT" | grep -c "✔" || true)
if [ "$SCENARIO_COUNT" -ge 1 ]; then
log_info " Found $SCENARIO_COUNT installed scenario(s)"
echo "$SCENARIOS_OUTPUT" | head -10 | sed 's/^/ /'
pass_test
else
log_warn " No scenarios installed (CrowdSec may not detect attacks)"
pass_test
fi
else
log_warn " Unexpected cscli output"
echo "$SCENARIOS_OUTPUT" | head -5 | sed 's/^/ /'
pass_test
fi
# ============================================================================
# Test 6: Check CrowdSec process is running (if expected)
# ============================================================================
# Informational only: both branches pass; an absent process merely warns and
# additionally reports whether the crowdsec binary exists at all.
log_test "Check 6: CrowdSec process running"
CROWDSEC_PID=$(docker exec ${CONTAINER_NAME} pgrep -f "crowdsec" 2>/dev/null || echo "")
if [ -n "$CROWDSEC_PID" ]; then
log_info " CrowdSec process is running (PID: $CROWDSEC_PID)"
pass_test
else
log_warn " CrowdSec process not found (may not be installed or may have crashed)"
# Check if crowdsec binary exists
CROWDSEC_BIN=$(docker exec ${CONTAINER_NAME} which crowdsec 2>/dev/null || echo "")
if [ -z "$CROWDSEC_BIN" ]; then
log_warn " crowdsec binary not found in container"
fi
pass_test
fi
# ============================================================================
# Show last container logs for debugging
# ============================================================================
echo ""
echo "=== Container Logs (last 30 lines) ==="
docker logs ${CONTAINER_NAME} 2>&1 | tail -30
echo ""
# ============================================================================
# Results Summary
# ============================================================================
# Exit 1 with remediation hints on a critical failure (broken acquis.yaml),
# otherwise exit 0 iff no check failed. cleanup() runs via the EXIT trap.
echo ""
echo "=============================================="
echo "=== CrowdSec Startup Test Results ==="
echo "=============================================="
echo ""
echo -e " ${GREEN}Passed:${NC} $PASSED"
echo -e " ${RED}Failed:${NC} $FAILED"
echo ""
if [ "$CRITICAL_FAILURE" = "true" ]; then
echo -e "${RED}=============================================="
echo "=== CRITICAL: CrowdSec STARTUP BROKEN ==="
echo "==============================================${NC}"
echo ""
echo "CrowdSec cannot start properly. The 'no datasource enabled' error"
echo "indicates that acquis.yaml is missing or has no datasource definitions."
echo ""
echo "To fix:"
echo " 1. Ensure configs/crowdsec/acquis.yaml exists with 'source:' definition"
echo " 2. Ensure Dockerfile copies acquis.yaml to /etc/crowdsec.dist/"
echo " 3. Ensure .docker/docker-entrypoint.sh copies configs to /etc/crowdsec/"
echo ""
exit 1
fi
if [ $FAILED -eq 0 ]; then
echo "=============================================="
echo "=== ALL CROWDSEC STARTUP TESTS PASSED ==="
echo "=============================================="
echo ""
exit 0
else
echo "=============================================="
echo "=== CROWDSEC STARTUP TESTS FAILED ==="
echo "=============================================="
echo ""
exit 1
fi

View File

@@ -1,365 +0,0 @@
#!/usr/bin/env bash
# ==============================================================================
# Charon Database Recovery Script
# ==============================================================================
# This script performs database integrity checks and recovery operations for
# the Charon SQLite database. It can detect corruption, create backups, and
# attempt to recover data using SQLite's .dump command.
#
# Usage: ./scripts/db-recovery.sh [--force]
# --force: Skip confirmation prompts
#
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh utility-db-recovery
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh utility-db-recovery" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Exit codes:
# 0 - Success (database healthy or recovered)
# 1 - Failure (recovery failed or prerequisites missing)
# ==============================================================================
set -euo pipefail
# Configuration
# Candidate database locations; detect_environment() probes these and picks
# whichever exists, then fills in DB_PATH / BACKUP_DIR below.
DOCKER_DB_PATH="/app/data/charon.db"
LOCAL_DB_PATH="backend/data/charon.db"
# Resolved at runtime by detect_environment().
BACKUP_DIR=""
DB_PATH=""
# Single timestamp reused for every artifact of this run (backup, dump,
# recovered db) so they can be correlated later.
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
FORCE_MODE=false
# Colors for output (disabled if not a terminal)
if [ -t 1 ]; then
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
else
RED=''
GREEN=''
YELLOW=''
BLUE=''
NC=''
fi
# ==============================================================================
# Helper Functions
# ==============================================================================
# ---------------------------------------------------------------------------
# Logging helpers: print a colour-coded, tagged line to stdout. The colour
# variables (BLUE/GREEN/YELLOW/RED/NC) are defined in the prologue and are
# empty strings when stdout is not a terminal.
# ---------------------------------------------------------------------------

# Informational message: "[INFO] <msg>".
log_info() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}

# Success message: "[SUCCESS] <msg>".
log_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}

# Warning message: "[WARNING] <msg>".
log_warn() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}

# Error message: "[ERROR] <msg>".
log_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Check if sqlite3 is available
# Verify the sqlite3 CLI is on PATH; log its version on success, otherwise
# print install hints and exit 1.
check_prerequisites() {
    if command -v sqlite3 > /dev/null 2>&1; then
        log_info "sqlite3 found: $(sqlite3 --version)"
        return 0
    fi
    log_error "sqlite3 is not installed or not in PATH"
    log_info "Install with: apt-get install sqlite3 (Debian/Ubuntu)"
    log_info " or: apk add sqlite (Alpine)"
    log_info " or: brew install sqlite (macOS)"
    exit 1
}
# Detect environment (Docker vs Local)
# Detect whether we run inside the Charon container or a local checkout by
# probing the two known database locations, and set the DB_PATH / BACKUP_DIR
# globals accordingly. Exits 1 when the database exists at neither location.
detect_environment() {
if [ -f "$DOCKER_DB_PATH" ]; then
# Container layout: database and backups both live under /app/data.
DB_PATH="$DOCKER_DB_PATH"
BACKUP_DIR="/app/data/backups"
log_info "Running in Docker environment"
elif [ -f "$LOCAL_DB_PATH" ]; then
# Local dev layout: paths relative to the repository root.
DB_PATH="$LOCAL_DB_PATH"
BACKUP_DIR="backend/data/backups"
log_info "Running in local development environment"
else
log_error "Database not found at expected locations:"
log_error " - Docker: $DOCKER_DB_PATH"
log_error " - Local: $LOCAL_DB_PATH"
exit 1
fi
log_info "Database path: $DB_PATH"
}
# Create backup directory if it doesn't exist
# Create the backup directory (global BACKUP_DIR) on first use; no-op when
# it already exists.
ensure_backup_dir() {
    if [ -d "$BACKUP_DIR" ]; then
        return 0
    fi
    mkdir -p "$BACKUP_DIR"
    log_info "Created backup directory: $BACKUP_DIR"
}
# Create a timestamped backup of the current database
# Create a timestamped copy of the live database (plus WAL/SHM sidecar files
# when present) inside BACKUP_DIR.
#
# Globals:  DB_PATH, BACKUP_DIR, TIMESTAMP (all read)
# Outputs:  writes ONLY the backup file path to stdout so callers can capture
#           it with backup_file=$(create_backup); progress logging goes to
#           stderr.
# Returns:  0 on success (a failed cp aborts the script via set -e).
create_backup() {
    local backup_file="${BACKUP_DIR}/charon_backup_${TIMESTAMP}.db"
    # BUGFIX: log to stderr. These messages previously went to stdout and
    # were swallowed into the command substitution in main(), so the
    # "backup path" variable contained the log lines as well.
    log_info "Creating backup: $backup_file" >&2
    cp "$DB_PATH" "$backup_file"
    # Also copy WAL and SHM sidecars so a WAL-mode database backs up
    # consistently.
    if [ -f "${DB_PATH}-wal" ]; then
        cp "${DB_PATH}-wal" "${backup_file}-wal"
        log_info "Backed up WAL file" >&2
    fi
    if [ -f "${DB_PATH}-shm" ]; then
        cp "${DB_PATH}-shm" "${backup_file}-shm"
        log_info "Backed up SHM file" >&2
    fi
    log_success "Backup created successfully" >&2
    echo "$backup_file"
}
# Run SQLite integrity check
# Run "PRAGMA integrity_check" against DB_PATH.
# Outputs: the raw pragma result on stdout (the log_info line is printed to
#          stdout too; main() captures stdout and re-prints it itself).
# Returns: 0 when SQLite reports "ok", 1 otherwise.
run_integrity_check() {
log_info "Running SQLite integrity check..."
local result
# "|| true" keeps set -e from aborting when sqlite3 fails outright on a
# corrupt file; the failure surfaces as a non-"ok" result string instead.
result=$(sqlite3 "$DB_PATH" "PRAGMA integrity_check;" 2>&1) || true
echo "$result"
if [ "$result" = "ok" ]; then
return 0
else
return 1
fi
}
# Attempt to recover database using .dump
# Attempt to rebuild a corrupted database: export everything salvageable with
# the sqlite3 ".dump" command, import the dump into a fresh database file,
# verify the result, and swap it into place.
#
# Globals:  DB_PATH (read/replaced), BACKUP_DIR, TIMESTAMP (read)
# Returns:  0 when the recovered database passed integrity_check and replaced
#           the original; 1 on any failure (original file left untouched).
recover_database() {
    local dump_file="${BACKUP_DIR}/charon_dump_${TIMESTAMP}.sql"
    local recovered_db="${BACKUP_DIR}/charon_recovered_${TIMESTAMP}.db"
    log_info "Attempting database recovery..."
    # .dump can extract data even from a partially corrupted database.
    # BUGFIX: do NOT redirect stderr into the dump file (previously 2>&1) —
    # sqlite3 diagnostics would be interleaved with the SQL and corrupt the
    # dump. Let stderr flow to the terminal instead.
    log_info "Exporting database via .dump command..."
    if ! sqlite3 "$DB_PATH" ".dump" > "$dump_file"; then
        log_error "Failed to export database dump"
        return 1
    fi
    log_success "Database dump created: $dump_file"
    # An empty dump means nothing could be salvaged.
    if [ ! -s "$dump_file" ]; then
        log_error "Dump file is empty - no data to recover"
        return 1
    fi
    # Replay the dump into a brand-new database file.
    log_info "Creating new database from dump..."
    if ! sqlite3 "$recovered_db" < "$dump_file" 2>&1; then
        log_error "Failed to create database from dump"
        return 1
    fi
    log_success "Recovered database created: $recovered_db"
    # Verify the rebuilt database before touching the original.
    log_info "Verifying recovered database integrity..."
    local verify_result
    verify_result=$(sqlite3 "$recovered_db" "PRAGMA integrity_check;" 2>&1) || true
    if [ "$verify_result" != "ok" ]; then
        log_error "Recovered database failed integrity check"
        log_error "Result: $verify_result"
        return 1
    fi
    log_success "Recovered database passed integrity check"
    log_info "Replacing original database with recovered version..."
    # Stale WAL/SHM sidecars belong to the old (corrupt) file and must not be
    # replayed on top of the recovered one.
    rm -f "${DB_PATH}-wal" "${DB_PATH}-shm"
    mv "$recovered_db" "$DB_PATH"
    log_success "Database replaced successfully"
    return 0
}
# Enable WAL mode on database
# Switch the database at DB_PATH to WAL journaling. No-op when WAL is already
# active. Returns 0 on success or when already enabled, 1 when the pragma
# fails (e.g. another process holds a lock).
enable_wal_mode() {
    local mode
    log_info "Enabling WAL (Write-Ahead Logging) mode..."
    mode=$(sqlite3 "$DB_PATH" "PRAGMA journal_mode;" 2>&1) || true
    if [ "$mode" = "wal" ]; then
        log_info "WAL mode already enabled"
        return 0
    fi
    if ! sqlite3 "$DB_PATH" "PRAGMA journal_mode=WAL;" > /dev/null 2>&1; then
        log_warn "Failed to enable WAL mode (database may be locked)"
        return 1
    fi
    log_success "WAL mode enabled"
    return 0
}
# Cleanup old backups (keep last 10)
# Prune old backups in BACKUP_DIR, keeping only the 10 most recent
# charon_backup_*.db files; each removed backup's -wal/-shm sidecars are
# deleted with it.
# NOTE(review): relies on GNU find's -printf and GNU head's negative count
# (head -n -10) — presumably fine in the images this runs in, but it would
# not work on BSD/macOS userland; confirm if portability ever matters.
cleanup_old_backups() {
log_info "Cleaning up old backups (keeping last 10)..."
local backup_count
backup_count=$(find "$BACKUP_DIR" -name "charon_backup_*.db" -type f 2>/dev/null | wc -l)
if [ "$backup_count" -gt 10 ]; then
# Sort ascending by mtime (epoch prefix), drop the newest 10 with
# "head -n -10", delete everything older.
find "$BACKUP_DIR" -name "charon_backup_*.db" -type f -printf '%T@ %p\n' 2>/dev/null | \
sort -n | head -n -10 | cut -d' ' -f2- | \
while read -r file; do
rm -f "$file" "${file}-wal" "${file}-shm"
log_info "Removed old backup: $file"
done
fi
}
# Parse command line arguments
# Parse command-line flags into globals (sets FORCE_MODE). Prints usage and
# exits 0 for --help; exits 1 on an unrecognized option.
parse_args() {
    while [ $# -gt 0 ]; do
        case "$1" in
            --force | -f)
                FORCE_MODE=true
                ;;
            --help | -h)
                echo "Usage: $0 [--force]"
                echo ""
                echo "Options:"
                echo " --force, -f Skip confirmation prompts"
                echo " --help, -h Show this help message"
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                exit 1
                ;;
        esac
        shift
    done
}
# ==============================================================================
# Main Script
# ==============================================================================
# Orchestrates the full recovery flow:
#   backup -> integrity check -> healthy: enable WAL, prune backups, exit 0
#                             -> corrupt: confirm (unless --force), recover,
#                                enable WAL, prune backups, exit 0/1
main() {
echo "=============================================="
echo " Charon Database Recovery Tool"
echo "=============================================="
echo ""
parse_args "$@"
# Step 1: Check prerequisites
check_prerequisites
# Step 2: Detect environment
detect_environment
# Step 3: Ensure backup directory exists
ensure_backup_dir
# Step 4: Create backup before any operations
# NOTE(review): command substitution captures everything create_backup
# writes to stdout — create_backup must emit only the path there, or this
# variable ends up containing log lines as well.
local backup_file
backup_file=$(create_backup)
echo ""
# Step 5: Run integrity check
echo "=============================================="
echo " Integrity Check Results"
echo "=============================================="
local integrity_result
if integrity_result=$(run_integrity_check); then
echo "$integrity_result"
log_success "Database integrity check passed!"
echo ""
# Even if healthy, ensure WAL mode is enabled
# NOTE(review): enable_wal_mode returns 1 when the pragma fails; under
# set -e a bare call here aborts the whole script at that point —
# presumably this was meant to be best-effort, confirm.
enable_wal_mode
# Cleanup old backups
cleanup_old_backups
echo ""
echo "=============================================="
echo " Summary"
echo "=============================================="
log_success "Database is healthy"
log_info "Backup stored at: $backup_file"
exit 0
fi
# Database has issues
echo "$integrity_result"
log_error "Database integrity check FAILED"
echo ""
# Step 6: Confirm recovery (unless force mode)
if [ "$FORCE_MODE" != "true" ]; then
echo -e "${YELLOW}WARNING: Database corruption detected!${NC}"
echo "This script will attempt to recover the database."
echo "A backup has already been created at: $backup_file"
echo ""
read -p "Continue with recovery? (y/N): " -r confirm
if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
log_info "Recovery cancelled by user"
exit 1
fi
fi
# Step 7: Attempt recovery
echo ""
echo "=============================================="
echo " Recovery Process"
echo "=============================================="
if recover_database; then
# Step 8: Enable WAL mode on recovered database
enable_wal_mode
# Cleanup old backups
cleanup_old_backups
echo ""
echo "=============================================="
echo " Summary"
echo "=============================================="
log_success "Database recovery completed successfully!"
log_info "Original backup: $backup_file"
log_info "Please restart the Charon application"
exit 0
else
echo ""
echo "=============================================="
echo " Summary"
echo "=============================================="
log_error "Database recovery FAILED"
log_info "Your original database backup is at: $backup_file"
log_info "SQL dump (if created) is in: $BACKUP_DIR"
log_info "Manual intervention may be required"
exit 1
fi
}
# Run main function with all arguments
main "$@"

View File

@@ -1,23 +0,0 @@
"""Ad-hoc debug helper: print every proxy_hosts row from the Charon SQLite DB."""
import sqlite3
import os

# Hard-coded dev-container path; the existence check keeps sqlite3.connect()
# from silently creating an empty database file when the path is wrong.
db_path = '/projects/Charon/backend/data/charon.db'
if not os.path.exists(db_path):
    print(f"Database not found at {db_path}")
    exit(1)
try:
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT id, domain_names, forward_host, forward_port FROM proxy_hosts")
        rows = cursor.fetchall()
        print("Proxy Hosts:")
        for row in rows:
            print(f"ID: {row[0]}, Domains: {row[1]}, ForwardHost: {row[2]}, Port: {row[3]}")
    finally:
        # BUGFIX: always release the connection — the original only closed it
        # on the success path, leaking it when the query raised.
        conn.close()
except Exception as e:
    print(f"Error: {e}")

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Debug script to check rate limit configuration
echo "=== Starting debug container ==="
docker rm -f charon-debug 2>/dev/null || true
docker run -d --name charon-debug \
--network containers_default \
-p 8180:80 -p 8280:8080 -p 2119:2019 \
-e CHARON_ENV=development \
charon:local
sleep 10
echo ""
echo "=== Registering user ==="
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"debug@test.local","password":"pass123","name":"Debug"}' \
http://localhost:8280/api/v1/auth/register >/dev/null || true
echo "=== Logging in ==="
TOKEN=$(curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"debug@test.local","password":"pass123"}' \
-c /tmp/debug-cookie \
http://localhost:8280/api/v1/auth/login | jq -r '.token // empty')
echo ""
echo "=== Current security status (before config) ==="
curl -s -b /tmp/debug-cookie http://localhost:8280/api/v1/security/status | jq .
echo ""
echo "=== Setting security config ==="
curl -s -X POST -H "Content-Type: application/json" \
-d '{
"name": "default",
"enabled": true,
"rate_limit_enable": true,
"rate_limit_requests": 3,
"rate_limit_window_sec": 10,
"rate_limit_burst": 1,
"admin_whitelist": "0.0.0.0/0"
}' \
-b /tmp/debug-cookie \
http://localhost:8280/api/v1/security/config | jq .
echo ""
echo "=== Waiting for config to apply ==="
sleep 5
echo ""
echo "=== Security status (after config) ==="
curl -s -b /tmp/debug-cookie http://localhost:8280/api/v1/security/status | jq .
echo ""
echo "=== Security config from DB ==="
curl -s -b /tmp/debug-cookie http://localhost:8280/api/v1/security/config | jq .
echo ""
echo "=== Caddy config (checking for rate_limit handler) ==="
curl -s http://localhost:2119/config/ | jq '.apps.http.servers.charon_server.routes[0].handle // []' | grep -i rate_limit || echo "No rate_limit handler found"
echo ""
echo "=== Full Caddy route handlers ==="
curl -s http://localhost:2119/config/ | jq '.apps.http.servers.charon_server.routes[0].handle // []'
echo ""
echo "=== Container logs (last 50 lines) ==="
docker logs charon-debug 2>&1 | tail -50
echo ""
echo "=== Cleanup ==="
docker rm -f charon-debug
rm -f /tmp/debug-cookie

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Run the frontend test suite with coverage and fail when total statement
# coverage drops below the configured minimum.
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh test-frontend-coverage
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh test-frontend-coverage" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
FRONTEND_DIR="$ROOT_DIR/frontend"
# Minimum statement coverage (%); CHARON_MIN_COVERAGE takes precedence over
# the legacy CPM_MIN_COVERAGE name.
MIN_COVERAGE="${CHARON_MIN_COVERAGE:-${CPM_MIN_COVERAGE:-85}}"
cd "$FRONTEND_DIR"
# Ensure dependencies are installed for CI runs
npm ci --silent
# Ensure coverage output directories exist to avoid intermittent ENOENT errors
mkdir -p coverage/.tmp
# Run tests with coverage and json-summary reporter (force istanbul provider)
# Using istanbul ensures json-summary and coverage-summary artifacts are produced
# so that downstream checks can parse them reliably.
npm run test:coverage -- --run
SUMMARY_FILE="coverage/coverage-summary.json"
if [ ! -f "$SUMMARY_FILE" ]; then
    echo "Error: Coverage summary file not found at $SUMMARY_FILE"
    exit 1
fi
# Extract total statements percentage using python
TOTAL_PERCENT=$(python3 -c "import json; print(json.load(open('$SUMMARY_FILE'))['total']['statements']['pct'])")
echo "Computed frontend coverage: ${TOTAL_PERCENT}% (minimum required ${MIN_COVERAGE}%)"
# Pass values via the environment with a quoted heredoc instead of
# interpolating them into Python source — consistent with
# go-test-coverage.sh and immune to quoting/injection problems.
export TOTAL_PERCENT MIN_COVERAGE
python3 - <<'PY'
import os, sys
from decimal import Decimal
total = Decimal(os.environ['TOTAL_PERCENT'])
minimum = Decimal(os.environ['MIN_COVERAGE'])
if total < minimum:
    print(f"Frontend coverage {total}% is below required {minimum}% (set CHARON_MIN_COVERAGE or CPM_MIN_COVERAGE to override)", file=sys.stderr)
    sys.exit(1)
PY
echo "Frontend coverage requirement met"

View File

@@ -1,125 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Run the backend Go test suite with race detection and coverage, filter out
# entrypoint/infrastructure packages, and fail when total coverage drops
# below the configured minimum. Real test failures are propagated at the end
# (after the coverage report has been printed).
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh test-backend-coverage
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh test-backend-coverage" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BACKEND_DIR="$ROOT_DIR/backend"
COVERAGE_FILE="$BACKEND_DIR/coverage.txt"
MIN_COVERAGE="${CHARON_MIN_COVERAGE:-${CPM_MIN_COVERAGE:-85}}"
# Perf asserts are sensitive to -race overhead; loosen defaults for hook runs
export PERF_MAX_MS_GETSTATUS_P95="${PERF_MAX_MS_GETSTATUS_P95:-25ms}"
export PERF_MAX_MS_GETSTATUS_P95_PARALLEL="${PERF_MAX_MS_GETSTATUS_P95_PARALLEL:-50ms}"
export PERF_MAX_MS_LISTDECISIONS_P95="${PERF_MAX_MS_LISTDECISIONS_P95:-75ms}"
# trap 'rm -f "$COVERAGE_FILE"' EXIT
cd "$BACKEND_DIR"
# Packages to exclude from coverage (main packages and infrastructure code)
# These are entrypoints and initialization code that don't benefit from unit tests
EXCLUDE_PACKAGES=(
    "github.com/Wikid82/charon/backend/cmd/api"
    "github.com/Wikid82/charon/backend/cmd/seed"
    "github.com/Wikid82/charon/backend/internal/logger"
    "github.com/Wikid82/charon/backend/internal/metrics"
    "github.com/Wikid82/charon/backend/internal/trace"
    "github.com/Wikid82/charon/backend/integration"
    "github.com/Wikid82/charon/backend/pkg/dnsprovider/builtin"
)
# Run the tests and record the suite's real exit status so it can be bubbled
# up after the coverage check. Some toolchains return non-zero while still
# producing a usable coverage file, so we do not abort here.
# BUGFIX: the previous `if ! cmd; then STATUS=$?` pattern captured the exit
# status of the *negated condition* — always 0 when the tests failed — so
# real failures were never propagated. `cmd || STATUS=$?` records the
# command's own status and is safe under set -e.
GO_TEST_STATUS=0
if command -v gotestsum &> /dev/null; then
    gotestsum --format pkgname -- -race -mod=readonly -coverprofile="$COVERAGE_FILE" ./... || GO_TEST_STATUS=$?
else
    go test -race -v -mod=readonly -coverprofile="$COVERAGE_FILE" ./... || GO_TEST_STATUS=$?
fi
if [ "$GO_TEST_STATUS" -ne 0 ]; then
    echo "Warning: go test returned non-zero (status ${GO_TEST_STATUS}); checking coverage file presence"
fi
# Filter out excluded packages from coverage file
if [ -f "$COVERAGE_FILE" ]; then
    echo "Filtering excluded packages from coverage report..."
    FILTERED_COVERAGE="${COVERAGE_FILE}.filtered"
    # Build sed command with all patterns at once (more efficient than loop)
    SED_PATTERN=""
    for pkg in "${EXCLUDE_PACKAGES[@]}"; do
        if [ -z "$SED_PATTERN" ]; then
            SED_PATTERN="\|^${pkg}|d"
        else
            SED_PATTERN="${SED_PATTERN};\|^${pkg}|d"
        fi
    done
    # Use non-blocking sed with explicit input/output (avoids -i hang issues)
    timeout 30 sed "$SED_PATTERN" "$COVERAGE_FILE" > "$FILTERED_COVERAGE" || {
        echo "Error: Coverage filtering failed or timed out"
        echo "Using unfiltered coverage file"
        cp "$COVERAGE_FILE" "$FILTERED_COVERAGE"
    }
    mv "$FILTERED_COVERAGE" "$COVERAGE_FILE"
    echo "Coverage filtering complete"
fi
if [ ! -f "$COVERAGE_FILE" ]; then
    echo "Error: coverage file not generated by go test"
    exit 1
fi
# Generate coverage report once with timeout protection
# NOTE: Large repos can produce big coverage profiles; allow more time for parsing.
COVERAGE_OUTPUT=$(timeout 180 go tool cover -func="$COVERAGE_FILE" 2>&1) || {
    echo "Error: go tool cover failed or timed out after 180 seconds"
    echo "This may indicate corrupted coverage data or memory issues"
    exit 1
}
# Extract and display the summary line (total coverage)
TOTAL_LINE=$(echo "$COVERAGE_OUTPUT" | awk '/^total:/ {line=$0} END {print line}')
echo "$TOTAL_LINE"
# Extract total coverage percentage (strip the trailing '%' from column 3)
TOTAL_PERCENT=$(echo "$TOTAL_LINE" | awk '{print substr($3, 1, length($3)-1)}')
echo "Computed coverage: ${TOTAL_PERCENT}% (minimum required ${MIN_COVERAGE}%)"
export TOTAL_PERCENT
export MIN_COVERAGE
python3 - <<'PY'
import os, sys
from decimal import Decimal
total = Decimal(os.environ['TOTAL_PERCENT'])
minimum = Decimal(os.environ['MIN_COVERAGE'])
if total < minimum:
    print(f"Coverage {total}% is below required {minimum}% (set CHARON_MIN_COVERAGE or CPM_MIN_COVERAGE to override)", file=sys.stderr)
    sys.exit(1)
PY
echo "Coverage requirement met"
# Bubble up real test failures (after printing coverage info) so pre-commit
# reflects the actual test status.
if [ "$GO_TEST_STATUS" -ne 0 ]; then
    exit "$GO_TEST_STATUS"
fi

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Collect gopls diagnostics (rpc trace plus `go env` / `go version`) into a
# timestamped directory under /tmp, for attaching to upstream gopls or
# vscode-go bug reports.
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
OUT_DIR="/tmp/charon-gopls-logs-$(date +%s)"
mkdir -p "$OUT_DIR"
echo "Collecting gopls debug output to $OUT_DIR"
if ! command -v gopls >/dev/null 2>&1; then
echo "gopls not found in PATH. Install with: go install golang.org/x/tools/gopls@latest"
exit 2
fi
cd "$ROOT_DIR/backend"
echo "Running: gopls -rpc.trace -v check ./... > $OUT_DIR/gopls.log 2>&1"
# "|| true": even a failing check leaves a useful trace log behind.
gopls -rpc.trace -v check ./... > "$OUT_DIR/gopls.log" 2>&1 || true
echo "Also collecting 'go env' and 'go version'"
go version > "$OUT_DIR/go-version.txt" 2>&1 || true
go env > "$OUT_DIR/go-env.txt" 2>&1 || true
echo "Logs collected at: $OUT_DIR"
echo "Attach the $OUT_DIR contents when filing issues against golang/vscode-go or gopls."

View File

@@ -1 +0,0 @@
Triggered re-run by automation on 2025-12-09T14:32:02Z

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
usage() {
cat <<EOF
Usage: $0
Lists branches and tags, saves a tag reference tarball to data/backups.
EOF
}
if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then
usage; exit 0
fi
logdir="data/backups"
mkdir -p "$logdir"
ts=$(date +"%Y%m%d-%H%M%S")
tags_tar="$logdir/tags-$ts.tar.gz"
echo "Branches:"
git branch -a || true
echo "Tags:"
git tag -l || true
tmpdir=$(mktemp -d)
git show-ref --tags > "$tmpdir/tags-show-ref.txt" || true
tar -C "$tmpdir" -czf "$tags_tar" . || { echo "Warning: failed to create tag tarball" >&2; rm -rf "$tmpdir"; exit 1; }
rm -rf "$tmpdir"
echo "Created tags tarball: $tags_tar"
echo "Attempting to push tags to origin under refs/backups/tags/*"
for t in $(git tag --list); do
if ! git push origin "refs/tags/$t:refs/backups/tags/$t" >/dev/null 2>&1; then
echo "Warning: pushing tag $t to refs/backups/tags/$t failed" >&2
fi
done
echo "Done."
exit 0

View File

@@ -1,231 +0,0 @@
#!/usr/bin/env bash
# Bash script to safely preview and optionally run a git history rewrite
set -euo pipefail
IFS=$'\n\t'
# Default values
DRY_RUN=1
FORCE=0
NON_INTERACTIVE=0
PATHS="backend/codeql-db,codeql-db,codeql-db-js,codeql-db-go"
STRIP_SIZE=50
usage() {
cat <<EOF
Usage: $0 [--dry-run] [--force] [--paths 'p1,p2'] [--strip-size N]
Options:
--dry-run (default) Show what would be removed; no changes are made.
--force Run rewrite (destructive). Requires manual confirmation.
--paths Comma-separated list of paths to remove from history.
--strip-size Strip blobs larger than N MB in the history.
--help Show this help and exit.
Example:
$0 --dry-run --paths 'backend/codeql-db,codeql-db' --strip-size 50
$0 --force --paths 'backend/codeql-db' --strip-size 100
EOF
}
check_requirements() {
if ! command -v git >/dev/null 2>&1; then
echo "git is required but not found. Aborting." >&2
exit 1
fi
if ! command -v git-filter-repo >/dev/null 2>&1; then
echo "git-filter-repo not found. Please install it:"
echo " - Debian/Ubuntu: sudo apt install git-filter-repo"
echo " - Mac (Homebrew): brew install git-filter-repo"
echo " - Python pip: pip install git-filter-repo"
echo "Or see https://github.com/newren/git-filter-repo for details."
exit 2
fi
}
# Emit a sortable, POSIX-friendly timestamp, e.g. "20260126-192133".
timestamp() {
    date '+%Y%m%d-%H%M%S'
}
logdir="data/backups"
mkdir -p "$logdir"
logfile="$logdir/history_cleanup-$(timestamp).log"
echo "Starting history cleanup tool at $(date)" | tee "$logfile"
while [ "$#" -gt 0 ]; do
case "$1" in
--dry-run)
DRY_RUN=1; shift;;
--force)
DRY_RUN=0; FORCE=1; shift;;
--non-interactive)
NON_INTERACTIVE=1; shift;;
--paths)
PATHS="$2"; shift 2;;
--strip-size)
STRIP_SIZE="$2"; shift 2;;
--help)
usage; exit 0;;
*)
echo "Unknown option: $1" >&2; usage; exit 1;;
esac
done
check_requirements
# Reject shallow clones
if git rev-parse --is-shallow-repository >/dev/null 2>&1 && [ "$(git rev-parse --is-shallow-repository 2>/dev/null)" = "true" ]; then
echo "Shallow clone detected; fetch full history before rewriting history. Run: git fetch --unshallow or actions/checkout: fetch-depth: 0 in CI." | tee -a "$logfile"
exit 4
fi
current_branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "(detached)")
if [ "$current_branch" = "main" ] || [ "$current_branch" = "master" ]; then
if [ "$FORCE" -ne 1 ]; then
echo "Refusing to run on main/master branch. Switch to a feature branch and retry. To force running on main/master set FORCE=1" | tee -a "$logfile"
exit 3
fi
echo "WARNING: Running on main/master as FORCE=1 is set." | tee -a "$logfile"
fi
backup_branch="backup/history-$(timestamp)"
echo "Creating backup branch: $backup_branch" | tee -a "$logfile"
git branch -f "$backup_branch" || true
if ! git push origin "$backup_branch" >/dev/null 2>&1; then
echo "Error: Failed to push backup branch $backup_branch to origin. Aborting." | tee -a "$logfile"
exit 5
fi
IFS=','; set -f
paths_list=""
for p in $PATHS; do
# Expand shell expansion
paths_list="$paths_list $p"
done
set +f; unset IFS
echo "Paths targeted: $paths_list" | tee -a "$logfile"
echo "Strip blobs bigger than: ${STRIP_SIZE}M" | tee -a "$logfile"
# Ensure STRIP_SIZE is numeric
if ! printf '%s\n' "$STRIP_SIZE" | grep -Eq '^[0-9]+$'; then
echo "Error: --strip-size must be a numeric value (MB). Got: $STRIP_SIZE" | tee -a "$logfile"
exit 6
fi
preview_removals() {
echo "=== Preview: commits & blobs touching specified paths ===" | tee -a "$logfile"
# List commits that touch the paths
for p in $paths_list; do
echo "--- Path: $p" | tee -a "$logfile"
git rev-list --all -- "$p" | head -n 20 | tee -a "$logfile"
done
echo "=== End of commit preview ===" | tee -a "$logfile"
echo "=== Preview: objects in paths ===" | tee -a "$logfile"
# List objects for the given paths
for p in $paths_list; do
echo "Path: $p" | tee -a "$logfile"
git rev-list --objects --all -- "$p" | while read -r line; do
oid=$(printf '%s' "$line" | awk '{print $1}')
label=$(printf '%s' "$line" | awk '{print $2}')
type=$(git cat-file -t "$oid" 2>/dev/null || true)
if [ "$type" = "blob" ]; then
echo "$oid $label"
else
echo "[${type^^}] $oid $label"
fi
done | head -n 50 | tee -a "$logfile"
done
echo "=== Example large objects (candidate for --strip-size) ===" | tee -a "$logfile"
# List object sizes and show top N
git rev-list --objects --all | awk '{print $1}' | while read -r oid; do
size=$(git cat-file -s "$oid" 2>/dev/null || true)
if [ -n "$size" ] && [ "$size" -ge $((STRIP_SIZE * 1024 * 1024)) ]; then
echo "$oid size=$size"
fi
done | head -n 30 | tee -a "$logfile"
}
if [ "$DRY_RUN" -eq 1 ]; then
echo "Running dry-run mode. No destructive operations will be performed." | tee -a "$logfile"
preview_removals
echo "Dry-run complete. See $logfile for details." | tee -a "$logfile"
exit 0
fi
if [ "$FORCE" -ne 1 ]; then
echo "To run a destructive rewrite, pass --force. Aborting." | tee -a "$logfile"
exit 1
fi
echo "FORCE mode enabled - performing rewrite. This is destructive and will rewrite history." | tee -a "$logfile"
if [ "$NON_INTERACTIVE" -eq 0 ]; then
echo "Confirm operation: Type 'I UNDERSTAND' to proceed:" | tee -a "$logfile"
read -r confirmation
if [ "$confirmation" != "I UNDERSTAND" ]; then
echo "Confirmation not provided. Aborting." | tee -a "$logfile"
exit 1
fi
else
if [ "$FORCE" -ne 1 ]; then
echo "Error: Non-interactive mode requires FORCE=1 to proceed. Aborting." | tee -a "$logfile"
exit 1
fi
fi
## No additional branch check here; earlier check prevents running on main/master unless FORCE=1
# Build git-filter-repo arguments.
# BUGFIX: git-filter-repo's path-selection flag is --path (singular);
# "--paths" is not a valid option, so the rewrite invocation was broken.
# Use a bash array instead of an IFS-dependent word-split string so paths
# survive intact and no global IFS mutation leaks into the rest of the run.
filter_args=()
for p in $paths_list; do
    filter_args+=(--path "$p")
done
set +f
echo "Running git filter-repo with: ${filter_args[*]} --invert-paths --strip-blobs-bigger-than ${STRIP_SIZE}M" | tee -a "$logfile"
echo "Performing a local dry-run against a local clone before actual rewrite is strongly recommended." | tee -a "$logfile"
git filter-repo --invert-paths "${filter_args[@]}" --strip-blobs-bigger-than "${STRIP_SIZE}M" | tee -a "$logfile"
echo "Rewrite complete. Running post-rewrite checks..." | tee -a "$logfile"
git count-objects -vH | tee -a "$logfile"
git fsck --full | tee -a "$logfile"
git gc --aggressive --prune=now | tee -a "$logfile"
# Backup tags list as a tarball and try to push tags to a backup namespace
tags_tar="$logdir/tags-$(timestamp).tar.gz"
tmp_tags_dir=$(mktemp -d)
git for-each-ref --format='%(refname:short) %(objectname)' refs/tags > "$tmp_tags_dir/tags.txt"
tar -C "$tmp_tags_dir" -czf "$tags_tar" . || echo "Warning: failed to create tag tarball" | tee -a "$logfile"
rm -rf "$tmp_tags_dir"
echo "Created tags tarball: $tags_tar" | tee -a "$logfile"
echo "Attempting to push tags to origin under refs/backups/tags/*" | tee -a "$logfile"
for t in $(git tag --list); do
if ! git push origin "refs/tags/$t:refs/backups/tags/$t" >/dev/null 2>&1; then
echo "Warning: pushing tag $t to refs/backups/tags/$t failed" | tee -a "$logfile"
fi
done
echo "REWRITE DONE. Next steps (manual):" | tee -a "$logfile"
cat <<EOF | tee -a "$logfile"
- Verify repo locally and run CI checks: ./.venv/bin/pre-commit run --all-files
- Run backend tests: cd backend && go test ./...
- Run frontend build: cd frontend && npm run build
- Coordinate with maintainers prior to force-push. To finalize:
git push --all --force
git push --tags --force
- If anything goes wrong, restore from your backup branch: git checkout -b restore/$(date +"%Y%m%d-%H%M%S") $backup_branch
EOF
echo "Log saved to $logfile"
exit 0

View File

@@ -1,122 +0,0 @@
#!/usr/bin/env bash
# Preview the list of commits and objects that would be removed by clean_history.sh
set -euo pipefail
IFS=$'\n\t'
PATHS="backend/codeql-db,codeql-db,codeql-db-js,codeql-db-go"
STRIP_SIZE=50
FORMAT="text"
usage() {
cat <<EOF
Usage: $0 [--paths 'p1,p2'] [--strip-size N]
Prints commits and objects that would be removed by a history rewrite.
EOF
}
while [ "$#" -gt 0 ]; do
case "$1" in
--paths)
PATHS="$2"; shift 2;;
--strip-size)
STRIP_SIZE="$2"; shift 2;;
--format)
FORMAT="$2"; shift 2;;
--help)
usage; exit 0;;
*)
echo "Unknown option: $1" >&2; usage; exit 1;;
esac
done
IFS=','; set -f
paths_list=""
for p in $PATHS; do
paths_list="$paths_list $p"
done
set +f; unset IFS
echo "Paths: $paths_list"
echo "Strip blobs larger than: ${STRIP_SIZE}M"
# Reject shallow clones
if git rev-parse --is-shallow-repository >/dev/null 2>&1 && [ "$(git rev-parse --is-shallow-repository 2>/dev/null)" = "true" ]; then
echo "Error: Shallow clone detected. Please run 'git fetch --unshallow' or use actions/checkout fetch-depth: 0 to fetch full history." >&2
exit 2
fi
# Ensure STRIP_SIZE is numeric
if ! printf '%s\n' "$STRIP_SIZE" | grep -Eq '^[0-9]+$'; then
echo "Error: --strip-size must be a numeric value (MB). Got: $STRIP_SIZE" >&2
exit 3
fi
if [ "$FORMAT" = "json" ]; then
printf '{"paths":['
first_path=true
for p in $paths_list; do
if [ "$first_path" = true ]; then
printf '"%s"' "$p"
first_path=false
else
printf ',"%s"' "$p"
fi
done
printf '],"strip_size":%s,"commits":{' "$STRIP_SIZE"
fi
echo "--- Commits touching specified paths ---"
for p in $paths_list; do
if [ "$FORMAT" = "json" ]; then
printf '"%s":[' "$p"
git rev-list --all -- "$p" | head -n 50 | awk '{printf "%s\n", $0}' | sed -n '1,50p' | awk '{printf "%s,", $0}' | sed 's/,$//'
printf '],'
else
echo "Path: $p"
git rev-list --all -- "$p" | nl -ba | sed -n '1,50p'
fi
done
if [ "$FORMAT" = "json" ]; then
printf '},"objects":['
for p in $paths_list; do
git rev-list --objects --all -- "$p" | head -n 100 | awk '{printf "\"%s\",", $1}' | sed 's/,$//'
done
printf '],'
else
echo "--- Objects in paths (blob objects shown; tags highlighted) ---"
for p in $paths_list; do
echo "Path: $p"
git rev-list --objects --all -- "$p" | while read -r line; do
oid=$(printf '%s' "$line" | awk '{print $1}')
label=$(printf '%s' "$line" | awk '{print $2}')
type=$(git cat-file -t "$oid" 2>/dev/null || true)
if [ "$type" = "blob" ]; then
echo "$oid $label"
else
echo "[${type^^}] $oid $label"
fi
done | nl -ba | sed -n '1,100p'
done
fi
echo "--- Example large objects larger than ${STRIP_SIZE}M ---"
git rev-list --objects --all | awk '{print $1}' | while read -r oid; do
size=$(git cat-file -s "$oid" 2>/dev/null || true)
if [ -n "$size" ] && [ "$size" -ge $((STRIP_SIZE * 1024 * 1024)) ]; then
if [ "$FORMAT" = "json" ]; then
printf '{"oid":"%s","size":%s},' "$oid" "$size"
else
echo "$oid size=$size"
fi
fi
done | nl -ba | sed -n '1,50p'
if [ "$FORMAT" = "json" ]; then
printf '],"large_objects":[]}'
echo
else
echo "Preview complete. Use clean_history.sh --dry-run to get a log file."
fi
exit 0

View File

@@ -1,49 +0,0 @@
#!/usr/bin/env bats
# Bats suite for scripts/history-rewrite/{clean_history.sh,preview_removals.sh}.
# Each test runs inside a throwaway repo that contains a ~2M blob under
# backend/codeql-db (the path the scripts are asked to prune) and a stubbed
# git-filter-repo so nothing destructive actually runs.
setup() {
TMPREPO=$(mktemp -d)
cd "$TMPREPO"
git init -q
# Set local git identity for test commits
git config user.email "test@example.com"
git config user.name "Test Runner"
# create a directory that matches the paths to be pruned
mkdir -p backend/codeql-db
# add a large fake blob file
dd if=/dev/zero of=backend/codeql-db/largefile.bin bs=1M count=2 >/dev/null 2>&1 || true
git add -A && git commit -m 'add large blob' -q
git checkout -b feature/test
# Create a local bare repo to act as origin and allow git push
TMPORIGIN=$(mktemp -d)
git init --bare "$TMPORIGIN" >/dev/null
git remote add origin "$TMPORIGIN"
git push -u origin feature/test >/dev/null 2>&1 || true
# Add a stub git-filter-repo to PATH to satisfy requirements without installing
STUBBIN=$(mktemp -d)
cat > "$STUBBIN/git-filter-repo" <<'SH'
#!/usr/bin/env bash
echo "stub git-filter-repo called: $@"
exit 0
SH
chmod +x "$STUBBIN/git-filter-repo"
PATH="$STUBBIN:$PATH"
}
teardown() {
# NOTE(review): TMPORIGIN and STUBBIN are not removed here, leaking one temp
# dir each per test — consider cleaning them up too.
rm -rf "$TMPREPO"
}
# Resolve the repo root relative to this test file so the suite works from any
# checkout location (no hard-coded paths).
REPO_ROOT=$(cd "$BATS_TEST_DIRNAME/../../../" && pwd)
SCRIPT="$REPO_ROOT/scripts/history-rewrite/clean_history.sh"
@test "clean_history dry-run prints expected log and exits 0" {
run bash "$SCRIPT" --dry-run --paths 'backend/codeql-db' --strip-size 1
[ "$status" -eq 0 ]
[[ "$output" == *"Dry-run complete"* ]]
}
@test "preview_removals shows commits for the path" {
run bash "$REPO_ROOT/scripts/history-rewrite/preview_removals.sh" --paths 'backend/codeql-db' --strip-size 1
[ "$status" -eq 0 ]
[[ "$output" == *"Path: backend/codeql-db"* ]]
}

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bats
# Bats suite for clean_history.sh in --force --non-interactive mode.
# Runs in a throwaway repo with a stub git-filter-repo so the "rewrite" is a
# no-op; the assertion only checks the stub was invoked.
setup() {
TMPREPO=$(mktemp -d)
cd "$TMPREPO"
git init -q
# local git identity
git config user.email "test@example.com"
git config user.name "Test Runner"
# create a directory that matches the paths to be pruned
mkdir -p backend/codeql-db
echo "dummy" > backend/codeql-db/keep.txt
git add -A && git commit -m 'add test files' -q
git checkout -b feature/test
# Create a local bare repo to act as origin and allow git push
TMPORIGIN=$(mktemp -d)
git init --bare "$TMPORIGIN" >/dev/null
git remote add origin "$TMPORIGIN"
git push -u origin feature/test >/dev/null 2>&1 || true
# Add a stub git-filter-repo to PATH to satisfy requirements without installing
STUBBIN=$(mktemp -d)
cat > "$STUBBIN/git-filter-repo" <<'SH'
#!/usr/bin/env bash
echo "stub git-filter-repo called: $@"
exit 0
SH
chmod +x "$STUBBIN/git-filter-repo"
PATH="$STUBBIN:$PATH"
}
teardown() {
# NOTE(review): TMPORIGIN and STUBBIN temp dirs are leaked; only the working
# repo is removed.
rm -rf "$TMPREPO"
}
# Locate the script relative to the test file — portable across checkouts.
REPO_ROOT=$(cd "$BATS_TEST_DIRNAME/../../../" && pwd)
SCRIPT="$REPO_ROOT/scripts/history-rewrite/clean_history.sh"
@test "clean_history non-interactive + force runs without prompting and invokes git-filter-repo" {
run bash "$SCRIPT" --force --non-interactive --paths 'backend/codeql-db' --strip-size 1
[ "$status" -eq 0 ]
[[ "$output" == *"stub git-filter-repo called"* ]]
}

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env bats
# Bats suite for scripts/ci/dry_run_history_rewrite.sh: a repo containing only
# an annotated tag and a file outside the pruned paths must pass the dry run
# (tag-only objects should not be counted as offending content).
setup() {
TMPREPO=$(mktemp -d)
cd "$TMPREPO"
git init -q
# Set local git identity so commits succeed in CI
git config user.email "test@example.com"
git config user.name "Test Runner"
# Create a commit in an unrelated path
mkdir -p other/dir
echo hello > other/dir/file.txt
git add other/dir/file.txt && git commit -m 'add unrelated file' -q
# Create an annotated tag
git tag -a v0.3.0 -m "annotated tag v0.3.0"
}
teardown() {
rm -rf "$TMPREPO"
}
# Locate the CI helper relative to this test file.
REPO_ROOT=$(cd "$BATS_TEST_DIRNAME/../../../" && pwd)
SCRIPT="$REPO_ROOT/scripts/ci/dry_run_history_rewrite.sh"
@test "dry_run script ignores tag-only objects and passes" {
run bash "$SCRIPT" --paths 'backend/codeql-db' --strip-size 50
[ "$status" -eq 0 ]
[[ "$output" == *"DRY-RUN OK"* ]]
}

View File

@@ -1,41 +0,0 @@
#!/usr/bin/env bats
# Bats suite for validate_after_rewrite.sh: it must refuse to run without a
# backup branch and succeed when one is supplied. A stub pre-commit in .venv
# keeps the validation step from needing a real installation.
setup() {
# Create an isolated working repo
TMPREPO=$(mktemp -d)
cd "$TMPREPO"
git init -q
# Set local git identity for test commits
git config user.email "test@example.com"
git config user.name "Test Runner"
echo 'initial' > README.md
git add README.md && git commit -m 'init' -q
# Make a minimal .venv pre-commit stub
mkdir -p .venv/bin
cat > .venv/bin/pre-commit <<'SH'
#!/usr/bin/env sh
exit 0
SH
chmod +x .venv/bin/pre-commit
}
teardown() {
rm -rf "$TMPREPO"
}
## Prefer deriving the script location from the test directory rather than hard-coding
## repository root paths such as /projects/Charon. This is more portable across
## environments and CI runners (e.g., forks where the repo path is different).
SCRIPT_DIR=$(cd "$BATS_TEST_DIRNAME/.." && pwd -P)
SCRIPT="$SCRIPT_DIR/validate_after_rewrite.sh"
@test "validate_after_rewrite fails when backup branch is missing" {
run bash "$SCRIPT"
[ "$status" -ne 0 ]
[[ "$output" == *"backup branch not provided"* ]]
}
@test "validate_after_rewrite passes with backup branch argument" {
run bash "$SCRIPT" --backup-branch backup/main
[ "$status" -eq 0 ]
}

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
# Manual exerciser for clean_history.sh: builds a scratch clone with a bare
# origin, stubs git-filter-repo, and drives the script through its dry-run,
# interactive-force, and non-interactive-force paths.
set -euo pipefail
TMPREMOTE=$(mktemp -d)
git init --bare "$TMPREMOTE/remote.git"
TMPCLONE=$(mktemp -d)
cd "$TMPCLONE"
git clone "$TMPREMOTE/remote.git" .
# create a commit
mkdir -p backend/codeql-db
echo 'dummy' > backend/codeql-db/foo.txt
git add -A
git commit -m "Add dummy file" -q
git checkout -b feature/test
# set up stub git-filter-repo in PATH
## Resolve the repo root based on the script file location (Bash-safe)
# Use ${BASH_SOURCE[0]} instead of $0 to correctly resolve the script path even
# when invoked from different PWDs or via sourced contexts.
REPO_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/../../" && pwd -P)
TMPBIN=$(mktemp -d)
cat > "$TMPBIN/git-filter-repo" <<'SH'
#!/usr/bin/env sh
# Minimal stub to simulate git-filter-repo
while [ $# -gt 0 ]; do
shift
done
exit 0
SH
chmod +x "$TMPBIN/git-filter-repo"
export PATH="$TMPBIN:$PATH"
# run clean_history.sh with dry-run
# NOTE: Avoid hard-coded repo paths like /projects/Charon/
# Use the dynamically-derived REPO_ROOT (above) or a relative path from this script
# so this helper script runs correctly on other machines/CI environments.
# Examples:
#   "$REPO_ROOT/scripts/history-rewrite/clean_history.sh" --dry-run ...
#   "$(dirname "$0")/clean_history.sh" --dry-run ...
"$REPO_ROOT/scripts/history-rewrite/clean_history.sh" --dry-run --paths 'backend/codeql-db' --strip-size 1
# run clean_history.sh with force should attempt to push branch then succeed (requires that remote exists)
# The here-doc answers the interactive confirmation prompt.
"$REPO_ROOT/scripts/history-rewrite/clean_history.sh" --force --paths 'backend/codeql-db' --strip-size 1 <<'IN'
I UNDERSTAND
IN
# test non-interactive with force
"$REPO_ROOT/scripts/history-rewrite/clean_history.sh" --force --non-interactive --paths 'backend/codeql-db' --strip-size 1
# cleanup
rm -rf "$TMPREMOTE" "$TMPCLONE" "$TMPBIN"
echo 'done'

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env bash
# Manual exerciser for validate_after_rewrite.sh: runs it once without a
# backup branch (expected to fail) and once with one (expected to pass),
# inside a scratch repo with a stubbed pre-commit.
set -euo pipefail
TMP=$(mktemp -d)
REPO_ROOT=$(cd "$(dirname "$0")/../../" && pwd)
cd "$TMP"
git init -q
echo hi > README.md
git add README.md
git commit -q -m init
mkdir -p .venv/bin
cat > .venv/bin/pre-commit <<'PRE'
#!/usr/bin/env sh
exit 0
PRE
chmod +x .venv/bin/pre-commit
echo "temp repo: $TMP"
# Use the configured REPO_ROOT rather than hardcoding /projects/Charon.
# Note: avoid a leading slash before "$REPO_ROOT" which would make the path invalid
# on different hosts; use "$REPO_ROOT/scripts/..." directly.
"$REPO_ROOT/scripts/history-rewrite/validate_after_rewrite.sh" || echo "first run rc $?"
"$REPO_ROOT/scripts/history-rewrite/validate_after_rewrite.sh" --backup-branch backup/main || echo "second run rc $?"
# NOTE(review): because the previous line ends in `|| echo`, $? here is the
# echo/validate chain's status (effectively always 0), not the validator's rc.
echo exit status $?

View File

@@ -1,97 +0,0 @@
#!/usr/bin/env bash
# Verify repository health after a destructive history-rewrite
#
# Resolution order for the backup branch name: --backup-branch flag, then the
# BACKUP_BRANCH env var, then a best-effort grep of data/backups/ logs.
# Exit codes: 2 bad usage, 3 no backup branch found, 4 pre-commit missing,
# 5 pre-commit reported issues. Most git/test steps are best-effort (`|| true`
# or trailing `|| echo`) — inspect the output rather than relying on exit code.
set -euo pipefail
IFS=$'\n\t'
usage() {
cat <<EOF
Usage: $0 [--backup-branch BRANCH]
Performs: sanity checks after a destructive history-rewrite.
Options:
--backup-branch BRANCH   Name of the backup branch created prior to rewrite.
-h, --help               Show this help and exit.
EOF
}
backup_branch=""
# Manual flag parsing (only one long option, so getopts is not used).
while [ "${#}" -gt 0 ]; do
case "$1" in
--backup-branch)
shift
if [ -z "${1:-}" ]; then
echo "Error: --backup-branch requires an argument" >&2
usage
exit 2
fi
backup_branch="$1"
shift
;;
-h|--help)
usage; exit 0
;;
*)
echo "Unknown argument: $1" >&2; usage; exit 2
;;
esac
done
# Fallback to env variable
if [ -z "${backup_branch}" ]; then
if [ -n "${BACKUP_BRANCH:-}" ]; then
backup_branch="$BACKUP_BRANCH"
fi
fi
# If still not set, try to infer from data/backups logs
if [ -z "${backup_branch}" ] && [ -d data/backups ]; then
# Look for common patterns referencing a backup branch name
# NOTE(review): this sed strips everything up to the last ':' or '=' and then
# trims whitespace/quotes; it is heuristic — verify against the actual log
# format produced by the rewrite tooling.
candidate=$(grep -E "backup[-_]branch" data/backups/* 2>/dev/null | sed -E 's/.*[:=]//; s/^[[:space:]]+//; s/[[:space:]\047"\"]+$//' | head -n1 || true)
if [ -n "${candidate}" ]; then
backup_branch="$candidate"
fi
fi
if [ -z "${backup_branch}" ]; then
echo "Error: backup branch not provided. Use --backup-branch or set BACKUP_BRANCH environment variable, or ensure data/backups/ contains a log referencing the branch." >&2
exit 3
fi
# No positional args required; any unknown options are handled during parsing
# Repository integrity checks — informational only (`|| true`).
echo "Running git maintenance: git count-objects -vH"
git count-objects -vH || true
echo "Running git fsck --full"
git fsck --full || true
# Prefer the project-local virtualenv's pre-commit, fall back to PATH.
pre_commit_executable=""
if [ -x "./.venv/bin/pre-commit" ]; then
pre_commit_executable="./.venv/bin/pre-commit"
elif command -v pre-commit >/dev/null 2>&1; then
pre_commit_executable=$(command -v pre-commit)
fi
if [ -z "${pre_commit_executable}" ]; then
echo "Error: pre-commit not found. Install pre-commit in a virtualenv at ./.venv/bin/pre-commit or ensure it's in PATH." >&2
exit 4
fi
echo "Running pre-commit checks (${pre_commit_executable})"
${pre_commit_executable} run --all-files || { echo "pre-commit checks reported issues" >&2; exit 5; }
# Backend/frontend checks are best-effort: failures are reported but do not
# abort the validation run.
if [ -d backend ]; then
echo "Running backend go tests"
(cd backend && go test ./... -v) || echo "backend tests failed"
fi
if [ -d frontend ]; then
echo "Running frontend build"
(cd frontend && npm run build) || echo "frontend build failed"
fi
echo "Validation complete. Inspect output for errors. If something is wrong, restore:
git checkout -b restore/$(date +"%Y%m%d-%H%M%S") ${backup_branch:-}"
exit 0

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Install Go 1.25.6 into /usr/local/go, wire up PATH via /etc/profile.d,
# and install gopls into GOPATH/bin.
# Usage: sudo ./scripts/install-go-1.25.6.sh
GO_VERSION="1.25.6"
ARCH="linux-amd64"
TARFILE="go${GO_VERSION}.${ARCH}.tar.gz"
TMPFILE="/tmp/${TARFILE}"
# Default GOPATH/GOBIN when the caller has not set them (quoted per SC2086).
: "${GOPATH:=$HOME/go}"
: "${GOBIN:=${GOPATH}/bin}"
# Download the tarball if not already cached. Fetch to a ".part" file and
# rename only on success: previously an interrupted curl left a truncated
# "$TMPFILE" behind, and the next run skipped the download and extracted the
# corrupt archive.
if [ ! -f "$TMPFILE" ]; then
  echo "Downloading go${GO_VERSION}..."
  if ! curl -sSfL -o "${TMPFILE}.part" "https://go.dev/dl/${TARFILE}"; then
    rm -f "${TMPFILE}.part"
    echo "Error: failed to download https://go.dev/dl/${TARFILE}" >&2
    exit 1
  fi
  mv "${TMPFILE}.part" "$TMPFILE"
fi
# Remove existing installation (required by the upstream install docs).
if [ -d "/usr/local/go" ]; then
  echo "Removing existing /usr/local/go..."
  sudo rm -rf /usr/local/go
fi
# Extract
echo "Extracting to /usr/local..."
sudo tar -C /usr/local -xzf "$TMPFILE"
# Setup system PATH via /etc/profile.d. The here-doc delimiter is quoted on
# purpose so $GOPATH is expanded at login time, not at install time.
echo "Creating /etc/profile.d/go.sh to export /usr/local/go/bin and GOPATH/bin"
sudo tee /etc/profile.d/go.sh > /dev/null <<'EOF'
export PATH=/usr/local/go/bin:$GOPATH/bin:$PATH
EOF
sudo chmod +x /etc/profile.d/go.sh
# Update current session PATH
export PATH=/usr/local/go/bin:$GOPATH/bin:$PATH
# Verify
echo "Installed go: $(go version)"
# Optionally install gopls
echo "Installing gopls..."
go install golang.org/x/tools/gopls@latest
GOPLS_PATH="$GOPATH/bin/gopls"
if [ -f "$GOPLS_PATH" ]; then
  echo "gopls installed at $GOPLS_PATH"
  "$GOPLS_PATH" version || true
else
  echo "gopls not installed in GOPATH/bin"
fi
cat <<'EOF'
Done. Please restart your shell or run:
source /etc/profile.d/go.sh
and restart your editor's Go language server (Go: Restart Language Server in VS Code)
EOF

View File

@@ -1,226 +0,0 @@
#!/bin/bash
# End-to-end smoke test: waits for Charon, performs first-run setup and login,
# creates a proxy host pointing at a test upstream, and verifies Caddy routes
# traffic to it. Requires curl and jq; uses docker/python3 when available.
set -e
set -o pipefail
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh integration-test-all
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️  WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo "   Please use: .github/skills/scripts/skill-runner.sh integration-test-all" >&2
echo "   For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Fail entire script if it runs longer than 4 minutes (240 seconds)
# This prevents CI hangs from indefinite waits
TIMEOUT=${INTEGRATION_TEST_TIMEOUT:-240}
# Re-exec under `timeout` exactly once; INTEGRATION_TEST_WRAPPED guards
# against infinite re-exec recursion.
if command -v timeout >/dev/null 2>&1; then
if [ "${INTEGRATION_TEST_WRAPPED:-}" != "1" ]; then
export INTEGRATION_TEST_WRAPPED=1
exec timeout $TIMEOUT "$0" "$@"
fi
fi
# Configuration
API_URL="http://localhost:8080/api/v1"
ADMIN_EMAIL="admin@example.com"
ADMIN_PASSWORD="changeme"
# Poll the health endpoint for up to ~60s (30 attempts x 2s).
echo "Waiting for Charon to be ready..."
for i in $(seq 1 30); do
code=$(curl -s -o /dev/null -w "%{http_code}" $API_URL/health || echo "000")
if [ "$code" = "200" ]; then
echo "✅ Charon is ready!"
break
fi
echo "Attempt $i/30: health not ready (code=$code); waiting..."
sleep 2
done
if [ "$code" != "200" ]; then
echo "❌ Charon failed to start"
exit 1
fi
echo "Checking setup status..."
SETUP_RESPONSE=$(curl -s $API_URL/setup)
echo "Setup response: $SETUP_RESPONSE"
# Validate response is JSON before parsing
if ! echo "$SETUP_RESPONSE" | jq -e . >/dev/null 2>&1; then
echo "❌ Setup endpoint did not return valid JSON"
echo "Raw response: $SETUP_RESPONSE"
exit 1
fi
SETUP_REQUIRED=$(echo "$SETUP_RESPONSE" | jq -r .setupRequired)
# First-run setup: create the admin account if the instance is uninitialized.
if [ "$SETUP_REQUIRED" = "true" ]; then
echo "Setup is required; attempting to create initial admin..."
SETUP_RESPONSE=$(curl -s -X POST $API_URL/setup \
-H "Content-Type: application/json" \
-d "{\"name\":\"Administrator\",\"email\":\"$ADMIN_EMAIL\",\"password\":\"$ADMIN_PASSWORD\"}")
echo "Setup response: $SETUP_RESPONSE"
if echo "$SETUP_RESPONSE" | jq -e .user >/dev/null 2>&1; then
echo "✅ Setup completed"
else
echo "⚠️ Setup request returned unexpected response; continuing to login attempt"
fi
fi
echo "Logging in..."
TOKEN=$(curl -s -X POST $API_URL/auth/login \
-H "Content-Type: application/json" \
-d "{\"email\":\"$ADMIN_EMAIL\",\"password\":\"$ADMIN_PASSWORD\"}" | jq -r .token)
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
echo "❌ Login failed"
exit 1
fi
echo "✅ Login successful"
echo "Creating Proxy Host..."
# Remove existing proxy host for the domain to make the test idempotent
EXISTING_ID=$(curl -s -H "Authorization: Bearer $TOKEN" $API_URL/proxy-hosts | jq -r --arg domain "test.localhost" '.[] | select(.domain_names == $domain) | .uuid' | head -n1)
if [ -n "$EXISTING_ID" ]; then
echo "Found existing proxy host (ID: $EXISTING_ID), deleting..."
curl -s -X DELETE $API_URL/proxy-hosts/$EXISTING_ID -H "Authorization: Bearer $TOKEN"
# Wait until the host is removed and Caddy has reloaded
for i in $(seq 1 10); do
sleep 1
STILL_EXISTS=$(curl -s -H "Authorization: Bearer $TOKEN" $API_URL/proxy-hosts | jq -r --arg domain "test.localhost" '.[] | select(.domain_names == $domain) | .uuid' | head -n1)
if [ -z "$STILL_EXISTS" ]; then
break
fi
echo "Waiting for API to delete existing proxy host..."
done
fi
# Start a lightweight test upstream server to ensure proxy has a target (local-only). If a
# whoami container is already running on the Docker network, prefer using that.
USE_HOST_WHOAMI=false
if command -v docker >/dev/null 2>&1; then
if docker ps --format '{{.Names}}' | grep -q '^whoami$'; then
USE_HOST_WHOAMI=true
fi
fi
if [ "$USE_HOST_WHOAMI" = "false" ]; then
# NOTE(review): the inline server thread is a daemon and the python process
# reaches end-of-script immediately after start(), so the interpreter may
# exit and kill the server right away — confirm this upstream actually stays
# alive; serving in the foreground (the shell already backgrounds with `&`)
# would be more robust.
python3 -c "import http.server, socketserver
class Handler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(b'Hostname: local-test')
def log_message(self, format, *args):
pass
httpd=socketserver.TCPServer(('0.0.0.0', 8081), Handler)
import threading
threading.Thread(target=httpd.serve_forever, daemon=True).start()
" &
else
echo "Using existing whoami container for upstream tests"
fi
# Prefer "whoami" when running inside CI/docker (it resolves on the docker network).
# For local runs, default to 127.0.0.1 since we start the test upstream on the host —
# but if charon runs inside Docker and the upstream is bound to the host, we must
# use host.docker.internal so Caddy inside the container can reach the host service.
FORWARD_HOST="127.0.0.1"
FORWARD_PORT="8081"
if [ "$USE_HOST_WHOAMI" = "true" ]; then
FORWARD_HOST="whoami"
FORWARD_PORT="80"
fi
if [ -n "$CI" ] || [ -n "$GITHUB_ACTIONS" ]; then
FORWARD_HOST="whoami"
# whoami image listens on port 80 inside its container
FORWARD_PORT="80"
fi
# If we're running charon in Docker locally and we didn't choose whoami, prefer
# host.docker.internal so that the containerized Caddy can reach a host-bound upstream.
if command -v docker >/dev/null 2>&1; then
if docker ps --format '{{.Names}}' | grep -q '^charon-debug$' || docker ps --format '{{.Image}}' | grep -q 'charon:local'; then
if [ "$FORWARD_HOST" = "127.0.0.1" ]; then
FORWARD_HOST="host.docker.internal"
fi
fi
fi
echo "Using forward host: $FORWARD_HOST:$FORWARD_PORT"
# Adjust the Caddy/Caddy proxy test port for local runs to avoid conflicts with
# host services on port 80.
if [ -z "$CADDY_PORT" ]; then
CADDY_PORT="80"
if [ -z "$CI" ] && [ -z "$GITHUB_ACTIONS" ]; then
# Use a non-privileged port locally when binding to host: 8082
CADDY_PORT="8082"
fi
fi
echo "Using Caddy host port: $CADDY_PORT"
# Retry creation up to 5 times if the apply config call fails due to Caddy reloads
RESPONSE=""
for attempt in 1 2 3 4 5; do
RESPONSE=$(curl -s -X POST $API_URL/proxy-hosts \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d '{
"domain_names": "test.localhost",
"forward_scheme": "http",
"forward_host": "'"$FORWARD_HOST"'",
"forward_port": '"$FORWARD_PORT"',
"access_list_id": null,
"certificate_id": null,
"ssl_forced": false,
"caching_enabled": false,
"block_exploits": false,
"allow_websocket_upgrade": true,
"http2_support": true,
"hsts_enabled": false,
"hsts_subdomains": false,
"locations": []
}')
# If Response contains a failure message indicating caddy apply failed, retry
if echo "$RESPONSE" | grep -q "Failed to apply configuration"; then
echo "Warning: failed to apply config on attempt $attempt, retrying..."
# Wait for Caddy admin API on host to respond to /config to reduce collisions
for i in $(seq 1 10); do
if curl -s -o /dev/null -w "%{http_code}" http://localhost:${CADDY_ADMIN_PORT:-20194}/config/ >/dev/null 2>&1; then
break
fi
sleep 1
done
sleep $attempt
continue
fi
break
done
ID=$(echo $RESPONSE | jq -r .uuid)
if [ -z "$ID" ] || [ "$ID" = "null" ]; then
echo "❌ Failed to create proxy host: $RESPONSE"
exit 1
fi
echo "✅ Proxy Host created (ID: $ID)"
echo "Testing Proxy..."
# We use Host header to route to the correct proxy host
# We hit localhost:80 (Caddy) which should route to whoami
HTTP_CODE=0
CONTENT=""
# Retry probing Caddy for the new route for up to 30 seconds
for i in $(seq 1 30); do
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Host: test.localhost" http://localhost:${CADDY_PORT} || true)
CONTENT=$(curl -s -H "Host: test.localhost" http://localhost:${CADDY_PORT} || true)
if [ "$HTTP_CODE" = "200" ] && echo "$CONTENT" | grep -q "Hostname:"; then
break
fi
echo "Waiting for Caddy to pick up new route ($i/30)..."
sleep 1
done
if [ "$HTTP_CODE" = "200" ] && echo "$CONTENT" | grep -q "Hostname:"; then
echo "✅ Proxy test passed! Content received from whoami."
else
echo "❌ Proxy test failed (Code: $HTTP_CODE)"
echo "Content: $CONTENT"
exit 1
fi

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Pre-commit guard: refuse any staged path mentioning "codeql-db", except
# files under this hook's own directory or the backups directory.
changed=$(git diff --cached --name-only | tr '\r' '\n' || true)
if [ -n "${changed}" ]; then
  # Drop the exempt directories, then look for the forbidden marker anywhere
  # in the remaining paths.
  candidates=$(printf '%s\n' "$changed" \
    | grep -v '^scripts/pre-commit-hooks/' \
    | grep -v '^data/backups/' \
    || true)
  if printf '%s\n' "$candidates" | grep -q "codeql-db"; then
    echo "Error: Attempting to commit CodeQL database artifacts (codeql-db)." >&2
    echo "These should not be committed. Remove them or add to .gitignore and try again." >&2
    echo "Tip: Use 'scripts/repo_health_check.sh' to validate repository health." >&2
    exit 1
  fi
fi
exit 0

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
# Pre-commit hook: prevent committing any files under data/backups/ accidentally.
staged_files=$(git diff --cached --name-only || true)
if [ -z "$staged_files" ]; then
  exit 0
fi
# Iterate one path per line via `read` instead of `for f in $staged_files`:
# the unquoted for-loop performed pathname expansion on filenames containing
# glob characters and split on tabs. The here-doc feed (rather than a pipe)
# keeps the loop in the current shell so `exit 1` aborts the hook.
while IFS= read -r f; do
  [ -z "$f" ] && continue
  case "$f" in
    data/backups/*)
      echo "Error: Committing files under data/backups/ is blocked. Remove them from the commit and re-run." >&2
      exit 1
      ;;
  esac
done <<EOF
$staged_files
EOF
exit 0

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# pre-commit hook: ensure large files added to git are tracked by Git LFS.
MAX_BYTES=$((50 * 1024 * 1024))
FAILED=0
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM)
if [ -z "$STAGED_FILES" ]; then
  exit 0
fi
# file_size FILE — print size in bytes. Try GNU stat (-c%s) first, then the
# BSD/macOS spelling (-f%z); the original GNU-only call failed on macOS.
file_size() {
  stat -c%s "$1" 2>/dev/null || stat -f%z "$1"
}
while read -r f; do
  [ -z "$f" ] && continue
  if [ -f "$f" ]; then
    size=$(file_size "$f")
    if [ "$size" -gt "$MAX_BYTES" ]; then
      # `git check-attr` prints "<path>: filter: <value>". Strip everything up
      # to the final ": " instead of awk '{print $3}', which picked the wrong
      # field for paths containing spaces.
      attr_line=$(git check-attr filter -- "$f" || true)
      filter_attr=${attr_line##*: }
      if [ "$filter_attr" != "lfs" ]; then
        echo "ERROR: Large file not tracked by Git LFS: $f ($size bytes)" >&2
        FAILED=1
      fi
    fi
  fi
done <<<"$STAGED_FILES"
if [ $FAILED -ne 0 ]; then
  echo "You must track large files in Git LFS. Aborting commit." >&2
  exit 1
fi
exit 0

View File

@@ -1,69 +0,0 @@
#!/bin/bash
# Check CodeQL SARIF results for HIGH/CRITICAL findings
set -e
# ANSI colours for human-readable status output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
FAILED=0
# check_sarif SARIF_FILE LANG
# Inspect one SARIF file; print a summary and set FAILED=1 if findings exist.
# Missing files are treated as "scan not run yet" and do not fail the check.
check_sarif() {
local sarif_file=$1
local lang=$2
if [ ! -f "$sarif_file" ]; then
echo -e "${YELLOW}⚠️ No SARIF file found: $sarif_file${NC}"
echo "Run CodeQL scan first: pre-commit run codeql-$lang-scan --all-files"
return 0
fi
echo "🔍 Checking $lang findings..."
# Check for findings using jq (if available)
if command -v jq &> /dev/null; then
# Count high/critical severity findings
# NOTE(review): this actually counts every result at SARIF level "error" OR
# "warning", which is broader than "high/critical" — confirm that is the
# intended gate.
HIGH_COUNT=$(jq -r '.runs[].results[] | select(.level == "error" or .level == "warning") | .level' "$sarif_file" 2>/dev/null | wc -l || echo 0)
if [ "$HIGH_COUNT" -gt 0 ]; then
echo -e "${RED}❌ Found $HIGH_COUNT potential security issues in $lang code${NC}"
echo ""
echo "Summary:"
# Print at most 10 findings as "level: message (file:line)".
jq -r '.runs[].results[] | "\(.level): \(.message.text) (\(.locations[0].physicalLocation.artifactLocation.uri):\(.locations[0].physicalLocation.region.startLine))"' "$sarif_file" 2>/dev/null | head -10
echo ""
echo "View full results: code $sarif_file"
FAILED=1
else
echo -e "${GREEN}✅ No security issues found in $lang code${NC}"
fi
else
# Fallback: check if file has results
# (coarse grep check — only detects the empty-array form "results": [])
if grep -q '"results"' "$sarif_file" && ! grep -q '"results": \[\]' "$sarif_file"; then
echo -e "${YELLOW}⚠️ CodeQL findings detected in $lang (install jq for details)${NC}"
echo "View results: code $sarif_file"
FAILED=1
else
echo -e "${GREEN}✅ No security issues found in $lang code${NC}"
fi
fi
}
echo "🔒 Checking CodeQL findings..."
echo ""
check_sarif "codeql-results-go.sarif" "go"
check_sarif "codeql-results-js.sarif" "js"
if [ $FAILED -eq 1 ]; then
echo ""
echo -e "${RED}❌ CodeQL scan found security issues. Please fix before committing.${NC}"
echo ""
echo "To view results:"
echo "  - VS Code: Install SARIF Viewer extension"
echo "  - Command line: jq . codeql-results-*.sarif"
exit 1
fi
echo ""
echo -e "${GREEN}✅ All CodeQL checks passed${NC}"

View File

@@ -1,38 +0,0 @@
#!/bin/bash
# Pre-commit CodeQL Go scan - CI-aligned: rebuilds the CodeQL database for the
# backend/ tree and analyzes it with the go-security-and-quality suite,
# mirroring what CI runs.
set -e
# Status colours (RED/YELLOW kept for parity with the sibling scripts).
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

echo -e "${BLUE}🔍 Running CodeQL Go scan (CI-aligned)...${NC}"
echo ""

# Start from a clean slate so stale extraction state never leaks into results.
rm -rf codeql-db-go

echo "📦 Creating CodeQL database..."
codeql database create codeql-db-go \
  --language=go \
  --source-root=backend \
  --threads=0 \
  --overwrite

echo ""
echo "📊 Analyzing with security-and-quality suite..."
# Same query suite CI uses; SARIF output is consumed by codeql-check-findings.
codeql database analyze codeql-db-go \
  codeql/go-queries:codeql-suites/go-security-and-quality.qls \
  --format=sarif-latest \
  --output=codeql-results-go.sarif \
  --sarif-add-baseline-file-info \
  --threads=0

echo -e "${GREEN}✅ CodeQL Go scan complete${NC}"
echo "Results saved to: codeql-results-go.sarif"
echo ""
echo "Run 'pre-commit run codeql-check-findings' to validate findings"

View File

@@ -1,42 +0,0 @@
#!/bin/bash
# Pre-commit CodeQL JavaScript/TypeScript scan - CI-aligned: rebuilds the
# CodeQL database for frontend/ and analyzes it with the
# javascript-security-and-quality suite, mirroring CI.
set -e
# Status colours (RED/YELLOW kept for parity with the sibling scripts).
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

echo -e "${BLUE}🔍 Running CodeQL JavaScript/TypeScript scan (CI-aligned)...${NC}"
echo ""

# Remove generated artifacts that can create noisy/false findings during CodeQL analysis
rm -rf frontend/coverage frontend/dist playwright-report test-results coverage
# Start from a clean database so stale extraction state never leaks in.
rm -rf codeql-db-js

echo "📦 Creating CodeQL database..."
codeql database create codeql-db-js \
  --language=javascript \
  --build-mode=none \
  --source-root=frontend \
  --threads=0 \
  --overwrite

echo ""
echo "📊 Analyzing with security-and-quality suite..."
# Same query suite CI uses; SARIF output is consumed by codeql-check-findings.
codeql database analyze codeql-db-js \
  codeql/javascript-queries:codeql-suites/javascript-security-and-quality.qls \
  --format=sarif-latest \
  --output=codeql-results-js.sarif \
  --sarif-add-baseline-file-info \
  --threads=0

echo -e "${GREEN}✅ CodeQL JavaScript/TypeScript scan complete${NC}"
echo "Results saved to: codeql-results-js.sarif"
echo ""
echo "Run 'pre-commit run codeql-check-findings' to validate findings"

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Wrapper script for golangci-lint fast linters in pre-commit.
# This ensures golangci-lint works in both terminal and VS Code pre-commit
# integration by locating the binary in PATH or common install locations,
# then running the fast config from the backend/ module.
GOLANGCI_LINT=""
# Check if already in PATH
if command -v golangci-lint >/dev/null 2>&1; then
  GOLANGCI_LINT="golangci-lint"
else
  # Check common installation locations
  COMMON_PATHS=(
    "$HOME/go/bin/golangci-lint"
    "/usr/local/bin/golangci-lint"
    "/usr/bin/golangci-lint"
    "${GOPATH:-$HOME/go}/bin/golangci-lint"
  )
  for path in "${COMMON_PATHS[@]}"; do
    if [[ -x "$path" ]]; then
      GOLANGCI_LINT="$path"
      break
    fi
  done
fi
# Exit if not found
if [[ -z "$GOLANGCI_LINT" ]]; then
  echo "ERROR: golangci-lint not found in PATH or common locations"
  echo "Searched:"
  echo "  - PATH: $PATH"
  echo "  - $HOME/go/bin/golangci-lint"
  echo "  - /usr/local/bin/golangci-lint"
  echo "  - /usr/bin/golangci-lint"
  echo ""
  echo "Install from: https://golangci-lint.run/usage/install/"
  exit 1
fi
# Locate the backend directory relative to this script. Use ${BASH_SOURCE[0]}
# rather than $0 so the path resolves correctly when the hook is invoked
# indirectly or sourced (matching the convention used elsewhere in scripts/).
cd "$(dirname "${BASH_SOURCE[0]}")/../../backend" || exit 1
exec "$GOLANGCI_LINT" run --config .golangci-fast.yml ./...

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Wrapper script for golangci-lint full linters in pre-commit.
# This ensures golangci-lint works in both terminal and VS Code pre-commit
# integration by locating the binary in PATH or common install locations,
# then running the full (verbose) lint pass from the backend/ module.
GOLANGCI_LINT=""
# Check if already in PATH
if command -v golangci-lint >/dev/null 2>&1; then
  GOLANGCI_LINT="golangci-lint"
else
  # Check common installation locations
  COMMON_PATHS=(
    "$HOME/go/bin/golangci-lint"
    "/usr/local/bin/golangci-lint"
    "/usr/bin/golangci-lint"
    "${GOPATH:-$HOME/go}/bin/golangci-lint"
  )
  for path in "${COMMON_PATHS[@]}"; do
    if [[ -x "$path" ]]; then
      GOLANGCI_LINT="$path"
      break
    fi
  done
fi
# Exit if not found
if [[ -z "$GOLANGCI_LINT" ]]; then
  echo "ERROR: golangci-lint not found in PATH or common locations"
  echo "Searched:"
  echo "  - PATH: $PATH"
  echo "  - $HOME/go/bin/golangci-lint"
  echo "  - /usr/local/bin/golangci-lint"
  echo "  - /usr/bin/golangci-lint"
  echo ""
  echo "Install from: https://golangci-lint.run/usage/install/"
  exit 1
fi
# Locate the backend directory relative to this script. Use ${BASH_SOURCE[0]}
# rather than $0 so the path resolves correctly when the hook is invoked
# indirectly or sourced (matching the convention used elsewhere in scripts/).
cd "$(dirname "${BASH_SOURCE[0]}")/../../backend" || exit 1
exec "$GOLANGCI_LINT" run -v ./...

View File

@@ -1,292 +0,0 @@
#!/bin/bash
# QA Test Script: Certificate Page Authentication
# Tests authentication fixes for certificate endpoints
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
BASE_URL="${BASE_URL:-http://localhost:8080}"
API_URL="${BASE_URL}/api/v1"
COOKIE_FILE="/tmp/charon-test-cookies.txt"
# Derive repository root dynamically so script works outside specific paths
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
TEST_RESULTS="$REPO_ROOT/test-results/qa-auth-test-results.log"
# Clear previous results
> "$TEST_RESULTS"
> "$COOKIE_FILE"
echo -e "${BLUE}=== QA Test: Certificate Page Authentication ===${NC}"
echo "Testing authentication fixes for certificate endpoints"
echo "Base URL: $BASE_URL"
echo ""
# Function to log test results
log_test() {
local status=$1
local test_name=$2
local details=$3
echo "[$status] $test_name" | tee -a "$TEST_RESULTS"
if [ -n "$details" ]; then
echo " Details: $details" | tee -a "$TEST_RESULTS"
fi
}
# Function to print section header
section() {
echo -e "\n${BLUE}=== $1 ===${NC}\n"
echo "=== $1 ===" >> "$TEST_RESULTS"
}
# Phase 1: Certificate Page Authentication Tests
section "Phase 1: Certificate Page Authentication Tests"
# Test 1.1: Login and Cookie Verification
echo -e "${YELLOW}Test 1.1: Login and Cookie Verification${NC}"
# First, ensure test user exists (idempotent)
curl -s -X POST "$API_URL/auth/register" \
-H "Content-Type: application/json" \
-d '{"email":"qa-test@example.com","password":"QATestPass123!","name":"QA Test User"}' > /dev/null 2>&1
LOGIN_RESPONSE=$(curl -s -c "$COOKIE_FILE" -X POST "$API_URL/auth/login" \
-H "Content-Type: application/json" \
-d '{"email":"qa-test@example.com","password":"QATestPass123!"}' \
-w "\n%{http_code}")
HTTP_CODE=$(echo "$LOGIN_RESPONSE" | tail -n1)
RESPONSE_BODY=$(echo "$LOGIN_RESPONSE" | sed '$d')
if [ "$HTTP_CODE" = "200" ]; then
log_test "PASS" "Login successful" "HTTP $HTTP_CODE"
# Check if auth_token cookie exists
if grep -q "auth_token" "$COOKIE_FILE"; then
log_test "PASS" "auth_token cookie created" ""
# Extract cookie details
COOKIE_LINE=$(grep "auth_token" "$COOKIE_FILE")
echo " Cookie details: $COOKIE_LINE" | tee -a "$TEST_RESULTS"
# Note: HttpOnly and Secure flags are not visible in curl cookie file
# These would need to be verified in browser DevTools
log_test "INFO" "Cookie flags (HttpOnly, Secure, SameSite)" "Verify manually in browser DevTools"
else
log_test "FAIL" "auth_token cookie NOT created" "Cookie file: $COOKIE_FILE"
fi
else
log_test "FAIL" "Login failed" "HTTP $HTTP_CODE - $RESPONSE_BODY"
exit 1
fi
# Test 1.2: Certificate List (GET /api/v1/certificates)
echo -e "\n${YELLOW}Test 1.2: Certificate List (GET /api/v1/certificates)${NC}"
LIST_RESPONSE=$(curl -s -b "$COOKIE_FILE" "$API_URL/certificates" -w "\n%{http_code}" -v 2>&1)
HTTP_CODE=$(echo "$LIST_RESPONSE" | grep "< HTTP" | awk '{print $3}')
RESPONSE_BODY=$(echo "$LIST_RESPONSE" | grep -v "^[<>*]" | sed '/^$/d' | tail -n +2)
echo "Response: $RESPONSE_BODY" | tee -a "$TEST_RESULTS"
if echo "$LIST_RESPONSE" | grep -q "Cookie: auth_token"; then
log_test "PASS" "Request includes auth_token cookie" ""
else
log_test "WARN" "Could not verify Cookie header in request" "Check manually in browser Network tab"
fi
if [ "$HTTP_CODE" = "200" ]; then
log_test "PASS" "Certificate list request successful" "HTTP $HTTP_CODE"
# Check if response is valid JSON array
if echo "$RESPONSE_BODY" | jq -e 'type == "array"' > /dev/null 2>&1; then
CERT_COUNT=$(echo "$RESPONSE_BODY" | jq 'length')
log_test "PASS" "Response is valid JSON array" "Count: $CERT_COUNT certificates"
else
log_test "WARN" "Response is not a JSON array" ""
fi
elif [ "$HTTP_CODE" = "401" ]; then
log_test "FAIL" "Authentication failed - 401 Unauthorized" "Cookie not being sent or not valid"
echo "Response body: $RESPONSE_BODY" | tee -a "$TEST_RESULTS"
else
log_test "FAIL" "Certificate list request failed" "HTTP $HTTP_CODE"
fi
# Test 1.3: Certificate Upload (POST /api/v1/certificates)
echo -e "\n${YELLOW}Test 1.3: Certificate Upload (POST /api/v1/certificates)${NC}"
# Create test certificate and key
TEST_CERT_DIR="/tmp/charon-test-certs"
mkdir -p "$TEST_CERT_DIR"
# Generate self-signed certificate for testing
# (1-day validity, no passphrase; stderr suppressed to keep test output clean)
openssl req -x509 -newkey rsa:2048 -keyout "$TEST_CERT_DIR/test.key" -out "$TEST_CERT_DIR/test.crt" \
    -days 1 -nodes -subj "/CN=qa-test.local" 2>/dev/null
if [ -f "$TEST_CERT_DIR/test.crt" ] && [ -f "$TEST_CERT_DIR/test.key" ]; then
    log_test "INFO" "Test certificate generated" "$TEST_CERT_DIR"
    # Upload certificate
    # -w appends the HTTP status on its own line so body and code can be split below
    UPLOAD_RESPONSE=$(curl -s -b "$COOKIE_FILE" -X POST "$API_URL/certificates" \
        -F "name=QA-Test-Cert-$(date +%s)" \
        -F "certificate_file=@$TEST_CERT_DIR/test.crt" \
        -F "key_file=@$TEST_CERT_DIR/test.key" \
        -w "\n%{http_code}")
    # Last line is the status code; everything before it is the response body
    HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1)
    RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d')
    if [ "$HTTP_CODE" = "201" ]; then
        log_test "PASS" "Certificate upload successful" "HTTP $HTTP_CODE"
        # Extract certificate ID for later deletion
        CERT_ID=$(echo "$RESPONSE_BODY" | jq -r '.id' 2>/dev/null || echo "")
        if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ]; then
            log_test "INFO" "Certificate created with ID: $CERT_ID" ""
            # Persist the ID so Test 1.4 (delete) can pick it up
            echo "$CERT_ID" > /tmp/charon-test-cert-id.txt
        fi
    elif [ "$HTTP_CODE" = "401" ]; then
        log_test "FAIL" "Upload authentication failed - 401 Unauthorized" "Cookie not being sent"
    else
        log_test "FAIL" "Certificate upload failed" "HTTP $HTTP_CODE - $RESPONSE_BODY"
    fi
else
    log_test "FAIL" "Could not generate test certificate" ""
fi
# Test 1.4: Certificate Delete (DELETE /api/v1/certificates/:id)
echo -e "\n${YELLOW}Test 1.4: Certificate Delete (DELETE /api/v1/certificates/:id)${NC}"
if [ -f /tmp/charon-test-cert-id.txt ]; then
    CERT_ID=$(cat /tmp/charon-test-cert-id.txt)
    if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ]; then
        DELETE_RESPONSE=$(curl -s -b "$COOKIE_FILE" -X DELETE "$API_URL/certificates/$CERT_ID" -w "\n%{http_code}")
        HTTP_CODE=$(echo "$DELETE_RESPONSE" | tail -n1)
        RESPONSE_BODY=$(echo "$DELETE_RESPONSE" | sed '$d')
        if [ "$HTTP_CODE" = "200" ]; then
            log_test "PASS" "Certificate delete successful" "HTTP $HTTP_CODE"
        elif [ "$HTTP_CODE" = "401" ]; then
            log_test "FAIL" "Delete authentication failed - 401 Unauthorized" "Cookie not being sent"
        elif [ "$HTTP_CODE" = "409" ]; then
            # 409 means the certificate is attached to a proxy host; not a failure
            log_test "INFO" "Certificate in use (expected for active certs)" "HTTP $HTTP_CODE"
        else
            log_test "WARN" "Certificate delete failed" "HTTP $HTTP_CODE - $RESPONSE_BODY"
        fi
    else
        log_test "SKIP" "Certificate delete test" "No certificate ID available"
    fi
else
    log_test "SKIP" "Certificate delete test" "Upload test did not create a certificate"
fi
# Test 1.5: Unauthorized Access
echo -e "\n${YELLOW}Test 1.5: Unauthorized Access${NC}"
# Remove cookies and try to access
rm -f "$COOKIE_FILE"
UNAUTH_RESPONSE=$(curl -s "$API_URL/certificates" -w "\n%{http_code}")
HTTP_CODE=$(echo "$UNAUTH_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "401" ]; then
    log_test "PASS" "Unauthorized access properly rejected" "HTTP $HTTP_CODE"
else
    log_test "FAIL" "Unauthorized access NOT rejected" "HTTP $HTTP_CODE (expected 401)"
fi
# Phase 2: Regression Testing Other Endpoints
section "Phase 2: Regression Testing Other Endpoints"
# Re-login for regression tests
# (Test 1.5 deleted the cookie file, so a fresh session is required here)
echo -e "${YELLOW}Re-authenticating for regression tests...${NC}"
curl -s -c "$COOKIE_FILE" -X POST "$API_URL/auth/login" \
    -H "Content-Type: application/json" \
    -d '{"email":"qa-test@example.com","password":"QATestPass123!"}' > /dev/null
# Each regression test below follows the same pattern: GET the endpoint,
# split the status code from the -w trailer, and grade 200/401/other.
# Test 2.1: Proxy Hosts Page
echo -e "\n${YELLOW}Test 2.1: Proxy Hosts Page (GET /api/v1/proxy-hosts)${NC}"
HOSTS_RESPONSE=$(curl -s -b "$COOKIE_FILE" "$API_URL/proxy-hosts" -w "\n%{http_code}")
HTTP_CODE=$(echo "$HOSTS_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ]; then
    log_test "PASS" "Proxy hosts list successful" "HTTP $HTTP_CODE"
elif [ "$HTTP_CODE" = "401" ]; then
    log_test "FAIL" "Proxy hosts authentication failed" "HTTP $HTTP_CODE"
else
    log_test "WARN" "Proxy hosts request failed" "HTTP $HTTP_CODE"
fi
# Test 2.2: Backups Page
echo -e "\n${YELLOW}Test 2.2: Backups Page (GET /api/v1/backups)${NC}"
BACKUPS_RESPONSE=$(curl -s -b "$COOKIE_FILE" "$API_URL/backups" -w "\n%{http_code}")
HTTP_CODE=$(echo "$BACKUPS_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ]; then
    log_test "PASS" "Backups list successful" "HTTP $HTTP_CODE"
elif [ "$HTTP_CODE" = "401" ]; then
    log_test "FAIL" "Backups authentication failed" "HTTP $HTTP_CODE"
else
    log_test "WARN" "Backups request failed" "HTTP $HTTP_CODE"
fi
# Test 2.3: Settings Page
echo -e "\n${YELLOW}Test 2.3: Settings Page (GET /api/v1/settings)${NC}"
SETTINGS_RESPONSE=$(curl -s -b "$COOKIE_FILE" "$API_URL/settings" -w "\n%{http_code}")
HTTP_CODE=$(echo "$SETTINGS_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ]; then
    log_test "PASS" "Settings list successful" "HTTP $HTTP_CODE"
elif [ "$HTTP_CODE" = "401" ]; then
    log_test "FAIL" "Settings authentication failed" "HTTP $HTTP_CODE"
else
    log_test "WARN" "Settings request failed" "HTTP $HTTP_CODE"
fi
# Test 2.4: User Management
echo -e "\n${YELLOW}Test 2.4: User Management (GET /api/v1/users)${NC}"
USERS_RESPONSE=$(curl -s -b "$COOKIE_FILE" "$API_URL/users" -w "\n%{http_code}")
HTTP_CODE=$(echo "$USERS_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ]; then
    log_test "PASS" "Users list successful" "HTTP $HTTP_CODE"
elif [ "$HTTP_CODE" = "401" ]; then
    log_test "FAIL" "Users authentication failed" "HTTP $HTTP_CODE"
else
    log_test "WARN" "Users request failed" "HTTP $HTTP_CODE"
fi
# Summary
section "Test Summary"
echo -e "\n${BLUE}=== Test Results Summary ===${NC}\n"
# Count result lines in the results file by their status prefix.
# FIX: `grep -c` already prints "0" (while exiting 1) when there are no
# matches, so the original `|| echo "0"` produced a two-line value
# ("0<newline>0") that broke the numeric [ -gt ] comparison below.
# Here `|| true` keeps grep's own single "0"; ${n:-0} covers a missing file.
count_results() {
    local n
    n=$(grep -c -- "$1" "$TEST_RESULTS" 2>/dev/null) || true
    echo "${n:-0}"
}
TOTAL_TESTS=$(count_results "^\[")
PASSED=$(count_results "^\[PASS\]")
FAILED=$(count_results "^\[FAIL\]")
WARNINGS=$(count_results "^\[WARN\]")
SKIPPED=$(count_results "^\[SKIP\]")
echo "Total Tests: $TOTAL_TESTS"
echo -e "${GREEN}Passed: $PASSED${NC}"
echo -e "${RED}Failed: $FAILED${NC}"
echo -e "${YELLOW}Warnings: $WARNINGS${NC}"
echo "Skipped: $SKIPPED"
echo ""
echo "Full test results saved to: $TEST_RESULTS"
echo ""
# Exit with error if any tests failed
if [ "$FAILED" -gt 0 ]; then
    echo -e "${RED}Some tests FAILED. Review the results above.${NC}"
    exit 1
else
    echo -e "${GREEN}All critical tests PASSED!${NC}"
    exit 0
fi

View File

@@ -1,408 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Brief: Integration test for Rate Limiting using Docker Compose and built image
# Steps:
# 1. Build the local image if not present: docker build -t charon:local .
# 2. Start Charon container with rate limiting enabled
# 3. Create a test proxy host via API
# 4. Configure rate limiting with short windows (3 requests per 10 seconds)
# 5. Send rapid requests and verify:
#    - First N requests return HTTP 200
#    - Request N+1 returns HTTP 429
#    - Retry-After header is present on blocked response
# 6. Wait for window to reset, verify requests allowed again
# 7. Clean up test resources
# Ensure we operate from repo root
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# ============================================================================
# Configuration
# ============================================================================
# Small window/limit so the whole enforcement + reset cycle fits in seconds.
RATE_LIMIT_REQUESTS=3
RATE_LIMIT_WINDOW_SEC=10
RATE_LIMIT_BURST=1
# Names for the disposable Charon and backend containers created by this test.
CONTAINER_NAME="charon-ratelimit-test"
BACKEND_CONTAINER="ratelimit-backend"
# Host header used to route test requests through the created proxy host.
TEST_DOMAIN="ratelimit.local"
# ============================================================================
# Helper Functions
# ============================================================================
# Verifies rate limit handler is present in Caddy config.
# Polls the Caddy admin API until the "rate_limit" handler appears,
# returning 0 on success and 1 after exhausting all attempts.
verify_rate_limit_config() {
    local max_attempts=10
    local delay=3
    local attempt cfg
    echo "Verifying rate limit config in Caddy..."
    for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
        # Fetch the live Caddy configuration via the admin API
        cfg=$(curl -s http://localhost:2119/config 2>/dev/null || echo "")
        if [ -z "$cfg" ]; then
            echo "  Attempt $attempt/$max_attempts: Caddy admin API not responding, retrying..."
            sleep $delay
            continue
        fi
        # Check for rate_limit handler in the JSON config
        if grep -q '"handler":"rate_limit"' <<<"$cfg"; then
            echo "  ✓ rate_limit handler found in Caddy config"
            return 0
        fi
        echo "  Attempt $attempt/$max_attempts: rate_limit handler not found, waiting..."
        sleep $delay
    done
    echo "  ✗ rate_limit handler verification failed after $max_attempts attempts"
    return 1
}
# Dumps debug information on failure: container logs, live Caddy config,
# and current API state. Installed as an ERR trap; $? at entry is the
# status of the command that tripped the trap.
on_failure() {
    local rc=$?
    printf '\n'
    printf '%s\n' "=============================================="
    printf '%s\n' "=== FAILURE DEBUG INFO (exit code: $rc) ==="
    printf '%s\n' "=============================================="
    printf '\n'
    printf '%s\n' "=== Charon API Logs (last 150 lines) ==="
    docker logs ${CONTAINER_NAME} 2>&1 | tail -150 || echo "Could not retrieve container logs"
    printf '\n'
    printf '%s\n' "=== Caddy Admin API Config ==="
    curl -s http://localhost:2119/config 2>/dev/null | head -300 || echo "Could not retrieve Caddy config"
    printf '\n'
    printf '%s\n' "=== Security Config in API ==="
    curl -s http://localhost:8280/api/v1/security/config 2>/dev/null || echo "Could not retrieve security config"
    printf '\n'
    printf '%s\n' "=== Proxy Hosts ==="
    curl -s http://localhost:8280/api/v1/proxy-hosts 2>/dev/null | head -50 || echo "Could not retrieve proxy hosts"
    printf '\n'
    printf '%s\n' "=============================================="
    printf '%s\n' "=== END DEBUG INFO ==="
    printf '%s\n' "=============================================="
}
# Cleanup function: removes the test containers (backend first, then Charon)
# and the temporary cookie jar. All steps are best-effort.
cleanup() {
    printf '%s\n' "Cleaning up test resources..."
    local c
    for c in "${BACKEND_CONTAINER}" "${CONTAINER_NAME}"; do
        docker rm -f "$c" 2>/dev/null || true
    done
    rm -f "${TMP_COOKIE:-}" 2>/dev/null || true
    printf '%s\n' "Cleanup complete"
}
# Set up trap to dump debug info on any error
trap on_failure ERR
echo "=============================================="
echo "=== Rate Limit Integration Test Starting ==="
echo "=============================================="
echo ""
# Check dependencies
if ! command -v docker >/dev/null 2>&1; then
    echo "docker is not available; aborting"
    exit 1
fi
if ! command -v curl >/dev/null 2>&1; then
    echo "curl is not available; aborting"
    exit 1
fi
# ============================================================================
# Step 1: Build image if needed
# ============================================================================
if ! docker image inspect charon:local >/dev/null 2>&1; then
    echo "Building charon:local image..."
    docker build -t charon:local .
else
    echo "Using existing charon:local image"
fi
# ============================================================================
# Step 2: Start Charon container
# ============================================================================
echo "Stopping any existing test containers..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
docker rm -f ${BACKEND_CONTAINER} 2>/dev/null || true
# Ensure network exists
if ! docker network inspect containers_default >/dev/null 2>&1; then
    echo "Creating containers_default network..."
    docker network create containers_default
fi
echo "Starting Charon container..."
# Host ports 8180/8143/8280/2119 map to HTTP/HTTPS/API/Caddy-admin inside
# the container so the test does not collide with a locally running Charon.
docker run -d --name ${CONTAINER_NAME} \
    --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
    --network containers_default \
    -p 8180:80 -p 8143:443 -p 8280:8080 -p 2119:2019 \
    -e CHARON_ENV=development \
    -e CHARON_DEBUG=1 \
    -e CHARON_HTTP_PORT=8080 \
    -e CHARON_DB_PATH=/app/data/charon.db \
    -e CHARON_FRONTEND_DIR=/app/frontend/dist \
    -e CHARON_CADDY_ADMIN_API=http://localhost:2019 \
    -e CHARON_CADDY_CONFIG_DIR=/app/data/caddy \
    -e CHARON_CADDY_BINARY=caddy \
    -v charon_ratelimit_data:/app/data \
    -v caddy_ratelimit_data:/data \
    -v caddy_ratelimit_config:/config \
    charon:local
echo "Waiting for Charon API to be ready..."
# Poll the health endpoint for up to 30 seconds before giving up
for i in {1..30}; do
    if curl -s -f http://localhost:8280/api/v1/health >/dev/null 2>&1; then
        echo "✓ Charon API is ready"
        break
    fi
    if [ $i -eq 30 ]; then
        echo "✗ Charon API failed to start"
        exit 1
    fi
    echo -n '.'
    sleep 1
done
# ============================================================================
# Step 3: Create backend container
# ============================================================================
echo ""
echo "Creating backend container for proxy host..."
docker run -d --name ${BACKEND_CONTAINER} --network containers_default kennethreitz/httpbin
echo "Waiting for httpbin backend to be ready..."
# Probe the backend from inside the Charon container so the check uses the
# same Docker network path the proxy will use.
for i in {1..20}; do
    if docker exec ${CONTAINER_NAME} sh -c "curl -sf http://${BACKEND_CONTAINER}/get" >/dev/null 2>&1; then
        echo "✓ httpbin backend is ready"
        break
    fi
    if [ $i -eq 20 ]; then
        echo "✗ httpbin backend failed to start"
        exit 1
    fi
    echo -n '.'
    sleep 1
done
# ============================================================================
# Step 4: Register user and authenticate
# ============================================================================
echo ""
echo "Registering admin user and logging in..."
TMP_COOKIE=$(mktemp)
# Registration may 409 on reruns; that is fine, login below is what matters
curl -s -X POST -H "Content-Type: application/json" \
    -d '{"email":"ratelimit@example.local","password":"password123","name":"Rate Limit Tester"}' \
    http://localhost:8280/api/v1/auth/register >/dev/null 2>&1 || true
curl -s -X POST -H "Content-Type: application/json" \
    -d '{"email":"ratelimit@example.local","password":"password123"}' \
    -c ${TMP_COOKIE} \
    http://localhost:8280/api/v1/auth/login >/dev/null
echo "✓ Authentication complete"
# ============================================================================
# Step 5: Create proxy host
# ============================================================================
echo ""
echo "Creating proxy host '${TEST_DOMAIN}' pointing to backend..."
PROXY_HOST_PAYLOAD=$(cat <<EOF
{
"name": "ratelimit-backend",
"domain_names": "${TEST_DOMAIN}",
"forward_scheme": "http",
"forward_host": "${BACKEND_CONTAINER}",
"forward_port": 80,
"enabled": true
}
EOF
)
CREATE_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
    -d "${PROXY_HOST_PAYLOAD}" \
    -b ${TMP_COOKIE} \
    http://localhost:8280/api/v1/proxy-hosts)
CREATE_STATUS=$(echo "$CREATE_RESP" | tail -n1)
if [ "$CREATE_STATUS" = "201" ]; then
    echo "✓ Proxy host created successfully"
else
    # A non-201 here is tolerated: the host may persist from a prior run
    echo " Proxy host may already exist (status: $CREATE_STATUS)"
fi
# ============================================================================
# Step 6: Configure rate limiting
# ============================================================================
echo ""
echo "Configuring rate limiting: ${RATE_LIMIT_REQUESTS} requests per ${RATE_LIMIT_WINDOW_SEC} seconds..."
SEC_CFG_PAYLOAD=$(cat <<EOF
{
"name": "default",
"enabled": true,
"rate_limit_enable": true,
"rate_limit_requests": ${RATE_LIMIT_REQUESTS},
"rate_limit_window_sec": ${RATE_LIMIT_WINDOW_SEC},
"rate_limit_burst": ${RATE_LIMIT_BURST},
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
curl -s -X POST -H "Content-Type: application/json" \
    -d "${SEC_CFG_PAYLOAD}" \
    -b ${TMP_COOKIE} \
    http://localhost:8280/api/v1/security/config >/dev/null
echo "✓ Rate limiting configured"
echo "Waiting for Caddy to apply configuration..."
sleep 5
# Verify rate limit handler is configured
if ! verify_rate_limit_config; then
    echo "WARNING: Rate limit handler verification failed (Caddy may still be loading)"
    echo "Proceeding with test anyway..."
fi
# ============================================================================
# Step 7: Test rate limiting enforcement
# ============================================================================
echo ""
echo "=============================================="
echo "=== Testing Rate Limit Enforcement ==="
echo "=============================================="
echo ""
echo "Sending ${RATE_LIMIT_REQUESTS} rapid requests (should all return 200)..."
SUCCESS_COUNT=0
for i in $(seq 1 ${RATE_LIMIT_REQUESTS}); do
    # The Host header routes the request through the test proxy host
    RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" -H "Host: ${TEST_DOMAIN}" http://localhost:8180/get)
    if [ "$RESPONSE" = "200" ]; then
        SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
        echo " Request $i: HTTP $RESPONSE"
    else
        echo " Request $i: HTTP $RESPONSE (expected 200)"
    fi
    # Small delay to avoid overwhelming, but still within the window
    sleep 0.1
done
if [ $SUCCESS_COUNT -ne ${RATE_LIMIT_REQUESTS} ]; then
    echo ""
    echo "✗ Not all allowed requests succeeded ($SUCCESS_COUNT/${RATE_LIMIT_REQUESTS})"
    echo "Rate limit enforcement test FAILED"
    cleanup
    exit 1
fi
echo ""
echo "Sending request ${RATE_LIMIT_REQUESTS}+1 (should return 429 Too Many Requests)..."
# Capture headers too for Retry-After check
BLOCKED_RESPONSE=$(curl -s -D - -o /dev/null -H "Host: ${TEST_DOMAIN}" http://localhost:8180/get)
# First header line is the status line; pull its 3-digit status code
BLOCKED_STATUS=$(echo "$BLOCKED_RESPONSE" | head -1 | grep -o '[0-9]\{3\}' | head -1)
if [ "$BLOCKED_STATUS" = "429" ]; then
    echo " ✓ Request blocked with HTTP 429 as expected"
    # Check for Retry-After header
    if echo "$BLOCKED_RESPONSE" | grep -qi "Retry-After"; then
        RETRY_AFTER=$(echo "$BLOCKED_RESPONSE" | grep -i "Retry-After" | head -1)
        echo " ✓ Retry-After header present: $RETRY_AFTER"
    else
        echo " ⚠ Retry-After header not found (may be plugin-dependent)"
    fi
else
    # Dump diagnostics before failing; containers are intentionally left
    # running here so the failure can be inspected manually.
    echo " ✗ Expected HTTP 429, got HTTP $BLOCKED_STATUS"
    echo ""
    echo "=== DEBUG: SecurityConfig from API ==="
    curl -s -b ${TMP_COOKIE} http://localhost:8280/api/v1/security/config | jq .
    echo ""
    echo "=== DEBUG: SecurityStatus from API ==="
    curl -s -b ${TMP_COOKIE} http://localhost:8280/api/v1/security/status | jq .
    echo ""
    echo "=== DEBUG: Caddy config (first proxy route handlers) ==="
    curl -s http://localhost:2119/config/ | jq '.apps.http.servers.charon_server.routes[0].handle // []'
    echo ""
    echo "=== DEBUG: Container logs (last 100 lines) ==="
    docker logs ${CONTAINER_NAME} 2>&1 | tail -100
    echo ""
    echo "Rate limit enforcement test FAILED"
    echo "Container left running for manual inspection"
    echo "Run: docker logs ${CONTAINER_NAME}"
    echo "Run: docker rm -f ${CONTAINER_NAME} ${BACKEND_CONTAINER}"
    exit 1
fi
# ============================================================================
# Step 8: Test window reset
# ============================================================================
echo ""
echo "=============================================="
echo "=== Testing Window Reset ==="
echo "=============================================="
echo ""
echo "Waiting for rate limit window to reset (${RATE_LIMIT_WINDOW_SEC} seconds + buffer)..."
sleep $((RATE_LIMIT_WINDOW_SEC + 2))
echo "Sending request after window reset (should return 200)..."
RESET_RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" -H "Host: ${TEST_DOMAIN}" http://localhost:8180/get)
if [ "$RESET_RESPONSE" = "200" ]; then
    echo " ✓ Request allowed after window reset (HTTP 200)"
else
    echo " ✗ Expected HTTP 200 after reset, got HTTP $RESET_RESPONSE"
    echo ""
    echo "Rate limit window reset test FAILED"
    cleanup
    exit 1
fi
# ============================================================================
# Step 9: Cleanup and report
# ============================================================================
echo ""
echo "=============================================="
echo "=== Rate Limit Integration Test Results ==="
echo "=============================================="
echo ""
echo "✓ Rate limit enforcement succeeded"
echo " - ${RATE_LIMIT_REQUESTS} requests allowed within window"
echo " - Request ${RATE_LIMIT_REQUESTS}+1 blocked with HTTP 429"
echo " - Requests allowed again after window reset"
echo ""
# Remove test proxy host from database
# Locate the uuid of the record whose domain_names matches TEST_DOMAIN
# (grep/sed JSON scraping; assumes domain_names is stored as a plain string)
echo "Removing test proxy host from database..."
INTEGRATION_UUID=$(curl -s -b ${TMP_COOKIE} http://localhost:8280/api/v1/proxy-hosts | \
    grep -o '"uuid":"[^"]*"[^}]*"domain_names":"'${TEST_DOMAIN}'"' | head -n1 | \
    grep -o '"uuid":"[^"]*"' | sed 's/"uuid":"\([^"]*\)"/\1/')
if [ -n "$INTEGRATION_UUID" ]; then
    curl -s -X DELETE -b ${TMP_COOKIE} \
        "http://localhost:8280/api/v1/proxy-hosts/${INTEGRATION_UUID}?delete_uptime=true" >/dev/null
    echo "✓ Deleted test proxy host ${INTEGRATION_UUID}"
fi
cleanup
echo ""
echo "=============================================="
echo "=== ALL RATE LIMIT TESTS PASSED ==="
echo "=============================================="
echo ""

View File

@@ -1,104 +0,0 @@
#!/bin/bash
# Release script for Charon
# Creates a new semantic version release with tag and GitHub release
set -e

# ANSI color codes for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Print a red error message to stderr and abort the script.
error() {
    printf '%b\n' "${RED}Error: $1${NC}" >&2
    exit 1
}

# Print a green success message to stdout.
success() {
    printf '%b\n' "${GREEN}$1${NC}"
}

# Print a yellow warning message to stdout.
warning() {
    printf '%b\n' "${YELLOW}$1${NC}"
}
# Check if we're in a git repository
if ! git rev-parse --git-dir > /dev/null 2>&1; then
    error "Not in a git repository"
fi
# Check for uncommitted changes
if [[ -n $(git status -s) ]]; then
    error "You have uncommitted changes. Please commit or stash them first."
fi
# Check if on correct branch
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [[ "$CURRENT_BRANCH" != "main" && "$CURRENT_BRANCH" != "development" ]]; then
    warning "You are on branch '$CURRENT_BRANCH'. Releases are typically from 'main' or 'development'."
    # Interactive confirmation: single keystroke, default is abort
    read -p "Continue anyway? (y/N) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        exit 0
    fi
fi
# Get current version from .version file
CURRENT_VERSION=$(cat .version 2>/dev/null || echo "0.0.0")
echo "Current version: $CURRENT_VERSION"
# Prompt for new version
echo ""
echo "Enter new version (e.g., 1.0.0, 1.0.0-beta.1, 1.0.0-rc.1):"
read -r NEW_VERSION
# Validate semantic version format
if ! [[ "$NEW_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then
    error "Invalid semantic version format. Expected: MAJOR.MINOR.PATCH[-PRERELEASE]"
fi
# Check if tag already exists
if git rev-parse "v$NEW_VERSION" >/dev/null 2>&1; then
    error "Tag v$NEW_VERSION already exists"
fi
# Update .version file
echo "$NEW_VERSION" > .version
success "Updated .version to $NEW_VERSION"
# Commit version bump
git add .version
git commit -m "chore: bump version to $NEW_VERSION"
success "Committed version bump"
# Create annotated tag
git tag -a "v$NEW_VERSION" -m "Release v$NEW_VERSION"
success "Created tag v$NEW_VERSION"
# Show what will be pushed
echo ""
echo "Ready to push:"
echo " - Commit: $(git rev-parse HEAD)"
echo " - Tag: v$NEW_VERSION"
echo " - Branch: $CURRENT_BRANCH"
echo ""
# Confirm push
read -p "Push to remote? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    # Pushing the tag is what triggers the release workflow in CI
    git push origin "$CURRENT_BRANCH"
    git push origin "v$NEW_VERSION"
    success "Pushed to remote!"
    echo ""
    success "Release workflow triggered!"
    echo " - GitHub will create a release with changelog"
    echo " - Docker images will be built and published"
    echo " - View progress at: https://github.com/Wikid82/charon/actions"
else
    warning "Not pushed. You can push later with:"
    echo " git push origin $CURRENT_BRANCH"
    echo " git push origin v$NEW_VERSION"
fi

View File

@@ -1,70 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Repo health check script
# Exits 0 when everything is OK, non-zero otherwise.
MAX_MB=${MAX_MB-100} # threshold in MB for detecting large files
LFS_ALLOW_MB=${LFS_ALLOW_MB-50} # threshold for LFS requirement
echo "Running repo health checks..."
echo "Repository path: $(pwd)"
# Git object/pack stats
echo "-- Git pack stats --"
git count-objects -vH || true
# Disk usage for repository (human & bytes)
echo "-- Disk usage (top-level) --"
du -sh . || true
du -sb . | awk '{print "Total bytes:", $1}' || true
echo "-- Largest files (>${MAX_MB}MB) --"
# FIX: the original also passed -print, which interleaved bare file paths
# with du's "size<TAB>path" lines and polluted the sorted report; du alone
# already lists each matching file.
find . -type f -size +"${MAX_MB}"M -not -path "./.git/*" -exec du -h {} + | sort -hr | head -n 50 > /tmp/repo_big_files.txt || true
if [ -s /tmp/repo_big_files.txt ]; then
    echo "Large files found:"
    cat /tmp/repo_big_files.txt
else
    echo "No large files found (> ${MAX_MB}MB)"
fi
echo "-- CodeQL DB directories present? --"
# CodeQL databases are build artifacts and must never be committed
if [ -d "codeql-db" ] || ls codeql-db-* >/dev/null 2>&1; then
    echo "Found codeql-db directories. These should not be committed." >&2
    exit 2
else
    echo "No codeql-db directories found in repo root. OK"
fi
echo "-- Detect files > ${LFS_ALLOW_MB}MB not using Git LFS --"
FAILED=0
# Use NUL-separated find results to safely handle filenames with spaces/newlines
found_big_files=0
while IFS= read -r -d '' f; do
    found_big_files=1
    # check if file path is tracked by git
    # FIX: `git ls-files --stage -- PATH` exits 0 even for untracked paths
    # (it just prints nothing), so the "untracked" branch below was
    # unreachable; --error-unmatch actually distinguishes the two cases.
    if git ls-files --error-unmatch -- "${f}" >/dev/null 2>&1; then
        # Read the effective `filter` attribute; output is "PATH: filter: VALUE".
        # FIX: awk '{print $3}' broke for paths containing spaces — strip the
        # "PATH: filter: " prefix instead so only the value remains.
        filter_attr=$(git check-attr filter -- "${f}" | sed 's/^.*: filter: //') || true
        if [ "$filter_attr" != "lfs" ]; then
            echo "Large file not tracked by Git LFS: ${f}" >&2
            FAILED=1
        fi
    else
        # file not in git index yet, still flagged to maintainers
        echo "Large untracked file (in working tree): ${f}" >&2
        FAILED=1
    fi
done < <(find . -type f -size +"${LFS_ALLOW_MB}"M -not -path "./.git/*" -print0)
if [ "$found_big_files" -eq 0 ]; then
    echo "No files larger than ${LFS_ALLOW_MB}MB found"
fi
if [ $FAILED -ne 0 ]; then
    echo "Repository health check failed: Large files not tracked by LFS or codeql-db committed." >&2
    exit 3
fi
echo "Repo health check complete: OK"
exit 0

View File

@@ -1,71 +0,0 @@
#!/bin/bash
# Local security scanning script for pre-commit
# Scans Go dependencies for vulnerabilities using govulncheck (fast, no Docker needed)
# For full Trivy scans, run: make security-scan-full
set -e
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Get script directory and repo root
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(dirname "$SCRIPT_DIR")"
echo "🔒 Running local security scan..."
# Check if govulncheck is installed
if ! command -v govulncheck &> /dev/null; then
    echo -e "${YELLOW}Installing govulncheck...${NC}"
    go install golang.org/x/vuln/cmd/govulncheck@latest
fi
# Run govulncheck on backend Go code
echo "📦 Scanning Go dependencies for vulnerabilities..."
cd "$REPO_ROOT/backend"
# Run govulncheck and capture output (it exits non-zero when vulns are found)
VULN_OUTPUT=$(govulncheck ./... 2>&1) || true
# Check for actual vulnerabilities (not just "No vulnerabilities found")
if echo "$VULN_OUTPUT" | grep -q "Vulnerability"; then
    echo -e "${RED}❌ Vulnerabilities found in Go dependencies:${NC}"
    echo "$VULN_OUTPUT"
    # Count HIGH/CRITICAL vulnerabilities.
    # FIX: the original BRE "Severity: HIGH\|CRITICAL" matched the bare word
    # CRITICAL anywhere in the output; group the alternation so only
    # "Severity: HIGH" / "Severity: CRITICAL" lines are counted.
    HIGH_COUNT=$(echo "$VULN_OUTPUT" | grep -cE "Severity: (HIGH|CRITICAL)" || true)
    if [ "$HIGH_COUNT" -gt 0 ]; then
        echo -e "${RED}Found $HIGH_COUNT HIGH/CRITICAL vulnerabilities. Please fix before committing.${NC}"
        exit 1
    else
        echo -e "${YELLOW}⚠️ Found vulnerabilities, but none are HIGH/CRITICAL. Consider fixing.${NC}"
        # Don't fail for lower severity - just warn
    fi
else
    echo -e "${GREEN}✅ No known vulnerabilities in Go dependencies${NC}"
fi
cd "$REPO_ROOT"
# Check for outdated dependencies with known CVEs (quick check)
echo ""
echo "📋 Checking for outdated security-sensitive packages..."
# Check key packages - only show those with updates available (indicated by [...])
cd "$REPO_ROOT/backend"
OUTDATED=$(go list -m -u all 2>/dev/null | grep -E "(crypto|net|quic)" | grep '\[' | head -10 || true)
if [ -n "$OUTDATED" ]; then
    echo -e "${YELLOW}⚠️ Outdated packages found:${NC}"
    echo "$OUTDATED"
else
    echo -e "${GREEN}All security-sensitive packages are up to date${NC}"
fi
cd "$REPO_ROOT"
echo ""
echo -e "${GREEN}✅ Security scan complete${NC}"
echo ""
echo "💡 For a full container scan, run: make security-scan-full"
View File

@@ -1,223 +0,0 @@
#!/bin/bash
# E2E Test Environment Setup Script
# Sets up the local environment for running Playwright E2E tests
#
# Usage: ./scripts/setup-e2e-env.sh
#
# This script:
# 1. Checks prerequisites (docker, node, npx)
# 2. Installs npm dependencies
# 3. Installs Playwright browsers (chromium only for speed)
# 4. Creates .env.test if not exists
# 5. Starts the Docker test environment
# 6. Waits for health check
# 7. Outputs success message with URLs
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
COMPOSE_FILE=".docker/compose/docker-compose.test.yml"
# Endpoint polled by wait_for_health, and how long (seconds) to poll it
HEALTH_URL="http://localhost:8080/api/v1/health"
HEALTH_TIMEOUT=60
# Get script directory and project root
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Change to project root so relative paths (compose file, .env.test) resolve
cd "${PROJECT_ROOT}"
echo -e "${BLUE}🚀 Setting up E2E test environment...${NC}"
echo ""
# Report whether a command is installed: prints the resolved path and
# returns 0 when found, prints "not found" and returns 1 otherwise.
# $1 = command to look up, $2 = optional display label (defaults to $1).
check_command() {
  local binary="$1"
  local label="${2:-$1}"
  local resolved
  if resolved=$(command -v "${binary}" 2>/dev/null); then
    echo -e "  ${GREEN}${NC} ${label} found: ${resolved}"
    return 0
  fi
  echo -e "  ${RED}${NC} ${label} not found"
  return 1
}
# Poll a URL with curl until it answers or the timeout expires.
# On success prints a healthy message and returns 0; on timeout dumps the
# last 50 compose log lines and returns 1.
# $1 = URL to probe, $2 = timeout in seconds.
wait_for_health() {
  local url="$1"
  local timeout="$2"
  local started now elapsed deadline
  started=$(date +%s)
  deadline=$((started + timeout))
  echo -e "${BLUE}⏳ Waiting for service to be healthy (timeout: ${timeout}s)...${NC}"
  while :; do
    now=$(date +%s)
    elapsed=$((now - started))
    if (( now >= deadline )); then
      echo -e "${RED}❌ Health check timed out after ${timeout}s${NC}"
      echo ""
      echo "Container logs:"
      docker compose -f "${COMPOSE_FILE}" logs --tail=50
      return 1
    fi
    if curl -sf "${url}" >/dev/null 2>&1; then
      echo -e "${GREEN}✅ Service is healthy!${NC}"
      return 0
    fi
    printf " Checking... (%ds elapsed)\r" "${elapsed}"
    sleep 2
  done
}
# Step 1: Check prerequisites
echo -e "${BLUE}📋 Step 1: Checking prerequisites...${NC}"
PREREQS_OK=true
if ! check_command "docker" "Docker"; then
PREREQS_OK=false
fi
if ! check_command "node" "Node.js"; then
PREREQS_OK=false
else
NODE_VERSION=$(node --version)
echo -e " Version: ${NODE_VERSION}"
fi
if ! check_command "npx" "npx"; then
PREREQS_OK=false
fi
if ! check_command "npm" "npm"; then
PREREQS_OK=false
fi
if [[ "${PREREQS_OK}" != "true" ]]; then
echo ""
echo -e "${RED}❌ Prerequisites check failed. Please install missing dependencies.${NC}"
exit 1
fi
# Check Docker daemon is running
if ! docker info >/dev/null 2>&1; then
echo -e "${RED}❌ Docker daemon is not running. Please start Docker.${NC}"
exit 1
fi
echo -e " ${GREEN}${NC} Docker daemon is running"
echo ""
# Step 2: Install npm dependencies
echo -e "${BLUE}📦 Step 2: Installing npm dependencies...${NC}"
npm ci --silent
echo -e "${GREEN}✅ Dependencies installed${NC}"
echo ""
# Step 3: Install Playwright browsers
echo -e "${BLUE}🎭 Step 3: Installing Playwright browsers (chromium only)...${NC}"
npx playwright install chromium --with-deps
echo -e "${GREEN}✅ Playwright browsers installed${NC}"
echo ""
# Step 4: Create .env.test if not exists
echo -e "${BLUE}📝 Step 4: Setting up environment configuration...${NC}"
ENV_TEST_FILE=".env.test"
if [[ ! -f "${ENV_TEST_FILE}" ]]; then
if [[ -f ".env.test.example" ]]; then
cp ".env.test.example" "${ENV_TEST_FILE}"
echo -e " ${GREEN}${NC} Created ${ENV_TEST_FILE} from .env.test.example"
else
# Create minimal .env.test
cat > "${ENV_TEST_FILE}" <<EOF
# E2E Test Environment Configuration
# Generated by setup-e2e-env.sh
NODE_ENV=test
DATABASE_URL=sqlite:./data/charon_test.db
BASE_URL=http://localhost:8080
PLAYWRIGHT_BASE_URL=http://localhost:8080
TEST_USER_EMAIL=test-admin@charon.local
TEST_USER_PASSWORD=TestPassword123!
DOCKER_HOST=unix:///var/run/docker.sock
ENABLE_CROWDSEC=false
ENABLE_WAF=false
LOG_LEVEL=warn
EOF
echo -e " ${GREEN}${NC} Created ${ENV_TEST_FILE} with default values"
fi
else
echo -e " ${YELLOW}${NC} ${ENV_TEST_FILE} already exists, skipping"
fi
# Check for encryption key
if [[ -z "${CHARON_ENCRYPTION_KEY:-}" ]]; then
if ! grep -q "CHARON_ENCRYPTION_KEY" "${ENV_TEST_FILE}" 2>/dev/null; then
# Generate a random encryption key for testing
RANDOM_KEY=$(openssl rand -base64 32 2>/dev/null || head -c 32 /dev/urandom | base64)
echo "CHARON_ENCRYPTION_KEY=${RANDOM_KEY}" >> "${ENV_TEST_FILE}"
echo -e " ${GREEN}${NC} Generated test encryption key"
fi
fi
echo ""
# Step 5: Start Docker test environment
echo -e "${BLUE}🐳 Step 5: Starting Docker test environment...${NC}"
# Stop any existing containers first
if docker compose -f "${COMPOSE_FILE}" ps -q 2>/dev/null | grep -q .; then
echo " Stopping existing containers..."
docker compose -f "${COMPOSE_FILE}" down --volumes --remove-orphans 2>/dev/null || true
fi
# Build and start
echo " Building and starting containers..."
if [[ -f "${ENV_TEST_FILE}" ]]; then
# shellcheck source=/dev/null
set -a
source "${ENV_TEST_FILE}"
set +a
fi
docker compose -f "${COMPOSE_FILE}" up -d --build
echo -e "${GREEN}✅ Docker containers started${NC}"
echo ""
# Step 6: Wait for health check
wait_for_health "${HEALTH_URL}" "${HEALTH_TIMEOUT}"
echo ""
# Step 7: Success message
echo -e "${GREEN}════════════════════════════════════════════════════════════${NC}"
echo -e "${GREEN}✅ E2E test environment is ready!${NC}"
echo -e "${GREEN}════════════════════════════════════════════════════════════${NC}"
echo ""
echo -e " ${BLUE}📍 Application:${NC} http://localhost:8080"
echo -e " ${BLUE}📍 Health Check:${NC} http://localhost:8080/api/v1/health"
echo ""
echo -e " ${BLUE}🧪 Run tests:${NC}"
echo " npm run test:e2e # All tests"
echo " npx playwright test --project=chromium # Chromium only"
echo " npx playwright test --ui # Interactive UI mode"
echo ""
echo -e " ${BLUE}🛑 Stop environment:${NC}"
echo " docker compose -f ${COMPOSE_FILE} down"
echo ""
echo -e " ${BLUE}📋 View logs:${NC}"
echo " docker compose -f ${COMPOSE_FILE} logs -f"
echo ""

View File

@@ -1,29 +0,0 @@
#!/bin/bash
set -e
# ⚠️ DEPRECATED: This script is deprecated and will be removed in v2.0.0
# Please use: .github/skills/scripts/skill-runner.sh security-scan-trivy
# For more info: docs/AGENT_SKILLS_MIGRATION.md
echo "⚠️ WARNING: This script is deprecated and will be removed in v2.0.0" >&2
echo " Please use: .github/skills/scripts/skill-runner.sh security-scan-trivy" >&2
echo " For more info: docs/AGENT_SKILLS_MIGRATION.md" >&2
echo "" >&2
sleep 1
# Build the local image first to ensure it's up to date
echo "Building charon:local..."
docker build -t charon:local .
# Ensure the log directory exists before mounting it (otherwise Docker
# creates it root-owned on first run and later local writes fail).
mkdir -p .trivy_logs
# Run Trivy scan
echo "Running Trivy scan on charon:local..."
# FIX: quote the host-side mount paths so the script works when $HOME or
# the working directory contains spaces.
docker run --rm \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v "$HOME/.cache/trivy:/root/.cache/trivy" \
    -v "$(pwd)/.trivy_logs:/logs" \
    aquasec/trivy:latest image \
    --severity CRITICAL,HIGH \
    --output /logs/trivy-report.txt \
    charon:local
echo "Scan complete. Report saved to .trivy_logs/trivy-report.txt"
cat .trivy_logs/trivy-report.txt
View File

@@ -1,69 +0,0 @@
#!/bin/bash
# Validates E2E authentication setup for TestDataManager:
# checks tooling, base URL, container, API health, then runs the Playwright
# auth setup project and verifies the stored auth cookie's domain.
# -u added so typos in env var names fail loudly; optional vars use ${VAR:-}.
set -euo pipefail
echo "=== E2E Authentication Validation ==="
# Check 0: Verify required dependencies
if ! command -v jq &> /dev/null; then
echo "❌ jq is required but not installed."
echo "   Install with: brew install jq (macOS) or apt-get install jq (Linux)"
exit 1
fi
echo "✅ jq is installed"
# Check 1: Verify PLAYWRIGHT_BASE_URL uses localhost
# ${PLAYWRIGHT_BASE_URL:-} keeps `set -u` from aborting when the var is unset.
if [[ -n "${PLAYWRIGHT_BASE_URL:-}" && "${PLAYWRIGHT_BASE_URL:-}" != *"localhost"* ]]; then
echo "❌ PLAYWRIGHT_BASE_URL ($PLAYWRIGHT_BASE_URL) does not use localhost"
echo "   Fix: export PLAYWRIGHT_BASE_URL=http://localhost:8080"
exit 1
fi
echo "✅ PLAYWRIGHT_BASE_URL is localhost or unset (defaults to localhost)"
# Check 2: Verify Docker container is running.
# Match against container NAMES only; a bare `docker ps | grep` also matches
# the IMAGE column and can produce false positives.
if ! docker ps --format '{{.Names}}' | grep -q charon-e2e; then
echo "⚠️  charon-e2e container not running. Starting..."
docker compose -f .docker/compose/docker-compose.e2e.yml up -d
echo "Waiting for container health..."
sleep 10
fi
echo "✅ charon-e2e container is running"
# Check 3: Verify API is accessible at localhost:8080
if ! curl -sf http://localhost:8080/api/v1/health > /dev/null; then
echo "❌ API not accessible at http://localhost:8080"
exit 1
fi
echo "✅ API accessible at localhost:8080"
# Check 4: Run auth setup and verify cookie domain
echo ""
echo "Running auth setup..."
if ! npx playwright test --project=setup; then
echo "❌ Auth setup failed"
exit 1
fi
# Check 5: Verify stored cookie domain
AUTH_FILE="playwright/.auth/user.json"
if [[ -f "$AUTH_FILE" ]]; then
# jq failures (malformed file) fall back to an empty string instead of aborting.
COOKIE_DOMAIN=$(jq -r '.cookies[] | select(.name=="auth_token") | .domain // empty' "$AUTH_FILE" 2>/dev/null || echo "")
if [[ -z "$COOKIE_DOMAIN" ]]; then
echo "❌ No auth_token cookie found in $AUTH_FILE"
exit 1
elif [[ "$COOKIE_DOMAIN" == "localhost" || "$COOKIE_DOMAIN" == ".localhost" ]]; then
echo "✅ Auth cookie domain is localhost"
else
echo "❌ Auth cookie domain is '$COOKIE_DOMAIN' (expected 'localhost')"
exit 1
fi
else
echo "❌ Auth state file not found at $AUTH_FILE"
exit 1
fi
echo ""
echo "=== All validation checks passed ==="
echo "You can now run the user management tests:"
echo "  npx playwright test tests/settings/user-management.spec.ts --project=chromium"

View File

@@ -1,99 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Verification script for CrowdSec app-level configuration:
# 1) runs the backend CrowdSec unit tests, 2) sanity-checks that the
# CrowdSecApp struct marshals to JSON with all required fields.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
echo "=== CrowdSec App-Level Configuration Verification ==="
echo ""
# Step 1: Verify backend tests pass
echo "1. Running backend tests..."
cd backend
if go test ./internal/caddy/... -run "CrowdSec" -v; then
echo "✅ All CrowdSec tests pass"
else
echo "❌ CrowdSec tests failed"
exit 1
fi
echo ""
echo "2. Checking generated config structure..."
# Write the throwaway Go program into a private mktemp directory instead of a
# fixed /tmp path: a predictable name is symlink-attack prone on shared
# machines, collides between users, and was never cleaned up. The trap
# removes the directory on every exit path.
TMP_GO_DIR="$(mktemp -d)"
trap 'rm -rf "${TMP_GO_DIR}"' EXIT
cat > "${TMP_GO_DIR}/main.go" << 'EOF'
package main
import (
"encoding/json"
"fmt"
"os"
)
func main() {
// Minimal test: verify CrowdSecApp struct exists and marshals correctly
type CrowdSecApp struct {
APIUrl string `json:"api_url"`
APIKey string `json:"api_key"`
TickerInterval string `json:"ticker_interval,omitempty"`
EnableStreaming *bool `json:"enable_streaming,omitempty"`
}
enableStreaming := true
app := CrowdSecApp{
APIUrl: "http://127.0.0.1:8085",
APIKey: "test-key",
TickerInterval: "60s",
EnableStreaming: &enableStreaming,
}
data, err := json.MarshalIndent(app, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to marshal: %v\n", err)
os.Exit(1)
}
fmt.Println(string(data))
// Verify it has all required fields
var parsed map[string]interface{}
if err := json.Unmarshal(data, &parsed); err != nil {
fmt.Fprintf(os.Stderr, "Failed to unmarshal: %v\n", err)
os.Exit(1)
}
required := []string{"api_url", "api_key", "ticker_interval", "enable_streaming"}
for _, field := range required {
if _, ok := parsed[field]; !ok {
fmt.Fprintf(os.Stderr, "Missing required field: %s\n", field)
os.Exit(1)
}
}
fmt.Println("\n✅ CrowdSecApp structure is valid")
}
EOF
if go run "${TMP_GO_DIR}/main.go"; then
echo "✅ CrowdSecApp struct marshals correctly"
else
echo "❌ CrowdSecApp struct validation failed"
exit 1
fi
echo ""
echo "3. Summary:"
echo "✅ App-level CrowdSec configuration implementation is complete"
echo "✅ Handler is minimal (just {\"handler\": \"crowdsec\"})"
echo "✅ Config is populated in apps.crowdsec section"
echo ""
echo "Next steps to verify in running container:"
echo "  1. Enable CrowdSec in Security dashboard"
echo "  2. Check Caddy config: docker exec charon curl http://localhost:2019/config/ | jq '.apps.crowdsec'"
echo "  3. Check handler: docker exec charon curl http://localhost:2019/config/ | jq '.apps.http.servers[].routes[].handle[] | select(.handler == \"crowdsec\")'"
echo "  4. Test blocking: docker exec charon cscli decisions add --ip 10.255.255.250 --duration 5m"
echo "  5. Verify: curl -H 'X-Forwarded-For: 10.255.255.250' http://localhost/"
cd "$PROJECT_ROOT"

View File

@@ -1,569 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Brief: Integration test for WAF (Coraza) functionality
# Steps:
# 1. Build the local image if not present: docker build -t charon:local .
# 2. Start Charon container with Cerberus/WAF features enabled
# 3. Start httpbin as backend for proxy testing
# 4. Create test user and authenticate
# 5. Create proxy host pointing to backend
# 6. Test WAF ruleset creation (XSS, SQLi)
# 7. Test WAF blocking mode (expect HTTP 403 for attacks)
# 8. Test legitimate requests pass through (HTTP 200)
# 9. Test monitor mode (attacks pass with HTTP 200)
# 10. Verify Caddy config has WAF handler
# 11. Clean up test resources
# Ensure we operate from repo root
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$PROJECT_ROOT"
# ============================================================================
# Configuration
# ============================================================================
CONTAINER_NAME="charon-waf-test"
BACKEND_CONTAINER="waf-backend"
TEST_DOMAIN="waf.test.local"
# Use unique non-conflicting ports
# (presumably offset from the defaults 8080/80/443/2019 so the test can run
# next to a local dev instance — TODO confirm)
API_PORT=8380
HTTP_PORT=8180
HTTPS_PORT=8143
CADDY_ADMIN_PORT=2119
# ============================================================================
# Colors for output (ANSI escapes, expanded by echo -e in the log helpers)
# ============================================================================
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Leveled log helpers: print a tagged, colorized message to stdout.
# printf '%b' interprets the ANSI escapes stored in the color variables,
# matching the behavior of `echo -e`.
log_info()  { printf '%b\n' "${GREEN}[INFO]${NC} $1"; }
log_warn()  { printf '%b\n' "${YELLOW}[WARN]${NC} $1"; }
log_error() { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
log_test()  { printf '%b\n' "${BLUE}[TEST]${NC} $1"; }
# ============================================================================
# Test counters
# ============================================================================
PASSED=0
FAILED=0

# Record one passing check and print a green tick.
pass_test() {
  PASSED=$((PASSED + 1))
  printf '%b\n' "  ${GREEN}✓ PASS${NC}"
}

# Record one failing check and print a red cross with the reason ($1).
fail_test() {
  FAILED=$((FAILED + 1))
  printf '%b\n' "  ${RED}✗ FAIL${NC}: $1"
}
# Compare an actual HTTP status ($2) against the expected one ($1),
# log the outcome with the description in $3, and update the counters.
assert_http() {
  local want=$1
  local got=$2
  local label=$3
  if [[ "$got" == "$want" ]]; then
    log_info "$label: HTTP $got"
    PASSED=$((PASSED + 1))
  else
    log_error "$label: HTTP $got (expected $want)"
    FAILED=$((FAILED + 1))
  fi
}
# ============================================================================
# Helper Functions
# ============================================================================
# Dumps debug information on failure.
# Installed as the ERR trap below; best-effort — each probe falls back to a
# message rather than aborting, so a broken container can't mask the dump.
on_failure() {
# Must be the first statement: any other command would overwrite $?.
local exit_code=$?
echo ""
echo "=============================================="
echo "=== FAILURE DEBUG INFO (exit code: $exit_code) ==="
echo "=============================================="
echo ""
echo "=== Charon API Logs (last 150 lines) ==="
docker logs ${CONTAINER_NAME} 2>&1 | tail -150 || echo "Could not retrieve container logs"
echo ""
echo "=== Caddy Admin API Config ==="
curl -sL "http://localhost:${CADDY_ADMIN_PORT}/config/" 2>/dev/null | head -300 || echo "Could not retrieve Caddy config"
echo ""
echo "=== Security Config in API ==="
# TMP_COOKIE may not exist yet if we failed before login; /dev/null keeps curl happy.
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/security/config" 2>/dev/null || echo "Could not retrieve security config"
echo ""
echo "=== Security Rulesets ==="
curl -s -b "${TMP_COOKIE:-/dev/null}" "http://localhost:${API_PORT}/api/v1/security/rulesets" 2>/dev/null || echo "Could not retrieve rulesets"
echo ""
echo "=============================================="
echo "=== END DEBUG INFO ==="
echo "=============================================="
}
# Cleanup function: removes both test containers and the cookie jar.
# NOTE(review): the named docker volumes (charon_waf_test_data, caddy_waf_test_*)
# are left in place — presumably intentional so state survives reruns; confirm.
cleanup() {
log_info "Cleaning up test resources..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
docker rm -f ${BACKEND_CONTAINER} 2>/dev/null || true
# ${TMP_COOKIE:-} — may be unset if we failed before the login step.
rm -f "${TMP_COOKIE:-}" 2>/dev/null || true
log_info "Cleanup complete"
}
# Set up trap to dump debug info on any error and always cleanup
trap on_failure ERR
trap cleanup EXIT
echo "=============================================="
echo "=== WAF Integration Test Starting ==="
echo "=============================================="
echo ""
# Check dependencies
if ! command -v docker >/dev/null 2>&1; then
log_error "docker is not available; aborting"
exit 1
fi
if ! command -v curl >/dev/null 2>&1; then
log_error "curl is not available; aborting"
exit 1
fi
# ============================================================================
# Step 1: Build image if needed
# ============================================================================
if ! docker image inspect charon:local >/dev/null 2>&1; then
log_info "Building charon:local image..."
docker build -t charon:local .
else
log_info "Using existing charon:local image"
fi
# ============================================================================
# Step 2: Start containers
# ============================================================================
log_info "Stopping any existing test containers..."
docker rm -f ${CONTAINER_NAME} 2>/dev/null || true
docker rm -f ${BACKEND_CONTAINER} 2>/dev/null || true
# Ensure network exists
# (containers_default is presumably the compose-created network shared with
# the dev stack — TODO confirm)
if ! docker network inspect containers_default >/dev/null 2>&1; then
log_info "Creating containers_default network..."
docker network create containers_default
fi
log_info "Starting httpbin backend container..."
docker run -d --name ${BACKEND_CONTAINER} --network containers_default kennethreitz/httpbin
log_info "Starting Charon container with Cerberus enabled..."
docker run -d --name ${CONTAINER_NAME} \
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--network containers_default \
-p ${HTTP_PORT}:80 -p ${HTTPS_PORT}:443 -p ${API_PORT}:8080 -p ${CADDY_ADMIN_PORT}:2019 \
-e CHARON_ENV=development \
-e CHARON_DEBUG=1 \
-e CHARON_HTTP_PORT=8080 \
-e CHARON_DB_PATH=/app/data/charon.db \
-e CHARON_FRONTEND_DIR=/app/frontend/dist \
-e CHARON_CADDY_ADMIN_API=http://localhost:2019 \
-e CHARON_CADDY_CONFIG_DIR=/app/data/caddy \
-e CHARON_CADDY_BINARY=caddy \
-e CERBERUS_SECURITY_CERBERUS_ENABLED=true \
-e CHARON_SECURITY_WAF_MODE=block \
-v charon_waf_test_data:/app/data \
-v caddy_waf_test_data:/data \
-v caddy_waf_test_config:/config \
charon:local
# Poll the health endpoint for up to 30s before giving up.
log_info "Waiting for Charon API to be ready..."
for i in {1..30}; do
if curl -s -f "http://localhost:${API_PORT}/api/v1/health" >/dev/null 2>&1; then
log_info "Charon API is ready"
break
fi
if [ $i -eq 30 ]; then
log_error "Charon API failed to start"
exit 1
fi
echo -n '.'
sleep 1
done
echo ""
# Probe the backend from inside the Charon container, since the backend is
# only reachable on the shared docker network (no published ports).
log_info "Waiting for httpbin backend to be ready..."
for i in {1..20}; do
if docker exec ${CONTAINER_NAME} sh -c "curl -sf http://${BACKEND_CONTAINER}/get" >/dev/null 2>&1; then
log_info "httpbin backend is ready"
break
fi
if [ $i -eq 20 ]; then
log_error "httpbin backend failed to start"
exit 1
fi
echo -n '.'
sleep 1
done
echo ""
# ============================================================================
# Step 3: Register user and authenticate
# ============================================================================
log_info "Registering admin user and logging in..."
TMP_COOKIE=$(mktemp)
# Registration is best-effort: it fails harmlessly when the user already
# exists from a previous run (the data volume is reused across runs).
curl -s -X POST -H "Content-Type: application/json" \
-d '{"email":"waf-test@example.local","password":"password123","name":"WAF Tester"}' \
"http://localhost:${API_PORT}/api/v1/auth/register" >/dev/null 2>&1 || true
# Login must succeed. Without -f, curl exits 0 even on HTTP 4xx, so check the
# status code explicitly and abort early rather than letting every later test
# fail confusingly with unauthenticated requests.
LOGIN_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -H "Content-Type: application/json" \
-d '{"email":"waf-test@example.local","password":"password123"}' \
-c "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/auth/login")
if [ "$LOGIN_STATUS" != "200" ]; then
log_error "Login failed (HTTP $LOGIN_STATUS); cannot continue"
exit 1
fi
log_info "Authentication complete"
# ============================================================================
# Step 4: Create proxy host
# ============================================================================
log_info "Creating proxy host '${TEST_DOMAIN}' pointing to backend..."
# Unquoted heredoc: ${TEST_DOMAIN} and ${BACKEND_CONTAINER} are expanded.
PROXY_HOST_PAYLOAD=$(cat <<EOF
{
"name": "waf-test-backend",
"domain_names": "${TEST_DOMAIN}",
"forward_scheme": "http",
"forward_host": "${BACKEND_CONTAINER}",
"forward_port": 80,
"enabled": true
}
EOF
)
# -w appends the status code on its own line; `tail -n1` peels it back off.
CREATE_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${PROXY_HOST_PAYLOAD}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/proxy-hosts")
CREATE_STATUS=$(echo "$CREATE_RESP" | tail -n1)
if [ "$CREATE_STATUS" = "201" ]; then
log_info "Proxy host created successfully"
else
# Non-201 is tolerated: the host may persist in the reused data volume.
log_info "Proxy host may already exist (status: $CREATE_STATUS)"
fi
# Wait for Caddy to apply config
sleep 3
echo ""
echo "=============================================="
echo "=== Running WAF Test Cases ==="
echo "=============================================="
echo ""
# ============================================================================
# TC-1: Create XSS ruleset
# ============================================================================
log_test "TC-1: Create XSS Ruleset"
# Quoted heredoc ('EOF'): content is literal, no shell expansion.
XSS_RULESET=$(cat <<'EOF'
{
"name": "test-xss",
"content": "SecRule REQUEST_BODY|ARGS|ARGS_NAMES \"<script\" \"id:12345,phase:2,deny,status:403,msg:'XSS Attack Detected'\""
}
EOF
)
XSS_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${XSS_RULESET}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/rulesets")
XSS_STATUS=$(echo "$XSS_RESP" | tail -n1)
if [ "$XSS_STATUS" = "200" ] || [ "$XSS_STATUS" = "201" ]; then
log_info " XSS ruleset created"
pass_test
else
fail_test "Failed to create XSS ruleset (HTTP $XSS_STATUS)"
fi
# ============================================================================
# TC-2: Enable WAF in block mode
# ============================================================================
log_test "TC-2: Enable WAF (Block Mode)"
# admin_whitelist 0.0.0.0/0 keeps the test client from being locked out.
WAF_CONFIG=$(cat <<'EOF'
{
"name": "default",
"enabled": true,
"waf_mode": "block",
"waf_rules_source": "test-xss",
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
WAF_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${WAF_CONFIG}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/config")
WAF_STATUS=$(echo "$WAF_RESP" | tail -n1)
if [ "$WAF_STATUS" = "200" ]; then
log_info " WAF enabled in block mode with test-xss ruleset"
pass_test
else
fail_test "Failed to enable WAF (HTTP $WAF_STATUS)"
fi
# Wait for Caddy to reload with WAF config
log_info "Waiting for Caddy to apply WAF configuration..."
sleep 5
# ============================================================================
# TC-3: Test XSS blocking (expect HTTP 403)
# ============================================================================
log_test "TC-3: XSS Blocking (expect HTTP 403)"
# Test XSS in POST body
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
-d "<script>alert(1)</script>" \
"http://localhost:${HTTP_PORT}/post")
assert_http "403" "$RESP" "XSS script tag (POST body)"
# Test XSS in query parameter (URL-encoded <script>alert(1)</script>)
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?q=%3Cscript%3Ealert(1)%3C/script%3E")
assert_http "403" "$RESP" "XSS script tag (query param)"
# ============================================================================
# TC-4: Test legitimate request (expect HTTP 200)
# ============================================================================
log_test "TC-4: Legitimate Request (expect HTTP 200)"
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
-d "name=john&age=25" \
"http://localhost:${HTTP_PORT}/post")
assert_http "200" "$RESP" "Legitimate POST request"
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?name=john&age=25")
assert_http "200" "$RESP" "Legitimate GET request"
# ============================================================================
# TC-5: Switch to monitor mode, verify XSS passes (expect HTTP 200)
# ============================================================================
log_test "TC-5: Switch to Monitor Mode"
MONITOR_CONFIG=$(cat <<'EOF'
{
"name": "default",
"enabled": true,
"waf_mode": "monitor",
"waf_rules_source": "test-xss",
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
curl -s -X POST -H "Content-Type: application/json" \
-d "${MONITOR_CONFIG}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/config" >/dev/null
log_info " Switched to monitor mode, waiting for Caddy reload..."
sleep 5
# Verify XSS passes in monitor mode
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
-d "<script>alert(1)</script>" \
"http://localhost:${HTTP_PORT}/post")
assert_http "200" "$RESP" "XSS in monitor mode (allowed through)"
# ============================================================================
# TC-6: Create SQLi ruleset
# ============================================================================
log_test "TC-6: Create SQLi Ruleset"
SQLI_RULESET=$(cat <<'EOF'
{
"name": "test-sqli",
"content": "SecRule ARGS|ARGS_NAMES|REQUEST_BODY \"(?i:OR\\s+1\\s*=\\s*1|UNION\\s+SELECT)\" \"id:12346,phase:2,deny,status:403,msg:'SQL Injection Detected'\""
}
EOF
)
SQLI_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${SQLI_RULESET}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/rulesets")
SQLI_STATUS=$(echo "$SQLI_RESP" | tail -n1)
if [ "$SQLI_STATUS" = "200" ] || [ "$SQLI_STATUS" = "201" ]; then
log_info " SQLi ruleset created"
pass_test
else
fail_test "Failed to create SQLi ruleset (HTTP $SQLI_STATUS)"
fi
# ============================================================================
# TC-7: Enable SQLi ruleset in block mode, test SQLi blocking (expect HTTP 403)
# ============================================================================
log_test "TC-7: SQLi Blocking (expect HTTP 403)"
SQLI_CONFIG=$(cat <<'EOF'
{
"name": "default",
"enabled": true,
"waf_mode": "block",
"waf_rules_source": "test-sqli",
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
curl -s -X POST -H "Content-Type: application/json" \
-d "${SQLI_CONFIG}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/config" >/dev/null
log_info " Switched to SQLi ruleset in block mode, waiting for Caddy reload..."
sleep 5
# Test SQLi OR 1=1
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?id=1%20OR%201=1")
assert_http "403" "$RESP" "SQLi OR 1=1 (query param)"
# Test SQLi UNION SELECT
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?id=1%20UNION%20SELECT%20*%20FROM%20users")
assert_http "403" "$RESP" "SQLi UNION SELECT (query param)"
# ============================================================================
# TC-8: Create combined ruleset, test both attacks blocked
# ============================================================================
log_test "TC-8: Combined Ruleset (XSS + SQLi)"
COMBINED_RULESET=$(cat <<'EOF'
{
"name": "combined-protection",
"content": "SecRule ARGS|REQUEST_BODY \"(?i:OR\\s+1\\s*=\\s*1|UNION\\s+SELECT)\" \"id:20001,phase:2,deny,status:403,msg:'SQLi'\"\nSecRule ARGS|REQUEST_BODY \"<script\" \"id:20002,phase:2,deny,status:403,msg:'XSS'\""
}
EOF
)
COMBINED_RESP=$(curl -s -w "\n%{http_code}" -X POST -H "Content-Type: application/json" \
-d "${COMBINED_RULESET}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/rulesets")
COMBINED_STATUS=$(echo "$COMBINED_RESP" | tail -n1)
if [ "$COMBINED_STATUS" = "200" ] || [ "$COMBINED_STATUS" = "201" ]; then
log_info " Combined ruleset created"
# Use the pass_test helper (not a bare PASSED increment) so this check
# prints the same "✓ PASS" marker as every other test case.
pass_test
else
fail_test "Failed to create combined ruleset (HTTP $COMBINED_STATUS)"
fi
# Enable combined ruleset
COMBINED_CONFIG=$(cat <<'EOF'
{
"name": "default",
"enabled": true,
"waf_mode": "block",
"waf_rules_source": "combined-protection",
"admin_whitelist": "0.0.0.0/0"
}
EOF
)
curl -s -X POST -H "Content-Type: application/json" \
-d "${COMBINED_CONFIG}" \
-b "${TMP_COOKIE}" \
"http://localhost:${API_PORT}/api/v1/security/config" >/dev/null
log_info " Switched to combined ruleset, waiting for Caddy reload..."
sleep 5
# Test both attacks blocked
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?id=1%20OR%201=1")
assert_http "403" "$RESP" "Combined - SQLi blocked"
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
-d "<script>alert(1)</script>" \
"http://localhost:${HTTP_PORT}/post")
assert_http "403" "$RESP" "Combined - XSS blocked"
# Test legitimate request still passes
RESP=$(curl -s -o /dev/null -w "%{http_code}" \
-H "Host: ${TEST_DOMAIN}" \
"http://localhost:${HTTP_PORT}/get?name=john&age=25")
assert_http "200" "$RESP" "Combined - Legitimate request passes"
# ============================================================================
# TC-9: Verify Caddy config has WAF handler
# ============================================================================
log_test "TC-9: Verify Caddy Config has WAF Handler"
# Note: Caddy admin API requires trailing slash, and -L follows redirects
CADDY_CONFIG=$(curl -sL "http://localhost:${CADDY_ADMIN_PORT}/config/" 2>/dev/null || echo "")
# Whitespace-tolerant match: the compact form '"handler":"waf"' breaks if the
# config is ever returned pretty-printed (e.g. "handler": "waf").
if echo "$CADDY_CONFIG" | grep -qE '"handler"[[:space:]]*:[[:space:]]*"waf"'; then
log_info " ✓ WAF handler found in Caddy config"
pass_test
else
fail_test "WAF handler NOT found in Caddy config"
fi
if echo "$CADDY_CONFIG" | grep -q 'SecRuleEngine'; then
log_info " ✓ SecRuleEngine directive found"
pass_test
else
# The directive may live in an Include'd file rather than inline config,
# so its absence is informational only — still counted as a pass.
log_warn " SecRuleEngine directive not found (may be in Include file)"
pass_test
fi
# ============================================================================
# Results Summary
# ============================================================================
echo ""
echo "=============================================="
echo "=== WAF Integration Test Results ==="
echo "=============================================="
echo ""
echo -e " ${GREEN}Passed:${NC} $PASSED"
echo -e " ${RED}Failed:${NC} $FAILED"
echo ""
# The exit status drives CI: any failed assertion makes the job fail.
if [ $FAILED -eq 0 ]; then
echo "=============================================="
echo "=== All WAF tests passed ==="
echo "=============================================="
echo ""
exit 0
else
echo "=============================================="
echo "=== WAF TESTS FAILED ==="
echo "=============================================="
echo ""
exit 1
fi