diff --git a/.dockerignore b/.dockerignore
index 7210f97b..ec257925 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -46,6 +46,7 @@ backend/cmd/api/data/*.db
*.sqlite
*.sqlite3
cpm.db
+charon.db
# IDE
.vscode/
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index c3d5c67d..e3ca3a43 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -1,4 +1,4 @@
-# CaddyProxyManager+ Copilot Instructions
+# Charon Copilot Instructions
 ## 🚨 CRITICAL ARCHITECTURE RULES 🚨
- **Single Frontend Source**: All frontend code MUST reside in `frontend/`. NEVER create `backend/frontend/` or any other nested frontend directory.
@@ -7,7 +7,7 @@
## Big Picture
- `backend/cmd/api` loads config, opens SQLite, then hands off to `internal/server` where routes from `internal/api/routes` are registered.
-- `internal/config` respects `CPM_ENV`, `CPM_HTTP_PORT`, `CPM_DB_PATH`, `CPM_FRONTEND_DIR` and creates the `data/` directory; lean on these instead of hard-coded paths.
+- `internal/config` respects `CHARON_ENV`, `CHARON_HTTP_PORT`, `CHARON_DB_PATH`, `CHARON_FRONTEND_DIR` (CHARON_ preferred; CPM_ still supported) and creates the `data/` directory; lean on these instead of hard-coded paths.
- All HTTP endpoints live under `/api/v1/*`; keep new handlers inside `internal/api/handlers` and register them via `routes.Register` so `db.AutoMigrate` runs for their models.
- `internal/server` also mounts the built React app (via `attachFrontend`) whenever `frontend/dist` exists, falling back to JSON `{"error": ...}` for any `/api/*` misses.
- Persistent types live in `internal/models`; GORM auto-migrates them each boot, so evolve schemas there before touching handlers or the frontend.
@@ -41,9 +41,9 @@
- **Feature Documentation**: When adding new features, update `docs/features.md` to include the new capability. This is the canonical list of all features shown to users.
- **README**: The main `README.md` is a marketing/welcome page. Keep it brief with top features, quick start, and links to docs. All detailed documentation belongs in `docs/`.
- **Link Format**: Use GitHub Pages URLs for documentation links, not relative paths:
- - Docs: `https://wikid82.github.io/cpmp/` (index) or `https://wikid82.github.io/cpmp/features` (specific page, no `.md`)
- - Repo files (CONTRIBUTING, LICENSE): `https://github.com/Wikid82/cpmp/blob/main/CONTRIBUTING.md`
- - Issues/Discussions: `https://github.com/Wikid82/cpmp/issues` or `https://github.com/Wikid82/cpmp/discussions`
+ - Docs: `https://wikid82.github.io/charon/` (index) or `https://wikid82.github.io/charon/features` (specific page, no `.md`)
+ - Repo files (CONTRIBUTING, LICENSE): `https://github.com/Wikid82/charon/blob/main/CONTRIBUTING.md`
+ - Issues/Discussions: `https://github.com/Wikid82/charon/issues` or `https://github.com/Wikid82/charon/discussions`
## CI/CD & Commit Conventions
- **Docker Builds**: The `docker-publish` workflow skips builds for commits starting with `chore:`.
diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml
new file mode 100644
index 00000000..85ff1f0f
--- /dev/null
+++ b/.github/release-drafter.yml
@@ -0,0 +1,26 @@
+name-template: 'v$NEXT_PATCH_VERSION'
+tag-template: 'v$NEXT_PATCH_VERSION'
+categories:
+  - title: '🚀 Features'
+ labels:
+ - 'feature'
+ - 'feat'
+  - title: '🐛 Fixes'
+ labels:
+ - 'bug'
+ - 'fix'
+  - title: '🧰 Maintenance'
+ labels:
+ - 'chore'
+  - title: '🧪 Tests'
+ labels:
+ - 'test'
+change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
+template: |
+ ## What's Changed
+
+ $CHANGES
+
+ ----
+
+  Full Changelog: https://github.com/Wikid82/charon/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION
diff --git a/.github/workflows/auto-changelog.yml b/.github/workflows/auto-changelog.yml
new file mode 100644
index 00000000..0f7cf602
--- /dev/null
+++ b/.github/workflows/auto-changelog.yml
@@ -0,0 +1,17 @@
+name: Auto Changelog (Release Drafter)
+
+on:
+ push:
+ branches: [ main ]
+ release:
+ types: [published]
+
+jobs:
+ update-draft:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Draft Release
+ uses: release-drafter/release-drafter@v5
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/auto-versioning.yml b/.github/workflows/auto-versioning.yml
new file mode 100644
index 00000000..781e4640
--- /dev/null
+++ b/.github/workflows/auto-versioning.yml
@@ -0,0 +1,53 @@
+name: Auto Versioning and Release
+
+on:
+ push:
+ branches: [ main ]
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ version:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Generate semantic version (fallback script)
+ id: semver
+ run: |
+ # Ensure git tags are fetched
+ git fetch --tags --quiet || true
+ # Get latest tag or default to v0.0.0
+ TAG=$(git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0")
+ echo "Detected latest tag: $TAG"
+ # Set outputs for downstream steps
+ echo "version=$TAG" >> $GITHUB_OUTPUT
+ echo "release_notes=Fallback: using latest tag only" >> $GITHUB_OUTPUT
+ echo "changed=false" >> $GITHUB_OUTPUT
+
+ - name: Show version
+ run: |
+ echo "Next version: ${{ steps.semver.outputs.version }}"
+
+      - name: Create annotated tag and push
+        # Compare against the string 'true': step outputs are strings, and in
+        # GitHub Actions expressions any non-empty string (including "false")
+        # is truthy, so the bare output would always run this step.
+        if: ${{ steps.semver.outputs.changed == 'true' }}
+        run: |
+          # version already carries the "v" prefix (git describe / v0.0.0 fallback),
+          # so do not prepend another "v" here.
+          git tag -a ${{ steps.semver.outputs.version }} -m "Release ${{ steps.semver.outputs.version }}"
+          git push origin --tags
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Create GitHub Release (tag-only, no workspace changes)
+        # String comparison required: the bare string output "false" is truthy
+        # in GitHub Actions expressions.
+        if: ${{ steps.semver.outputs.changed == 'true' }}
+ uses: softprops/action-gh-release@v1
+ with:
+ tag_name: ${{ steps.semver.outputs.version }}
+ name: Release ${{ steps.semver.outputs.version }}
+ body: ${{ steps.semver.outputs.release_notes }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 0fe949b2..e9dcd739 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -17,7 +17,7 @@ on:
env:
REGISTRY: ghcr.io
- IMAGE_NAME: ${{ github.repository_owner }}/cpmp
+ IMAGE_NAME: ${{ github.repository_owner }}/charon
jobs:
build-and-push:
@@ -83,13 +83,24 @@ jobs:
DIGEST=$(docker inspect --format='{{index .RepoDigests 0}}' caddy:2-alpine)
echo "image=$DIGEST" >> $GITHUB_OUTPUT
+ - name: Choose Registry Token
+ if: github.event_name != 'pull_request' && steps.skip.outputs.skip_build != 'true'
+ run: |
+ if [ -n "${{ secrets.CHARON_TOKEN }}" ]; then
+ echo "Using CHARON_TOKEN" >&2
+ echo "REGISTRY_PASSWORD=${{ secrets.CHARON_TOKEN }}" >> $GITHUB_ENV
+ else
+ echo "Using CPMP_TOKEN fallback" >&2
+ echo "REGISTRY_PASSWORD=${{ secrets.CPMP_TOKEN }}" >> $GITHUB_ENV
+ fi
+
- name: Log in to Container Registry
if: github.event_name != 'pull_request' && steps.skip.outputs.skip_build != 'true'
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
- password: ${{ secrets.CPMP_TOKEN }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Extract metadata (tags, labels)
if: steps.skip.outputs.skip_build != 'true'
@@ -201,31 +212,41 @@ jobs:
echo "tag=sha-$(echo ${{ github.sha }} | cut -c1-7)" >> $GITHUB_OUTPUT
fi
+ - name: Choose Registry Token
+ run: |
+ if [ -n "${{ secrets.CHARON_TOKEN }}" ]; then
+ echo "Using CHARON_TOKEN" >&2
+ echo "REGISTRY_PASSWORD=${{ secrets.CHARON_TOKEN }}" >> $GITHUB_ENV
+ else
+ echo "Using CPMP_TOKEN fallback" >&2
+ echo "REGISTRY_PASSWORD=${{ secrets.CPMP_TOKEN }}" >> $GITHUB_ENV
+ fi
+
- name: Log in to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
- password: ${{ secrets.CPMP_TOKEN }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Pull Docker image
run: docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
- name: Create Docker Network
- run: docker network create cpmp-test-net
+ run: docker network create charon-test-net
- name: Run Upstream Service (whoami)
run: |
docker run -d \
--name whoami \
- --network cpmp-test-net \
+ --network charon-test-net \
traefik/whoami
- - name: Run CPMP Container
+ - name: Run Charon Container
run: |
docker run -d \
--name test-container \
- --network cpmp-test-net \
+ --network charon-test-net \
-p 8080:8080 \
-p 80:80 \
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
@@ -242,7 +263,7 @@ jobs:
run: |
docker stop test-container whoami || true
docker rm test-container whoami || true
- docker network rm cpmp-test-net || true
+ docker network rm charon-test-net || true
- name: Create test summary
if: always()
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index e35d1511..be3672be 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -54,7 +54,7 @@ jobs:
+
+
package caddy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+)
+
+// Client wraps the Caddy admin API.
+type Client struct {
+ baseURL string
+ httpClient *http.Client
+}
+
+// NewClient creates a Caddy API client.
+func NewClient(adminAPIURL string) *Client {
+ return &Client{
+ baseURL: adminAPIURL,
+ httpClient: &http.Client{
+ Timeout: 30 * time.Second,
+ },
+ }
+}
+
+// Load atomically replaces Caddy's entire configuration.
+// This is the primary method for applying configuration changes.
+func (c *Client) Load(ctx context.Context, config *Config) error {
+ body, err := json.Marshal(config)
+ if err != nil {
+ return fmt.Errorf("marshal config: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.baseURL+"/load", bytes.NewReader(body))
+ if err != nil {
+ return fmt.Errorf("create request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ bodyBytes, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("caddy returned status %d: %s", resp.StatusCode, string(bodyBytes))
+ }
+
+ return nil
+}
+
+// GetConfig retrieves the current running configuration from Caddy.
+func (c *Client) GetConfig(ctx context.Context) (*Config, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil)
+ if err != nil {
+ return nil, fmt.Errorf("create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ bodyBytes, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("caddy returned status %d: %s", resp.StatusCode, string(bodyBytes))
+ }
+
+ var config Config
+ if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
+ return nil, fmt.Errorf("decode response: %w", err)
+ }
+
+ return &config, nil
+}
+
+// Ping checks if Caddy admin API is reachable.
+func (c *Client) Ping(ctx context.Context) error {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil)
+ if err != nil {
+ return fmt.Errorf("create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("caddy unreachable: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("caddy returned status %d", resp.StatusCode)
+ }
+
+ return nil
+}
+
+
+
package caddy
+
+import (
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/Wikid82/charon/backend/internal/models"
+)
+
+// GenerateConfig creates a Caddy JSON configuration from proxy hosts.
+// This is the core transformation layer from our database model to Caddy config.
+func GenerateConfig(hosts []models.ProxyHost, storageDir string, acmeEmail string, frontendDir string, sslProvider string, acmeStaging bool) (*Config, error) {
+ // Define log file paths
+ // We assume storageDir is like ".../data/caddy/data", so we go up to ".../data/logs"
+ // storageDir is .../data/caddy/data
+ // Dir -> .../data/caddy
+ // Dir -> .../data
+ logDir := filepath.Join(filepath.Dir(filepath.Dir(storageDir)), "logs")
+ logFile := filepath.Join(logDir, "access.log")
+
+ config := &Config{
+ Logging: &LoggingConfig{
+ Logs: map[string]*LogConfig{
+ "access": {
+ Level: "INFO",
+ Writer: &WriterConfig{
+ Output: "file",
+ Filename: logFile,
+ Roll: true,
+ RollSize: 10, // 10 MB
+ RollKeep: 5, // Keep 5 files
+ RollKeepDays: 7, // Keep for 7 days
+ },
+ Encoder: &EncoderConfig{
+ Format: "json",
+ },
+ Include: []string{"http.log.access.access_log"},
+ },
+ },
+ },
+ Apps: Apps{
+ HTTP: &HTTPApp{
+ Servers: map[string]*Server{},
+ },
+ },
+ Storage: Storage{
+ System: "file_system",
+ Root: storageDir,
+ },
+ }
+
+ if acmeEmail != "" {
+ var issuers []interface{}
+
+ // Configure issuers based on provider preference
+ switch sslProvider {
+ case "letsencrypt":
+ acmeIssuer := map[string]interface{}{
+ "module": "acme",
+ "email": acmeEmail,
+ }
+ if acmeStaging {
+ acmeIssuer["ca"] = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ }
+ issuers = append(issuers, acmeIssuer)
+ case "zerossl":
+ issuers = append(issuers, map[string]interface{}{
+ "module": "zerossl",
+ })
+ default: // "both" or empty
+ acmeIssuer := map[string]interface{}{
+ "module": "acme",
+ "email": acmeEmail,
+ }
+ if acmeStaging {
+ acmeIssuer["ca"] = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ }
+ issuers = append(issuers, acmeIssuer)
+ issuers = append(issuers, map[string]interface{}{
+ "module": "zerossl",
+ })
+ }
+
+ config.Apps.TLS = &TLSApp{
+ Automation: &AutomationConfig{
+ Policies: []*AutomationPolicy{
+ {
+ IssuersRaw: issuers,
+ },
+ },
+ },
+ }
+ }
+
+ // Collect CUSTOM certificates only (not Let's Encrypt - those are managed by ACME)
+ // Only custom/uploaded certificates should be loaded via LoadPEM
+ customCerts := make(map[uint]models.SSLCertificate)
+ for _, host := range hosts {
+ if host.CertificateID != nil && host.Certificate != nil {
+ // Only include custom certificates, not ACME-managed ones
+ if host.Certificate.Provider == "custom" {
+ customCerts[*host.CertificateID] = *host.Certificate
+ }
+ }
+ }
+
+ if len(customCerts) > 0 {
+ var loadPEM []LoadPEMConfig
+ for _, cert := range customCerts {
+ // Validate that custom cert has both certificate and key
+ if cert.Certificate == "" || cert.PrivateKey == "" {
+ fmt.Printf("Warning: Custom certificate %s missing certificate or key, skipping\n", cert.Name)
+ continue
+ }
+ loadPEM = append(loadPEM, LoadPEMConfig{
+ Certificate: cert.Certificate,
+ Key: cert.PrivateKey,
+ Tags: []string{cert.UUID},
+ })
+ }
+
+ if len(loadPEM) > 0 {
+ if config.Apps.TLS == nil {
+ config.Apps.TLS = &TLSApp{}
+ }
+ config.Apps.TLS.Certificates = &CertificatesConfig{
+ LoadPEM: loadPEM,
+ }
+ }
+ }
+
+ if len(hosts) == 0 && frontendDir == "" {
+ return config, nil
+ }
+
+ // Initialize routes slice
+ routes := make([]*Route, 0)
+
+ // Track processed domains to prevent duplicates (Ghost Host fix)
+ processedDomains := make(map[string]bool)
+
+ // Sort hosts by UpdatedAt desc to prefer newer configs in case of duplicates
+ // Note: This assumes the input slice is already sorted or we don't care about order beyond duplicates
+ // The caller (ApplyConfig) fetches all hosts. We should probably sort them here or there.
+ // For now, we'll just process them. If we encounter a duplicate domain, we skip it.
+ // To ensure we keep the *latest* one, we should iterate in reverse or sort.
+ // But ApplyConfig uses db.Find(&hosts), which usually returns by ID asc.
+ // So later IDs (newer) come last.
+ // We want to keep the NEWER one.
+ // So we should iterate backwards? Or just overwrite?
+ // Caddy config structure is a list of servers/routes.
+ // If we have multiple routes matching the same host, Caddy uses the first one?
+ // Actually, Caddy matches routes in order.
+ // If we emit two routes for "example.com", the first one will catch it.
+ // So we want the NEWEST one to be FIRST in the list?
+ // Or we want to only emit ONE route for "example.com".
+ // If we emit only one, it should be the newest one.
+ // So we should process hosts from newest to oldest, and skip duplicates.
+
+ // Let's iterate in reverse order (assuming input is ID ASC)
+ for i := len(hosts) - 1; i >= 0; i-- {
+ host := hosts[i]
+
+ if !host.Enabled {
+ continue
+ }
+
+ if host.DomainNames == "" {
+ // Log warning?
+ continue
+ }
+
+ // Parse comma-separated domains
+ rawDomains := strings.Split(host.DomainNames, ",")
+ var uniqueDomains []string
+
+ for _, d := range rawDomains {
+ d = strings.TrimSpace(d)
+ d = strings.ToLower(d) // Normalize to lowercase
+ if d == "" {
+ continue
+ }
+ if processedDomains[d] {
+ fmt.Printf("Warning: Skipping duplicate domain %s for host %s (Ghost Host detection)\n", d, host.UUID)
+ continue
+ }
+ processedDomains[d] = true
+ uniqueDomains = append(uniqueDomains, d)
+ }
+
+ if len(uniqueDomains) == 0 {
+ continue
+ }
+
+ // Build handlers for this host
+ handlers := make([]Handler, 0)
+
+ // Add Access Control List (ACL) handler if configured
+ if host.AccessListID != nil && host.AccessList != nil && host.AccessList.Enabled {
+ aclHandler, err := buildACLHandler(host.AccessList)
+ if err != nil {
+ fmt.Printf("Warning: Failed to build ACL handler for host %s: %v\n", host.UUID, err)
+ } else if aclHandler != nil {
+ handlers = append(handlers, aclHandler)
+ }
+ }
+
+ // Add HSTS header if enabled
+ if host.HSTSEnabled {
+ hstsValue := "max-age=31536000"
+ if host.HSTSSubdomains {
+ hstsValue += "; includeSubDomains"
+ }
+ handlers = append(handlers, HeaderHandler(map[string][]string{
+ "Strict-Transport-Security": {hstsValue},
+ }))
+ }
+
+ // Add exploit blocking if enabled
+ if host.BlockExploits {
+ handlers = append(handlers, BlockExploitsHandler())
+ }
+
+ // Handle custom locations first (more specific routes)
+ for _, loc := range host.Locations {
+ dial := fmt.Sprintf("%s:%d", loc.ForwardHost, loc.ForwardPort)
+ locRoute := &Route{
+ Match: []Match{
+ {
+ Host: uniqueDomains,
+ Path: []string{loc.Path, loc.Path + "/*"},
+ },
+ },
+ Handle: []Handler{
+ ReverseProxyHandler(dial, host.WebsocketSupport, host.Application),
+ },
+ Terminal: true,
+ }
+ routes = append(routes, locRoute)
+ }
+
+ // Main proxy handler
+ dial := fmt.Sprintf("%s:%d", host.ForwardHost, host.ForwardPort)
+ // Insert user advanced config (if present) as headers or handlers before the reverse proxy
+ // so user-specified headers/handlers are applied prior to proxying.
+ if host.AdvancedConfig != "" {
+ var parsed interface{}
+ if err := json.Unmarshal([]byte(host.AdvancedConfig), &parsed); err != nil {
+ fmt.Printf("Warning: Failed to parse advanced_config for host %s: %v\n", host.UUID, err)
+ } else {
+ switch v := parsed.(type) {
+ case map[string]interface{}:
+ // Append as a handler
+ // Ensure it has a "handler" key
+ if _, ok := v["handler"]; ok {
+ handlers = append(handlers, Handler(v))
+ } else {
+ fmt.Printf("Warning: advanced_config for host %s is not a handler object\n", host.UUID)
+ }
+ case []interface{}:
+ for _, it := range v {
+ if m, ok := it.(map[string]interface{}); ok {
+ if _, ok2 := m["handler"]; ok2 {
+ handlers = append(handlers, Handler(m))
+ }
+ }
+ }
+ default:
+ fmt.Printf("Warning: advanced_config for host %s has unexpected JSON structure\n", host.UUID)
+ }
+ }
+ }
+ mainHandlers := append(handlers, ReverseProxyHandler(dial, host.WebsocketSupport, host.Application))
+
+ route := &Route{
+ Match: []Match{
+ {Host: uniqueDomains},
+ },
+ Handle: mainHandlers,
+ Terminal: true,
+ }
+
+ routes = append(routes, route)
+ }
+
+ // Add catch-all 404 handler
+ // This matches any request that wasn't handled by previous routes
+ if frontendDir != "" {
+ catchAllRoute := &Route{
+ Handle: []Handler{
+ RewriteHandler("/unknown.html"),
+ FileServerHandler(frontendDir),
+ },
+ Terminal: true,
+ }
+ routes = append(routes, catchAllRoute)
+ }
+
+ config.Apps.HTTP.Servers["charon_server"] = &Server{
+ Listen: []string{":80", ":443"},
+ Routes: routes,
+ AutoHTTPS: &AutoHTTPSConfig{
+ Disable: false,
+ DisableRedir: false,
+ },
+ Logs: &ServerLogs{
+ DefaultLoggerName: "access_log",
+ },
+ }
+
+ return config, nil
+}
+
+// buildACLHandler creates access control handlers based on the AccessList configuration
+func buildACLHandler(acl *models.AccessList) (Handler, error) {
+ // For geo-blocking, we use CEL (Common Expression Language) matcher with caddy-geoip2 placeholders
+ // For IP-based ACLs, we use Caddy's native remote_ip matcher
+
+ if strings.HasPrefix(acl.Type, "geo_") {
+ // Geo-blocking using caddy-geoip2
+ countryCodes := strings.Split(acl.CountryCodes, ",")
+ var trimmedCodes []string
+ for _, code := range countryCodes {
+ trimmedCodes = append(trimmedCodes, `"`+strings.TrimSpace(code)+`"`)
+ }
+
+ var expression string
+ if acl.Type == "geo_whitelist" {
+ // Allow only these countries
+ expression = fmt.Sprintf("{geoip2.country_code} in [%s]", strings.Join(trimmedCodes, ", "))
+ } else {
+ // geo_blacklist: Block these countries
+ expression = fmt.Sprintf("{geoip2.country_code} not_in [%s]", strings.Join(trimmedCodes, ", "))
+ }
+
+ return Handler{
+ "handler": "subroute",
+ "routes": []map[string]interface{}{
+ {
+ "match": []map[string]interface{}{
+ {
+ "not": []map[string]interface{}{
+ {
+ "expression": expression,
+ },
+ },
+ },
+ },
+ "handle": []map[string]interface{}{
+ {
+ "handler": "static_response",
+ "status_code": 403,
+ "body": "Access denied: Geographic restriction",
+ },
+ },
+ "terminal": true,
+ },
+ },
+ }, nil
+ }
+
+ // IP/CIDR-based ACLs using Caddy's native remote_ip matcher
+ if acl.LocalNetworkOnly {
+ // Allow only RFC1918 private networks
+ return Handler{
+ "handler": "subroute",
+ "routes": []map[string]interface{}{
+ {
+ "match": []map[string]interface{}{
+ {
+ "not": []map[string]interface{}{
+ {
+ "remote_ip": map[string]interface{}{
+ "ranges": []string{
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "127.0.0.0/8",
+ "169.254.0.0/16",
+ "fc00::/7",
+ "fe80::/10",
+ "::1/128",
+ },
+ },
+ },
+ },
+ },
+ },
+ "handle": []map[string]interface{}{
+ {
+ "handler": "static_response",
+ "status_code": 403,
+ "body": "Access denied: Not a local network IP",
+ },
+ },
+ "terminal": true,
+ },
+ },
+ }, nil
+ }
+
+ // Parse IP rules
+ if acl.IPRules == "" {
+ return nil, nil
+ }
+
+ var rules []models.AccessListRule
+ if err := json.Unmarshal([]byte(acl.IPRules), &rules); err != nil {
+ return nil, fmt.Errorf("invalid IP rules JSON: %w", err)
+ }
+
+ if len(rules) == 0 {
+ return nil, nil
+ }
+
+ // Extract CIDR ranges
+ var cidrs []string
+ for _, rule := range rules {
+ cidrs = append(cidrs, rule.CIDR)
+ }
+
+ if acl.Type == "whitelist" {
+ // Allow only these IPs (block everything else)
+ return Handler{
+ "handler": "subroute",
+ "routes": []map[string]interface{}{
+ {
+ "match": []map[string]interface{}{
+ {
+ "not": []map[string]interface{}{
+ {
+ "remote_ip": map[string]interface{}{
+ "ranges": cidrs,
+ },
+ },
+ },
+ },
+ },
+ "handle": []map[string]interface{}{
+ {
+ "handler": "static_response",
+ "status_code": 403,
+ "body": "Access denied: IP not in whitelist",
+ },
+ },
+ "terminal": true,
+ },
+ },
+ }, nil
+ }
+
+ if acl.Type == "blacklist" {
+ // Block these IPs (allow everything else)
+ return Handler{
+ "handler": "subroute",
+ "routes": []map[string]interface{}{
+ {
+ "match": []map[string]interface{}{
+ {
+ "remote_ip": map[string]interface{}{
+ "ranges": cidrs,
+ },
+ },
+ },
+ "handle": []map[string]interface{}{
+ {
+ "handler": "static_response",
+ "status_code": 403,
+ "body": "Access denied: IP blacklisted",
+ },
+ },
+ "terminal": true,
+ },
+ },
+ }, nil
+ }
+
+ return nil, nil
+}
+
+
+
package caddy
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/Wikid82/charon/backend/internal/models"
+)
+
+// Executor defines an interface for executing shell commands.
+type Executor interface {
+ Execute(name string, args ...string) ([]byte, error)
+}
+
+// DefaultExecutor implements Executor using os/exec.
+type DefaultExecutor struct{}
+
+func (e *DefaultExecutor) Execute(name string, args ...string) ([]byte, error) {
+ return exec.Command(name, args...).Output()
+}
+
+// CaddyConfig represents the root structure of Caddy's JSON config.
+type CaddyConfig struct {
+ Apps *CaddyApps `json:"apps,omitempty"`
+}
+
+// CaddyApps contains application-specific configurations.
+type CaddyApps struct {
+ HTTP *CaddyHTTP `json:"http,omitempty"`
+}
+
+// CaddyHTTP represents the HTTP app configuration.
+type CaddyHTTP struct {
+ Servers map[string]*CaddyServer `json:"servers,omitempty"`
+}
+
+// CaddyServer represents a single server configuration.
+type CaddyServer struct {
+ Listen []string `json:"listen,omitempty"`
+ Routes []*CaddyRoute `json:"routes,omitempty"`
+ TLSConnectionPolicies interface{} `json:"tls_connection_policies,omitempty"`
+}
+
+// CaddyRoute represents a single route with matchers and handlers.
+type CaddyRoute struct {
+ Match []*CaddyMatcher `json:"match,omitempty"`
+ Handle []*CaddyHandler `json:"handle,omitempty"`
+}
+
+// CaddyMatcher represents route matching criteria.
+type CaddyMatcher struct {
+ Host []string `json:"host,omitempty"`
+}
+
+// CaddyHandler represents a handler in the route.
+type CaddyHandler struct {
+ Handler string `json:"handler"`
+ Upstreams interface{} `json:"upstreams,omitempty"`
+ Headers interface{} `json:"headers,omitempty"`
+ Routes interface{} `json:"routes,omitempty"` // For subroute handlers
+}
+
+// ParsedHost represents a single host detected during Caddyfile import.
+type ParsedHost struct {
+ DomainNames string `json:"domain_names"`
+ ForwardScheme string `json:"forward_scheme"`
+ ForwardHost string `json:"forward_host"`
+ ForwardPort int `json:"forward_port"`
+ SSLForced bool `json:"ssl_forced"`
+ WebsocketSupport bool `json:"websocket_support"`
+ RawJSON string `json:"raw_json"` // Original Caddy JSON for this route
+ Warnings []string `json:"warnings"` // Unsupported features
+}
+
+// ImportResult contains parsed hosts and detected conflicts.
+type ImportResult struct {
+ Hosts []ParsedHost `json:"hosts"`
+ Conflicts []string `json:"conflicts"`
+ Errors []string `json:"errors"`
+}
+
+// Importer handles Caddyfile parsing and conversion to CPM+ models.
+type Importer struct {
+ caddyBinaryPath string
+ executor Executor
+}
+
+// NewImporter creates a new Caddyfile importer.
+func NewImporter(binaryPath string) *Importer {
+ if binaryPath == "" {
+ binaryPath = "caddy" // Default to PATH
+ }
+ return &Importer{
+ caddyBinaryPath: binaryPath,
+ executor: &DefaultExecutor{},
+ }
+}
+
+// ParseCaddyfile reads a Caddyfile and converts it to Caddy JSON.
+func (i *Importer) ParseCaddyfile(caddyfilePath string) ([]byte, error) {
+ if _, err := os.Stat(caddyfilePath); os.IsNotExist(err) {
+ return nil, fmt.Errorf("caddyfile not found: %s", caddyfilePath)
+ }
+
+ output, err := i.executor.Execute(i.caddyBinaryPath, "adapt", "--config", caddyfilePath, "--adapter", "caddyfile")
+ if err != nil {
+ return nil, fmt.Errorf("caddy adapt failed: %w (output: %s)", err, string(output))
+ }
+
+ return output, nil
+}
+
+// extractHandlers recursively extracts handlers from a list, flattening subroutes.
+func (i *Importer) extractHandlers(handles []*CaddyHandler) []*CaddyHandler {
+ var result []*CaddyHandler
+
+ for _, handler := range handles {
+ // If this is a subroute, extract handlers from its first route
+ if handler.Handler == "subroute" {
+ if routes, ok := handler.Routes.([]interface{}); ok && len(routes) > 0 {
+ if subroute, ok := routes[0].(map[string]interface{}); ok {
+ if subhandles, ok := subroute["handle"].([]interface{}); ok {
+ // Convert the subhandles to CaddyHandler objects
+ for _, sh := range subhandles {
+ if shMap, ok := sh.(map[string]interface{}); ok {
+ subHandler := &CaddyHandler{}
+ if handlerType, ok := shMap["handler"].(string); ok {
+ subHandler.Handler = handlerType
+ }
+ if upstreams, ok := shMap["upstreams"]; ok {
+ subHandler.Upstreams = upstreams
+ }
+ if headers, ok := shMap["headers"]; ok {
+ subHandler.Headers = headers
+ }
+ result = append(result, subHandler)
+ }
+ }
+ }
+ }
+ }
+ } else {
+ // Regular handler, add it directly
+ result = append(result, handler)
+ }
+ }
+
+ return result
+}
+
+// ExtractHosts parses Caddy JSON and extracts proxy host information.
+func (i *Importer) ExtractHosts(caddyJSON []byte) (*ImportResult, error) {
+ var config CaddyConfig
+ if err := json.Unmarshal(caddyJSON, &config); err != nil {
+ return nil, fmt.Errorf("parsing caddy json: %w", err)
+ }
+
+ result := &ImportResult{
+ Hosts: []ParsedHost{},
+ Conflicts: []string{},
+ Errors: []string{},
+ }
+
+ if config.Apps == nil || config.Apps.HTTP == nil || config.Apps.HTTP.Servers == nil {
+ return result, nil // Empty config
+ }
+
+ seenDomains := make(map[string]bool)
+
+ for serverName, server := range config.Apps.HTTP.Servers {
+ // Detect if this server uses SSL based on listen address or TLS policies
+ serverUsesSSL := server.TLSConnectionPolicies != nil
+ for _, listenAddr := range server.Listen {
+ // Check if listening on :443 or any HTTPS port indicator
+ if strings.Contains(listenAddr, ":443") || strings.HasSuffix(listenAddr, "443") {
+ serverUsesSSL = true
+ break
+ }
+ }
+
+ for routeIdx, route := range server.Routes {
+ for _, match := range route.Match {
+ for _, hostMatcher := range match.Host {
+ domain := hostMatcher
+
+ // Check for duplicate domains (report domain names only)
+ if seenDomains[domain] {
+ result.Conflicts = append(result.Conflicts, domain)
+ continue
+ }
+ seenDomains[domain] = true
+
+ // Extract reverse proxy handler
+ host := ParsedHost{
+ DomainNames: domain,
+ SSLForced: strings.HasPrefix(domain, "https") || serverUsesSSL,
+ }
+
+ // Find reverse_proxy handler (may be nested in subroute)
+ handlers := i.extractHandlers(route.Handle)
+
+ for _, handler := range handlers {
+ if handler.Handler == "reverse_proxy" {
+ upstreams, _ := handler.Upstreams.([]interface{})
+ if len(upstreams) > 0 {
+ if upstream, ok := upstreams[0].(map[string]interface{}); ok {
+ dial, _ := upstream["dial"].(string)
+ if dial != "" {
+ hostStr, portStr, err := net.SplitHostPort(dial)
+ if err == nil {
+ host.ForwardHost = hostStr
+ if _, err := fmt.Sscanf(portStr, "%d", &host.ForwardPort); err != nil {
+ host.ForwardPort = 80
+ }
+ } else {
+ // Fallback: assume dial is just the host or has some other format
+ // Try to handle simple "host:port" manually if net.SplitHostPort failed for some reason
+ // or assume it's just a host
+ parts := strings.Split(dial, ":")
+ if len(parts) == 2 {
+ host.ForwardHost = parts[0]
+ if _, err := fmt.Sscanf(parts[1], "%d", &host.ForwardPort); err != nil {
+ host.ForwardPort = 80
+ }
+ } else {
+ host.ForwardHost = dial
+ host.ForwardPort = 80
+ }
+ }
+ }
+ }
+ }
+
+ // Check for websocket support
+ if headers, ok := handler.Headers.(map[string]interface{}); ok {
+ if upgrade, ok := headers["Upgrade"].([]interface{}); ok {
+ for _, v := range upgrade {
+ if v == "websocket" {
+ host.WebsocketSupport = true
+ break
+ }
+ }
+ }
+ }
+
+ // Default scheme
+ host.ForwardScheme = "http"
+ if host.SSLForced {
+ host.ForwardScheme = "https"
+ }
+ }
+
+ // Detect unsupported features
+ if handler.Handler == "rewrite" {
+ host.Warnings = append(host.Warnings, "Rewrite rules not supported - manual configuration required")
+ }
+ if handler.Handler == "file_server" {
+ host.Warnings = append(host.Warnings, "File server directives not supported")
+ }
+ }
+
+ // Store raw JSON for this route
+ routeJSON, _ := json.Marshal(map[string]interface{}{
+ "server": serverName,
+ "route": routeIdx,
+ "data": route,
+ })
+ host.RawJSON = string(routeJSON)
+
+ result.Hosts = append(result.Hosts, host)
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// ImportFile performs complete import: parse Caddyfile and extract hosts.
+func (i *Importer) ImportFile(caddyfilePath string) (*ImportResult, error) {
+ caddyJSON, err := i.ParseCaddyfile(caddyfilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ return i.ExtractHosts(caddyJSON)
+}
+
+// ConvertToProxyHosts converts parsed hosts to ProxyHost models.
+func ConvertToProxyHosts(parsedHosts []ParsedHost) []models.ProxyHost {
+ hosts := make([]models.ProxyHost, 0, len(parsedHosts))
+
+ for _, parsed := range parsedHosts {
+ if parsed.ForwardHost == "" || parsed.ForwardPort == 0 {
+ continue // Skip invalid entries
+ }
+
+ hosts = append(hosts, models.ProxyHost{
+ Name: parsed.DomainNames, // Can be customized by user during review
+ DomainNames: parsed.DomainNames,
+ ForwardScheme: parsed.ForwardScheme,
+ ForwardHost: parsed.ForwardHost,
+ ForwardPort: parsed.ForwardPort,
+ SSLForced: parsed.SSLForced,
+ WebsocketSupport: parsed.WebsocketSupport,
+ })
+ }
+
+ return hosts
+}
+
+// ValidateCaddyBinary checks if the Caddy binary is available.
+func (i *Importer) ValidateCaddyBinary() error {
+ _, err := i.executor.Execute(i.caddyBinaryPath, "version")
+ if err != nil {
+ return errors.New("caddy binary not found or not executable")
+ }
+ return nil
+}
+
+// BackupCaddyfile creates a timestamped backup of the original Caddyfile.
+func BackupCaddyfile(originalPath, backupDir string) (string, error) {
+ if err := os.MkdirAll(backupDir, 0755); err != nil {
+ return "", fmt.Errorf("creating backup directory: %w", err)
+ }
+
+ timestamp := fmt.Sprintf("%d", os.Getpid()) // Simple timestamp placeholder
+ backupPath := filepath.Join(backupDir, fmt.Sprintf("Caddyfile.%s.backup", timestamp))
+
+ input, err := os.ReadFile(originalPath)
+ if err != nil {
+ return "", fmt.Errorf("reading original file: %w", err)
+ }
+
+ if err := os.WriteFile(backupPath, input, 0644); err != nil {
+ return "", fmt.Errorf("writing backup: %w", err)
+ }
+
+ return backupPath, nil
+}
+
+
+
package caddy
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
+
+ "gorm.io/gorm"
+
+ "github.com/Wikid82/charon/backend/internal/models"
+)
+
+// Test hooks to allow overriding OS and JSON functions in unit tests.
+// Production code always runs with the real implementations assigned below.
+var (
+	writeFileFunc   = os.WriteFile       // snapshot writes
+	readFileFunc    = os.ReadFile        // snapshot reads (rollback)
+	removeFileFunc  = os.Remove          // snapshot deletion (rotation / failed apply)
+	readDirFunc     = os.ReadDir         // snapshot listing
+	statFunc        = os.Stat            // snapshot mtime lookup (sort order)
+	jsonMarshalFunc = json.MarshalIndent // snapshot serialization
+)
+
+// Manager orchestrates the Caddy configuration lifecycle: generate, validate,
+// apply, rollback, and audit logging.
+type Manager struct {
+	client      *Client  // Caddy admin API client used to load/read configs
+	db          *gorm.DB // source of proxy hosts/settings and audit records
+	configDir   string   // directory where config snapshots are stored
+	frontendDir string   // frontend assets dir, forwarded to GenerateConfig
+	acmeStaging bool     // forwarded to GenerateConfig; presumably selects the ACME staging endpoint — TODO confirm
+}
+
+// NewManager creates a configuration manager wired to the given Caddy admin
+// client, database, snapshot directory, frontend directory, and ACME staging
+// flag.
+func NewManager(client *Client, db *gorm.DB, configDir string, frontendDir string, acmeStaging bool) *Manager {
+	m := &Manager{
+		client:      client,
+		db:          db,
+		configDir:   configDir,
+		frontendDir: frontendDir,
+		acmeStaging: acmeStaging,
+	}
+	return m
+}
+
+// ApplyConfig generates configuration from the database, validates it, and
+// applies it to Caddy, rolling back to the previous snapshot on failure.
+// Every attempt — success or failure — is recorded as an audit record.
+func (m *Manager) ApplyConfig(ctx context.Context) error {
+	// Fetch all proxy hosts from database, including related records the
+	// generator needs.
+	var hosts []models.ProxyHost
+	if err := m.db.Preload("Locations").Preload("Certificate").Preload("AccessList").Find(&hosts).Error; err != nil {
+		return fmt.Errorf("fetch proxy hosts: %w", err)
+	}
+
+	// Fetch ACME email setting. A lookup failure deliberately leaves the
+	// value empty — the setting is optional.
+	var acmeEmailSetting models.Setting
+	var acmeEmail string
+	if err := m.db.Where("key = ?", "caddy.acme_email").First(&acmeEmailSetting).Error; err == nil {
+		acmeEmail = acmeEmailSetting.Value
+	}
+
+	// Fetch SSL provider setting (also optional, same best-effort lookup).
+	var sslProviderSetting models.Setting
+	var sslProvider string
+	if err := m.db.Where("key = ?", "caddy.ssl_provider").First(&sslProviderSetting).Error; err == nil {
+		sslProvider = sslProviderSetting.Value
+	}
+
+	// Generate the Caddy JSON config from hosts and settings.
+	config, err := GenerateConfig(hosts, filepath.Join(m.configDir, "data"), acmeEmail, m.frontendDir, sslProvider, m.acmeStaging)
+	if err != nil {
+		return fmt.Errorf("generate config: %w", err)
+	}
+
+	// Validate before applying so broken configs never reach Caddy.
+	if err := Validate(config); err != nil {
+		return fmt.Errorf("validation failed: %w", err)
+	}
+
+	// Save a snapshot BEFORE applying so rollback state exists on disk.
+	snapshotPath, err := m.saveSnapshot(config)
+	if err != nil {
+		return fmt.Errorf("save snapshot: %w", err)
+	}
+
+	// Calculate config hash for the audit trail. The marshal error is
+	// intentionally ignored: Validate already proved the config marshals.
+	configJSON, _ := json.Marshal(config)
+	configHash := fmt.Sprintf("%x", sha256.Sum256(configJSON))
+
+	// Apply to Caddy.
+	if err := m.client.Load(ctx, config); err != nil {
+		// Remove the just-saved (failed) snapshot so rollback picks the
+		// previous, known-good one instead.
+		_ = removeFileFunc(snapshotPath)
+
+		// Rollback on failure.
+		if rollbackErr := m.rollback(ctx); rollbackErr != nil {
+			// If rollback fails, we still want to record the failure.
+			m.recordConfigChange(configHash, false, err.Error())
+			return fmt.Errorf("apply failed: %w, rollback also failed: %v", err, rollbackErr)
+		}
+
+		// Record failed attempt.
+		m.recordConfigChange(configHash, false, err.Error())
+		return fmt.Errorf("apply failed (rolled back): %w", err)
+	}
+
+	// Record successful application.
+	m.recordConfigChange(configHash, true, "")
+
+	// Cleanup old snapshots (keep last 10). Non-fatal: the new config is
+	// already live, so rotation trouble only warrants a warning.
+	if err := m.rotateSnapshots(10); err != nil {
+		fmt.Printf("warning: snapshot rotation failed: %v\n", err)
+	}
+
+	return nil
+}
+
+// saveSnapshot stores the config to disk as a timestamped JSON file and
+// returns the snapshot's path.
+func (m *Manager) saveSnapshot(config *Config) (string, error) {
+	// Use nanosecond resolution: with time.Now().Unix(), two applies within
+	// the same second produced the same filename and overwrote each other.
+	timestamp := time.Now().UnixNano()
+	filename := fmt.Sprintf("config-%d.json", timestamp)
+	path := filepath.Join(m.configDir, filename)
+
+	configJSON, err := jsonMarshalFunc(config, "", " ")
+	if err != nil {
+		return "", fmt.Errorf("marshal config: %w", err)
+	}
+
+	if err := writeFileFunc(path, configJSON, 0644); err != nil {
+		return "", fmt.Errorf("write snapshot: %w", err)
+	}
+
+	return path, nil
+}
+
+// rollback re-applies the most recent on-disk snapshot to Caddy.
+func (m *Manager) rollback(ctx context.Context) error {
+	snapshots, err := m.listSnapshots()
+	if err != nil || len(snapshots) == 0 {
+		return fmt.Errorf("no snapshots available for rollback")
+	}
+
+	// listSnapshots sorts oldest-first, so the last entry is the newest.
+	latest := snapshots[len(snapshots)-1]
+	raw, err := readFileFunc(latest)
+	if err != nil {
+		return fmt.Errorf("read snapshot: %w", err)
+	}
+
+	var cfg Config
+	if err := json.Unmarshal(raw, &cfg); err != nil {
+		return fmt.Errorf("unmarshal snapshot: %w", err)
+	}
+
+	if err := m.client.Load(ctx, &cfg); err != nil {
+		return fmt.Errorf("load snapshot: %w", err)
+	}
+	return nil
+}
+
+// listSnapshots returns all snapshot (*.json) file paths in configDir sorted
+// by modification time, oldest first.
+func (m *Manager) listSnapshots() ([]string, error) {
+	entries, err := readDirFunc(m.configDir)
+	if err != nil {
+		return nil, fmt.Errorf("read config dir: %w", err)
+	}
+
+	var snapshots []string
+	for _, entry := range entries {
+		if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" {
+			continue
+		}
+		snapshots = append(snapshots, filepath.Join(m.configDir, entry.Name()))
+	}
+
+	// Sort by modification time. The previous comparator ignored stat errors,
+	// so a file removed between ReadDir and Stat (e.g. concurrent rotation)
+	// left a nil FileInfo and panicked on ModTime(). Treat unstattable files
+	// as oldest so sorting never dereferences nil.
+	sort.Slice(snapshots, func(i, j int) bool {
+		infoI, errI := statFunc(snapshots[i])
+		infoJ, errJ := statFunc(snapshots[j])
+		if errI != nil || errJ != nil {
+			return errI != nil && errJ == nil
+		}
+		return infoI.ModTime().Before(infoJ.ModTime())
+	})
+
+	return snapshots, nil
+}
+
+// rotateSnapshots deletes the oldest snapshots so that at most keep remain.
+func (m *Manager) rotateSnapshots(keep int) error {
+	snapshots, err := m.listSnapshots()
+	if err != nil {
+		return err
+	}
+
+	excess := len(snapshots) - keep
+	if excess <= 0 {
+		return nil
+	}
+
+	// snapshots is sorted oldest-first; remove the leading excess entries.
+	for _, path := range snapshots[:excess] {
+		if err := removeFileFunc(path); err != nil {
+			return fmt.Errorf("delete snapshot %s: %w", path, err)
+		}
+	}
+	return nil
+}
+
+// recordConfigChange stores an audit record in the database. The insert is
+// best-effort: a failure to write the audit row must never block or fail a
+// configuration change, so the error is deliberately discarded.
+func (m *Manager) recordConfigChange(configHash string, success bool, errorMsg string) {
+	m.db.Create(&models.CaddyConfig{
+		ConfigHash: configHash,
+		AppliedAt:  time.Now(),
+		Success:    success,
+		ErrorMsg:   errorMsg,
+	})
+}
+
+// Ping checks if Caddy's admin API is reachable by delegating to the client.
+func (m *Manager) Ping(ctx context.Context) error {
+	return m.client.Ping(ctx)
+}
+
+// GetCurrentConfig retrieves the currently running config from Caddy via the
+// admin API client.
+func (m *Manager) GetCurrentConfig(ctx context.Context) (*Config, error) {
+	return m.client.GetConfig(ctx)
+}
+
+
+
package caddy
+
+// Config represents Caddy's top-level JSON configuration structure.
+// Reference: https://caddyserver.com/docs/json/
+type Config struct {
+	Apps    Apps           `json:"apps"`              // app modules (http, tls)
+	Logging *LoggingConfig `json:"logging,omitempty"` // optional logging setup
+	Storage Storage        `json:"storage,omitempty"` // storage module configuration
+}
+
+// LoggingConfig configures Caddy's logging facility.
+type LoggingConfig struct {
+	Logs  map[string]*LogConfig `json:"logs,omitempty"` // named loggers
+	Sinks *SinkConfig           `json:"sinks,omitempty"`
+}
+
+// LogConfig configures a specific logger.
+type LogConfig struct {
+	Writer  *WriterConfig  `json:"writer,omitempty"`  // where entries are written
+	Encoder *EncoderConfig `json:"encoder,omitempty"` // how entries are formatted
+	Level   string         `json:"level,omitempty"`   // minimum log level
+	Include []string       `json:"include,omitempty"` // logger names to include
+	Exclude []string       `json:"exclude,omitempty"` // logger names to exclude
+}
+
+// WriterConfig configures the log writer (output destination).
+type WriterConfig struct {
+	Output       string `json:"output"`                   // writer module name, e.g. "file" or "stderr" — TODO confirm accepted values
+	Filename     string `json:"filename,omitempty"`       // target file when writing to a file
+	Roll         bool   `json:"roll,omitempty"`           // enable log rotation
+	RollSize     int    `json:"roll_size_mb,omitempty"`   // rotate after this many megabytes
+	RollKeep     int    `json:"roll_keep,omitempty"`      // number of rolled files to keep
+	RollKeepDays int    `json:"roll_keep_days,omitempty"` // max age of rolled files in days
+}
+
+// EncoderConfig configures the log format.
+type EncoderConfig struct {
+	Format string `json:"format"` // "json", "console", etc.
+}
+
+// SinkConfig configures log sinks (e.g. stderr).
+type SinkConfig struct {
+	Writer *WriterConfig `json:"writer,omitempty"`
+}
+
+// Storage configures the storage module. Note the module name is serialized
+// under the JSON key "module" per Caddy's schema.
+type Storage struct {
+	System string `json:"module"`         // storage module name
+	Root   string `json:"root,omitempty"` // root directory for file-based storage
+}
+
+// Apps contains all Caddy app modules used by this project.
+type Apps struct {
+	HTTP *HTTPApp `json:"http,omitempty"`
+	TLS  *TLSApp  `json:"tls,omitempty"`
+}
+
+// HTTPApp configures the HTTP app.
+type HTTPApp struct {
+	Servers map[string]*Server `json:"servers"` // server name -> server config
+}
+
+// Server represents an HTTP server instance.
+type Server struct {
+	Listen    []string         `json:"listen"`                    // listen addresses, e.g. ":443" or "tcp/:80"
+	Routes    []*Route         `json:"routes"`                    // ordered request routes
+	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"` // automatic HTTPS behavior
+	Logs      *ServerLogs      `json:"logs,omitempty"`            // access logging
+}
+
+// AutoHTTPSConfig controls automatic HTTPS behavior.
+type AutoHTTPSConfig struct {
+	Disable      bool     `json:"disable,omitempty"`           // turn automatic HTTPS off entirely
+	DisableRedir bool     `json:"disable_redirects,omitempty"` // skip HTTP->HTTPS redirects only
+	Skip         []string `json:"skip,omitempty"`              // hostnames excluded from automation
+}
+
+// ServerLogs configures access logging for a server.
+type ServerLogs struct {
+	DefaultLoggerName string `json:"default_logger_name,omitempty"`
+}
+
+// Route represents an HTTP route (matcher + handlers).
+type Route struct {
+	Match    []Match   `json:"match,omitempty"`    // request matchers; empty presumably matches all requests — TODO confirm
+	Handle   []Handler `json:"handle"`             // handlers executed for matching requests
+	Terminal bool      `json:"terminal,omitempty"` // stop evaluating further routes when set — TODO confirm against Caddy docs
+}
+
+// Match represents a request matcher (by host and/or path).
+type Match struct {
+	Host []string `json:"host,omitempty"`
+	Path []string `json:"path,omitempty"`
+}
+
+// Handler is a generic Caddy handler object, modeled as a free-form map keyed
+// by JSON field name. The "handler" key identifies the handler module, e.g.
+// "reverse_proxy" or "headers"; remaining keys are module-specific fields.
+type Handler map[string]interface{}
+
+// ReverseProxyHandler creates a reverse_proxy handler that dials the given
+// upstream address. enableWS forwards the client's WebSocket upgrade headers;
+// application selects app-specific request headers: "none", "plex",
+// "jellyfin", "emby", "homeassistant", "nextcloud", "vaultwarden".
+func ReverseProxyHandler(dial string, enableWS bool, application string) Handler {
+	h := Handler{
+		"handler":        "reverse_proxy",
+		"flush_interval": -1, // Disable buffering for better streaming performance (Plex, etc.)
+		"upstreams": []map[string]interface{}{
+			{"dial": dial},
+		},
+	}
+
+	setHeaders := make(map[string][]string)
+
+	// WebSocket support: forward the client's upgrade handshake headers.
+	if enableWS {
+		setHeaders["Upgrade"] = []string{"{http.request.header.Upgrade}"}
+		setHeaders["Connection"] = []string{"{http.request.header.Connection}"}
+	}
+
+	// Application-specific headers for proper client IP forwarding. These are
+	// critical for media servers behind tunnels/CGNAT.
+	switch application {
+	case "plex":
+		// Pass through common Plex headers for improved compatibility when
+		// proxying; the placeholder mirrors each incoming header verbatim.
+		for _, name := range []string{
+			"X-Plex-Client-Identifier",
+			"X-Plex-Device",
+			"X-Plex-Device-Name",
+			"X-Plex-Platform",
+			"X-Plex-Platform-Version",
+			"X-Plex-Product",
+			"X-Plex-Token",
+			"X-Plex-Version",
+		} {
+			setHeaders[name] = []string{"{http.request.header." + name + "}"}
+		}
+		// Also set X-Real-IP for accurate client IP reporting.
+		setHeaders["X-Real-IP"] = []string{"{http.request.remote.host}"}
+		setHeaders["X-Forwarded-Host"] = []string{"{http.request.host}"}
+	case "jellyfin", "emby", "homeassistant", "nextcloud", "vaultwarden":
+		// X-Real-IP is required by most apps to identify the real client;
+		// Caddy already sets X-Forwarded-For and X-Forwarded-Proto by default.
+		setHeaders["X-Real-IP"] = []string{"{http.request.remote.host}"}
+		// Some apps also check this header.
+		setHeaders["X-Forwarded-Host"] = []string{"{http.request.host}"}
+	}
+
+	// Only attach a headers config when there is something to set.
+	if len(setHeaders) > 0 {
+		h["headers"] = map[string]interface{}{
+			"request": map[string]interface{}{"set": setHeaders},
+		}
+	}
+
+	return h
+}
+
+// HeaderHandler creates a "headers" handler that sets the given HTTP response
+// headers.
+func HeaderHandler(headers map[string][]string) Handler {
+	response := map[string]interface{}{"set": headers}
+	return Handler{
+		"handler":  "headers",
+		"response": response,
+	}
+}
+
+// BlockExploitsHandler creates a handler intended to block common exploits.
+// Currently a stub: it emits a bare "vars" handler (effectively a no-op) and
+// can be extended with specific matchers for SQL injection, XSS, etc.
+func BlockExploitsHandler() Handler {
+	return Handler{
+		"handler": "vars",
+	}
+}
+
+// RewriteHandler creates a "rewrite" handler that rewrites the request URI.
+func RewriteHandler(uri string) Handler {
+	h := Handler{"handler": "rewrite"}
+	h["uri"] = uri
+	return h
+}
+
+// FileServerHandler creates a "file_server" handler rooted at the given
+// directory.
+func FileServerHandler(root string) Handler {
+	h := Handler{"handler": "file_server"}
+	h["root"] = root
+	return h
+}
+
+// TLSApp configures the TLS app for certificate management.
+type TLSApp struct {
+	Automation   *AutomationConfig   `json:"automation,omitempty"`   // automated (e.g. ACME) certificates
+	Certificates *CertificatesConfig `json:"certificates,omitempty"` // manually loaded certificates
+}
+
+// CertificatesConfig configures manual certificate loading.
+type CertificatesConfig struct {
+	LoadPEM []LoadPEMConfig `json:"load_pem,omitempty"`
+}
+
+// LoadPEMConfig defines a PEM-loaded certificate/key pair.
+type LoadPEMConfig struct {
+	Certificate string   `json:"certificate"`    // certificate in PEM form — TODO confirm inline PEM vs file path
+	Key         string   `json:"key"`            // private key in PEM form
+	Tags        []string `json:"tags,omitempty"` // tags for selecting this certificate
+}
+
+// AutomationConfig controls certificate automation.
+type AutomationConfig struct {
+	Policies []*AutomationPolicy `json:"policies,omitempty"`
+}
+
+// AutomationPolicy defines certificate management for specific domains.
+type AutomationPolicy struct {
+	Subjects   []string      `json:"subjects,omitempty"` // domain names this policy applies to
+	IssuersRaw []interface{} `json:"issuers,omitempty"`  // raw issuer module configs
+}
+
+
+
package caddy
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// Validate performs pre-flight validation on a Caddy config before applying
+// it: listen addresses, routes and handlers, duplicate host matchers (tracked
+// across all servers), and that the config marshals cleanly to JSON.
+func Validate(cfg *Config) error {
+	if cfg == nil {
+		return fmt.Errorf("config cannot be nil")
+	}
+
+	if cfg.Apps.HTTP == nil {
+		return nil // Empty config is valid
+	}
+
+	// Track seen hosts to detect duplicates. The map is shared across all
+	// servers so the same host cannot be claimed by two of them.
+	seenHosts := make(map[string]bool)
+
+	for serverName, server := range cfg.Apps.HTTP.Servers {
+		// Guard nil map entries explicitly: dereferencing a nil *Server
+		// would otherwise panic instead of returning a useful error.
+		if server == nil {
+			return fmt.Errorf("server %s is nil", serverName)
+		}
+		if len(server.Listen) == 0 {
+			return fmt.Errorf("server %s has no listen addresses", serverName)
+		}
+
+		// Validate listen addresses
+		for _, addr := range server.Listen {
+			if err := validateListenAddr(addr); err != nil {
+				return fmt.Errorf("invalid listen address %s in server %s: %w", addr, serverName, err)
+			}
+		}
+
+		// Validate routes; nil route pointers are rejected for the same
+		// reason as nil servers.
+		for i, route := range server.Routes {
+			if route == nil {
+				return fmt.Errorf("invalid route %d in server %s: route is nil", i, serverName)
+			}
+			if err := validateRoute(route, seenHosts); err != nil {
+				return fmt.Errorf("invalid route %d in server %s: %w", i, serverName, err)
+			}
+		}
+	}
+
+	// Validate JSON marshalling works
+	if _, err := json.Marshal(cfg); err != nil {
+		return fmt.Errorf("config cannot be marshalled to JSON: %w", err)
+	}
+
+	return nil
+}
+
+// validateListenAddr checks a Caddy listen address of the form "host:port",
+// optionally prefixed with a network type such as "tcp/" or "udp/". The host
+// may be empty (wildcard bind) or a literal IP; the port must be 1-65535.
+func validateListenAddr(addr string) error {
+	// Strip a network-type prefix if present (e.g. "tcp/", "udp/").
+	if _, rest, found := strings.Cut(addr, "/"); found {
+		addr = rest
+	}
+
+	host, portStr, err := net.SplitHostPort(addr)
+	if err != nil {
+		return fmt.Errorf("invalid address format: %w", err)
+	}
+
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return fmt.Errorf("invalid port: %w", err)
+	}
+	if port < 1 || port > 65535 {
+		return fmt.Errorf("port %d out of range (1-65535)", port)
+	}
+
+	// An empty host means bind on all interfaces; any other host must be a
+	// literal IP address (hostnames are rejected).
+	if host != "" && net.ParseIP(host) == nil {
+		return fmt.Errorf("invalid IP address: %s", host)
+	}
+	return nil
+}
+
+// validateRoute checks that a route has at least one handler, that none of
+// its host matchers duplicates a host already seen (seenHosts is shared by
+// the caller across routes and servers), and that each handler is well-formed.
+func validateRoute(route *Route, seenHosts map[string]bool) error {
+	if len(route.Handle) == 0 {
+		return fmt.Errorf("route has no handlers")
+	}
+
+	// Reject any host matcher that has already been claimed.
+	for _, matcher := range route.Match {
+		for _, h := range matcher.Host {
+			if seenHosts[h] {
+				return fmt.Errorf("duplicate host matcher: %s", h)
+			}
+			seenHosts[h] = true
+		}
+	}
+
+	for idx, handler := range route.Handle {
+		if err := validateHandler(handler); err != nil {
+			return fmt.Errorf("invalid handler %d: %w", idx, err)
+		}
+	}
+	return nil
+}
+
+// validateHandler dispatches validation based on the handler's "handler"
+// field. Only reverse_proxy gets deep validation; all other handler types —
+// known or unknown — are accepted because Caddy is extensible.
+func validateHandler(handler Handler) error {
+	handlerType, ok := handler["handler"].(string)
+	if !ok {
+		return fmt.Errorf("handler missing 'handler' field")
+	}
+
+	if handlerType == "reverse_proxy" {
+		return validateReverseProxy(handler)
+	}
+	// "file_server", "static_response", and any other handler pass through.
+	return nil
+}
+
+func validateReverseProxy(handler Handler) error {
+ upstreams, ok := handler["upstreams"].([]map[string]interface{})
+ if !ok {
+ return fmt.Errorf("reverse_proxy missing upstreams")
+ }
+
+ if len(upstreams) == 0 {
+ return fmt.Errorf("reverse_proxy has no upstreams")
+ }
+
+ for i, upstream := range upstreams {
+ dial, ok := upstream["dial"].(string)
+ if !ok || dial == "" {
+ return fmt.Errorf("upstream %d missing dial address", i)
+ }
+
+ // Validate dial address format (host:port)
+ if _, _, err := net.SplitHostPort(dial); err != nil {
+ return fmt.Errorf("upstream %d has invalid dial address %s: %w", i, dial, err)
+ }
+ }
+
+ return nil
+}
+
+
+
+
+
+