diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d89db56..361c3622 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,3 +98,50 @@ jobs: - name: Build frontend working-directory: frontend run: npm run build
+
+  docker-build-test:
+    name: Docker - Build & Integration Test
+    runs-on: ubuntu-latest
+    needs: [backend-test, frontend-build]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build Docker image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          load: true
+          tags: caddyproxymanager-plus:test
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+      # NOTE: use the Compose V2 plugin ("docker compose"); the standalone
+      # "docker-compose" V1 binary was removed from ubuntu-latest runners.
+      - name: Start services with docker compose
+        run: |
+          docker compose up -d
+          sleep 10 # Wait for services to be ready
+
+      - name: Check app health
+        run: |
+          curl --retry 5 --retry-delay 3 --retry-connrefused http://localhost:8080/api/v1/health
+
+      - name: Check Caddy admin API
+        run: |
+          curl --retry 5 --retry-delay 3 --retry-connrefused http://localhost:2019/config/
+
+      - name: Run integration tests
+        run: |
+          # Future: run integration tests against running containers
+          echo "Integration tests placeholder - will be implemented with Issue #4"
+
+      - name: Show logs on failure
+        if: failure()
+        run: |
+          docker compose logs app
+          docker compose logs caddy
+
+      - name: Cleanup
+        if: always()
+        run: docker compose down -v
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
new file mode 100644
index 00000000..6d76eb45
--- /dev/null
+++ b/.github/workflows/docker-publish.yml
@@ -0,0 +1,74 @@
+name: Docker Build & Publish
+
+on:
+  push:
+    branches:
+      - main
+      - development
+    tags:
+      - 'v*.*.*'
+  pull_request:
+    branches:
+      - main
+      - development
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses:
actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to Container Registry
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels)
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            # Tag 'latest' for main branch
+            type=raw,value=latest,enable={{is_default_branch}}
+            # Tag 'development' for development branch
+            type=raw,value=development,enable=${{ github.ref == 'refs/heads/development' }}
+            # Semver tags for version releases (v1.0.0 -> 1.0.0, 1.0, 1)
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            # SHA for all builds
+            type=sha,prefix={{branch}}-
+
+      - name: Build and push Docker image
+        # id is required so the digest output can be referenced below via
+        # steps.build-and-push.outputs.digest (the job id alone does not work)
+        id: build-and-push
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          build-args: |
+            BUILD_DATE=${{ github.event.head_commit.timestamp }}
+            VCS_REF=${{ github.sha }}
+            VERSION=${{ steps.meta.outputs.version }}
+
+      - name: Image digest
+        run: echo ${{ steps.build-and-push.outputs.digest }}
diff --git a/DOCKER.md b/DOCKER.md
new file mode 100644
index 00000000..2d6c055b
--- /dev/null
+++ b/DOCKER.md
@@ -0,0 +1,234 @@
+# Docker Deployment Guide
+
+CaddyProxyManager+ is designed for Docker-first deployment, making it easy for home users to run Caddy without learning Caddyfile syntax.
+ +## Quick Start + +```bash +# Clone the repository +git clone https://github.com/Wikid82/CaddyProxyManagerPlus.git +cd CaddyProxyManagerPlus + +# Start the stack +docker-compose up -d + +# Access the UI +open http://localhost:8080 +``` + +## Architecture + +The Docker stack consists of two services: + +1. **app** (`caddyproxymanager-plus`): Management interface + - Manages proxy host configuration + - Provides web UI on port 8080 + - Communicates with Caddy via admin API + +2. **caddy**: Reverse proxy server + - Handles incoming traffic on ports 80/443 + - Automatic HTTPS with Let's Encrypt + - Configured dynamically via JSON API + +``` +┌──────────────┐ +│ Internet │ +└──────┬───────┘ + │ :80, :443 + ▼ +┌──────────────┐ Admin API ┌──────────────┐ +│ Caddy │◄───────:2019───────┤ CPM+ App │ +│ (Proxy) │ │ (Manager) │ +└──────┬───────┘ └──────┬───────┘ + │ │ + ▼ ▼ + Your Services :8080 (Web UI) +``` + +## Environment Variables + +Configure CPM+ via environment variables in `docker-compose.yml`: + +```yaml +environment: + - CPM_ENV=production # production | development + - CPM_HTTP_PORT=8080 # Management UI port + - CPM_DB_PATH=/app/data/cpm.db # SQLite database location + - CPM_CADDY_ADMIN_API=http://caddy:2019 # Caddy admin endpoint + - CPM_CADDY_CONFIG_DIR=/app/data/caddy # Config snapshots +``` + +## Volumes + +Three persistent volumes store your data: + +- **app_data**: CPM+ database, config snapshots, logs +- **caddy_data**: Caddy certificates, ACME account data +- **caddy_config**: Caddy runtime configuration + +To backup your configuration: + +```bash +# Backup volumes +docker run --rm -v cpm_app_data:/data -v $(pwd):/backup alpine tar czf /backup/cpm-backup.tar.gz /data + +# Restore from backup +docker run --rm -v cpm_app_data:/data -v $(pwd):/backup alpine tar xzf /backup/cpm-backup.tar.gz -C / +``` + +## Ports + +Default port mapping: + +- **80**: HTTP (Caddy) - redirects to HTTPS +- **443/tcp**: HTTPS (Caddy) +- **443/udp**: HTTP/3 (Caddy) +- **8080**: 
Management UI (CPM+) +- **2019**: Caddy admin API (internal only, exposed in dev mode) + +## Development Mode + +Development mode exposes the Caddy admin API externally for debugging: + +```bash +docker-compose -f docker-compose.yml -f docker-compose.dev.yml up +``` + +Access Caddy admin API: `http://localhost:2019/config/` + +## Health Checks + +CPM+ includes a health check endpoint: + +```bash +# Check if app is running +curl http://localhost:8080/api/v1/health + +# Check Caddy status +docker-compose exec caddy caddy version +``` + +## Troubleshooting + +### App can't reach Caddy + +**Symptom**: "Caddy unreachable" errors in logs + +**Solution**: Ensure both containers are on the same network: +```bash +docker-compose ps # Check both services are "Up" +docker-compose logs caddy # Check Caddy logs +``` + +### Certificates not working + +**Symptom**: HTTP works but HTTPS fails + +**Check**: +1. Port 80/443 are accessible from the internet +2. DNS points to your server +3. Caddy logs: `docker-compose logs caddy | grep -i acme` + +### Config changes not applied + +**Symptom**: Changes in UI don't affect routing + +**Debug**: +```bash +# View current Caddy config +curl http://localhost:2019/config/ | jq + +# Check CPM+ logs +docker-compose logs app + +# Manual config reload +curl -X POST http://localhost:8080/api/v1/caddy/reload +``` + +## Updating + +Pull the latest images and restart: + +```bash +docker-compose pull +docker-compose up -d +``` + +For specific versions: + +```bash +# Edit docker-compose.yml to pin version +image: ghcr.io/wikid82/caddyproxymanagerplus:v1.0.0 + +docker-compose up -d +``` + +## Building from Source + +```bash +# Build multi-arch images +docker buildx build --platform linux/amd64,linux/arm64 -t caddyproxymanager-plus:local . + +# Or use Make +make docker-build +``` + +## Security Considerations + +1. **Caddy admin API**: Keep port 2019 internal (not exposed in production compose) +2. 
**Management UI**: Add authentication (Issue #7) before exposing to internet +3. **Certificates**: Caddy stores private keys in `caddy_data` - protect this volume +4. **Database**: SQLite file contains all config - backup regularly + +## Integration with Existing Caddy + +If you already have Caddy running, you can point CPM+ to it: + +```yaml +environment: + - CPM_CADDY_ADMIN_API=http://your-caddy-host:2019 +``` + +**Warning**: CPM+ will replace Caddy's entire configuration. Backup first! + +## Platform-Specific Notes + +### Synology NAS + +Use Container Manager (Docker GUI): +1. Import `docker-compose.yml` +2. Map port 80/443 to your NAS IP +3. Enable auto-restart + +### Unraid + +1. Use Docker Compose Manager plugin +2. Add compose file to `/boot/config/plugins/compose.manager/projects/cpm/` +3. Start via web UI + +### Home Assistant Add-on + +Coming soon in Beta release. + +## Performance Tuning + +For high-traffic deployments: + +```yaml +# docker-compose.yml +services: + caddy: + deploy: + resources: + limits: + memory: 512M + reservations: + memory: 256M +``` + +## Next Steps + +- Configure your first proxy host via UI +- Enable automatic HTTPS (happens automatically) +- Add authentication (Issue #7) +- Integrate CrowdSec (Issue #15) diff --git a/Makefile b/Makefile index eb1da5b7..33dcd814 100644 --- a/Makefile +++ b/Makefile @@ -52,11 +52,23 @@ clean: # Build Docker image docker-build: - docker build -t caddyproxymanager-plus:latest . 
+	docker compose build

-# Run Docker container
+# Run Docker containers (production)
 docker-run:
-	docker run -p 8080:8080 -v cpm-data:/app/data caddyproxymanager-plus:latest
+	docker compose up -d
+
+# Run Docker containers (development)
+docker-dev:
+	docker compose -f docker-compose.yml -f docker-compose.dev.yml up
+
+# Stop Docker containers
+docker-stop:
+	docker compose down
+
+# View Docker logs
+docker-logs:
+	docker compose logs -f

 # Development mode (requires tmux)
 dev:
diff --git a/README.md b/README.md
index 48165f59..7c6aefd6 100644
--- a/README.md
+++ b/README.md
@@ -70,17 +70,37 @@ cd frontend
 npm run build
 ```

-### Docker Deployment
+### Docker Deployment (Recommended)
+
+CaddyProxyManager+ is designed to run in Docker with Caddy as a sidecar container.
+
 ```bash
-# Build the image
-make docker-build
+# Production deployment
+docker compose up -d

-# Run the container
-make docker-run
+# Development mode (exposes Caddy admin API on :2019)
+docker compose -f docker-compose.yml -f docker-compose.dev.yml up
+```

-# Or manually:
-docker build -t caddyproxymanager-plus
-docker run -p 8080:8080 -v cpm-data:/app/data caddyproxymanager-plus +The docker-compose stack includes: +- **app**: CaddyProxyManager+ management interface (`:8080`) +- **caddy**: Caddy reverse proxy (`:80`, `:443`, `:443/udp` for HTTP/3) + +Data is persisted in Docker volumes: +- `app_data`: CPM+ database and config snapshots +- `caddy_data`: Caddy certificates and data +- `caddy_config`: Caddy configuration + +**Docker images** are published to GitHub Container Registry: +```bash +# Latest stable (from main branch) +docker pull ghcr.io/wikid82/caddyproxymanagerplus:latest + +# Development (from development branch) +docker pull ghcr.io/wikid82/caddyproxymanagerplus:development + +# Specific version +docker pull ghcr.io/wikid82/caddyproxymanagerplus:v1.0.0 ``` ### Tooling diff --git a/backend/internal/api/routes/routes.go b/backend/internal/api/routes/routes.go index cb231fff..795d5fb4 100644 --- a/backend/internal/api/routes/routes.go +++ b/backend/internal/api/routes/routes.go @@ -12,8 +12,8 @@ import ( // Register wires up API routes and performs automatic migrations. func Register(router *gin.Engine, db *gorm.DB) error { - if err := db.AutoMigrate(&models.ProxyHost{}); err != nil { - return fmt.Errorf("auto migrate proxy host: %w", err) + if err := db.AutoMigrate(&models.ProxyHost{}, &models.CaddyConfig{}); err != nil { + return fmt.Errorf("auto migrate: %w", err) } router.GET("/api/v1/health", handlers.HealthHandler) diff --git a/backend/internal/caddy/client.go b/backend/internal/caddy/client.go new file mode 100644 index 00000000..c6408116 --- /dev/null +++ b/backend/internal/caddy/client.go @@ -0,0 +1,101 @@ +package caddy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" +) + +// Client wraps the Caddy admin API. +type Client struct { + baseURL string + httpClient *http.Client +} + +// NewClient creates a Caddy API client. 
+func NewClient(adminAPIURL string) *Client { + return &Client{ + baseURL: adminAPIURL, + httpClient: &http.Client{ + Timeout: 30 * time.Second, + }, + } +} + +// Load atomically replaces Caddy's entire configuration. +// This is the primary method for applying configuration changes. +func (c *Client) Load(ctx context.Context, config *Config) error { + body, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("marshal config: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.baseURL+"/load", bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("caddy returned status %d: %s", resp.StatusCode, string(bodyBytes)) + } + + return nil +} + +// GetConfig retrieves the current running configuration from Caddy. +func (c *Client) GetConfig(ctx context.Context) (*Config, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("caddy returned status %d: %s", resp.StatusCode, string(bodyBytes)) + } + + var config Config + if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { + return nil, fmt.Errorf("decode response: %w", err) + } + + return &config, nil +} + +// Ping checks if Caddy admin API is reachable. 
+func (c *Client) Ping(ctx context.Context) error { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("caddy unreachable: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("caddy returned status %d", resp.StatusCode) + } + + return nil +} diff --git a/backend/internal/caddy/client_test.go b/backend/internal/caddy/client_test.go new file mode 100644 index 00000000..bcc8e0fb --- /dev/null +++ b/backend/internal/caddy/client_test.go @@ -0,0 +1,94 @@ +package caddy + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models" +) + +func TestClient_Load_Success(t *testing.T) { + // Mock Caddy admin API + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "/load", r.URL.Path) + require.Equal(t, http.MethodPost, r.Method) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + client := NewClient(server.URL) + config, _ := GenerateConfig([]models.ProxyHost{ + { + UUID: "test", + Domain: "test.com", + TargetHost: "app", + TargetPort: 8080, + }, + }) + + err := client.Load(context.Background(), config) + require.NoError(t, err) +} + +func TestClient_Load_Failure(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "invalid config"}`)) + })) + defer server.Close() + + client := NewClient(server.URL) + config := &Config{} + + err := client.Load(context.Background(), config) + require.Error(t, err) + require.Contains(t, err.Error(), "400") +} + +func TestClient_GetConfig_Success(t *testing.T) { + testConfig := 
&Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "test": {Listen: []string{":80"}}, + }, + }, + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "/config/", r.URL.Path) + require.Equal(t, http.MethodGet, r.Method) + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(testConfig) + })) + defer server.Close() + + client := NewClient(server.URL) + config, err := client.GetConfig(context.Background()) + require.NoError(t, err) + require.NotNil(t, config) + require.NotNil(t, config.Apps.HTTP) +} + +func TestClient_Ping_Success(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + client := NewClient(server.URL) + err := client.Ping(context.Background()) + require.NoError(t, err) +} + +func TestClient_Ping_Unreachable(t *testing.T) { + client := NewClient("http://localhost:9999") + err := client.Ping(context.Background()) + require.Error(t, err) +} diff --git a/backend/internal/caddy/config.go b/backend/internal/caddy/config.go new file mode 100644 index 00000000..a10f57b6 --- /dev/null +++ b/backend/internal/caddy/config.go @@ -0,0 +1,62 @@ +package caddy + +import ( + "fmt" + + "github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models" +) + +// GenerateConfig creates a Caddy JSON configuration from proxy hosts. +// This is the core transformation layer from our database model to Caddy config. 
+func GenerateConfig(hosts []models.ProxyHost) (*Config, error) { + if len(hosts) == 0 { + return &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{}, + }, + }, + }, nil + } + + routes := make([]*Route, 0, len(hosts)) + + for _, host := range hosts { + if host.Domain == "" { + return nil, fmt.Errorf("proxy host %s has empty domain", host.UUID) + } + + dial := fmt.Sprintf("%s:%d", host.TargetHost, host.TargetPort) + + route := &Route{ + Match: []Match{ + {Host: []string{host.Domain}}, + }, + Handle: []Handler{ + ReverseProxyHandler(dial, host.EnableWS), + }, + Terminal: true, + } + + routes = append(routes, route) + } + + config := &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "cpm_server": { + Listen: []string{":80", ":443"}, + Routes: routes, + AutoHTTPS: &AutoHTTPSConfig{ + // Enable automatic HTTPS by default + Disable: false, + }, + }, + }, + }, + }, + } + + return config, nil +} diff --git a/backend/internal/caddy/config_test.go b/backend/internal/caddy/config_test.go new file mode 100644 index 00000000..6d524728 --- /dev/null +++ b/backend/internal/caddy/config_test.go @@ -0,0 +1,110 @@ +package caddy + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models" +) + +func TestGenerateConfig_Empty(t *testing.T) { + config, err := GenerateConfig([]models.ProxyHost{}) + require.NoError(t, err) + require.NotNil(t, config) + require.NotNil(t, config.Apps.HTTP) + require.Empty(t, config.Apps.HTTP.Servers) +} + +func TestGenerateConfig_SingleHost(t *testing.T) { + hosts := []models.ProxyHost{ + { + UUID: "test-uuid", + Name: "Media", + Domain: "media.example.com", + TargetScheme: "http", + TargetHost: "media", + TargetPort: 32400, + EnableTLS: true, + EnableWS: false, + }, + } + + config, err := GenerateConfig(hosts) + require.NoError(t, err) + require.NotNil(t, config) + require.NotNil(t, config.Apps.HTTP) + require.Len(t, 
config.Apps.HTTP.Servers, 1) + + server := config.Apps.HTTP.Servers["cpm_server"] + require.NotNil(t, server) + require.Contains(t, server.Listen, ":80") + require.Contains(t, server.Listen, ":443") + require.Len(t, server.Routes, 1) + + route := server.Routes[0] + require.Len(t, route.Match, 1) + require.Equal(t, []string{"media.example.com"}, route.Match[0].Host) + require.Len(t, route.Handle, 1) + require.True(t, route.Terminal) + + handler := route.Handle[0] + require.Equal(t, "reverse_proxy", handler["handler"]) +} + +func TestGenerateConfig_MultipleHosts(t *testing.T) { + hosts := []models.ProxyHost{ + { + UUID: "uuid-1", + Domain: "site1.example.com", + TargetHost: "app1", + TargetPort: 8080, + }, + { + UUID: "uuid-2", + Domain: "site2.example.com", + TargetHost: "app2", + TargetPort: 8081, + }, + } + + config, err := GenerateConfig(hosts) + require.NoError(t, err) + require.Len(t, config.Apps.HTTP.Servers["cpm_server"].Routes, 2) +} + +func TestGenerateConfig_WebSocketEnabled(t *testing.T) { + hosts := []models.ProxyHost{ + { + UUID: "uuid-ws", + Domain: "ws.example.com", + TargetHost: "wsapp", + TargetPort: 3000, + EnableWS: true, + }, + } + + config, err := GenerateConfig(hosts) + require.NoError(t, err) + + route := config.Apps.HTTP.Servers["cpm_server"].Routes[0] + handler := route.Handle[0] + + // Check WebSocket headers are present + require.NotNil(t, handler["headers"]) +} + +func TestGenerateConfig_EmptyDomain(t *testing.T) { + hosts := []models.ProxyHost{ + { + UUID: "bad-uuid", + Domain: "", + TargetHost: "app", + TargetPort: 8080, + }, + } + + _, err := GenerateConfig(hosts) + require.Error(t, err) + require.Contains(t, err.Error(), "empty domain") +} diff --git a/backend/internal/caddy/manager.go b/backend/internal/caddy/manager.go new file mode 100644 index 00000000..cb41bfd3 --- /dev/null +++ b/backend/internal/caddy/manager.go @@ -0,0 +1,199 @@ +package caddy + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "os" + 
"path/filepath" + "sort" + "time" + + "gorm.io/gorm" + + "github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models" +) + +// Manager orchestrates Caddy configuration lifecycle: generate, validate, apply, rollback. +type Manager struct { + client *Client + db *gorm.DB + configDir string +} + +// NewManager creates a configuration manager. +func NewManager(client *Client, db *gorm.DB, configDir string) *Manager { + return &Manager{ + client: client, + db: db, + configDir: configDir, + } +} + +// ApplyConfig generates configuration from database, validates it, applies to Caddy with rollback on failure. +func (m *Manager) ApplyConfig(ctx context.Context) error { + // Fetch all proxy hosts from database + var hosts []models.ProxyHost + if err := m.db.Find(&hosts).Error; err != nil { + return fmt.Errorf("fetch proxy hosts: %w", err) + } + + // Generate Caddy config + config, err := GenerateConfig(hosts) + if err != nil { + return fmt.Errorf("generate config: %w", err) + } + + // Validate before applying + if err := Validate(config); err != nil { + return fmt.Errorf("validation failed: %w", err) + } + + // Save snapshot for rollback + if _, err := m.saveSnapshot(config); err != nil { + return fmt.Errorf("save snapshot: %w", err) + } + + // Calculate config hash for audit trail + configJSON, _ := json.Marshal(config) + configHash := fmt.Sprintf("%x", sha256.Sum256(configJSON)) + + // Apply to Caddy + if err := m.client.Load(ctx, config); err != nil { + // Rollback on failure + if rollbackErr := m.rollback(ctx); rollbackErr != nil { + return fmt.Errorf("apply failed: %w, rollback also failed: %v", err, rollbackErr) + } + + // Record failed attempt + m.recordConfigChange(configHash, false, err.Error()) + return fmt.Errorf("apply failed (rolled back): %w", err) + } + + // Record successful application + m.recordConfigChange(configHash, true, "") + + // Cleanup old snapshots (keep last 10) + if err := m.rotateSnapshots(10); err != nil { + // Non-fatal - log but don't 
fail
+		fmt.Printf("warning: snapshot rotation failed: %v\n", err)
+	}
+
+	return nil
+}
+
+// saveSnapshot stores the config to disk with a nanosecond timestamp so two
+// applies in quick succession can never overwrite each other's snapshot.
+func (m *Manager) saveSnapshot(config *Config) (string, error) {
+	timestamp := time.Now().UnixNano()
+	filename := fmt.Sprintf("config-%d.json", timestamp)
+	path := filepath.Join(m.configDir, filename)
+
+	configJSON, err := json.MarshalIndent(config, "", "  ")
+	if err != nil {
+		return "", fmt.Errorf("marshal config: %w", err)
+	}
+
+	if err := os.WriteFile(path, configJSON, 0644); err != nil {
+		return "", fmt.Errorf("write snapshot: %w", err)
+	}
+
+	return path, nil
+}
+
+// rollback restores the last known-good configuration from disk.
+// NOTE: ApplyConfig saves a snapshot of the NEW config *before* calling Load,
+// so after a failed Load the newest snapshot on disk is the bad config; we
+// must skip it and re-apply the second-newest (previous good) snapshot.
+func (m *Manager) rollback(ctx context.Context) error {
+	snapshots, err := m.listSnapshots()
+	if err != nil || len(snapshots) < 2 {
+		return fmt.Errorf("no previous snapshot available for rollback")
+	}
+
+	// Second-newest snapshot is the last config that was applied successfully.
+	lastGoodSnapshot := snapshots[len(snapshots)-2]
+	configJSON, err := os.ReadFile(lastGoodSnapshot)
+	if err != nil {
+		return fmt.Errorf("read snapshot: %w", err)
+	}
+
+	var config Config
+	if err := json.Unmarshal(configJSON, &config); err != nil {
+		return fmt.Errorf("unmarshal snapshot: %w", err)
+	}
+
+	// Apply the known-good snapshot back to Caddy.
+	if err := m.client.Load(ctx, &config); err != nil {
+		return fmt.Errorf("load snapshot: %w", err)
+	}
+
+	return nil
+}
+
+// listSnapshots returns all snapshot file paths sorted by modification time.
+func (m *Manager) listSnapshots() ([]string, error) { + entries, err := os.ReadDir(m.configDir) + if err != nil { + return nil, fmt.Errorf("read config dir: %w", err) + } + + var snapshots []string + for _, entry := range entries { + if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" { + continue + } + snapshots = append(snapshots, filepath.Join(m.configDir, entry.Name())) + } + + // Sort by modification time + sort.Slice(snapshots, func(i, j int) bool { + infoI, _ := os.Stat(snapshots[i]) + infoJ, _ := os.Stat(snapshots[j]) + return infoI.ModTime().Before(infoJ.ModTime()) + }) + + return snapshots, nil +} + +// rotateSnapshots keeps only the N most recent snapshots. +func (m *Manager) rotateSnapshots(keep int) error { + snapshots, err := m.listSnapshots() + if err != nil { + return err + } + + if len(snapshots) <= keep { + return nil + } + + // Delete oldest snapshots + toDelete := snapshots[:len(snapshots)-keep] + for _, path := range toDelete { + if err := os.Remove(path); err != nil { + return fmt.Errorf("delete snapshot %s: %w", path, err) + } + } + + return nil +} + +// recordConfigChange stores an audit record in the database. +func (m *Manager) recordConfigChange(configHash string, success bool, errorMsg string) { + record := models.CaddyConfig{ + ConfigHash: configHash, + AppliedAt: time.Now(), + Success: success, + ErrorMsg: errorMsg, + } + + // Best effort - don't fail if audit logging fails + m.db.Create(&record) +} + +// Ping checks if Caddy is reachable. +func (m *Manager) Ping(ctx context.Context) error { + return m.client.Ping(ctx) +} + +// GetCurrentConfig retrieves the running config from Caddy. 
+func (m *Manager) GetCurrentConfig(ctx context.Context) (*Config, error) { + return m.client.GetConfig(ctx) +} diff --git a/backend/internal/caddy/types.go b/backend/internal/caddy/types.go new file mode 100644 index 00000000..55d7394c --- /dev/null +++ b/backend/internal/caddy/types.go @@ -0,0 +1,95 @@ +package caddy + +// Config represents Caddy's top-level JSON configuration structure. +// Reference: https://caddyserver.com/docs/json/ +type Config struct { + Apps Apps `json:"apps"` +} + +// Apps contains all Caddy app modules. +type Apps struct { + HTTP *HTTPApp `json:"http,omitempty"` + TLS *TLSApp `json:"tls,omitempty"` +} + +// HTTPApp configures the HTTP app. +type HTTPApp struct { + Servers map[string]*Server `json:"servers"` +} + +// Server represents an HTTP server instance. +type Server struct { + Listen []string `json:"listen"` + Routes []*Route `json:"routes"` + AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"` + Logs *ServerLogs `json:"logs,omitempty"` +} + +// AutoHTTPSConfig controls automatic HTTPS behavior. +type AutoHTTPSConfig struct { + Disable bool `json:"disable,omitempty"` + DisableRedir bool `json:"disable_redirects,omitempty"` + Skip []string `json:"skip,omitempty"` +} + +// ServerLogs configures access logging. +type ServerLogs struct { + DefaultLoggerName string `json:"default_logger_name,omitempty"` +} + +// Route represents an HTTP route (matcher + handlers). +type Route struct { + Match []Match `json:"match,omitempty"` + Handle []Handler `json:"handle"` + Terminal bool `json:"terminal,omitempty"` +} + +// Match represents a request matcher. +type Match struct { + Host []string `json:"host,omitempty"` + Path []string `json:"path,omitempty"` +} + +// Handler is the interface for all handler types. +// Actual types will implement handler-specific fields. +type Handler map[string]interface{} + +// ReverseProxyHandler creates a reverse_proxy handler. 
+func ReverseProxyHandler(dial string, enableWS bool) Handler { + h := Handler{ + "handler": "reverse_proxy", + "upstreams": []map[string]interface{}{ + {"dial": dial}, + }, + } + + if enableWS { + // Enable WebSocket support by preserving upgrade headers + h["headers"] = map[string]interface{}{ + "request": map[string]interface{}{ + "set": map[string][]string{ + "Upgrade": {"{http.request.header.Upgrade}"}, + "Connection": {"{http.request.header.Connection}"}, + }, + }, + } + } + + return h +} + +// TLSApp configures the TLS app for certificate management. +type TLSApp struct { + Automation *AutomationConfig `json:"automation,omitempty"` +} + +// AutomationConfig controls certificate automation. +type AutomationConfig struct { + Policies []*AutomationPolicy `json:"policies,omitempty"` +} + +// AutomationPolicy defines certificate management for specific domains. +type AutomationPolicy struct { + Subjects []string `json:"subjects,omitempty"` + IssuersRaw []interface{} `json:"issuers,omitempty"` +} diff --git a/backend/internal/caddy/validator.go b/backend/internal/caddy/validator.go new file mode 100644 index 00000000..c160afbf --- /dev/null +++ b/backend/internal/caddy/validator.go @@ -0,0 +1,146 @@ +package caddy + +import ( + "encoding/json" + "fmt" + "net" + "strconv" + "strings" +) + +// Validate performs pre-flight validation on a Caddy config before applying it. 
+func Validate(cfg *Config) error { + if cfg == nil { + return fmt.Errorf("config cannot be nil") + } + + if cfg.Apps.HTTP == nil { + return nil // Empty config is valid + } + + // Track seen hosts to detect duplicates + seenHosts := make(map[string]bool) + + for serverName, server := range cfg.Apps.HTTP.Servers { + if len(server.Listen) == 0 { + return fmt.Errorf("server %s has no listen addresses", serverName) + } + + // Validate listen addresses + for _, addr := range server.Listen { + if err := validateListenAddr(addr); err != nil { + return fmt.Errorf("invalid listen address %s in server %s: %w", addr, serverName, err) + } + } + + // Validate routes + for i, route := range server.Routes { + if err := validateRoute(route, seenHosts); err != nil { + return fmt.Errorf("invalid route %d in server %s: %w", i, serverName, err) + } + } + } + + // Validate JSON marshalling works + if _, err := json.Marshal(cfg); err != nil { + return fmt.Errorf("config cannot be marshalled to JSON: %w", err) + } + + return nil +} + +func validateListenAddr(addr string) error { + // Strip network type prefix if present (tcp/, udp/) + if idx := strings.Index(addr, "/"); idx != -1 { + addr = addr[idx+1:] + } + + // Parse host:port + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return fmt.Errorf("invalid address format: %w", err) + } + + // Validate port + port, err := strconv.Atoi(portStr) + if err != nil { + return fmt.Errorf("invalid port: %w", err) + } + if port < 1 || port > 65535 { + return fmt.Errorf("port %d out of range (1-65535)", port) + } + + // Validate host (allow empty for wildcard binding) + if host != "" && net.ParseIP(host) == nil { + return fmt.Errorf("invalid IP address: %s", host) + } + + return nil +} + +func validateRoute(route *Route, seenHosts map[string]bool) error { + if len(route.Handle) == 0 { + return fmt.Errorf("route has no handlers") + } + + // Check for duplicate host matchers + for _, match := range route.Match { + for _, host := 
range match.Host { + if seenHosts[host] { + return fmt.Errorf("duplicate host matcher: %s", host) + } + seenHosts[host] = true + } + } + + // Validate handlers + for i, handler := range route.Handle { + if err := validateHandler(handler); err != nil { + return fmt.Errorf("invalid handler %d: %w", i, err) + } + } + + return nil +} + +func validateHandler(handler Handler) error { + handlerType, ok := handler["handler"].(string) + if !ok { + return fmt.Errorf("handler missing 'handler' field") + } + + switch handlerType { + case "reverse_proxy": + return validateReverseProxy(handler) + case "file_server", "static_response": + return nil // Accept other common handlers + default: + // Unknown handlers are allowed (Caddy is extensible) + return nil + } +} + +func validateReverseProxy(handler Handler) error { + upstreams, ok := handler["upstreams"].([]map[string]interface{}) + if !ok { + return fmt.Errorf("reverse_proxy missing upstreams") + } + + if len(upstreams) == 0 { + return fmt.Errorf("reverse_proxy has no upstreams") + } + + for i, upstream := range upstreams { + dial, ok := upstream["dial"].(string) + if !ok || dial == "" { + return fmt.Errorf("upstream %d missing dial address", i) + } + + // Validate dial address format (host:port) + if _, _, err := net.SplitHostPort(dial); err != nil { + return fmt.Errorf("upstream %d has invalid dial address %s: %w", i, dial, err) + } + } + + return nil +} diff --git a/backend/internal/caddy/validator_test.go b/backend/internal/caddy/validator_test.go new file mode 100644 index 00000000..fa28a354 --- /dev/null +++ b/backend/internal/caddy/validator_test.go @@ -0,0 +1,124 @@ +package caddy + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models" +) + +func TestValidate_EmptyConfig(t *testing.T) { + config := &Config{} + err := Validate(config) + require.NoError(t, err) +} + +func TestValidate_ValidConfig(t *testing.T) { + hosts := 
[]models.ProxyHost{ + { + UUID: "test", + Domain: "test.example.com", + TargetHost: "app", + TargetPort: 8080, + }, + } + + config, _ := GenerateConfig(hosts) // generation error deliberately ignored; Validate is the unit under test + err := Validate(config) + require.NoError(t, err) +} + +func TestValidate_DuplicateHosts(t *testing.T) { + config := &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "srv": { + Listen: []string{":80"}, + Routes: []*Route{ + { + Match: []Match{{Host: []string{"test.com"}}}, + Handle: []Handler{ + ReverseProxyHandler("app:8080", false), + }, + }, + { + Match: []Match{{Host: []string{"test.com"}}}, // same host as the route above — must trigger the duplicate check + Handle: []Handler{ + ReverseProxyHandler("app2:8080", false), + }, + }, + }, + }, + }, + }, + }, + } + + err := Validate(config) + require.Error(t, err) + require.Contains(t, err.Error(), "duplicate host") +} + +func TestValidate_NoListenAddresses(t *testing.T) { + config := &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "srv": { + Listen: []string{}, // empty listen list must be rejected + Routes: []*Route{}, + }, + }, + }, + }, + } + + err := Validate(config) + require.Error(t, err) + require.Contains(t, err.Error(), "no listen addresses") +} + +func TestValidate_InvalidPort(t *testing.T) { + config := &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "srv": { + Listen: []string{":99999"}, // 99999 exceeds the 16-bit port range + Routes: []*Route{}, + }, + }, + }, + }, + } + + err := Validate(config) + require.Error(t, err) + require.Contains(t, err.Error(), "out of range") +} + +func TestValidate_NoHandlers(t *testing.T) { + config := &Config{ + Apps: Apps{ + HTTP: &HTTPApp{ + Servers: map[string]*Server{ + "srv": { + Listen: []string{":80"}, + Routes: []*Route{ + { + Match: []Match{{Host: []string{"test.com"}}}, + Handle: []Handler{}, // route with zero handlers must be rejected + }, + }, + }, + }, + }, + }, + } + + err := Validate(config) + require.Error(t, err) + require.Contains(t, err.Error(), "no handlers") +} diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 47ffe194..01557409 100644 --- 
a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -8,25 +8,33 @@ import ( // Config captures runtime configuration sourced from environment variables. type Config struct { - Environment string - HTTPPort string - DatabasePath string - FrontendDir string + Environment string + HTTPPort string + DatabasePath string + FrontendDir string + CaddyAdminAPI string + CaddyConfigDir string } // Load reads env vars and falls back to defaults so the server can boot with zero configuration. func Load() (Config, error) { cfg := Config{ - Environment: getEnv("CPM_ENV", "development"), - HTTPPort: getEnv("CPM_HTTP_PORT", "8080"), - DatabasePath: getEnv("CPM_DB_PATH", filepath.Join("data", "cpm.db")), - FrontendDir: getEnv("CPM_FRONTEND_DIR", filepath.Clean(filepath.Join("..", "frontend", "dist"))), + Environment: getEnv("CPM_ENV", "development"), + HTTPPort: getEnv("CPM_HTTP_PORT", "8080"), + DatabasePath: getEnv("CPM_DB_PATH", filepath.Join("data", "cpm.db")), + FrontendDir: getEnv("CPM_FRONTEND_DIR", filepath.Clean(filepath.Join("..", "frontend", "dist"))), + CaddyAdminAPI: getEnv("CPM_CADDY_ADMIN_API", "http://localhost:2019"), + CaddyConfigDir: getEnv("CPM_CADDY_CONFIG_DIR", filepath.Join("data", "caddy")), + } if err := os.MkdirAll(filepath.Dir(cfg.DatabasePath), 0o755); err != nil { return Config{}, fmt.Errorf("ensure data directory: %w", err) } + if err := os.MkdirAll(cfg.CaddyConfigDir, 0o755); err != nil { + return Config{}, fmt.Errorf("ensure caddy config directory: %w", err) + } + return cfg, nil } diff --git a/backend/internal/models/caddy_config.go b/backend/internal/models/caddy_config.go new file mode 100644 index 00000000..4b4ea08e --- /dev/null +++ b/backend/internal/models/caddy_config.go @@ -0,0 +1,14 @@ +package models + +import ( + "time" +) + +// CaddyConfig stores an audit trail of Caddy configuration changes. 
+type CaddyConfig struct { + ID uint `json:"id" gorm:"primaryKey"` + ConfigHash string `json:"config_hash" gorm:"index"` // hash of the applied config; gorm index enables lookup by hash (hash algorithm not shown here — see producer) + AppliedAt time.Time `json:"applied_at"` // when this config was pushed to Caddy + Success bool `json:"success"` // whether the apply succeeded + ErrorMsg string `json:"error_msg"` // populated only on failure, per the Success flag + +} diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 00000000..94235a8e --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,30 @@ +version: '3.9' + +# Development override - use with: docker-compose -f docker-compose.yml -f docker-compose.dev.yml up + +services: + caddy: + # Development: expose admin API externally for debugging + ports: + - "80:80" + - "443:443" + - "443:443/udp" + - "2019:2019" # Caddy admin API (dev only) + command: caddy run --config /dev/null --adapter json # NOTE(review): an empty file fed to the json adapter may fail to load — confirm Caddy accepts /dev/null, otherwise drop --config to start with a blank config + + app: + build: + context: . + dockerfile: Dockerfile + target: backend-builder # Stop at builder stage for faster rebuilds + environment: + - CPM_ENV=development + - CPM_HTTP_PORT=8080 + - CPM_DB_PATH=/app/data/cpm.db + - CPM_FRONTEND_DIR=/app/frontend/dist + - CPM_CADDY_ADMIN_API=http://caddy:2019 + - CPM_CADDY_CONFIG_DIR=/app/data/caddy + volumes: + - ./backend:/app/backend:ro # Mount source for live reload (if using air) + - app_data:/app/data + command: /app/backend/api # Run the built binary diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..909538e9 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,58 @@ +version: '3.9' + +services: + caddy: + image: caddy:2.8-alpine + container_name: cpm_caddy + restart: unless-stopped + ports: + - "80:80" + - "443:443" + - "443:443/udp" # HTTP/3 + volumes: + - caddy_data:/data + - caddy_config:/config + networks: + - cpm_network + # Caddy admin API exposed on default port 2019 (internal only) + command: caddy run --config /config/caddy.json --adapter json # NOTE(review): /config/caddy.json does not exist on first boot of an empty named volume — confirm startup succeeds (a restart loop under `restart: unless-stopped` is likely otherwise) + + app: + build: + context: .
+ dockerfile: Dockerfile + container_name: cpm_app + restart: unless-stopped + ports: + - "8080:8080" + environment: + - CPM_ENV=production + - CPM_HTTP_PORT=8080 + - CPM_DB_PATH=/app/data/cpm.db + - CPM_FRONTEND_DIR=/app/frontend/dist + - CPM_CADDY_ADMIN_API=http://caddy:2019 + - CPM_CADDY_CONFIG_DIR=/app/data/caddy + volumes: + - app_data:/app/data + networks: + - cpm_network + depends_on: + - caddy # start order only — no health condition, so the app must tolerate Caddy's admin API not being ready yet + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/api/v1/health"] # NOTE(review): requires wget inside the app image (present in Alpine's BusyBox, absent from scratch/distroless) — confirm the final image stage ships it + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + +volumes: + caddy_data: + driver: local + caddy_config: + driver: local + app_data: + driver: local + +networks: + cpm_network: + driver: bridge