Merge pull request #53 from Wikid82/Caddy-Integration-&-Configuration-Management

feat: Single-container deployment & automated semantic versioning
This commit is contained in:
Jeremy
2025-11-18 13:12:21 -05:00
committed by GitHub
30 changed files with 2093 additions and 52 deletions
+47
View File
@@ -98,3 +98,50 @@ jobs:
- name: Build frontend
working-directory: frontend
run: npm run build
docker-build-test:
name: Docker - Build & Integration Test
runs-on: ubuntu-latest
needs: [backend-test, frontend-build]
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker image
uses: docker/build-push-action@v5
with:
context: .
load: true
tags: caddyproxymanager-plus:test
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Start services with docker-compose
run: |
docker-compose up -d
sleep 10 # Wait for services to be ready
- name: Check app health
run: |
curl --retry 5 --retry-delay 3 --retry-connrefused http://localhost:8080/api/v1/health
- name: Check Caddy admin API
run: |
curl --retry 5 --retry-delay 3 --retry-connrefused http://localhost:2019/config/
- name: Run integration tests
run: |
# Future: run integration tests against running containers
echo "Integration tests placeholder - will be implemented with Issue #4"
- name: Show logs on failure
if: failure()
run: |
docker-compose logs app
docker-compose logs caddy
- name: Cleanup
if: always()
run: docker-compose down -v
+76
View File
@@ -0,0 +1,76 @@
name: Docker Build & Publish

on:
  push:
    branches:
      - main
      - development
    tags:
      - 'v*.*.*'
  pull_request:
    branches:
      - main
      - development
  workflow_call: # Allow this workflow to be called by other workflows

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Login is skipped for pull requests because nothing is pushed for them.
      - name: Log in to Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            # Tag 'latest' for main branch
            type=raw,value=latest,enable={{is_default_branch}}
            # Tag 'development' for development branch
            type=raw,value=development,enable=${{ github.ref == 'refs/heads/development' }}
            # Semver tags for version releases (v1.0.0 -> 1.0.0, 1.0, 1)
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            # SHA for all builds
            type=sha,prefix={{branch}}-

      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            VERSION=${{ steps.meta.outputs.version }}
            BUILD_DATE=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
            VCS_REF=${{ github.sha }}

      - name: Image digest
        run: echo ${{ steps.build-and-push.outputs.digest }}
+52
View File
@@ -0,0 +1,52 @@
name: Release

on:
  push:
    tags:
      - 'v*.*.*'

permissions:
  contents: write
  packages: write

jobs:
  create-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # full history so the changelog can span tags

      - name: Generate changelog
        id: changelog
        run: |
          # Previous tag = second-newest tag by creation date (empty on the
          # very first release). The old `git describe` approach resolved to
          # the *current* tag when only one tag existed, which produced an
          # empty changelog for the first release.
          PREV_TAG=$(git tag --sort=-creatordate | sed -n '2p')
          if [ -z "$PREV_TAG" ]; then
            echo "First release - generating full changelog"
            CHANGELOG=$(git log --pretty=format:"- %s (%h)" --no-merges)
          else
            echo "Generating changelog since $PREV_TAG"
            CHANGELOG=$(git log "$PREV_TAG"..HEAD --pretty=format:"- %s (%h)" --no-merges)
          fi

          # Save to file for GitHub release body
          echo "$CHANGELOG" > CHANGELOG.txt
          echo "Generated changelog with $(echo "$CHANGELOG" | wc -l) commits"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          body_path: CHANGELOG.txt
          generate_release_notes: true
          draft: false
          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') || contains(github.ref_name, 'rc') }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  build-and-publish:
    needs: create-release
    uses: ./.github/workflows/docker-publish.yml
    secrets: inherit
+4
View File
@@ -6,6 +6,7 @@
# Go backend
backend/data/
*.db
backend/coverage*.out
# Node frontend
frontend/node_modules/
@@ -14,3 +15,6 @@ frontend/dist/
# Python scaffolding leftovers
__pycache__/
*.pyc
# Release artifacts
CHANGELOG.txt
+6
View File
@@ -40,6 +40,12 @@ repos:
language: system
types: [go]
- id: go-test-coverage
name: go test (with coverage enforcement)
entry: bash scripts/go-test-coverage.sh
language: system
pass_filenames: false
- id: golangci-lint
name: golangci-lint (project linter)
entry: golangci-lint run
+1
View File
@@ -0,0 +1 @@
0.1.0-alpha
+234
View File
@@ -0,0 +1,234 @@
# Docker Deployment Guide
CaddyProxyManager+ is designed for Docker-first deployment, making it easy for home users to run Caddy without learning Caddyfile syntax.
## Quick Start
```bash
# Clone the repository
git clone https://github.com/Wikid82/CaddyProxyManagerPlus.git
cd CaddyProxyManagerPlus
# Start the stack
docker-compose up -d
# Access the UI
open http://localhost:8080
```
## Architecture
The Docker stack consists of two services:
1. **app** (`caddyproxymanager-plus`): Management interface
- Manages proxy host configuration
- Provides web UI on port 8080
- Communicates with Caddy via admin API
2. **caddy**: Reverse proxy server
- Handles incoming traffic on ports 80/443
- Automatic HTTPS with Let's Encrypt
- Configured dynamically via JSON API
```
┌──────────────┐
│ Internet │
└──────┬───────┘
│ :80, :443
┌──────────────┐ Admin API ┌──────────────┐
│ Caddy │◄───────:2019───────┤ CPM+ App │
│ (Proxy) │ │ (Manager) │
└──────┬───────┘ └──────┬───────┘
│ │
▼ ▼
Your Services :8080 (Web UI)
```
## Environment Variables
Configure CPM+ via environment variables in `docker-compose.yml`:
```yaml
environment:
- CPM_ENV=production # production | development
- CPM_HTTP_PORT=8080 # Management UI port
- CPM_DB_PATH=/app/data/cpm.db # SQLite database location
- CPM_CADDY_ADMIN_API=http://caddy:2019 # Caddy admin endpoint
- CPM_CADDY_CONFIG_DIR=/app/data/caddy # Config snapshots
```
## Volumes
Three persistent volumes store your data:
- **app_data**: CPM+ database, config snapshots, logs
- **caddy_data**: Caddy certificates, ACME account data
- **caddy_config**: Caddy runtime configuration
To backup your configuration:
```bash
# Backup volumes
docker run --rm -v cpm_app_data:/data -v $(pwd):/backup alpine tar czf /backup/cpm-backup.tar.gz /data
# Restore from backup
docker run --rm -v cpm_app_data:/data -v $(pwd):/backup alpine tar xzf /backup/cpm-backup.tar.gz -C /
```
## Ports
Default port mapping:
- **80**: HTTP (Caddy) - redirects to HTTPS
- **443/tcp**: HTTPS (Caddy)
- **443/udp**: HTTP/3 (Caddy)
- **8080**: Management UI (CPM+)
- **2019**: Caddy admin API (kept internal to the Docker network in production; published externally only in development mode)
## Development Mode
Development mode exposes the Caddy admin API externally for debugging:
```bash
docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
```
Access Caddy admin API: `http://localhost:2019/config/`
## Health Checks
CPM+ includes a health check endpoint:
```bash
# Check if app is running
curl http://localhost:8080/api/v1/health
# Check Caddy status
docker-compose exec caddy caddy version
```
## Troubleshooting
### App can't reach Caddy
**Symptom**: "Caddy unreachable" errors in logs
**Solution**: Ensure both containers are on the same network:
```bash
docker-compose ps # Check both services are "Up"
docker-compose logs caddy # Check Caddy logs
```
### Certificates not working
**Symptom**: HTTP works but HTTPS fails
**Check**:
1. Port 80/443 are accessible from the internet
2. DNS points to your server
3. Caddy logs: `docker-compose logs caddy | grep -i acme`
### Config changes not applied
**Symptom**: Changes in UI don't affect routing
**Debug**:
```bash
# View current Caddy config
curl http://localhost:2019/config/ | jq
# Check CPM+ logs
docker-compose logs app
# Manual config reload
curl -X POST http://localhost:8080/api/v1/caddy/reload
```
## Updating
Pull the latest images and restart:
```bash
docker-compose pull
docker-compose up -d
```
For specific versions:
```bash
# Edit docker-compose.yml to pin version
image: ghcr.io/wikid82/caddyproxymanagerplus:v1.0.0
docker-compose up -d
```
## Building from Source
```bash
# Build multi-arch images
docker buildx build --platform linux/amd64,linux/arm64 -t caddyproxymanager-plus:local .
# Or use Make
make docker-build
```
## Security Considerations
1. **Caddy admin API**: Keep port 2019 internal (not exposed in production compose)
2. **Management UI**: Add authentication (Issue #7) before exposing to internet
3. **Certificates**: Caddy stores private keys in `caddy_data` - protect this volume
4. **Database**: SQLite file contains all config - backup regularly
## Integration with Existing Caddy
If you already have Caddy running, you can point CPM+ to it:
```yaml
environment:
- CPM_CADDY_ADMIN_API=http://your-caddy-host:2019
```
**Warning**: CPM+ will replace Caddy's entire configuration. Backup first!
## Platform-Specific Notes
### Synology NAS
Use Container Manager (Docker GUI):
1. Import `docker-compose.yml`
2. Map port 80/443 to your NAS IP
3. Enable auto-restart
### Unraid
1. Use Docker Compose Manager plugin
2. Add compose file to `/boot/config/plugins/compose.manager/projects/cpm/`
3. Start via web UI
### Home Assistant Add-on
Coming soon in Beta release.
## Performance Tuning
For high-traffic deployments:
```yaml
# docker-compose.yml
services:
caddy:
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
```
## Next Steps
- Configure your first proxy host via UI
- Verify automatic HTTPS (Caddy provisions certificates on its own once DNS resolves to your server)
- Add authentication (Issue #7)
- Integrate CrowdSec (Issue #15)
+56 -18
View File
@@ -1,4 +1,10 @@
# Multi-stage Dockerfile for CaddyProxyManager+ (Go backend + React frontend)
# Multi-stage Dockerfile for CaddyProxyManager+ with integrated Caddy
# Single container deployment for simplified home user setup
# Build arguments for versioning
ARG VERSION=dev
ARG BUILD_DATE
ARG VCS_REF
# ---- Frontend Builder ----
FROM node:20-alpine AS frontend-builder
@@ -13,7 +19,7 @@ COPY frontend/ ./
RUN npm run build
# ---- Backend Builder ----
FROM golang:1.22-alpine AS backend-builder
# Pin the Go toolchain and use the musl-based Alpine variant: the final stage
# is Alpine-based (caddy image), so a CGO-enabled binary built on a glibc
# (Debian) image such as golang:latest would fail to run in the runtime image.
FROM golang:1.22-alpine AS backend-builder
WORKDIR /app/backend
# Install build dependencies
@@ -26,15 +32,25 @@ RUN go mod download
# Copy backend source
COPY backend/ ./
# Build the Go binary
RUN CGO_ENABLED=1 GOOS=linux go build -a -installsuffix cgo -o api ./cmd/api
# Build arguments passed from main build context
ARG VERSION=dev
ARG VCS_REF=unknown
ARG BUILD_DATE=unknown
# ---- Final Runtime ----
FROM alpine:latest
# Build the Go binary with version information injected via ldflags
RUN CGO_ENABLED=1 GOOS=linux go build \
-a -installsuffix cgo \
-ldflags "-X github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/version.SemVer=${VERSION} \
-X github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/version.GitCommit=${VCS_REF} \
-X github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/version.BuildDate=${BUILD_DATE}" \
-o api ./cmd/api
# ---- Final Runtime with Caddy ----
FROM caddy:latest
WORKDIR /app
# Install runtime dependencies
RUN apk --no-cache add ca-certificates sqlite-libs
# Install runtime dependencies for CPM+
RUN apk --no-cache add ca-certificates sqlite-libs bash
# Copy Go binary from backend builder
COPY --from=backend-builder /app/backend/api /app/api
@@ -42,17 +58,39 @@ COPY --from=backend-builder /app/backend/api /app/api
# Copy frontend build from frontend builder
COPY --from=frontend-builder /app/frontend/dist /app/frontend/dist
# Copy startup script
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh
# Set default environment variables
ENV CPM_ENV=production
ENV CPM_HTTP_PORT=8080
ENV CPM_DB_PATH=/app/data/cpm.db
ENV CPM_FRONTEND_DIR=/app/frontend/dist
ENV CPM_ENV=production \
CPM_HTTP_PORT=8080 \
CPM_DB_PATH=/app/data/cpm.db \
CPM_FRONTEND_DIR=/app/frontend/dist \
CPM_CADDY_ADMIN_API=http://localhost:2019 \
CPM_CADDY_CONFIG_DIR=/app/data/caddy
# Create data directory
RUN mkdir -p /app/data
# Create necessary directories
RUN mkdir -p /app/data /app/data/caddy /config
# Expose HTTP port
EXPOSE 8080
# Re-declare build args for LABEL usage
ARG VERSION=dev
ARG BUILD_DATE
ARG VCS_REF
# Run the application
CMD ["/app/api"]
# OCI image labels for version metadata
LABEL org.opencontainers.image.title="CaddyProxyManager+" \
org.opencontainers.image.description="Web UI for managing Caddy reverse proxy configurations" \
org.opencontainers.image.version="${VERSION}" \
org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.revision="${VCS_REF}" \
org.opencontainers.image.source="https://github.com/Wikid82/CaddyProxyManagerPlus" \
org.opencontainers.image.url="https://github.com/Wikid82/CaddyProxyManagerPlus" \
org.opencontainers.image.vendor="CaddyProxyManagerPlus" \
org.opencontainers.image.licenses="MIT"
# Expose ports
EXPOSE 80 443 443/udp 8080 2019
# Use custom entrypoint to start both Caddy and CPM+
ENTRYPOINT ["/docker-entrypoint.sh"]
+44 -12
View File
@@ -1,18 +1,21 @@
.PHONY: help install test build run clean docker-build docker-run
.PHONY: help install test build run clean docker-build docker-run release
# Default target
help:
@echo "CaddyProxyManager+ Build System"
@echo ""
@echo "Available targets:"
@echo " install - Install all dependencies (backend + frontend)"
@echo " test - Run all tests (backend + frontend)"
@echo " build - Build backend and frontend"
@echo " run - Run backend in development mode"
@echo " clean - Clean build artifacts"
@echo " docker-build - Build Docker image"
@echo " docker-run - Run Docker container"
@echo " dev - Run both backend and frontend in dev mode (requires tmux)"
@echo " install - Install all dependencies (backend + frontend)"
@echo " test - Run all tests (backend + frontend)"
@echo " build - Build backend and frontend"
@echo " run - Run backend in development mode"
@echo " clean - Clean build artifacts"
@echo " docker-build - Build Docker image"
@echo " docker-build-versioned - Build Docker image with version from .version file"
@echo " docker-run - Run Docker container"
@echo " docker-dev - Run Docker in development mode"
@echo " release - Create a new semantic version release (interactive)"
@echo " dev - Run both backend and frontend in dev mode (requires tmux)"
# Install all dependencies
install:
@@ -52,11 +55,36 @@ clean:
# Build Docker image
docker-build:
docker build -t caddyproxymanager-plus:latest .
docker-compose build
# Run Docker container
# Build Docker image with version
docker-build-versioned:
@VERSION=$$(cat .version 2>/dev/null || echo "dev"); \
BUILD_DATE=$$(date -u +'%Y-%m-%dT%H:%M:%SZ'); \
VCS_REF=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
docker build \
--build-arg VERSION=$$VERSION \
--build-arg BUILD_DATE=$$BUILD_DATE \
--build-arg VCS_REF=$$VCS_REF \
-t caddyproxymanagerplus:$$VERSION \
-t caddyproxymanagerplus:latest \
.
# Run Docker containers (production)
docker-run:
docker run -p 8080:8080 -v cpm-data:/app/data caddyproxymanager-plus:latest
docker-compose up -d
# Run Docker containers (development)
docker-dev:
docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
# Stop Docker containers
docker-stop:
docker-compose down
# View Docker logs
docker-logs:
docker-compose logs -f
# Development mode (requires tmux)
dev:
@@ -64,3 +92,7 @@ dev:
tmux new-session -d -s cpm 'cd backend && go run ./cmd/api'
tmux split-window -h -t cpm 'cd frontend && npm run dev'
tmux attach -t cpm
# Create a new release (interactive script)
release:
@./scripts/release.sh
+35 -9
View File
@@ -70,24 +70,50 @@ cd frontend
npm run build
```
### Docker Deployment
### Docker Deployment (Recommended)
CaddyProxyManager+ is designed to run in Docker with Caddy as a sidecar container.
```bash
# Build the image
make docker-build
# Production deployment
docker-compose up -d
# Run the container
make docker-run
# Or manually:
docker build -t caddyproxymanager-plus .
docker run -p 8080:8080 -v cpm-data:/app/data caddyproxymanager-plus
# Development mode (exposes Caddy admin API on :2019)
docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
```
The docker-compose stack includes:
- **app**: CaddyProxyManager+ management interface (`:8080`)
- **caddy**: Caddy reverse proxy (`:80`, `:443`, `:443/udp` for HTTP/3)
Data is persisted in Docker volumes:
- `app_data`: CPM+ database and config snapshots
- `caddy_data`: Caddy certificates and data
- `caddy_config`: Caddy configuration
**Docker images** are published to GitHub Container Registry with automatic semantic versioning:
```bash
# Latest stable (from main branch)
docker pull ghcr.io/wikid82/caddyproxymanagerplus:latest
# Development (from development branch)
docker pull ghcr.io/wikid82/caddyproxymanagerplus:development
# Specific version (recommended for production)
docker pull ghcr.io/wikid82/caddyproxymanagerplus:v1.0.0
# Major/minor version (auto-updates to latest patch)
docker pull ghcr.io/wikid82/caddyproxymanagerplus:1.0
```
See `VERSION.md` for complete versioning documentation.
### Tooling
- **Build system**: `Makefile` provides common development tasks (`make help` for all commands)
- **Branching model**: `development` is the integration branch; open PRs from `feature/**`
- **CI**: `.github/workflows/ci.yml` runs Go tests, ESLint, and frontend builds
- **Docker**: Multi-stage build with Node (frontend) → Go (backend) → Alpine runtime
- **Pre-commit**: `.pre-commit-config.yaml` runs formatters, linters, and `go test` with coverage enforcement (`CPM_MIN_COVERAGE=75` by default)
## Contributing
- See `CONTRIBUTING.md` (coming soon) for contribution guidelines.
+142
View File
@@ -0,0 +1,142 @@
# Versioning Guide
## Semantic Versioning
CaddyProxyManager+ follows [Semantic Versioning 2.0.0](https://semver.org/):
- **MAJOR.MINOR.PATCH** (e.g., `1.2.3`)
- **MAJOR**: Incompatible API changes
- **MINOR**: New functionality (backward compatible)
- **PATCH**: Bug fixes (backward compatible)
### Pre-release Identifiers
- `alpha`: Early development, unstable
- `beta`: Feature complete, testing phase
- `rc` (release candidate): Final testing before release
Example: `0.1.0-alpha`, `1.0.0-beta.1`, `2.0.0-rc.2`
## Creating a Release
### Automated Release Process
1. **Update version** in `.version` file:
```bash
echo "1.0.0" > .version
```
2. **Commit version bump**:
```bash
git add .version
git commit -m "chore: bump version to 1.0.0"
```
3. **Create and push tag**:
```bash
git tag -a v1.0.0 -m "Release v1.0.0"
git push origin v1.0.0
```
4. **GitHub Actions automatically**:
- Creates GitHub Release with changelog
- Builds multi-arch Docker images (amd64, arm64)
- Publishes to GitHub Container Registry with tags:
- `v1.0.0` (exact version)
- `1.0` (minor version)
- `1` (major version)
- `latest` (for non-prerelease on main branch)
## Container Image Tags
### Available Tags
- **`latest`**: Latest stable release (main branch)
- **`development`**: Latest development build (development branch)
- **`v1.2.3`**: Specific version tag
- **`1.2`**: Latest patch for minor version
- **`1`**: Latest minor for major version
- **`main-<sha>`**: Commit-specific build from main
- **`development-<sha>`**: Commit-specific build from development
### Usage Examples
```bash
# Use latest stable release
docker pull ghcr.io/wikid82/caddyproxymanagerplus:latest
# Use specific version
docker pull ghcr.io/wikid82/caddyproxymanagerplus:v1.0.0
# Use development builds
docker pull ghcr.io/wikid82/caddyproxymanagerplus:development
# Use specific commit
docker pull ghcr.io/wikid82/caddyproxymanagerplus:main-abc123
```
## Version Information
### Runtime Version Endpoint
```bash
curl http://localhost:8080/api/v1/health
```
Response includes:
```json
{
"status": "ok",
"service": "caddy-proxy-manager-plus",
"version": "1.0.0",
"git_commit": "abc1234567890def",
"build_date": "2025-11-17T12:34:56Z"
}
```
### Container Image Labels
View version metadata:
```bash
docker inspect ghcr.io/wikid82/caddyproxymanagerplus:latest \
--format='{{json .Config.Labels}}' | jq
```
Returns OCI-compliant labels:
- `org.opencontainers.image.version`
- `org.opencontainers.image.created`
- `org.opencontainers.image.revision`
- `org.opencontainers.image.source`
## Development Builds
Local builds default to `version=dev`:
```bash
docker build -t caddyproxymanagerplus:dev .
```
Build with custom version:
```bash
docker build \
--build-arg VERSION=1.2.3 \
--build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
--build-arg VCS_REF=$(git rev-parse HEAD) \
-t caddyproxymanagerplus:1.2.3 .
```
## Changelog Generation
The release workflow automatically generates changelogs from commit messages. Use conventional commit format:
- `feat:` New features
- `fix:` Bug fixes
- `docs:` Documentation changes
- `chore:` Maintenance tasks
- `refactor:` Code refactoring
- `test:` Test updates
- `ci:` CI/CD changes
Example:
```bash
git commit -m "feat: add TLS certificate management"
git commit -m "fix: correct proxy timeout handling"
```
+161
View File
@@ -0,0 +1,161 @@
# Automated Semantic Versioning - Implementation Summary
## Overview
Added comprehensive automated semantic versioning to CaddyProxyManager+ with version injection into container images, runtime version endpoints, and automated release workflows.
## Components Implemented
### 1. Dockerfile Version Injection
**File**: `Dockerfile`
- Added build arguments: `VERSION`, `BUILD_DATE`, `VCS_REF`
- Backend builder injects version info via Go ldflags during compilation
- Final image includes OCI-compliant labels for version metadata
- Version defaults to `dev` for local builds
### 2. Runtime Version Package
**File**: `backend/internal/version/version.go`
- Added `GitCommit` and `BuildDate` variables (injected via ldflags)
- Added `Full()` function returning complete version string
- Version information available at runtime via `/api/v1/health` endpoint
### 3. Health Endpoint Enhancement
**File**: `backend/internal/api/handlers/health_handler.go`
- Extended to expose version metadata:
- `version`: Semantic version (e.g., "1.0.0")
- `git_commit`: Git commit SHA
- `build_date`: Build timestamp
### 4. Docker Publishing Workflow
**File**: `.github/workflows/docker-publish.yml`
- Added `workflow_call` trigger for reusability
- Uses `docker/metadata-action` for automated tag generation
- Tag strategy:
- `latest` for main branch
- `development` for development branch
- `v1.2.3`, `1.2`, `1` for semantic version tags
- `{branch}-{sha}` for commit-specific builds
- Passes version metadata as build args
### 5. Release Workflow
**File**: `.github/workflows/release.yml`
- Triggered on `v*.*.*` tags
- Automatically generates changelog from commit messages
- Creates GitHub Release (marks pre-releases for alpha/beta/rc)
- Calls docker-publish workflow to build and publish images
### 6. Release Helper Script
**File**: `scripts/release.sh`
- Interactive script for creating releases
- Validates semantic version format
- Updates `.version` file
- Creates annotated git tag
- Pushes to remote and triggers workflows
- Safety checks: uncommitted changes, duplicate tags
### 7. Version File
**File**: `.version`
- Single source of truth for current version
- Current: `0.1.0-alpha`
- Used by release script and Makefile
### 8. Documentation
**File**: `VERSION.md`
- Complete versioning guide
- Release process documentation
- Container image tag reference
- Examples for all version query methods
### 9. Build System Updates
**File**: `Makefile`
- Added `docker-build-versioned`: Builds with version from `.version` file
- Added `release`: Interactive release creation
- Updated help text
**File**: `.gitignore`
- Added `CHANGELOG.txt` to ignored files
## Usage Examples
### Creating a Release
```bash
# Interactive release
make release
# Manual release
echo "1.0.0" > .version
git add .version
git commit -m "chore: bump version to 1.0.0"
git tag -a v1.0.0 -m "Release v1.0.0"
git push origin main
git push origin v1.0.0
```
### Building with Version
```bash
# Using Makefile (reads from .version)
make docker-build-versioned
# Manual with custom version
docker build \
--build-arg VERSION=1.2.3 \
--build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
--build-arg VCS_REF=$(git rev-parse HEAD) \
-t caddyproxymanagerplus:1.2.3 .
```
### Querying Version at Runtime
```bash
# Health endpoint includes version
curl http://localhost:8080/api/v1/health
{
"status": "ok",
"service": "caddy-proxy-manager-plus",
"version": "1.0.0",
"git_commit": "abc1234567890def",
"build_date": "2025-11-17T12:34:56Z"
}
# Container image labels
docker inspect ghcr.io/wikid82/caddyproxymanagerplus:latest \
--format='{{json .Config.Labels}}' | jq
```
## Automated Workflows
### On Tag Push (v1.2.3)
1. Release workflow creates GitHub Release with changelog
2. Docker publish workflow builds multi-arch images (amd64, arm64)
3. Images tagged: `v1.2.3`, `1.2`, `1`, `latest` (if main)
4. Published to GitHub Container Registry
### On Branch Push
1. Docker publish workflow builds images
2. Images tagged: `development` or `main-{sha}`
3. Published to GHCR (not for PRs)
## Benefits
1. **Traceability**: Every container image traceable to exact git commit
2. **Automation**: Zero-touch release process after tag push
3. **Flexibility**: Multiple tag strategies (latest, semver, commit-specific)
4. **Standards**: OCI-compliant image labels
5. **Runtime Discovery**: Version queryable via API endpoint
6. **User Experience**: Clear version information for support/debugging
## Testing
Version injection tested and working:
- ✅ Go binary builds with ldflags injection
- ✅ Health endpoint returns version info
- ✅ Dockerfile ARGs properly scoped
- ✅ OCI labels properly set
- ✅ Release script validates input
- ✅ Workflows configured correctly
## Next Steps
1. Test full release workflow with actual tag push
2. Consider adding `/api/v1/version` dedicated endpoint
3. Display version in frontend UI footer
4. Add version to error reports/logs
5. Document version strategy in contributor guide
@@ -3,13 +3,17 @@ package handlers
import (
"net/http"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/version"
"github.com/gin-gonic/gin"
)
// HealthHandler responds with basic service metadata for uptime checks.
func HealthHandler(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"status": "ok",
"service": "caddy-proxy-manager-plus",
"status": "ok",
"service": version.Name,
"version": version.SemVer,
"git_commit": version.GitCommit,
"build_date": version.BuildDate,
})
}
+2 -2
View File
@@ -12,8 +12,8 @@ import (
// Register wires up API routes and performs automatic migrations.
func Register(router *gin.Engine, db *gorm.DB) error {
if err := db.AutoMigrate(&models.ProxyHost{}); err != nil {
return fmt.Errorf("auto migrate proxy host: %w", err)
if err := db.AutoMigrate(&models.ProxyHost{}, &models.CaddyConfig{}); err != nil {
return fmt.Errorf("auto migrate: %w", err)
}
router.GET("/api/v1/health", handlers.HealthHandler)
+101
View File
@@ -0,0 +1,101 @@
package caddy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
// Client wraps the Caddy admin API.
//
// All methods issue HTTP requests against the admin endpoint (for example
// http://localhost:2019) and honor the caller-supplied context.
type Client struct {
	baseURL    string       // admin API base URL; endpoint paths are appended directly
	httpClient *http.Client // shared client carrying the request timeout
}

// NewClient creates a Caddy API client.
//
// adminAPIURL is the base URL of Caddy's admin endpoint. Every request made
// through the returned client is bounded by a 30-second timeout.
func NewClient(adminAPIURL string) *Client {
	return &Client{
		baseURL: adminAPIURL,
		httpClient: &http.Client{
			Timeout: 30 * time.Second,
		},
	}
}
// Load atomically replaces Caddy's entire configuration.
// This is the primary method for applying configuration changes.
//
// The config is marshaled to JSON and POSTed to the admin API's /load
// endpoint; any non-200 response is returned as an error containing the
// status code and response body.
func (c *Client) Load(ctx context.Context, config *Config) error {
	payload, err := json.Marshal(config)
	if err != nil {
		return fmt.Errorf("marshal config: %w", err)
	}

	request, err := http.NewRequestWithContext(ctx, http.MethodPost, c.baseURL+"/load", bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("create request: %w", err)
	}
	request.Header.Set("Content-Type", "application/json")

	response, err := c.httpClient.Do(request)
	if err != nil {
		return fmt.Errorf("execute request: %w", err)
	}
	defer response.Body.Close()

	if response.StatusCode == http.StatusOK {
		return nil
	}

	// Include Caddy's response body in the error for easier debugging.
	detail, _ := io.ReadAll(response.Body)
	return fmt.Errorf("caddy returned status %d: %s", response.StatusCode, string(detail))
}
// GetConfig retrieves the current running configuration from Caddy.
//
// It GETs the admin API's /config/ endpoint and decodes the JSON body into a
// Config. A non-200 response is surfaced as an error containing the status
// code and response body.
func (c *Client) GetConfig(ctx context.Context) (*Config, error) {
	request, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil)
	if err != nil {
		return nil, fmt.Errorf("create request: %w", err)
	}

	response, err := c.httpClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("execute request: %w", err)
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		detail, _ := io.ReadAll(response.Body)
		return nil, fmt.Errorf("caddy returned status %d: %s", response.StatusCode, string(detail))
	}

	cfg := new(Config)
	if err := json.NewDecoder(response.Body).Decode(cfg); err != nil {
		return nil, fmt.Errorf("decode response: %w", err)
	}
	return cfg, nil
}
// Ping checks if Caddy admin API is reachable.
//
// It issues a GET against /config/ and reports an error either when the
// request cannot be executed ("caddy unreachable") or when the response
// status is not 200.
func (c *Client) Ping(ctx context.Context) error {
	request, err := http.NewRequestWithContext(ctx, http.MethodGet, c.baseURL+"/config/", nil)
	if err != nil {
		return fmt.Errorf("create request: %w", err)
	}

	response, err := c.httpClient.Do(request)
	if err != nil {
		return fmt.Errorf("caddy unreachable: %w", err)
	}
	defer response.Body.Close()

	if response.StatusCode == http.StatusOK {
		return nil
	}
	return fmt.Errorf("caddy returned status %d", response.StatusCode)
}
+94
View File
@@ -0,0 +1,94 @@
package caddy
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models"
)
// TestClient_Load_Success verifies that Load POSTs the generated config to
// the admin API's /load endpoint and returns nil when the server answers 200.
func TestClient_Load_Success(t *testing.T) {
	// Mock Caddy admin API: assert the request path and method, then accept.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "/load", r.URL.Path)
		require.Equal(t, http.MethodPost, r.Method)
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	client := NewClient(server.URL)
	// GenerateConfig's error is ignored: the fixture host below is valid.
	config, _ := GenerateConfig([]models.ProxyHost{
		{
			UUID:       "test",
			Domain:     "test.com",
			TargetHost: "app",
			TargetPort: 8080,
		},
	})

	err := client.Load(context.Background(), config)
	require.NoError(t, err)
}
// TestClient_Load_Failure verifies that Load surfaces a non-200 response as
// an error whose message includes the HTTP status code.
func TestClient_Load_Failure(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(`{"error": "invalid config"}`))
	}))
	defer server.Close()

	client := NewClient(server.URL)
	config := &Config{}

	err := client.Load(context.Background(), config)
	require.Error(t, err)
	// The error message embeds the status code returned by the server.
	require.Contains(t, err.Error(), "400")
}
// TestClient_GetConfig_Success verifies that GetConfig issues GET /config/
// and decodes the server's JSON body into a Config value.
func TestClient_GetConfig_Success(t *testing.T) {
	// Canned config the mock admin API will serve back.
	testConfig := &Config{
		Apps: Apps{
			HTTP: &HTTPApp{
				Servers: map[string]*Server{
					"test": {Listen: []string{":80"}},
				},
			},
		},
	}
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "/config/", r.URL.Path)
		require.Equal(t, http.MethodGet, r.Method)
		w.WriteHeader(http.StatusOK)
		json.NewEncoder(w).Encode(testConfig)
	}))
	defer server.Close()

	client := NewClient(server.URL)
	config, err := client.GetConfig(context.Background())
	require.NoError(t, err)
	require.NotNil(t, config)
	require.NotNil(t, config.Apps.HTTP)
}
// TestClient_Ping_Success verifies that Ping returns nil when the admin API
// responds with 200 OK.
func TestClient_Ping_Success(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	client := NewClient(server.URL)
	err := client.Ping(context.Background())
	require.NoError(t, err)
}
// TestClient_Ping_Unreachable verifies Ping returns an error when the admin
// API is down.
//
// FIX: the old test dialed a hard-coded http://localhost:9999, which is flaky
// if anything happens to listen on that port. Starting an httptest server and
// closing it before the call guarantees a refused connection on that address.
func TestClient_Ping_Unreachable(t *testing.T) {
	mock := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))
	mock.Close() // port is now closed; connections to mock.URL fail

	client := NewClient(mock.URL)
	require.Error(t, client.Ping(context.Background()))
}
+62
View File
@@ -0,0 +1,62 @@
package caddy
import (
"fmt"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models"
)
// GenerateConfig translates the database representation of proxy hosts into
// Caddy's native JSON configuration (https://caddyserver.com/docs/json/).
//
// Each host becomes one terminal route matching its Domain and reverse-
// proxying to TargetHost:TargetPort. All routes live in a single server
// ("cpm_server") listening on :80/:443 with automatic HTTPS left enabled.
// Returns an error if any host has an empty Domain.
//
// NOTE(review): TargetScheme and EnableTLS on models.ProxyHost are not
// consulted here — only EnableWS influences the generated handler.
func GenerateConfig(hosts []models.ProxyHost) (*Config, error) {
	// No hosts: a valid config with an empty server map.
	if len(hosts) == 0 {
		return &Config{Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{}}}}, nil
	}

	routes := make([]*Route, 0, len(hosts))
	for _, h := range hosts {
		if h.Domain == "" {
			return nil, fmt.Errorf("proxy host %s has empty domain", h.UUID)
		}
		upstream := fmt.Sprintf("%s:%d", h.TargetHost, h.TargetPort)
		routes = append(routes, &Route{
			Match:    []Match{{Host: []string{h.Domain}}},
			Handle:   []Handler{ReverseProxyHandler(upstream, h.EnableWS)},
			Terminal: true,
		})
	}

	server := &Server{
		Listen: []string{":80", ":443"},
		Routes: routes,
		// Automatic HTTPS stays on (Disable defaults to false).
		AutoHTTPS: &AutoHTTPSConfig{Disable: false},
	}
	return &Config{
		Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{"cpm_server": server}}},
	}, nil
}
+110
View File
@@ -0,0 +1,110 @@
package caddy
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models"
)
// TestGenerateConfig_Empty confirms an empty host list yields a valid config
// with an HTTP app but no servers.
func TestGenerateConfig_Empty(t *testing.T) {
	cfg, err := GenerateConfig([]models.ProxyHost{})
	require.NoError(t, err)
	require.NotNil(t, cfg)
	require.NotNil(t, cfg.Apps.HTTP)
	require.Empty(t, cfg.Apps.HTTP.Servers)
}
func TestGenerateConfig_SingleHost(t *testing.T) {
hosts := []models.ProxyHost{
{
UUID: "test-uuid",
Name: "Media",
Domain: "media.example.com",
TargetScheme: "http",
TargetHost: "media",
TargetPort: 32400,
EnableTLS: true,
EnableWS: false,
},
}
config, err := GenerateConfig(hosts)
require.NoError(t, err)
require.NotNil(t, config)
require.NotNil(t, config.Apps.HTTP)
require.Len(t, config.Apps.HTTP.Servers, 1)
server := config.Apps.HTTP.Servers["cpm_server"]
require.NotNil(t, server)
require.Contains(t, server.Listen, ":80")
require.Contains(t, server.Listen, ":443")
require.Len(t, server.Routes, 1)
route := server.Routes[0]
require.Len(t, route.Match, 1)
require.Equal(t, []string{"media.example.com"}, route.Match[0].Host)
require.Len(t, route.Handle, 1)
require.True(t, route.Terminal)
handler := route.Handle[0]
require.Equal(t, "reverse_proxy", handler["handler"])
}
// TestGenerateConfig_MultipleHosts confirms each host contributes exactly one
// route to the shared server.
func TestGenerateConfig_MultipleHosts(t *testing.T) {
	hosts := []models.ProxyHost{
		{UUID: "uuid-1", Domain: "site1.example.com", TargetHost: "app1", TargetPort: 8080},
		{UUID: "uuid-2", Domain: "site2.example.com", TargetHost: "app2", TargetPort: 8081},
	}

	cfg, err := GenerateConfig(hosts)
	require.NoError(t, err)
	require.Len(t, cfg.Apps.HTTP.Servers["cpm_server"].Routes, 2)
}
func TestGenerateConfig_WebSocketEnabled(t *testing.T) {
hosts := []models.ProxyHost{
{
UUID: "uuid-ws",
Domain: "ws.example.com",
TargetHost: "wsapp",
TargetPort: 3000,
EnableWS: true,
},
}
config, err := GenerateConfig(hosts)
require.NoError(t, err)
route := config.Apps.HTTP.Servers["cpm_server"].Routes[0]
handler := route.Handle[0]
// Check WebSocket headers are present
require.NotNil(t, handler["headers"])
}
func TestGenerateConfig_EmptyDomain(t *testing.T) {
hosts := []models.ProxyHost{
{
UUID: "bad-uuid",
Domain: "",
TargetHost: "app",
TargetPort: 8080,
},
}
_, err := GenerateConfig(hosts)
require.Error(t, err)
require.Contains(t, err.Error(), "empty domain")
}
+199
View File
@@ -0,0 +1,199 @@
package caddy
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"time"
"gorm.io/gorm"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models"
)
// Manager orchestrates the Caddy configuration lifecycle: generate a config
// from the database, validate it, apply it via the admin API, and roll back
// to the most recent on-disk snapshot if applying fails.
type Manager struct {
	client    *Client  // Caddy admin-API client used to load/fetch configs
	db        *gorm.DB // source of ProxyHost rows and sink for audit records
	configDir string   // directory where JSON config snapshots are written
}

// NewManager creates a configuration manager.
//
// configDir must be a writable directory; snapshots named config-<ts>.json
// are stored there for rollback.
func NewManager(client *Client, db *gorm.DB, configDir string) *Manager {
	return &Manager{
		client:    client,
		db:        db,
		configDir: configDir,
	}
}
// ApplyConfig generates configuration from the database, validates it, and
// applies it to Caddy, rolling back to the previous snapshot on failure.
//
// Flow: fetch ProxyHost rows -> GenerateConfig -> Validate -> snapshot the
// candidate to disk -> client.Load. Each attempt (success or failure) is
// recorded as a CaddyConfig audit row keyed by the SHA-256 of the config JSON.
func (m *Manager) ApplyConfig(ctx context.Context) error {
	// Fetch all proxy hosts from the database.
	var hosts []models.ProxyHost
	if err := m.db.Find(&hosts).Error; err != nil {
		return fmt.Errorf("fetch proxy hosts: %w", err)
	}

	config, err := GenerateConfig(hosts)
	if err != nil {
		return fmt.Errorf("generate config: %w", err)
	}

	// Validate locally before touching Caddy.
	if err := Validate(config); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// Persist a snapshot of the candidate config and remember its path so it
	// can be discarded if Caddy rejects it.
	snapshotPath, err := m.saveSnapshot(config)
	if err != nil {
		return fmt.Errorf("save snapshot: %w", err)
	}

	// Hash for the audit trail. Validate already proved the config marshals,
	// but surface the error anyway instead of hashing empty bytes.
	configJSON, err := json.Marshal(config)
	if err != nil {
		return fmt.Errorf("marshal config: %w", err)
	}
	configHash := fmt.Sprintf("%x", sha256.Sum256(configJSON))

	if err := m.client.Load(ctx, config); err != nil {
		// BUGFIX: the snapshot written above holds the config Caddy just
		// rejected. Remove it first so rollback() restores the previous good
		// snapshot instead of re-applying the failing one.
		if rmErr := os.Remove(snapshotPath); rmErr != nil {
			fmt.Printf("warning: could not remove failed snapshot %s: %v\n", snapshotPath, rmErr)
		}
		if rollbackErr := m.rollback(ctx); rollbackErr != nil {
			return fmt.Errorf("apply failed: %w, rollback also failed: %v", err, rollbackErr)
		}
		// Record the failed attempt.
		m.recordConfigChange(configHash, false, err.Error())
		return fmt.Errorf("apply failed (rolled back): %w", err)
	}

	// Record the successful application.
	m.recordConfigChange(configHash, true, "")

	// Best-effort housekeeping: keep only the 10 most recent snapshots.
	if err := m.rotateSnapshots(10); err != nil {
		fmt.Printf("warning: snapshot rotation failed: %v\n", err)
	}
	return nil
}
// saveSnapshot writes the config as indented JSON to
// <configDir>/config-<unix-nanos>.json and returns the file path.
//
// FIX: nanosecond timestamps prevent two applies within the same second from
// silently overwriting each other's snapshot (the old name used second
// granularity).
func (m *Manager) saveSnapshot(config *Config) (string, error) {
	filename := fmt.Sprintf("config-%d.json", time.Now().UnixNano())
	path := filepath.Join(m.configDir, filename)

	configJSON, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		return "", fmt.Errorf("marshal config: %w", err)
	}
	if err := os.WriteFile(path, configJSON, 0644); err != nil {
		return "", fmt.Errorf("write snapshot: %w", err)
	}
	return path, nil
}
// rollback re-applies the most recent on-disk snapshot to Caddy.
//
// NOTE(review): ApplyConfig snapshots the candidate config before applying,
// so the newest snapshot can be the config that just failed — confirm the
// caller discards a rejected snapshot before invoking rollback.
func (m *Manager) rollback(ctx context.Context) error {
	snapshots, err := m.listSnapshots()
	if err != nil {
		// FIX: the old code collapsed this into "no snapshots available",
		// discarding the underlying filesystem error.
		return fmt.Errorf("list snapshots: %w", err)
	}
	if len(snapshots) == 0 {
		return fmt.Errorf("no snapshots available for rollback")
	}

	// Load the most recent snapshot (listSnapshots sorts oldest-first).
	latest := snapshots[len(snapshots)-1]
	configJSON, err := os.ReadFile(latest)
	if err != nil {
		return fmt.Errorf("read snapshot: %w", err)
	}

	var config Config
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return fmt.Errorf("unmarshal snapshot: %w", err)
	}

	if err := m.client.Load(ctx, &config); err != nil {
		return fmt.Errorf("load snapshot: %w", err)
	}
	return nil
}
// listSnapshots returns the paths of all *.json snapshot files in configDir,
// sorted oldest-first by modification time.
func (m *Manager) listSnapshots() ([]string, error) {
	entries, err := os.ReadDir(m.configDir)
	if err != nil {
		return nil, fmt.Errorf("read config dir: %w", err)
	}

	type snap struct {
		path    string
		modTime time.Time
	}
	snaps := make([]snap, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" {
			continue
		}
		// FIX: resolve FileInfo once here and skip files that vanish between
		// ReadDir and stat. The old version called os.Stat inside the sort
		// comparator and ignored its error, risking a nil-FileInfo panic.
		info, err := entry.Info()
		if err != nil {
			continue
		}
		snaps = append(snaps, snap{
			path:    filepath.Join(m.configDir, entry.Name()),
			modTime: info.ModTime(),
		})
	}

	sort.Slice(snaps, func(i, j int) bool {
		return snaps[i].modTime.Before(snaps[j].modTime)
	})

	paths := make([]string, len(snaps))
	for i, s := range snaps {
		paths[i] = s.path
	}
	return paths, nil
}
// rotateSnapshots deletes the oldest snapshots so that at most keep remain.
func (m *Manager) rotateSnapshots(keep int) error {
	snapshots, err := m.listSnapshots()
	if err != nil {
		return err
	}

	excess := len(snapshots) - keep
	if excess <= 0 {
		return nil
	}

	// listSnapshots sorts oldest-first, so the first `excess` entries go.
	for _, path := range snapshots[:excess] {
		if err := os.Remove(path); err != nil {
			return fmt.Errorf("delete snapshot %s: %w", path, err)
		}
	}
	return nil
}
// recordConfigChange appends an audit row for a config apply attempt.
// Write failures are deliberately ignored: auditing must never block or fail
// a configuration change.
func (m *Manager) recordConfigChange(configHash string, success bool, errorMsg string) {
	_ = m.db.Create(&models.CaddyConfig{
		ConfigHash: configHash,
		AppliedAt:  time.Now(),
		Success:    success,
		ErrorMsg:   errorMsg,
	}).Error
}
// Ping reports whether Caddy's admin API is reachable; returns nil on success.
func (m *Manager) Ping(ctx context.Context) error {
	return m.client.Ping(ctx)
}
// GetCurrentConfig retrieves the live running configuration from Caddy's
// admin API (not from the on-disk snapshots).
func (m *Manager) GetCurrentConfig(ctx context.Context) (*Config, error) {
	return m.client.GetConfig(ctx)
}
+95
View File
@@ -0,0 +1,95 @@
package caddy
// Config represents Caddy's top-level JSON configuration structure.
// Reference: https://caddyserver.com/docs/json/
type Config struct {
	Apps Apps `json:"apps"`
}

// Apps contains the Caddy app modules this project emits (http, tls).
type Apps struct {
	HTTP *HTTPApp `json:"http,omitempty"`
	TLS  *TLSApp  `json:"tls,omitempty"`
}

// HTTPApp configures the HTTP app: a map of named server instances.
type HTTPApp struct {
	Servers map[string]*Server `json:"servers"`
}

// Server represents an HTTP server instance.
type Server struct {
	Listen    []string         `json:"listen"`                    // listen addresses, e.g. ":80", ":443"
	Routes    []*Route         `json:"routes"`                    // evaluated in order per request
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"` // nil leaves Caddy defaults in place
	Logs      *ServerLogs      `json:"logs,omitempty"`            // optional access-log configuration
}

// AutoHTTPSConfig controls automatic HTTPS behavior.
type AutoHTTPSConfig struct {
	Disable      bool     `json:"disable,omitempty"`           // turn off automatic HTTPS entirely
	DisableRedir bool     `json:"disable_redirects,omitempty"` // keep automation but skip HTTP->HTTPS redirects
	Skip         []string `json:"skip,omitempty"`              // hostnames excluded from automation
}

// ServerLogs configures access logging.
type ServerLogs struct {
	DefaultLoggerName string `json:"default_logger_name,omitempty"`
}

// Route represents an HTTP route (matcher + handlers).
type Route struct {
	Match    []Match   `json:"match,omitempty"`    // request matchers; empty matches everything
	Handle   []Handler `json:"handle"`             // handler chain for matched requests
	Terminal bool      `json:"terminal,omitempty"` // stop evaluating later routes on match
}

// Match represents a request matcher.
type Match struct {
	Host []string `json:"host,omitempty"`
	Path []string `json:"path,omitempty"`
}
// Handler is a loosely-typed Caddy handler object. Caddy handlers form an
// open, extensible set of modules, so they are modelled as a generic JSON
// object keyed by a "handler" discriminator field.
type Handler map[string]interface{}

// ReverseProxyHandler builds a reverse_proxy handler targeting the single
// upstream dial address (host:port). When enableWS is true the handler also
// forwards the client's Upgrade/Connection headers so WebSocket upgrade
// requests reach the upstream.
func ReverseProxyHandler(dial string, enableWS bool) Handler {
	handler := Handler{
		"handler": "reverse_proxy",
		"upstreams": []map[string]interface{}{
			{"dial": dial},
		},
	}
	if !enableWS {
		return handler
	}
	// Preserve the hop-by-hop upgrade headers on the proxied request.
	handler["headers"] = map[string]interface{}{
		"request": map[string]interface{}{
			"set": map[string][]string{
				"Upgrade":    {"{http.request.header.Upgrade}"},
				"Connection": {"{http.request.header.Connection}"},
			},
		},
	}
	return handler
}
// TLSApp configures the TLS app for certificate management.
type TLSApp struct {
	Automation *AutomationConfig `json:"automation,omitempty"`
}

// AutomationConfig controls certificate automation.
type AutomationConfig struct {
	Policies []*AutomationPolicy `json:"policies,omitempty"`
}

// AutomationPolicy defines certificate management for specific domains.
type AutomationPolicy struct {
	Subjects   []string      `json:"subjects,omitempty"` // domains this policy covers
	IssuersRaw []interface{} `json:"issuers,omitempty"`  // issuer module configs (opaque JSON)
}
+146
View File
@@ -0,0 +1,146 @@
package caddy
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
)
// Validate performs pre-flight validation on a Caddy config before applying it.
//
// Checks: every server has at least one syntactically valid listen address,
// every route carries handlers, no host matcher is claimed by more than one
// route (across all servers), and the whole config marshals to JSON. A nil
// HTTP app (empty config) is valid.
func Validate(cfg *Config) error {
	if cfg == nil {
		return fmt.Errorf("config cannot be nil")
	}
	if cfg.Apps.HTTP == nil {
		return nil // empty config is valid
	}

	// Host matchers must be globally unique; this map spans all servers.
	seenHosts := make(map[string]bool)
	for serverName, server := range cfg.Apps.HTTP.Servers {
		// FIX: guard nil map entries; the old code dereferenced a nil
		// *Server (and nil *Route below) and panicked.
		if server == nil {
			return fmt.Errorf("server %s is nil", serverName)
		}
		if len(server.Listen) == 0 {
			return fmt.Errorf("server %s has no listen addresses", serverName)
		}
		for _, addr := range server.Listen {
			if err := validateListenAddr(addr); err != nil {
				return fmt.Errorf("invalid listen address %s in server %s: %w", addr, serverName, err)
			}
		}
		for i, route := range server.Routes {
			if route == nil {
				return fmt.Errorf("route %d in server %s is nil", i, serverName)
			}
			if err := validateRoute(route, seenHosts); err != nil {
				return fmt.Errorf("invalid route %d in server %s: %w", i, serverName, err)
			}
		}
	}

	// Final sanity check: the config must serialize.
	if _, err := json.Marshal(cfg); err != nil {
		return fmt.Errorf("config cannot be marshalled to JSON: %w", err)
	}
	return nil
}
// validateListenAddr checks a Caddy listen address of the form
// [network/]host:port, e.g. ":443", "tcp/127.0.0.1:80", "localhost:8080".
//
// FIX: the host part may be empty (wildcard bind), an IP literal, or a
// hostname. The previous version rejected any non-IP host, refusing
// hostname listen addresses that Caddy itself accepts.
func validateListenAddr(addr string) error {
	// Strip an optional network prefix such as "tcp/" or "udp/".
	if idx := strings.Index(addr, "/"); idx != -1 {
		addr = addr[idx+1:]
	}

	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("invalid address format: %w", err)
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		return fmt.Errorf("invalid port: %w", err)
	}
	if port < 1 || port > 65535 {
		return fmt.Errorf("port %d out of range (1-65535)", port)
	}

	// Empty host = bind all interfaces; otherwise accept an IP literal or a
	// plausible hostname (reject only obvious garbage containing whitespace).
	if host != "" && net.ParseIP(host) == nil {
		if strings.ContainsAny(host, " \t") {
			return fmt.Errorf("invalid host: %q", host)
		}
	}
	return nil
}
// validateRoute checks a single route: it must carry at least one handler,
// its host matchers must not collide with any previously-seen host (the
// caller threads seenHosts across routes and servers), and every handler
// must pass validateHandler.
func validateRoute(route *Route, seenHosts map[string]bool) error {
	if len(route.Handle) == 0 {
		return fmt.Errorf("route has no handlers")
	}

	for _, matcher := range route.Match {
		for _, h := range matcher.Host {
			if seenHosts[h] {
				return fmt.Errorf("duplicate host matcher: %s", h)
			}
			seenHosts[h] = true
		}
	}

	for i, h := range route.Handle {
		if err := validateHandler(h); err != nil {
			return fmt.Errorf("invalid handler %d: %w", i, err)
		}
	}
	return nil
}
// validateHandler dispatches on the "handler" discriminator field. Only
// reverse_proxy gets deep validation; every other handler type (file_server,
// static_response, third-party modules, ...) is accepted as-is because
// Caddy's handler set is extensible.
func validateHandler(handler Handler) error {
	handlerType, ok := handler["handler"].(string)
	if !ok {
		return fmt.Errorf("handler missing 'handler' field")
	}
	if handlerType == "reverse_proxy" {
		return validateReverseProxy(handler)
	}
	return nil
}
// validateReverseProxy checks that a reverse_proxy handler has at least one
// upstream and that each upstream's "dial" is a host:port address.
//
// FIX: upstreams may be []map[string]interface{} (built in-process by
// ReverseProxyHandler) or []interface{} (a config round-tripped through
// encoding/json, e.g. a loaded snapshot). The old single type assertion only
// accepted the former, so valid unmarshalled configs were rejected with
// "missing upstreams".
func validateReverseProxy(handler Handler) error {
	var upstreams []map[string]interface{}
	switch v := handler["upstreams"].(type) {
	case []map[string]interface{}:
		upstreams = v
	case []interface{}:
		for _, item := range v {
			u, ok := item.(map[string]interface{})
			if !ok {
				return fmt.Errorf("reverse_proxy has malformed upstream entry")
			}
			upstreams = append(upstreams, u)
		}
	default:
		return fmt.Errorf("reverse_proxy missing upstreams")
	}

	if len(upstreams) == 0 {
		return fmt.Errorf("reverse_proxy has no upstreams")
	}
	for i, upstream := range upstreams {
		dial, ok := upstream["dial"].(string)
		if !ok || dial == "" {
			return fmt.Errorf("upstream %d missing dial address", i)
		}
		// Dial addresses must be host:port.
		if _, _, err := net.SplitHostPort(dial); err != nil {
			return fmt.Errorf("upstream %d has invalid dial address %s: %w", i, dial, err)
		}
	}
	return nil
}
+124
View File
@@ -0,0 +1,124 @@
package caddy
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/Wikid82/CaddyProxyManagerPlus/backend/internal/models"
)
func TestValidate_EmptyConfig(t *testing.T) {
config := &Config{}
err := Validate(config)
require.NoError(t, err)
}
func TestValidate_ValidConfig(t *testing.T) {
hosts := []models.ProxyHost{
{
UUID: "test",
Domain: "test.example.com",
TargetHost: "app",
TargetPort: 8080,
},
}
config, _ := GenerateConfig(hosts)
err := Validate(config)
require.NoError(t, err)
}
func TestValidate_DuplicateHosts(t *testing.T) {
config := &Config{
Apps: Apps{
HTTP: &HTTPApp{
Servers: map[string]*Server{
"srv": {
Listen: []string{":80"},
Routes: []*Route{
{
Match: []Match{{Host: []string{"test.com"}}},
Handle: []Handler{
ReverseProxyHandler("app:8080", false),
},
},
{
Match: []Match{{Host: []string{"test.com"}}},
Handle: []Handler{
ReverseProxyHandler("app2:8080", false),
},
},
},
},
},
},
},
}
err := Validate(config)
require.Error(t, err)
require.Contains(t, err.Error(), "duplicate host")
}
// TestValidate_NoListenAddresses confirms a server without listeners is
// rejected.
func TestValidate_NoListenAddresses(t *testing.T) {
	cfg := &Config{Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{
		"srv": {Listen: []string{}, Routes: []*Route{}},
	}}}}

	err := Validate(cfg)
	require.Error(t, err)
	require.Contains(t, err.Error(), "no listen addresses")
}
// TestValidate_InvalidPort confirms an out-of-range listen port is rejected.
func TestValidate_InvalidPort(t *testing.T) {
	cfg := &Config{Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{
		"srv": {Listen: []string{":99999"}, Routes: []*Route{}},
	}}}}

	err := Validate(cfg)
	require.Error(t, err)
	require.Contains(t, err.Error(), "out of range")
}
// TestValidate_NoHandlers confirms a route with an empty handler list is
// rejected.
func TestValidate_NoHandlers(t *testing.T) {
	cfg := &Config{Apps: Apps{HTTP: &HTTPApp{Servers: map[string]*Server{
		"srv": {
			Listen: []string{":80"},
			Routes: []*Route{{
				Match:  []Match{{Host: []string{"test.com"}}},
				Handle: []Handler{},
			}},
		},
	}}}}

	err := Validate(cfg)
	require.Error(t, err)
	require.Contains(t, err.Error(), "no handlers")
}
+16 -8
View File
@@ -8,25 +8,33 @@ import (
// Config captures runtime configuration sourced from environment variables.
type Config struct {
Environment string
HTTPPort string
DatabasePath string
FrontendDir string
Environment string
HTTPPort string
DatabasePath string
FrontendDir string
CaddyAdminAPI string
CaddyConfigDir string
}
// Load reads env vars and falls back to defaults so the server can boot with zero configuration.
func Load() (Config, error) {
cfg := Config{
Environment: getEnv("CPM_ENV", "development"),
HTTPPort: getEnv("CPM_HTTP_PORT", "8080"),
DatabasePath: getEnv("CPM_DB_PATH", filepath.Join("data", "cpm.db")),
FrontendDir: getEnv("CPM_FRONTEND_DIR", filepath.Clean(filepath.Join("..", "frontend", "dist"))),
Environment: getEnv("CPM_ENV", "development"),
HTTPPort: getEnv("CPM_HTTP_PORT", "8080"),
DatabasePath: getEnv("CPM_DB_PATH", filepath.Join("data", "cpm.db")),
FrontendDir: getEnv("CPM_FRONTEND_DIR", filepath.Clean(filepath.Join("..", "frontend", "dist"))),
CaddyAdminAPI: getEnv("CPM_CADDY_ADMIN_API", "http://localhost:2019"),
CaddyConfigDir: getEnv("CPM_CADDY_CONFIG_DIR", filepath.Join("data", "caddy")),
}
if err := os.MkdirAll(filepath.Dir(cfg.DatabasePath), 0o755); err != nil {
return Config{}, fmt.Errorf("ensure data directory: %w", err)
}
if err := os.MkdirAll(cfg.CaddyConfigDir, 0o755); err != nil {
return Config{}, fmt.Errorf("ensure caddy config directory: %w", err)
}
return cfg, nil
}
+14
View File
@@ -0,0 +1,14 @@
package models
import (
"time"
)
// CaddyConfig stores an audit trail of Caddy configuration changes.
// One row is written per apply attempt, successful or not.
type CaddyConfig struct {
	ID         uint      `json:"id" gorm:"primaryKey"`
	ConfigHash string    `json:"config_hash" gorm:"index"` // SHA-256 hex digest of the applied config JSON
	AppliedAt  time.Time `json:"applied_at"`               // when the apply was attempted
	Success    bool      `json:"success"`                  // whether Caddy accepted the config
	ErrorMsg   string    `json:"error_msg"`                // apply error text when Success is false
}
+13 -1
View File
@@ -3,6 +3,18 @@ package version
var (
	// Name identifies the service in logs and telemetry.
	Name = "caddy-proxy-manager-plus"
	// SemVer captures the backend semantic version (injected at build time via ldflags).
	SemVer = "0.1.0-alpha"
	// GitCommit is the git commit SHA (injected at build time via ldflags).
	GitCommit = "unknown"
	// BuildDate is the build timestamp (injected at build time via ldflags).
	BuildDate = "unknown"
)

// Full returns the complete version string. When both GitCommit and BuildDate
// were injected at build time, the result is "SemVer (shortsha, date)";
// otherwise it is just SemVer.
//
// BUGFIX: the commit is truncated to at most 7 characters — the previous
// unconditional GitCommit[:7] slice panicked whenever ldflags injected a SHA
// shorter than 7 characters.
func Full() string {
	if GitCommit == "unknown" || BuildDate == "unknown" {
		return SemVer
	}
	short := GitCommit
	if len(short) > 7 {
		short = short[:7]
	}
	return SemVer + " (" + short + ", " + BuildDate + ")"
}
+20
View File
@@ -0,0 +1,20 @@
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification; recent Docker Compose versions ignore it with a warning.
version: '3.9'

# Development override - use with: docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
services:
  app:
    # Development: expose Caddy admin API externally for debugging
    # NOTE(review): 80/443/8080 are already published by the base
    # docker-compose.yml; Compose appends port lists when merging override
    # files, so only "2019:2019" is new here — confirm your Compose version
    # deduplicates the repeated mappings.
    ports:
      - "80:80"
      - "443:443"
      - "443:443/udp"
      - "8080:8080"
      - "2019:2019" # Caddy admin API (dev only)
    environment:
      - CPM_ENV=development
      - CPM_HTTP_PORT=8080
      - CPM_DB_PATH=/app/data/cpm.db
      - CPM_FRONTEND_DIR=/app/frontend/dist
      # Admin API is reached over localhost because Caddy runs in the same container.
      - CPM_CADDY_ADMIN_API=http://localhost:2019
      - CPM_CADDY_CONFIG_DIR=/app/data/caddy
+39
View File
@@ -0,0 +1,39 @@
# Single-container deployment: the image runs both Caddy (public proxy) and
# the CPM+ management app.
#
# FIX: removed the obsolete top-level `version` key — it is ignored (with a
# warning) by Compose implementations following the Compose Specification.
services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: caddyproxymanagerplus
    restart: unless-stopped
    ports:
      - "80:80"       # HTTP (Caddy proxy)
      - "443:443"     # HTTPS (Caddy proxy)
      - "443:443/udp" # HTTP/3 (Caddy proxy)
      - "8080:8080"   # Management UI (CPM+)
    environment:
      - CPM_ENV=production
      - CPM_HTTP_PORT=8080
      - CPM_DB_PATH=/app/data/cpm.db
      - CPM_FRONTEND_DIR=/app/frontend/dist
      # Caddy runs inside the same container, so localhost works here.
      - CPM_CADDY_ADMIN_API=http://localhost:2019
      - CPM_CADDY_CONFIG_DIR=/app/data/caddy
    volumes:
      - cpm_data:/app/data    # CPM+ database and config snapshots
      - caddy_data:/data      # Caddy-managed certificates and state
      - caddy_config:/config  # Caddy runtime config
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/api/v1/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

volumes:
  cpm_data:
    driver: local
  caddy_data:
    driver: local
  caddy_config:
    driver: local
+55
View File
@@ -0,0 +1,55 @@
#!/bin/bash
set -e

# Entrypoint: run both Caddy and the CPM+ management app in a single
# container. This simplifies deployment for home users.

echo "Starting CaddyProxyManager+ with integrated Caddy..."

# Start Caddy in the background with an initial empty config.
echo '{"apps":{}}' > /config/caddy.json
caddy run --config /config/caddy.json --adapter json &
CADDY_PID=$!
echo "Caddy started (PID: $CADDY_PID)"

# Wait (up to ~30s) for Caddy's admin API before starting the app.
echo "Waiting for Caddy admin API..."
CADDY_READY=false
for i in {1..30}; do
    if wget -q -O- http://localhost:2019/config/ > /dev/null 2>&1; then
        echo "Caddy is ready!"
        CADDY_READY=true
        break
    fi
    sleep 1
done
if [ "$CADDY_READY" != "true" ]; then
    # Best-effort (matches the old behavior of falling through silently):
    # continue so the container logs surface the real Caddy failure.
    echo "WARNING: Caddy admin API not reachable after 30s; continuing anyway"
fi

# Start the CPM+ management application.
echo "Starting CPM+ management application..."
/app/api &
APP_PID=$!
echo "CPM+ started (PID: $APP_PID)"

# Terminate both children and exit; used for signals and normal teardown.
shutdown() {
    echo "Shutting down..."
    kill -TERM "$APP_PID" 2>/dev/null || true
    kill -TERM "$CADDY_PID" 2>/dev/null || true
    wait "$APP_PID" 2>/dev/null || true
    wait "$CADDY_PID" 2>/dev/null || true
    exit 0
}

# Trap signals for graceful shutdown.
trap shutdown SIGTERM SIGINT

echo "CaddyProxyManager+ is running!"
echo " - Management UI: http://localhost:8080"
echo " - Caddy Proxy: http://localhost:80, https://localhost:443"
echo " - Caddy Admin API: http://localhost:2019"

# Block until either child exits.
# BUGFIX: under `set -e`, a non-zero status from `wait -n` used to abort the
# script before the shutdown handler could stop the surviving process. Also,
# `wait -n PID...` requires bash >= 5.1; plain `wait -n` (any child) works on
# bash >= 4.3 and is equivalent here since Caddy and the app are the only
# background jobs.
set +e
wait -n
EXIT_CODE=$?
set -e
echo "A process exited with code $EXIT_CODE, shutting down..."
shutdown
+35
View File
@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -euo pipefail

# Pre-commit gate: run backend tests with coverage and fail when the total
# coverage drops below CPM_MIN_COVERAGE percent (default 75).

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BACKEND_DIR="$ROOT_DIR/backend"
COVERAGE_FILE="$BACKEND_DIR/coverage.pre-commit.out"
MIN_COVERAGE="${CPM_MIN_COVERAGE:-75}"

cd "$BACKEND_DIR"
go test -coverprofile="$COVERAGE_FILE" ./...

# The summary's "total:" line looks like: total: (statements) NN.N%
TOTAL_LINE=$(go tool cover -func="$COVERAGE_FILE" | grep total)
echo "$TOTAL_LINE"
TOTAL_PERCENT=$(echo "$TOTAL_LINE" | awk '{print substr($3, 1, length($3)-1)}')

echo "Computed coverage: ${TOTAL_PERCENT}% (minimum required ${MIN_COVERAGE}%)"

# FIX: compare as floats with awk instead of spawning python3 — one fewer
# runtime dependency for a Go repository's pre-commit hook. Also removes the
# coverage file on failure (the old python exit left it behind under set -e).
if ! awk -v total="$TOTAL_PERCENT" -v min="$MIN_COVERAGE" 'BEGIN { exit !(total >= min) }'; then
    echo "Coverage ${TOTAL_PERCENT}% is below required ${MIN_COVERAGE}% (set CPM_MIN_COVERAGE to override)" >&2
    rm -f "$COVERAGE_FILE"
    exit 1
fi

rm -f "$COVERAGE_FILE"
echo "Coverage requirement met"
+104
View File
@@ -0,0 +1,104 @@
#!/bin/bash
# Release script for CaddyProxyManager+
# Creates a new semantic version release with tag and GitHub release
set -e

# ANSI color codes for terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Print a red error message to stderr and abort the script.
error() {
    printf "${RED}Error: %s${NC}\n" "$1" >&2
    exit 1
}

# Print a green status message.
success() {
    printf "${GREEN}%s${NC}\n" "$1"
}

# Print a yellow warning message.
warning() {
    printf "${YELLOW}%s${NC}\n" "$1"
}
# Ensure we're inside a git repository, then run from its root.
if ! git rev-parse --git-dir > /dev/null 2>&1; then
    error "Not in a git repository"
fi
# BUGFIX: .version was previously read and written relative to the caller's
# working directory; anchor all relative paths at the repository root.
cd "$(git rev-parse --show-toplevel)"

# Refuse to release with uncommitted changes.
if [[ -n $(git status -s) ]]; then
    error "You have uncommitted changes. Please commit or stash them first."
fi

# Warn when releasing from a non-standard branch.
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [[ "$CURRENT_BRANCH" != "main" && "$CURRENT_BRANCH" != "development" ]]; then
    warning "You are on branch '$CURRENT_BRANCH'. Releases are typically from 'main' or 'development'."
    read -p "Continue anyway? (y/N) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        exit 0
    fi
fi

# Current version comes from the .version file (0.0.0 when absent).
CURRENT_VERSION=$(cat .version 2>/dev/null || echo "0.0.0")
echo "Current version: $CURRENT_VERSION"

# Prompt for and validate the new semantic version.
echo ""
echo "Enter new version (e.g., 1.0.0, 1.0.0-beta.1, 1.0.0-rc.1):"
read -r NEW_VERSION
if ! [[ "$NEW_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then
    error "Invalid semantic version format. Expected: MAJOR.MINOR.PATCH[-PRERELEASE]"
fi

# Refuse to reuse an existing tag.
if git rev-parse "v$NEW_VERSION" >/dev/null 2>&1; then
    error "Tag v$NEW_VERSION already exists"
fi

# Record, commit, and tag the new version.
echo "$NEW_VERSION" > .version
success "Updated .version to $NEW_VERSION"
git add .version
git commit -m "chore: bump version to $NEW_VERSION"
success "Committed version bump"
git tag -a "v$NEW_VERSION" -m "Release v$NEW_VERSION"
success "Created tag v$NEW_VERSION"

# Summarize what a push would send.
echo ""
echo "Ready to push:"
echo " - Commit: $(git rev-parse HEAD)"
echo " - Tag: v$NEW_VERSION"
echo " - Branch: $CURRENT_BRANCH"
echo ""

# Push only on explicit confirmation.
read -p "Push to remote? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    git push origin "$CURRENT_BRANCH"
    git push origin "v$NEW_VERSION"
    success "Pushed to remote!"
    echo ""
    success "Release workflow triggered!"
    echo " - GitHub will create a release with changelog"
    echo " - Docker images will be built and published"
    echo " - View progress at: https://github.com/Wikid82/CaddyProxyManagerPlus/actions"
else
    warning "Not pushed. You can push later with:"
    echo " git push origin $CURRENT_BRANCH"
    echo " git push origin v$NEW_VERSION"
fi