Changed docker-compose to compose.
Some checks failed
Build and Push Docker Images (Trusted) / build-and-push (., docker/caddy/Dockerfile, caddy) (push) Has been cancelled
Build and Push Docker Images (Trusted) / build-and-push (., docker/l4-port-manager/Dockerfile, l4-port-manager) (push) Has been cancelled
Build and Push Docker Images (Trusted) / build-and-push (., docker/web/Dockerfile, web) (push) Has been cancelled
Tests / test (push) Has been cancelled
Some checks failed
Build and Push Docker Images (Trusted) / build-and-push (., docker/caddy/Dockerfile, caddy) (push) Has been cancelled
Build and Push Docker Images (Trusted) / build-and-push (., docker/l4-port-manager/Dockerfile, l4-port-manager) (push) Has been cancelled
Build and Push Docker Images (Trusted) / build-and-push (., docker/web/Dockerfile, web) (push) Has been cancelled
Tests / test (push) Has been cancelled
This commit is contained in:
219
compose.yml
Executable file
219
compose.yml
Executable file
@@ -0,0 +1,219 @@
|
||||
services:
|
||||
web:
|
||||
container_name: caddy-proxy-manager-web
|
||||
image: ghcr.io/fuomag9/caddy-proxy-manager-web:latest
|
||||
build:
|
||||
context: .
|
||||
dockerfile: docker/web/Dockerfile
|
||||
args:
|
||||
# User and group IDs for rootless operation
|
||||
# Set these to match your host user to avoid permission issues
|
||||
# Find your UID/GID with: id -u / id -g
|
||||
PUID: ${PUID:-10001}
|
||||
PGID: ${PGID:-10001}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "3001:3000"
|
||||
environment:
|
||||
# Node environment
|
||||
NODE_ENV: production
|
||||
|
||||
# REQUIRED: Session secret for encrypting cookies and sessions
|
||||
# Generate with: openssl rand -base64 32
|
||||
# SECURITY: You MUST set this to a unique value in production!
|
||||
SESSION_SECRET: ${SESSION_SECRET:?ERROR - SESSION_SECRET is required}
|
||||
|
||||
# Caddy API endpoint (internal communication)
|
||||
CADDY_API_URL: ${CADDY_API_URL:-http://caddy:2019}
|
||||
|
||||
# Public base URL for the application
|
||||
BASE_URL: ${BASE_URL:-http://localhost:3000}
|
||||
|
||||
# Database configuration
|
||||
DATABASE_PATH: /app/data/caddy-proxy-manager.db
|
||||
DATABASE_URL: file:/app/data/caddy-proxy-manager.db
|
||||
|
||||
# NextAuth configuration
|
||||
NEXTAUTH_URL: ${BASE_URL:-http://localhost:3000}
|
||||
|
||||
# REQUIRED: Admin credentials for login
|
||||
# SECURITY: You MUST set these to secure values in production!
|
||||
# Password must be 12+ chars with uppercase, lowercase, numbers, and special chars
|
||||
ADMIN_USERNAME: ${ADMIN_USERNAME:?ERROR - ADMIN_USERNAME is required}
|
||||
ADMIN_PASSWORD: ${ADMIN_PASSWORD:?ERROR - ADMIN_PASSWORD is required}
|
||||
|
||||
# OAuth2/OIDC Authentication (Optional - works with Authentik, Authelia, Keycloak, etc.)
|
||||
OAUTH_ENABLED: ${OAUTH_ENABLED:-false}
|
||||
OAUTH_PROVIDER_NAME: ${OAUTH_PROVIDER_NAME:-OAuth2}
|
||||
OAUTH_CLIENT_ID: ${OAUTH_CLIENT_ID:-}
|
||||
OAUTH_CLIENT_SECRET: ${OAUTH_CLIENT_SECRET:-}
|
||||
OAUTH_ISSUER: ${OAUTH_ISSUER:-}
|
||||
OAUTH_AUTHORIZATION_URL: ${OAUTH_AUTHORIZATION_URL:-}
|
||||
OAUTH_TOKEN_URL: ${OAUTH_TOKEN_URL:-}
|
||||
OAUTH_USERINFO_URL: ${OAUTH_USERINFO_URL:-}
|
||||
OAUTH_ALLOW_AUTO_LINKING: ${OAUTH_ALLOW_AUTO_LINKING:-false}
|
||||
|
||||
# ClickHouse analytics database
|
||||
CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
|
||||
CLICKHOUSE_USER: ${CLICKHOUSE_USER:-cpm}
|
||||
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:?ERROR - CLICKHOUSE_PASSWORD is required}
|
||||
CLICKHOUSE_DB: ${CLICKHOUSE_DB:-analytics}
|
||||
group_add:
|
||||
- "${CADDY_GID:-10000}" # caddy's GID — lets the web user read /logs/access.log
|
||||
volumes:
|
||||
- caddy-manager-data:/app/data
|
||||
- geoip-data:/usr/share/GeoIP:ro,z
|
||||
- caddy-logs:/logs:ro
|
||||
depends_on:
|
||||
caddy:
|
||||
condition: service_healthy
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- caddy-network
|
||||
healthcheck:
|
||||
test: ["CMD", "node", "-e", "require('http').get('http://localhost:3000/api/health',r=>{process.exit(r.statusCode<400?0:1)}).on('error',()=>process.exit(1))"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
caddy:
|
||||
container_name: caddy-proxy-manager-caddy
|
||||
image: ghcr.io/fuomag9/caddy-proxy-manager-caddy:latest
|
||||
build:
|
||||
context: .
|
||||
dockerfile: docker/caddy/Dockerfile
|
||||
args:
|
||||
# User and group IDs for rootless operation
|
||||
# Set these to match your host user to avoid permission issues
|
||||
# Find your UID/GID with: id -u / id -g
|
||||
PUID: ${PUID:-10000}
|
||||
PGID: ${PGID:-10000}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "80:80"
|
||||
- "80:80/udp"
|
||||
- "443:443"
|
||||
- "443:443/udp"
|
||||
# - "2019:2019" # Admin API
|
||||
# Admin API (port 2019) is only exposed on internal network for security, enable at your risk
|
||||
# Web UI accesses via http://caddy:2019 internally
|
||||
# Uncomment the line below to expose metrics externally for Grafana/Prometheus
|
||||
# - "9090:9090" # Metrics available at http://localhost:9090/metrics (configure in Settings first)
|
||||
environment:
|
||||
# Primary domain for Caddy configuration
|
||||
PRIMARY_DOMAIN: ${PRIMARY_DOMAIN:-akanealw.com}
|
||||
volumes:
|
||||
- caddy-data:/data
|
||||
- caddy-config:/config
|
||||
- caddy-logs:/logs
|
||||
- geoip-data:/usr/share/GeoIP:ro,z
|
||||
networks:
|
||||
- caddy-network
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "-O", "/dev/null", "http://localhost:2019/config/"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
|
||||
# Docker socket proxy — restricts API surface exposed to l4-port-manager.
|
||||
# Only allows GET, POST to /containers/ and /compose/ endpoints.
|
||||
docker-socket-proxy:
|
||||
container_name: caddy-proxy-manager-docker-proxy
|
||||
image: tecnativa/docker-socket-proxy:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
CONTAINERS: 1
|
||||
POST: 1
|
||||
# Deny everything else by default
|
||||
IMAGES: 0
|
||||
NETWORKS: 0
|
||||
VOLUMES: 0
|
||||
EXEC: 0
|
||||
SWARM: 0
|
||||
AUTH: 0
|
||||
SECRETS: 0
|
||||
BUILD: 0
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
networks:
|
||||
- caddy-network
|
||||
|
||||
# L4 Port Manager sidecar — automatically recreates the caddy container
|
||||
# when L4 proxy host ports change.
|
||||
# Uses Docker socket proxy instead of direct Docker socket access.
|
||||
l4-port-manager:
|
||||
container_name: caddy-proxy-manager-l4-ports
|
||||
image: ghcr.io/fuomag9/caddy-proxy-manager-l4-port-manager:latest
|
||||
build:
|
||||
context: .
|
||||
dockerfile: docker/l4-port-manager/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
DATA_DIR: /data
|
||||
COMPOSE_DIR: /compose
|
||||
POLL_INTERVAL: "${L4_PORT_MANAGER_POLL_INTERVAL:-2}"
|
||||
DOCKER_HOST: tcp://docker-socket-proxy:2375
|
||||
volumes:
|
||||
- caddy-manager-data:/data
|
||||
- .:/compose:ro
|
||||
depends_on:
|
||||
caddy:
|
||||
condition: service_healthy
|
||||
docker-socket-proxy:
|
||||
condition: service_started
|
||||
networks:
|
||||
- caddy-network
|
||||
|
||||
clickhouse:
|
||||
container_name: caddy-proxy-manager-clickhouse
|
||||
image: clickhouse/clickhouse-server:latest-alpine
|
||||
restart: always
|
||||
environment:
|
||||
CLICKHOUSE_DB: ${CLICKHOUSE_DB:-analytics}
|
||||
CLICKHOUSE_USER: ${CLICKHOUSE_USER:-cpm}
|
||||
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:?ERROR - CLICKHOUSE_PASSWORD is required}
|
||||
CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: 1
|
||||
volumes:
|
||||
- clickhouse-data:/var/lib/clickhouse
|
||||
networks:
|
||||
- caddy-network
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "clickhouse-client --user ${CLICKHOUSE_USER:-cpm} --password ${CLICKHOUSE_PASSWORD:?ERROR - CLICKHOUSE_PASSWORD is required} --query 'SELECT 1'"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
|
||||
geoipupdate:
|
||||
container_name: geoipupdate-${HOSTNAME}
|
||||
image: ghcr.io/maxmind/geoipupdate
|
||||
profiles: [geoipupdate]
|
||||
restart: always
|
||||
environment:
|
||||
- GEOIPUPDATE_ACCOUNT_ID=${GEOIPUPDATE_ACCOUNT_ID:-}
|
||||
- GEOIPUPDATE_LICENSE_KEY=${GEOIPUPDATE_LICENSE_KEY:-}
|
||||
- 'GEOIPUPDATE_EDITION_IDS=GeoLite2-ASN GeoLite2-City GeoLite2-Country'
|
||||
- GEOIPUPDATE_FREQUENCY=72
|
||||
volumes:
|
||||
- geoip-data:/usr/share/GeoIP:z
|
||||
networks:
|
||||
- caddy-network
|
||||
|
||||
networks:
|
||||
caddy-network:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
caddy-manager-data:
|
||||
caddy-data:
|
||||
caddy-config:
|
||||
caddy-logs:
|
||||
geoip-data:
|
||||
clickhouse-data:
|
||||
Reference in New Issue
Block a user