solstice-ci/deploy/podman/compose.yml
Till Wegmueller 4c5a8567a4
Add webhook crate for extensible signature validation and integration
- Introduce a new `webhook` crate to centralize signature validation for GitHub, Hookdeck, and Forgejo webhooks.
- Enable `github-integration` to perform unified webhook signature verification using the `webhook` crate.
- Refactor `github-integration`: replace legacy HMAC verification with the reusable `webhook` structure.
- Extend Podman configuration for Hookdeck webhook signature handling and improve documentation.
- Clean up unused dependencies by migrating to the new implementation.

Signed-off-by: Till Wegmueller <toasterson@gmail.com>
2026-01-25 22:16:11 +01:00

# Podman Compose production stack for Solstice CI with Traefik + Let's Encrypt
# Usage:
#   1) cp .env.sample .env && edit values (ENV=staging|prod, secrets, email)
#   2) Ensure DNS A/AAAA records for the required hostnames point to this host:
#      - traefik.svc.${DOMAIN}
#      - api.${ENV}.${DOMAIN}
#      - grpc.${ENV}.${DOMAIN}
#      - logs.${ENV}.${DOMAIN}
#      - forge.${ENV}.${DOMAIN}
#      - github.${ENV}.${DOMAIN}
#      - minio.svc.${DOMAIN}
#      - s3.svc.${DOMAIN}
#      - mq.svc.${DOMAIN}
#      - db.svc.${DOMAIN} (optional)
#   3) podman compose -f compose.yml up -d --build
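#
# Illustrative .env sketch. The variable names below are the ones referenced in
# this file; every value is a placeholder, and variables not listed here have
# inline defaults further down:
#   ENV=staging
#   DOMAIN=example.org
#   TRAEFIK_ACME_EMAIL=ops@example.org
#   TRAEFIK_DASHBOARD_AUTH=<user:hash>        # see the htpasswd note under the traefik service
#   POSTGRES_USER=solstice
#   POSTGRES_DB=solstice
#   POSTGRES_PASSWORD=<generate>
#   RABBITMQ_DEFAULT_USER=solstice
#   RABBITMQ_DEFAULT_PASS=<generate>
#   MINIO_ROOT_USER=solstice
#   MINIO_ROOT_PASSWORD=<generate>
#   WEBHOOK_SECRET=<generate>                 # shared with the Forgejo webhook definition
#   FORGEJO_TOKEN=<api token>
#   FORGEJO_BASE_URL=https://forge.example.org
#   GITHUB_WEBHOOK_SECRET=<generate>
#   HOOKDECK_SIGNING_SECRET=<from Hookdeck>
#   GITHUB_APP_ID=<app id>
#   GITHUB_APP_KEY_PATH=/path/to/github-app-key.pem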
name: solstice-ci
networks:
  core: {}
volumes:
  traefik-acme:
  postgres-data:
  rabbitmq-data:
  minio-data:
services:
  traefik:
    image: docker.io/library/traefik:v3.6
    container_name: traefik
    restart: unless-stopped
    environment:
      DOCKER_API_VERSION: ${DOCKER_API_VERSION:-1.44}
    command:
      - --api.dashboard=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --entrypoints.web.address=:80
      - --entrypoints.websecure.address=:443
      - --certificatesresolvers.le.acme.email=${TRAEFIK_ACME_EMAIL}
      - --certificatesresolvers.le.acme.storage=/acme/acme.json
      - --certificatesresolvers.le.acme.httpchallenge=true
      - --certificatesresolvers.le.acme.httpchallenge.entrypoint=web
      - --serversTransport.insecureSkipVerify=true
      # Optional: override ACME CA server via .env (e.g., staging URL)
      - --certificatesresolvers.le.acme.caserver=${TRAEFIK_ACME_CASERVER:-}
    ports:
      # Rootless Podman cannot bind privileged ports (<1024). Use high ports via .env (e.g., 8080/4443),
      # or adjust sysctl on the host: net.ipv4.ip_unprivileged_port_start=80 (requires root).
      - ${TRAEFIK_HTTP_PORT:-80}:80
      - ${TRAEFIK_HTTPS_PORT:-443}:443
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:Z
      - traefik-acme:/acme
    networks:
      - core
    labels:
      - traefik.enable=true
      # Dashboard at traefik.svc.${DOMAIN}
      - traefik.http.routers.traefik.rule=Host(`traefik.svc.${DOMAIN}`)
      - traefik.http.routers.traefik.entrypoints=websecure
      - traefik.http.routers.traefik.tls.certresolver=le
      - traefik.http.routers.traefik.service=api@internal
      - traefik.http.middlewares.traefik-auth.basicauth.users=${TRAEFIK_DASHBOARD_AUTH}
      - traefik.http.routers.traefik.middlewares=traefik-auth
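  # A minimal sketch for creating the dashboard credentials, assuming the
  # `htpasswd` tool (apache2-utils / httpd-tools) is installed on the host:
  #   htpasswd -nb admin 'chosen-password'
  # Put the resulting "user:hash" into TRAEFIK_DASHBOARD_AUTH. Depending on the
  # compose implementation, literal '$' characters in the hash may need to be
  # doubled ('$$') to survive variable interpolation.
  # Note: the Docker provider above expects an API socket at /var/run/docker.sock;
  # on a Podman host this usually means enabling podman.socket and pointing the
  # bind mount above (or a symlink) at the Podman socket instead.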
  postgres:
    image: docker.io/library/postgres:16-alpine
    container_name: solstice-postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    ports:
      - "127.0.0.1:5432:5432" # expose Postgres to host only
    volumes:
      - postgres-data:/var/lib/postgresql/data:Z
    networks:
      - core
  postgres-setup:
    image: docker.io/library/postgres:16-alpine
    container_name: solstice-postgres-setup
    depends_on:
      postgres:
        condition: service_healthy
    entrypoint: ["/bin/sh", "-c"]
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    command: >-
      "export PGPASSWORD=${POSTGRES_PASSWORD} &&
      psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc \"SELECT 'CREATE DATABASE solstice_staging' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_staging')\" | psql -h postgres -U ${POSTGRES_USER} &&
      psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc \"SELECT 'CREATE DATABASE solstice_prod' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_prod')\" | psql -h postgres -U ${POSTGRES_USER}"
    networks:
      - core
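  # The setup job above relies on the usual idempotent-create pattern: the
  # "SELECT 'CREATE DATABASE ...' WHERE NOT EXISTS" query only emits the CREATE
  # statement when the database is missing, and piping it back into psql executes
  # it, so re-running the stack is safe. A quick host-side check (sketch, assumes
  # a local psql client and a sourced .env):
  #   PGPASSWORD=$POSTGRES_PASSWORD psql -h 127.0.0.1 -U $POSTGRES_USER -l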
  rabbitmq:
    image: docker.io/library/rabbitmq:4-management-alpine
    container_name: solstice-rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER}
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS}
      RABBITMQ_DEFAULT_VHOST: solstice-${ENV}
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s
    ports:
      - "127.0.0.1:5672:5672" # expose AMQP to host only
    volumes:
      - rabbitmq-data:/var/lib/rabbitmq:Z
    networks:
      - core
    labels:
      - traefik.enable=true
      # Management UI at mq.svc.${DOMAIN}
      - traefik.http.routers.mq.rule=Host(`mq.svc.${DOMAIN}`)
      - traefik.http.routers.mq.entrypoints=websecure
      - traefik.http.routers.mq.tls.certresolver=le
      - traefik.http.services.mq.loadbalancer.server.port=15672
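  # To confirm the per-environment vhost (solstice-${ENV}) exists after startup,
  # a quick check from the host is (sketch):
  #   podman exec solstice-rabbitmq rabbitmqctl list_vhosts
  # The management UI at https://mq.svc.${DOMAIN} accepts RABBITMQ_DEFAULT_USER /
  # RABBITMQ_DEFAULT_PASS.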
  minio:
    image: quay.io/minio/minio:latest
    container_name: solstice-minio
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    volumes:
      - minio-data:/data:Z
    networks:
      - core
    labels:
      - traefik.enable=true
      # MinIO Console at minio.svc.${DOMAIN}
      - traefik.http.routers.minio.rule=Host(`minio.svc.${DOMAIN}`)
      - traefik.http.routers.minio.entrypoints=websecure
      - traefik.http.routers.minio.tls.certresolver=le
      - traefik.http.services.minio.loadbalancer.server.port=9001
      # S3 API via TCP router on s3.svc.${DOMAIN}
      - traefik.tcp.routers.s3.rule=HostSNI(`s3.svc.${DOMAIN}`)
      - traefik.tcp.routers.s3.entrypoints=websecure
      - traefik.tcp.routers.s3.tls=true
      - traefik.tcp.routers.s3.tls.certresolver=le
      - traefik.tcp.services.s3.loadbalancer.server.port=9000
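  # Note on the S3 router above: a TCP router with HostSNI and tls=true (no
  # passthrough) means Traefik terminates TLS for s3.svc.${DOMAIN} and forwards
  # plain HTTP to MinIO on port 9000, so S3 clients talk to https://s3.svc.${DOMAIN}.
  # Sketch of a client alias (assumes the `mc` client on a workstation; the
  # domain is a placeholder):
  #   mc alias set solstice https://s3.svc.example.org "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"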
  minio-setup:
    image: quay.io/minio/mc:latest
    container_name: solstice-minio-setup
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: ["/bin/sh", "-c"]
    command: >-
      "mc alias set local http://minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} &&
      mc mb -p local/solstice-logs-staging || true &&
      mc mb -p local/solstice-logs-prod || true &&
      if [ -n \"${MINIO_BUCKET}\" ]; then mc mb -p local/${MINIO_BUCKET} || true; fi"
    networks:
      - core
  orchestrator:
    build:
      context: ../..
      dockerfile: deploy/images/orchestrator/Containerfile
      args:
        BIN: orchestrator
    image: local/solstice-orchestrator:latest
    container_name: solstice-orchestrator
    restart: unless-stopped
    environment:
      RUST_LOG: info
      DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/solstice_${ENV}
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      AMQP_EXCHANGE: solstice.jobs
      AMQP_QUEUE: solstice.jobs.v1
      AMQP_ROUTING_KEY: jobrequest.v1
      HTTP_ADDR: 0.0.0.0:8081
      # URL where logs-service is exposed (used for redirects)
      LOGS_BASE_URL: https://logs.${ENV}.${DOMAIN}
      # Paths inside the container to runner binaries that will be uploaded over SSH
      RUNNER_LINUX_PATH: /opt/solstice/runners/solstice-runner-linux
      RUNNER_ILLUMOS_PATH: /opt/solstice/runners/solstice-runner-illumos
      # Remote path on the VM where the runner will be uploaded and executed
      REMOTE_RUNNER_PATH: /usr/local/bin/solstice-runner
      # SSH connect timeout for reaching the VM (seconds)
      SSH_CONNECT_TIMEOUT_SECS: ${SSH_CONNECT_TIMEOUT_SECS:-300}
      # Libvirt configuration for Linux/KVM
      LIBVIRT_URI: ${LIBVIRT_URI:-qemu:///system}
      LIBVIRT_NETWORK: ${LIBVIRT_NETWORK:-default}
    depends_on:
      postgres:
        condition: service_healthy
      postgres-setup:
        condition: service_completed_successfully
      rabbitmq:
        condition: service_healthy
    # Host integrations: libvirt sockets/devices and config + image/work directories
    volumes:
      # Read-only mount of the image map config into the container path expected by the binary
      - ${ORCH_IMAGE_MAP_PATH:-../../examples/orchestrator-image-map.yaml}:/examples/orchestrator-image-map.yaml:ro,Z
      # Writable bind for images so the orchestrator can download/retain base images on the host
      - ${ORCH_IMAGES_DIR:-/var/lib/solstice/images}:/var/lib/solstice/images:Z
      # Writable bind for per-VM overlays and console logs (used by the libvirt backend)
      - ${ORCH_WORK_DIR:-/var/lib/solstice-ci}:/var/lib/solstice-ci:Z
      # Read-only bind for locally built workflow runner binaries; the orchestrator uploads them over SSH
      - ${RUNNER_DIR_HOST:-../../target/runners}:/opt/solstice/runners:ro,Z
      # Libvirt control sockets (the read-only socket suffices for monitoring; the read-write socket is needed to create domains)
      - /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock:Z
      - /var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro:Z
      # Optional: expose host libvirt configs for network XML reads
      - /etc/libvirt:/etc/libvirt:ro,Z
      - /var/lib/libvirt:/var/lib/libvirt:ro,Z
      # KVM device for hardware acceleration
      - /dev/kvm:/dev/kvm
    devices:
      - /dev/kvm
    networks:
      - core
    labels:
      - traefik.enable=true
      # HTTP endpoints at api.${ENV}.${DOMAIN}
      - traefik.http.routers.api.rule=Host(`api.${ENV}.${DOMAIN}`)
      - traefik.http.routers.api.entrypoints=websecure
      - traefik.http.routers.api.tls.certresolver=le
      - traefik.http.services.api.loadbalancer.server.port=8081
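  # Assumption worth verifying on your host: the libvirt system socket, the
  # /etc/libvirt and /var/lib/libvirt binds, and /dev/kvm are typically only
  # reachable when the stack runs rootful (or the user is in the kvm/libvirt
  # groups); a plain rootless Podman setup usually cannot use qemu:///system.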
  logs-service:
    build:
      context: ../..
      dockerfile: deploy/images/logs-service/Containerfile
    image: local/solstice-logs-service:latest
    container_name: solstice-logs-service
    restart: unless-stopped
    environment:
      RUST_LOG: info
      DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/solstice_${ENV}
      HTTP_ADDR: 0.0.0.0:8082
    depends_on:
      postgres:
        condition: service_healthy
      postgres-setup:
        condition: service_completed_successfully
    networks:
      - core
    labels:
      - traefik.enable=true
      # Expose logs service at logs.${ENV}.${DOMAIN}
      - traefik.http.routers.logs.rule=Host(`logs.${ENV}.${DOMAIN}`)
      - traefik.http.routers.logs.entrypoints=websecure
      - traefik.http.routers.logs.tls.certresolver=le
      - traefik.http.services.logs.loadbalancer.server.port=8082
  forge-integration:
    build:
      context: ../..
      dockerfile: deploy/images/forge-integration/Containerfile
      args:
        BIN: forge-integration
    image: local/solstice-forge-integration:latest
    container_name: solstice-forge-integration
    restart: unless-stopped
    environment:
      RUST_LOG: info
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      # HTTP server config for webhooks
      HTTP_ADDR: 0.0.0.0:8080
      WEBHOOK_PATH: /webhooks/forgejo
      # Secrets and Forgejo API configuration
      WEBHOOK_SECRET: ${WEBHOOK_SECRET}
      FORGEJO_TOKEN: ${FORGEJO_TOKEN}
      FORGEJO_BASE_URL: ${FORGEJO_BASE_URL}
      # URL where logs-service is exposed (used for commit status links)
      LOGS_BASE_URL: https://logs.${ENV}.${DOMAIN}
    depends_on:
      rabbitmq:
        condition: service_healthy
    networks:
      - core
    labels:
      - traefik.enable=true
      # Forgejo webhooks at forge.${ENV}.${DOMAIN}
      - traefik.http.routers.forge.rule=Host(`forge.${ENV}.${DOMAIN}`)
      - traefik.http.routers.forge.entrypoints=websecure
      - traefik.http.routers.forge.tls.certresolver=le
      - traefik.http.services.forge.loadbalancer.server.port=8080
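  # To wire Forgejo up, point a repository or system webhook at
  #   https://forge.${ENV}.${DOMAIN}/webhooks/forgejo
  # and give it the same secret as WEBHOOK_SECRET. Forgejo signs deliveries with
  # an HMAC-SHA256 of the payload (the X-Forgejo-Signature header, as far as we
  # know; confirm against the Forgejo docs), which the shared webhook crate
  # validates.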
  github-integration:
    build:
      context: ../..
      dockerfile: deploy/images/github-integration/Containerfile
      args:
        BIN: github-integration
    image: local/solstice-github-integration:latest
    container_name: solstice-github-integration
    restart: unless-stopped
    environment:
      RUST_LOG: info
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      # HTTP server for GitHub webhooks
      HTTP_ADDR: 0.0.0.0:8082
      WEBHOOK_PATH: /webhooks/github
      GITHUB_WEBHOOK_SECRET: ${GITHUB_WEBHOOK_SECRET}
      HOOKDECK_SIGNING_SECRET: ${HOOKDECK_SIGNING_SECRET}
      GITHUB_APP_ID: ${GITHUB_APP_ID}
      GITHUB_APP_KEY_PATH: /app/github-app-key.pem
      GITHUB_API_BASE: ${GITHUB_API_BASE:-https://api.github.com}
      GITHUB_CHECK_NAME: ${GITHUB_CHECK_NAME:-Solstice CI}
      # URL where logs-service is exposed (used for check-run links)
      LOGS_BASE_URL: https://logs.${ENV}.${DOMAIN}
    volumes:
      - ${GITHUB_APP_KEY_PATH}:/app/github-app-key.pem
    depends_on:
      rabbitmq:
        condition: service_healthy
    networks:
      - core
    labels:
      - traefik.enable=true
      # GitHub webhooks at github.${ENV}.${DOMAIN}
      - traefik.http.routers.github.rule=Host(`github.${ENV}.${DOMAIN}`)
      - traefik.http.routers.github.entrypoints=websecure
      - traefik.http.routers.github.tls.certresolver=le
      - traefik.http.services.github.loadbalancer.server.port=8082
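  # GitHub signs every delivery with an HMAC-SHA256 of the raw request body using
  # the webhook secret and sends it as "X-Hub-Signature-256: sha256=<hex>"; when
  # deliveries are relayed through Hookdeck, HOOKDECK_SIGNING_SECRET is used to
  # verify Hookdeck's own signature header instead. A minimal local test sketch
  # (payload, secret handling, and the URL are illustrative placeholders):
  #   BODY='{"zen":"ping"}'
  #   SIG=$(printf '%s' "$BODY" | openssl dgst -sha256 -hmac "$GITHUB_WEBHOOK_SECRET" | awk '{print $2}')
  #   curl -X POST "https://github.${ENV}.${DOMAIN}/webhooks/github" \
  #     -H "Content-Type: application/json" \
  #     -H "X-GitHub-Event: ping" \
  #     -H "X-Hub-Signature-256: sha256=$SIG" \
  #     -d "$BODY"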