solstice-ci/deploy/podman/compose.yml
Till Wegmueller 1c5dc338f5
Add Podman Compose deployment stack with Traefik and services integration
This commit introduces:
- A production-ready Podman Compose stack using Traefik as a reverse proxy with Let's Encrypt integration.
- Per-environment logical separation for Postgres, RabbitMQ, and MinIO services.
- New deployment utilities, including a `.env.sample` template, `compose.yml`, and setup scripts for MinIO and Postgres.
- Updates to `github-integration` HTTP server with basic webhook handling using `axum` and configurable paths.
- Adjustments to packaging tasks for better tarball generation via `git archive`.
- Expanded dependencies for `PKGBUILD` to support SQLite and PostgreSQL libraries.
- Containerfiles for orchestrator and integration services to enable Rust multi-stage builds without sccache.

This enables simplified and secure CI deployments with automatic routing, TLS, and volume persistence.
2025-11-08 20:21:57 +00:00

268 lines
9.4 KiB
YAML

# Podman Compose production stack for Solstice CI with Traefik + Let's Encrypt
# Usage:
#   1) cp .env.sample .env && edit values (ENV=staging|prod, secrets, email)
#   2) Ensure DNS A/AAAA for required hostnames pointing to this host:
#      - traefik.svc.${DOMAIN}
#      - api.${ENV}.${DOMAIN}
#      - grpc.${ENV}.${DOMAIN}
#      - forge.${ENV}.${DOMAIN}
#      - github.${ENV}.${DOMAIN}
#      - minio.svc.${DOMAIN}
#      - s3.svc.${DOMAIN}
#      - mq.svc.${DOMAIN}
#      - db.svc.${DOMAIN} (optional)
#   3) podman compose -f compose.yml up -d --build
name: solstice-ci

# Single shared bridge network; all services talk over it and Traefik routes in.
networks:
  core:
    driver: bridge

# Named volumes for persistent state (ACME certs, DB, MQ, object store).
volumes:
  traefik-acme:
  postgres-data:
  rabbitmq-data:
  minio-data:
services:
  # Reverse proxy: terminates TLS (Let's Encrypt HTTP-01) and routes by host.
  traefik:
    image: docker.io/library/traefik:v3.1
    container_name: traefik
    restart: unless-stopped
    command:
      - --api.dashboard=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --entrypoints.web.address=:80
      - --entrypoints.websecure.address=:443
      - --certificatesresolvers.le.acme.email=${TRAEFIK_ACME_EMAIL}
      - --certificatesresolvers.le.acme.storage=/acme/acme.json
      - --certificatesresolvers.le.acme.httpchallenge=true
      - --certificatesresolvers.le.acme.httpchallenge.entrypoint=web
      # NOTE(review): disables upstream cert verification for ALL backends;
      # acceptable for self-signed internal services but weakens security — confirm intent.
      - --serversTransport.insecureSkipVerify=true
      # Optional: override ACME CA server via .env (e.g., staging URL)
      - --certificatesresolvers.le.acme.caserver=${TRAEFIK_ACME_CASERVER}
    ports:
      # Quoted: compose port mappings must be strings to avoid YAML scalar surprises.
      - "${TRAEFIK_HTTP_PORT:-80}:80"
      - "${TRAEFIK_HTTPS_PORT:-443}:443"
    volumes:
      # Podman socket mounted at the Docker path so the docker provider works.
      - /var/run/podman/podman.sock:/var/run/docker.sock:Z
      - traefik-acme:/acme
    networks:
      - core
    labels:
      - traefik.enable=true
      # Dashboard at traefik.svc.${DOMAIN}, behind basic auth.
      - traefik.http.routers.traefik.rule=Host(`traefik.svc.${DOMAIN}`)
      - traefik.http.routers.traefik.entrypoints=websecure
      - traefik.http.routers.traefik.tls.certresolver=le
      - traefik.http.routers.traefik.service=api@internal
      - traefik.http.middlewares.traefik-auth.basicauth.users=${TRAEFIK_DASHBOARD_AUTH}
      - traefik.http.routers.traefik.middlewares=traefik-auth
postgres:
image: docker.io/library/postgres:16-alpine
container_name: solstice-postgres
restart: unless-stopped
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
volumes:
- postgres-data:/var/lib/postgresql/data:Z
networks:
- core
postgres-setup:
image: docker.io/library/postgres:16-alpine
container_name: solstice-postgres-setup
depends_on:
postgres:
condition: service_healthy
entrypoint: ["/bin/sh", "-c"]
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
command: >-
"export PGPASSWORD=${POSTGRES_PASSWORD} &&
psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc \"SELECT 'CREATE DATABASE solstice_staging' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_staging')\" | psql -h postgres -U ${POSTGRES_USER} &&
psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc \"SELECT 'CREATE DATABASE solstice_prod' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_prod')\" | psql -h postgres -U ${POSTGRES_USER}"
networks:
- core
rabbitmq:
image: docker.io/library/rabbitmq:4-management-alpine
container_name: solstice-rabbitmq
restart: unless-stopped
environment:
RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER}
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS}
RABBITMQ_DEFAULT_VHOST: solstice-${ENV}
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 5s
volumes:
- rabbitmq-data:/var/lib/rabbitmq:Z
networks:
- core
labels:
- traefik.enable=true
# Management UI at mq.svc.${DOMAIN}
- traefik.http.routers.mq.rule=Host(`mq.svc.${DOMAIN}`)
- traefik.http.routers.mq.entrypoints=websecure
- traefik.http.routers.mq.tls.certresolver=le
- traefik.http.services.mq.loadbalancer.server.port=15672
minio:
image: quay.io/minio/minio:RELEASE.2025-02-07T22-39-53Z
container_name: solstice-minio
restart: unless-stopped
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
volumes:
- minio-data:/data:Z
networks:
- core
labels:
- traefik.enable=true
# MinIO Console at minio.svc.${DOMAIN}
- traefik.http.routers.minio.rule=Host(`minio.svc.${DOMAIN}`)
- traefik.http.routers.minio.entrypoints=websecure
- traefik.http.routers.minio.tls.certresolver=le
- traefik.http.services.minio.loadbalancer.server.port=9001
# S3 API via TCP router on s3.svc.${DOMAIN}
- traefik.tcp.routers.s3.rule=HostSNI(`s3.svc.${DOMAIN}`)
- traefik.tcp.routers.s3.entrypoints=websecure
- traefik.tcp.routers.s3.tls=true
- traefik.tcp.routers.s3.tls.certresolver=le
- traefik.tcp.services.s3.loadbalancer.server.port=9000
minio-setup:
image: quay.io/minio/mc:RELEASE.2025-02-07T22-47-51Z
container_name: solstice-minio-setup
depends_on:
minio:
condition: service_healthy
entrypoint: ["/bin/sh", "-c"]
command: >-
"mc alias set local http://minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} &&
mc mb -p local/solstice-logs-staging || true &&
mc mb -p local/solstice-logs-prod || true &&
if [ -n \"${MINIO_BUCKET}\" ]; then mc mb -p local/${MINIO_BUCKET} || true; fi"
networks:
- core
orchestrator:
build:
context: ../..
dockerfile: deploy/images/orchestrator/Containerfile
args:
BIN: orchestrator
image: local/solstice-orchestrator:latest
container_name: solstice-orchestrator
restart: unless-stopped
environment:
RUST_LOG: info
DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/solstice_${ENV}
AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
AMQP_EXCHANGE: solstice.jobs
AMQP_QUEUE: solstice.jobs.v1
AMQP_ROUTING_KEY: jobrequest.v1
GRPC_ADDR: 0.0.0.0:50051
HTTP_ADDR: 0.0.0.0:8081
depends_on:
postgres:
condition: service_healthy
postgres-setup:
condition: service_completed_successfully
rabbitmq:
condition: service_healthy
networks:
- core
labels:
- traefik.enable=true
# HTTP endpoints at api.${ENV}.${DOMAIN}
- traefik.http.routers.api.rule=Host(`api.${ENV}.${DOMAIN}`)
- traefik.http.routers.api.entrypoints=websecure
- traefik.http.routers.api.tls.certresolver=le
- traefik.http.services.api.loadbalancer.server.port=8081
# gRPC on grpc.${ENV}.${DOMAIN} (TLS, h2)
- traefik.tcp.routers.grpc.rule=HostSNI(`grpc.${ENV}.${DOMAIN}`)
- traefik.tcp.routers.grpc.entrypoints=websecure
- traefik.tcp.routers.grpc.tls=true
- traefik.tcp.routers.grpc.tls.certresolver=le
- traefik.tcp.services.grpc.loadbalancer.server.port=50051
forge-integration:
build:
context: ../..
dockerfile: deploy/images/forge-integration/Containerfile
args:
BIN: forge-integration
image: local/solstice-forge-integration:latest
container_name: solstice-forge-integration
restart: unless-stopped
environment:
RUST_LOG: info
AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
# HTTP server config for webhooks
HTTP_ADDR: 0.0.0.0:8080
WEBHOOK_PATH: /webhooks/forgejo
depends_on:
rabbitmq:
condition: service_healthy
networks:
- core
labels:
- traefik.enable=true
# Forge webhooks at forge.svc.${DOMAIN}
- traefik.http.routers.forge.rule=Host(`forge.${ENV}.${DOMAIN}`)
- traefik.http.routers.forge.entrypoints=websecure
- traefik.http.routers.forge.tls.certresolver=le
- traefik.http.services.forge.loadbalancer.server.port=8080
github-integration:
build:
context: ../..
dockerfile: deploy/images/github-integration/Containerfile
args:
BIN: github-integration
image: local/solstice-github-integration:latest
container_name: solstice-github-integration
restart: unless-stopped
environment:
RUST_LOG: info
AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
# HTTP server for GitHub webhooks (skeleton service; implement handler later)
HTTP_ADDR: 0.0.0.0:8082
depends_on:
rabbitmq:
condition: service_healthy
networks:
- core
labels:
- traefik.enable=true
# GitHub webhooks at github.svc.${DOMAIN}
- traefik.http.routers.github.rule=Host(`github.${ENV}.${DOMAIN}`)
- traefik.http.routers.github.entrypoints=websecure
- traefik.http.routers.github.tls.certresolver=le
- traefik.http.services.github.loadbalancer.server.port=8082