Add Podman Compose deployment stack with Traefik and services integration

This commit introduces:
- A production-ready Podman Compose stack using Traefik as a reverse proxy with Let's Encrypt integration.
- Per-environment logical separation for Postgres, RabbitMQ, and MinIO services.
- New deployment utilities, including a `.env.sample` template, `compose.yml`, and setup scripts for MinIO and Postgres.
- Updates to `github-integration` HTTP server with basic webhook handling using `axum` and configurable paths.
- Adjustments to packaging tasks for better tarball generation via `git archive`.
- Expanded dependencies for `PKGBUILD` to support SQLite and PostgreSQL libraries.
- Containerfiles for orchestrator and integration services to enable Rust multi-stage builds without sccache.

This enables simplified and secure CI deployments with automatic routing, TLS, and volume persistence.
This commit is contained in:
Till Wegmueller 2025-11-08 20:21:57 +00:00
parent 31a88343cb
commit 1c5dc338f5
No known key found for this signature in database
12 changed files with 487 additions and 14 deletions

View file

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="DataSourcePerFileMappings">
<file url="file://$APPLICATION_CONFIG_DIR$/consoles/db/8be7b89e-6fc5-4e5d-a84a-57129d3a04d4/console.sql" value="8be7b89e-6fc5-4e5d-a84a-57129d3a04d4" />
</component>
</project>

View file

@ -12,7 +12,9 @@ TARBALL="solstice-ci.tar.gz"
TMPDIR=$(mktemp -d) TMPDIR=$(mktemp -d)
trap 'rm -rf "$TMPDIR"' EXIT trap 'rm -rf "$TMPDIR"' EXIT
git ls-files -z | tar --null -czf "$TMPDIR/$TARBALL" -T - # Create a tarball with a top-level solstice-ci/ prefix so PKGBUILDs can `cd "$srcdir/solstice-ci"`
# Use git archive to ensure only tracked files are included and the prefix is present.
git archive --format=tar.gz --prefix=solstice-ci/ -o "$TMPDIR/$TARBALL" HEAD
for pkg in solstice-orchestrator solstice-forge-integration; do for pkg in solstice-orchestrator solstice-forge-integration; do
PKG_DIR="$ROOT_DIR/packaging/arch/$pkg" PKG_DIR="$ROOT_DIR/packaging/arch/$pkg"

View file

@ -9,3 +9,4 @@ clap = { version = "4", features = ["derive", "env"] }
miette = { version = "7", features = ["fancy"] } miette = { version = "7", features = ["fancy"] }
tracing = "0.1" tracing = "0.1"
tokio = { version = "1", features = ["rt-multi-thread", "macros", "signal"] } tokio = { version = "1", features = ["rt-multi-thread", "macros", "signal"] }
axum = { version = "0.8", features = ["macros"] }

View file

@ -1,6 +1,8 @@
use std::net::SocketAddr;
use clap::Parser; use clap::Parser;
use miette::Result; use miette::Result;
use tracing::{info, warn}; use tracing::{info, warn};
use axum::{Router, routing::post, response::IntoResponse, http::StatusCode};
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
#[command( #[command(
@ -9,10 +11,14 @@ use tracing::{info, warn};
about = "Solstice CI — GitHub Integration (GitHub App)" about = "Solstice CI — GitHub Integration (GitHub App)"
)] )]
struct Opts { struct Opts {
/// HTTP bind address for GitHub webhooks (e.g., 0.0.0.0:8081) /// HTTP bind address for GitHub webhooks (e.g., 0.0.0.0:8082)
#[arg(long, env = "HTTP_ADDR", default_value = "0.0.0.0:8081")] #[arg(long, env = "HTTP_ADDR", default_value = "0.0.0.0:8082")]
http_addr: String, http_addr: String,
/// Webhook path (route)
#[arg(long, env = "WEBHOOK_PATH", default_value = "/webhooks/github")]
webhook_path: String,
/// GitHub App ID /// GitHub App ID
#[arg(long, env = "GITHUB_APP_ID")] #[arg(long, env = "GITHUB_APP_ID")]
app_id: Option<u64>, app_id: Option<u64>,
@ -26,15 +32,29 @@ struct Opts {
otlp: Option<String>, otlp: Option<String>,
} }
/// Minimal webhook receiver: unconditionally acknowledges every POST with 200 OK.
/// No signature verification or event dispatch yet — payloads must not be trusted
/// until the GitHub App flow is implemented.
async fn handle_github_webhook() -> impl IntoResponse {
// For now, accept and log. Implement signature verification and event handling later.
StatusCode::OK
}
/// Entry point for the GitHub integration service.
///
/// Binds an axum HTTP server on `HTTP_ADDR` and exposes a single POST route at
/// `WEBHOOK_PATH` (default `/webhooks/github`). The handler currently only
/// acknowledges deliveries; signature verification and the GitHub App auth
/// flow are still TODO.
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
    let _t = common::init_tracing("solstice-github-integration")?;
    let opts = Opts::parse();
    info!(http_addr = %opts.http_addr, path = %opts.webhook_path, "github integration starting");

    // axum's `Router::route` wants a `&'static str` path; the CLI value lives
    // for the whole process, so leaking one copy here (once, at startup) is fine.
    let path: &'static str = Box::leak(opts.webhook_path.clone().into_boxed_str());
    let app = Router::new().route(path, post(handle_github_webhook));

    let addr: SocketAddr = opts.http_addr.parse().expect("invalid HTTP_ADDR");
    warn!("github-integration webhook endpoint is active but handler is minimal; implement GitHub App flow");

    // Fix: `serve(..).await` only returns when the server dies, so a ctrl-c
    // wait placed after it was unreachable and SIGINT never shut the server
    // down cleanly. Register ctrl-c as the graceful-shutdown signal instead.
    let listener = tokio::net::TcpListener::bind(addr).await.expect("bind");
    axum::serve(listener, app)
        .with_graceful_shutdown(async {
            tokio::signal::ctrl_c().await.expect("listen for ctrl-c");
        })
        .await
        .expect("server error");
    Ok(())
}

View file

@ -0,0 +1,25 @@
# syntax=docker/dockerfile:1.7
# Build Solstice Forge Integration using upstream official images (no sccache)
FROM docker.io/library/rust:bookworm AS builder
# Keep all cargo state under /cargo so the cache mounts below line up with it.
ENV CARGO_HOME=/cargo
WORKDIR /work
# Install protoc for tonic/prost builds
RUN apt-get update \
&& apt-get install -y --no-install-recommends protobuf-compiler ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Configure cargo target-dir so it can be cached between layers
RUN mkdir -p /cargo && printf "[build]\ntarget-dir = \"/cargo/target\"\n" > /cargo/config.toml
# NOTE(review): Cargo.lock is not copied, so dependency versions are
# re-resolved on each clean build; COPY it too if the workspace commits one.
COPY Cargo.toml ./
COPY crates ./crates
# Build only this crate. The binary is copied out of /cargo/target within the
# same RUN because the cache mount disappears when the step ends.
RUN --mount=type=cache,target=/cargo/registry \
--mount=type=cache,target=/cargo/git \
--mount=type=cache,target=/cargo/target \
cargo build --release -p forge-integration && cp /cargo/target/release/forge-integration /forge-integration
# Minimal runtime stage: CA certificates plus the built binary only.
FROM docker.io/library/debian:bookworm-slim
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /forge-integration /usr/local/bin/forge-integration
ENTRYPOINT ["/usr/local/bin/forge-integration"]

View file

@ -0,0 +1,25 @@
# syntax=docker/dockerfile:1.7
# Build Solstice GitHub Integration using upstream official images (no sccache)
FROM docker.io/library/rust:bookworm AS builder
# Keep all cargo state under /cargo so the cache mounts below line up with it.
ENV CARGO_HOME=/cargo
WORKDIR /work
# Install protoc for tonic/prost builds
RUN apt-get update \
&& apt-get install -y --no-install-recommends protobuf-compiler ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Configure cargo target-dir so it can be cached between layers
RUN mkdir -p /cargo && printf "[build]\ntarget-dir = \"/cargo/target\"\n" > /cargo/config.toml
# NOTE(review): Cargo.lock is not copied, so dependency versions are
# re-resolved on each clean build; COPY it too if the workspace commits one.
COPY Cargo.toml ./
COPY crates ./crates
# Build only this crate. The binary is copied out of /cargo/target within the
# same RUN because the cache mount disappears when the step ends.
RUN --mount=type=cache,target=/cargo/registry \
--mount=type=cache,target=/cargo/git \
--mount=type=cache,target=/cargo/target \
cargo build --release -p github-integration && cp /cargo/target/release/github-integration /github-integration
# Minimal runtime stage: CA certificates plus the built binary only.
FROM docker.io/library/debian:bookworm-slim
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /github-integration /usr/local/bin/github-integration
ENTRYPOINT ["/usr/local/bin/github-integration"]

View file

@ -0,0 +1,30 @@
# syntax=docker/dockerfile:1.7
# Build Solstice Orchestrator using upstream official images (no sccache)
FROM docker.io/library/rust:bookworm AS builder
# Keep all cargo state under /cargo so the cache mounts below line up with it.
ENV CARGO_HOME=/cargo
WORKDIR /work
# Install build dependencies: protoc, headers, pkg-config
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
protobuf-compiler pkg-config libsqlite3-dev libpq-dev ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Configure cargo target-dir so it can be cached between layers
RUN mkdir -p /cargo && printf "[build]\ntarget-dir = \"/cargo/target\"\n" > /cargo/config.toml
# Pre-copy manifests for better caching
# NOTE(review): Cargo.lock is not copied, so dependency versions are
# re-resolved on each clean build; COPY it too if the workspace commits one.
COPY Cargo.toml ./
COPY crates ./crates
# Build orchestrator only
# The binary is copied out of /cargo/target within the same RUN because the
# cache mount disappears when the step ends.
RUN --mount=type=cache,target=/cargo/registry \
--mount=type=cache,target=/cargo/git \
--mount=type=cache,target=/cargo/target \
cargo build --release -p orchestrator && cp /cargo/target/release/orchestrator /orchestrator
FROM docker.io/library/debian:bookworm-slim
# Minimal runtime image with required shared libs for sqlite/postgres
RUN apt-get update \
&& apt-get install -y --no-install-recommends libsqlite3-0 libpq5 ca-certificates \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /orchestrator /usr/local/bin/orchestrator
# 50051 = gRPC, 8081 = HTTP (matches GRPC_ADDR/HTTP_ADDR set in compose.yml)
EXPOSE 50051 8081
ENTRYPOINT ["/usr/local/bin/orchestrator"]

36
deploy/podman/.env.sample Normal file
View file

@ -0,0 +1,36 @@
# Copy to .env and adjust values. This file is consumed by podman compose.
# Deployment environment: staging or prod
ENV=staging
# Base domain used for routing. External hostnames are *.svc.${DOMAIN} (no ENV in hostname)
DOMAIN=solstice-ci.org
# ACME email for Let's Encrypt registration
TRAEFIK_ACME_EMAIL=ops@solstice-ci.org
# Optional: set Let's Encrypt CA server (leave empty for production, set to staging for tests)
# For staging, uncomment:
# TRAEFIK_ACME_CASERVER=https://acme-staging-v02.api.letsencrypt.org/directory
# Admin credentials (override in real deployments via secret store)
POSTGRES_USER=solstice
POSTGRES_PASSWORD=change-me
# Databases are created by postgres-setup: solstice_staging and solstice_prod
# Services will connect to postgres database: solstice_${ENV}
POSTGRES_DB=solstice
# RabbitMQ uses a single broker with per-env vhosts: solstice-staging, solstice-prod
RABBITMQ_DEFAULT_USER=solstice
RABBITMQ_DEFAULT_PASS=change-me
MINIO_ROOT_USER=solstice
MINIO_ROOT_PASSWORD=change-me
# Buckets per env (created by minio-setup): solstice-logs-staging, solstice-logs-prod
# Optionally set to the env-specific bucket name (set in your shell, not here): e.g., solstice-logs-staging or solstice-logs-prod
# Leave empty to skip custom bucket creation in minio-setup
MINIO_BUCKET=
# Traefik dashboard basic auth user:password hash (htpasswd -nB admin)
# Example: admin:$2y$05$kN2K0... (bcrypt)
# NOTE(review): compose performs ${...} substitution on values from this file;
# literal $ characters in the bcrypt hash likely need to be escaped as $$ — verify
# with your compose implementation before deploying.
TRAEFIK_DASHBOARD_AUTH=
# Host ports to bind Traefik
TRAEFIK_HTTP_PORT=80
TRAEFIK_HTTPS_PORT=443

71
deploy/podman/README.md Normal file
View file

@ -0,0 +1,71 @@
Solstice CI — Production deployment with Podman Compose + Traefik
This stack deploys Solstice CI services behind Traefik with automatic TLS certificates from Let's Encrypt. It uses upstream official images for system services and multi-stage Rust builds on official Rust/Debian images that rely on container layer caching (no sccache) for fast, reproducible builds.
Prerequisites
- Podman 4.9+ with podman-compose compatibility (podman compose)
- Public DNS records for subdomains pointing to the host running this stack
- Ports 80 and 443 open to the Internet
- Email address for ACME registration
DNS
Create A/AAAA records for the following hostnames under your base domain (no environment in hostname; env separation is logical via DB/vhost/buckets):
- traefik.svc.DOMAIN
- api.svc.DOMAIN
- grpc.svc.DOMAIN
- runner.svc.DOMAIN
- forge.svc.DOMAIN (Forge/Forgejo webhooks)
- github.svc.DOMAIN (GitHub App/webhooks)
- minio.svc.DOMAIN (console UI)
- s3.svc.DOMAIN (S3 API, TLS via TCP SNI)
- mq.svc.DOMAIN (RabbitMQ mgmt UI; AMQP remains internal)
Quick start
1. Copy env template and edit secrets and settings:
cp .env.sample .env
# Edit .env (ENV=staging|prod, DOMAIN, passwords, ACME email)
2. (Optional) Use the Let's Encrypt staging CA to test issuance without rate limits by setting in .env:
TRAEFIK_ACME_CASERVER=https://acme-staging-v02.api.letsencrypt.org/directory
3. Bring up the stack:
podman compose -f compose.yml up -d --build
4. Monitor logs:
podman compose logs -f traefik
Services and routing
- Traefik dashboard: https://traefik.svc.${DOMAIN} (protect with TRAEFIK_DASHBOARD_AUTH in .env)
- Orchestrator HTTP: https://api.svc.${DOMAIN}
- Orchestrator gRPC (h2/TLS via SNI): grpc.svc.${DOMAIN}
- Forge webhooks: https://forge.svc.${DOMAIN}
- GitHub webhooks: https://github.svc.${DOMAIN}
- Runner static server: https://runner.svc.${DOMAIN}
- MinIO console: https://minio.svc.${DOMAIN}
- S3 API: s3.svc.${DOMAIN}
- RabbitMQ management: https://mq.svc.${DOMAIN}
Environment scoping (single infra, logical separation)
- RabbitMQ: single broker; per-environment vhosts named solstice-${ENV} (staging/prod). Services connect to amqp://.../solstice-${ENV}.
- Postgres: single cluster; databases solstice_staging and solstice_prod are created by the postgres-setup job. Services use postgres://.../solstice_${ENV}.
- MinIO: single server; buckets solstice-logs-staging and solstice-logs-prod are created by the minio-setup job. Set S3 bucket per service to the env-appropriate bucket.
Security notes
- Secrets are provided via podman compose secrets referencing your environment variables. Do not commit real secrets.
- Only management UIs are exposed publicly via Traefik. Data planes (Postgres, AMQP, S3 API) terminate TLS at Traefik and route internally. Adjust exposure policy as needed.
Images and builds
- System services use upstream official images (docker.io/library postgres and rabbitmq); MinIO uses upstream images from quay.io/minio.
- Rust services are built with multi-stage Containerfiles: a docker.io/library/rust:bookworm builder stage and a debian:bookworm-slim runtime stage.
- Build caches are mounted in-build for the cargo registry/git and the cargo target directory (target-dir=/cargo/target is set via /cargo/config.toml).
Maintenance
- Upgrade images by editing tags in compose.yml and rebuilding: podman compose build --pull
- Renewals are automatic via Traefik ACME. Certificates are stored in the traefik-acme volume.
- Backups: persist volumes (postgres-data, rabbitmq-data, minio-data, traefik-acme).
Tear down
- Stop: podman compose down
- Remove volumes (DANGEROUS: destroys data): podman volume rm solstice-ci_traefik-acme solstice-ci_postgres-data solstice-ci_rabbitmq-data solstice-ci_minio-data
Troubleshooting
- Certificate issues: check Traefik logs; verify DNS and ports 80/443. For testing, use ACME staging server.
- No routes: verify labels on services and that traefik sees the podman socket.
- Healthchecks failing: inspect service logs with podman logs <container>.

268
deploy/podman/compose.yml Normal file
View file

@ -0,0 +1,268 @@
# Podman Compose production stack for Solstice CI with Traefik + Let's Encrypt
# Usage:
#   1) cp .env.sample .env && edit values (ENV=staging|prod, secrets, email)
#   2) Ensure DNS A/AAAA for required hostnames pointing to this host.
#      Environment separation is logical (DB / vhost / bucket), so hostnames
#      carry no ENV segment — consistent with README.md and .env.sample:
#      - traefik.svc.${DOMAIN}
#      - api.svc.${DOMAIN}
#      - grpc.svc.${DOMAIN}
#      - forge.svc.${DOMAIN}
#      - github.svc.${DOMAIN}
#      - minio.svc.${DOMAIN}
#      - s3.svc.${DOMAIN}
#      - mq.svc.${DOMAIN}
#      - db.svc.${DOMAIN} (optional)
#   3) podman compose -f compose.yml up -d --build
name: solstice-ci
networks:
  core:
    driver: bridge
volumes:
  traefik-acme:
  postgres-data:
  rabbitmq-data:
  minio-data:
services:
  traefik:
    image: docker.io/library/traefik:v3.1
    container_name: traefik
    restart: unless-stopped
    command:
      - --api.dashboard=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --entrypoints.web.address=:80
      - --entrypoints.websecure.address=:443
      - --certificatesresolvers.le.acme.email=${TRAEFIK_ACME_EMAIL}
      - --certificatesresolvers.le.acme.storage=/acme/acme.json
      - --certificatesresolvers.le.acme.httpchallenge=true
      - --certificatesresolvers.le.acme.httpchallenge.entrypoint=web
      # NOTE(review): disables upstream TLS verification for proxied backends;
      # tighten once internal services present verifiable certificates.
      - --serversTransport.insecureSkipVerify=true
      # Optional ACME CA override via .env (e.g., Let's Encrypt staging URL).
      # Fix: default to the production CA so an unset TRAEFIK_ACME_CASERVER no
      # longer expands to an empty (invalid) caserver value.
      - --certificatesresolvers.le.acme.caserver=${TRAEFIK_ACME_CASERVER:-https://acme-v02.api.letsencrypt.org/directory}
    ports:
      - ${TRAEFIK_HTTP_PORT:-80}:80
      - ${TRAEFIK_HTTPS_PORT:-443}:443
    volumes:
      # Traefik's docker provider reads the podman socket mounted at the docker path.
      - /var/run/podman/podman.sock:/var/run/docker.sock:Z
      - traefik-acme:/acme
    networks:
      - core
    labels:
      - traefik.enable=true
      # Dashboard at traefik.svc.${DOMAIN}, protected by basic auth
      - traefik.http.routers.traefik.rule=Host(`traefik.svc.${DOMAIN}`)
      - traefik.http.routers.traefik.entrypoints=websecure
      - traefik.http.routers.traefik.tls.certresolver=le
      - traefik.http.routers.traefik.service=api@internal
      - traefik.http.middlewares.traefik-auth.basicauth.users=${TRAEFIK_DASHBOARD_AUTH}
      - traefik.http.routers.traefik.middlewares=traefik-auth
  postgres:
    image: docker.io/library/postgres:16-alpine
    container_name: solstice-postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    volumes:
      - postgres-data:/var/lib/postgresql/data:Z
    networks:
      - core
  # One-shot job: creates the per-environment databases (idempotent).
  postgres-setup:
    image: docker.io/library/postgres:16-alpine
    container_name: solstice-postgres-setup
    depends_on:
      postgres:
        condition: service_healthy
    entrypoint: ["/bin/sh", "-c"]
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    # Fix: the previous `>-` scalar carried literal surrounding double quotes,
    # so `sh -c` received one quoted word instead of a script and the setup
    # never ran. A single-item list hands the whole script to `sh -c` verbatim.
    command:
      - |
        set -e
        export PGPASSWORD=${POSTGRES_PASSWORD}
        psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc "SELECT 'CREATE DATABASE solstice_staging' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_staging')" | psql -h postgres -U ${POSTGRES_USER}
        psql -h postgres -U ${POSTGRES_USER} -v ON_ERROR_STOP=1 -tc "SELECT 'CREATE DATABASE solstice_prod' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname='solstice_prod')" | psql -h postgres -U ${POSTGRES_USER}
    networks:
      - core
  rabbitmq:
    image: docker.io/library/rabbitmq:4-management-alpine
    container_name: solstice-rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER}
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS}
      RABBITMQ_DEFAULT_VHOST: solstice-${ENV}
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s
    volumes:
      - rabbitmq-data:/var/lib/rabbitmq:Z
    networks:
      - core
    labels:
      - traefik.enable=true
      # Management UI at mq.svc.${DOMAIN}; AMQP itself stays internal
      - traefik.http.routers.mq.rule=Host(`mq.svc.${DOMAIN}`)
      - traefik.http.routers.mq.entrypoints=websecure
      - traefik.http.routers.mq.tls.certresolver=le
      - traefik.http.services.mq.loadbalancer.server.port=15672
  minio:
    image: quay.io/minio/minio:RELEASE.2025-02-07T22-39-53Z
    container_name: solstice-minio
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    volumes:
      - minio-data:/data:Z
    networks:
      - core
    labels:
      - traefik.enable=true
      # MinIO Console at minio.svc.${DOMAIN}
      - traefik.http.routers.minio.rule=Host(`minio.svc.${DOMAIN}`)
      - traefik.http.routers.minio.entrypoints=websecure
      - traefik.http.routers.minio.tls.certresolver=le
      - traefik.http.services.minio.loadbalancer.server.port=9001
      # S3 API via TCP (SNI) router on s3.svc.${DOMAIN}
      - traefik.tcp.routers.s3.rule=HostSNI(`s3.svc.${DOMAIN}`)
      - traefik.tcp.routers.s3.entrypoints=websecure
      - traefik.tcp.routers.s3.tls=true
      - traefik.tcp.routers.s3.tls.certresolver=le
      - traefik.tcp.services.s3.loadbalancer.server.port=9000
  # One-shot job: creates the per-environment log buckets (idempotent).
  minio-setup:
    image: quay.io/minio/mc:RELEASE.2025-02-07T22-47-51Z
    container_name: solstice-minio-setup
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: ["/bin/sh", "-c"]
    # Fix: same quoting defect as postgres-setup — the `>-` scalar's literal
    # surrounding quotes broke the `sh -c` invocation. `set -e` preserves the
    # fail-fast behavior of the original `&&` chain.
    command:
      - |
        set -e
        mc alias set local http://minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD}
        mc mb -p local/solstice-logs-staging || true
        mc mb -p local/solstice-logs-prod || true
        if [ -n "${MINIO_BUCKET}" ]; then mc mb -p local/${MINIO_BUCKET} || true; fi
    networks:
      - core
  orchestrator:
    build:
      context: ../..
      dockerfile: deploy/images/orchestrator/Containerfile
      args:
        BIN: orchestrator
    image: local/solstice-orchestrator:latest
    container_name: solstice-orchestrator
    restart: unless-stopped
    environment:
      RUST_LOG: info
      DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/solstice_${ENV}
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      AMQP_EXCHANGE: solstice.jobs
      AMQP_QUEUE: solstice.jobs.v1
      AMQP_ROUTING_KEY: jobrequest.v1
      GRPC_ADDR: 0.0.0.0:50051
      HTTP_ADDR: 0.0.0.0:8081
    depends_on:
      postgres:
        condition: service_healthy
      postgres-setup:
        condition: service_completed_successfully
      rabbitmq:
        condition: service_healthy
    networks:
      - core
    labels:
      - traefik.enable=true
      # Fix: hostnames now follow the *.svc.${DOMAIN} scheme documented in
      # README.md and .env.sample (env separation is logical, not DNS-based).
      # HTTP endpoints at api.svc.${DOMAIN}
      - traefik.http.routers.api.rule=Host(`api.svc.${DOMAIN}`)
      - traefik.http.routers.api.entrypoints=websecure
      - traefik.http.routers.api.tls.certresolver=le
      - traefik.http.services.api.loadbalancer.server.port=8081
      # gRPC on grpc.svc.${DOMAIN} (TLS, h2, routed by SNI)
      - traefik.tcp.routers.grpc.rule=HostSNI(`grpc.svc.${DOMAIN}`)
      - traefik.tcp.routers.grpc.entrypoints=websecure
      - traefik.tcp.routers.grpc.tls=true
      - traefik.tcp.routers.grpc.tls.certresolver=le
      - traefik.tcp.services.grpc.loadbalancer.server.port=50051
  forge-integration:
    build:
      context: ../..
      dockerfile: deploy/images/forge-integration/Containerfile
      args:
        BIN: forge-integration
    image: local/solstice-forge-integration:latest
    container_name: solstice-forge-integration
    restart: unless-stopped
    environment:
      RUST_LOG: info
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      # HTTP server config for webhooks
      HTTP_ADDR: 0.0.0.0:8080
      WEBHOOK_PATH: /webhooks/forgejo
    depends_on:
      rabbitmq:
        condition: service_healthy
    networks:
      - core
    labels:
      - traefik.enable=true
      # Forge webhooks at forge.svc.${DOMAIN} (fix: rule previously used
      # forge.${ENV}.${DOMAIN}, contradicting this very comment and the README)
      - traefik.http.routers.forge.rule=Host(`forge.svc.${DOMAIN}`)
      - traefik.http.routers.forge.entrypoints=websecure
      - traefik.http.routers.forge.tls.certresolver=le
      - traefik.http.services.forge.loadbalancer.server.port=8080
  github-integration:
    build:
      context: ../..
      dockerfile: deploy/images/github-integration/Containerfile
      args:
        BIN: github-integration
    image: local/solstice-github-integration:latest
    container_name: solstice-github-integration
    restart: unless-stopped
    environment:
      RUST_LOG: info
      AMQP_URL: amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq:5672/solstice-${ENV}
      # HTTP server for GitHub webhooks (skeleton service; implement handler later)
      HTTP_ADDR: 0.0.0.0:8082
    depends_on:
      rabbitmq:
        condition: service_healthy
    networks:
      - core
    labels:
      - traefik.enable=true
      # GitHub webhooks at github.svc.${DOMAIN}
      - traefik.http.routers.github.rule=Host(`github.svc.${DOMAIN}`)
      - traefik.http.routers.github.entrypoints=websecure
      - traefik.http.routers.github.tls.certresolver=le
      - traefik.http.services.github.loadbalancer.server.port=8082

View file

@ -1,4 +1,5 @@
[tools] [tools]
age = "latest" age = "latest"
fnox = "latest" fnox = "latest"
protoc = "latest"
python = "latest" python = "latest"

View file

@ -6,7 +6,7 @@ pkgdesc="Solstice CI Orchestrator service"
arch=(x86_64) arch=(x86_64)
url="https://codeberg.org/your-namespace/solstice-ci" url="https://codeberg.org/your-namespace/solstice-ci"
license=(MPL2) license=(MPL2)
depends=(glibc libvirt) depends=(glibc libvirt sqlite postgresql-libs zstd)
makedepends=(rust cargo) makedepends=(rust cargo)
source=("solstice-ci.tar.gz" source=("solstice-ci.tar.gz"
"solstice-orchestrator.service" "solstice-orchestrator.service"