Add libvirt/KVM integration and Forgejo webhook support to Podman stack

- Extend `.env.sample` with libvirt configuration, Forgejo secrets, and image mapping defaults.
- Update `compose.yml` to enable libvirt integration, including required mounts, devices, and environment variables.
- Add Forgejo webhook configuration and commit status reporting with optional HMAC validation.
- Enhance the orchestrator container with libvirt dependencies and optional features for VM management.
- Document host preparation for libvirt/KVM and image directories in the README.
- Set default fallback values for Traefik ACME CA server.

Signed-off-by: Till Wegmueller <toasterson@gmail.com>
This commit is contained in:
Till Wegmueller 2025-11-09 17:58:36 +01:00
parent fe7b4b9ce0
commit 888aa26388
No known key found for this signature in database
6 changed files with 134 additions and 5 deletions

View file

@ -2,6 +2,7 @@
name = "orchestrator" name = "orchestrator"
version = "0.1.0" version = "0.1.0"
edition = "2024" edition = "2024"
build = "build.rs"
[features] [features]
# Enable libvirt backend on Linux hosts (uses virt crate on Linux) # Enable libvirt backend on Linux hosts (uses virt crate on Linux)

View file

@ -0,0 +1,14 @@
/// Build script: ensure the libvirt client library is linked when the
/// `libvirt` feature is enabled for a Linux target.
fn main() {
    // Cargo exposes the target OS and active features through env vars.
    let on_linux = matches!(
        std::env::var("CARGO_CFG_TARGET_OS").as_deref(),
        Ok("linux")
    );
    let feature_on = std::env::var_os("CARGO_FEATURE_LIBVIRT").is_some();

    // The virt crate normally emits this directive via bindgen/pkg-config,
    // but minimal container images can miss it, producing undefined
    // references such as virConnectOpen or virDomainDefineXML. Re-emitting
    // the directive is a safe no-op when it is already present.
    if on_linux && feature_on {
        println!("cargo:rustc-link-lib=virt");
        // Debian/Ubuntu install the library in /usr/lib/{arch}-linux-gnu,
        // which the linker already searches. Should a custom location ever
        // be needed, also emit cargo:rustc-link-search here.
    }
}

View file

@ -7,7 +7,7 @@ WORKDIR /work
# Install build dependencies: protoc, headers, pkg-config # Install build dependencies: protoc, headers, pkg-config
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y --no-install-recommends \ && apt-get install -y --no-install-recommends \
protobuf-compiler pkg-config libsqlite3-dev libpq-dev ca-certificates \ protobuf-compiler pkg-config libsqlite3-dev libpq-dev libvirt-dev ca-certificates \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Configure cargo target-dir so it can be cached between layers # Configure cargo target-dir so it can be cached between layers
RUN mkdir -p /cargo && printf "[build]\ntarget-dir = \"/cargo/target\"\n" > /cargo/config.toml RUN mkdir -p /cargo && printf "[build]\ntarget-dir = \"/cargo/target\"\n" > /cargo/config.toml
@ -18,12 +18,14 @@ COPY crates ./crates
RUN --mount=type=cache,target=/cargo/registry \ RUN --mount=type=cache,target=/cargo/registry \
--mount=type=cache,target=/cargo/git \ --mount=type=cache,target=/cargo/git \
--mount=type=cache,target=/cargo/target \ --mount=type=cache,target=/cargo/target \
cargo build --release -p orchestrator && cp /cargo/target/release/orchestrator /orchestrator cargo build --release -p orchestrator --features libvirt && cp /cargo/target/release/orchestrator /orchestrator
FROM docker.io/library/debian:bookworm-slim FROM docker.io/library/debian:bookworm-slim
# Minimal runtime image with required shared libs for sqlite/postgres # Minimal runtime image with required shared libs for sqlite/postgres and libvirt/qemu tools
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y --no-install-recommends libsqlite3-0 libpq5 ca-certificates \ && apt-get install -y --no-install-recommends \
libsqlite3-0 libpq5 ca-certificates \
libvirt-clients libvirt0 qemu-utils genisoimage \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
COPY --from=builder /orchestrator /usr/local/bin/orchestrator COPY --from=builder /orchestrator /usr/local/bin/orchestrator
EXPOSE 50051 8081 EXPOSE 50051 8081

View file

@ -37,3 +37,27 @@ TRAEFIK_DASHBOARD_AUTH=
# to allow binding 80/443. # to allow binding 80/443.
TRAEFIK_HTTP_PORT=8080 TRAEFIK_HTTP_PORT=8080
TRAEFIK_HTTPS_PORT=4443 TRAEFIK_HTTPS_PORT=4443
# Orchestrator libvirt integration (Linux hosts)
# URI to the system libvirt on the host; usually qemu:///system
LIBVIRT_URI=qemu:///system
# Libvirt virtual network to attach VMs to; ensure it exists/active on host
LIBVIRT_NETWORK=default
# Host path to the orchestrator image map YAML; mounted read-only into the container
# Default points to the repo example. Change to a production path on your host.
ORCH_IMAGE_MAP_PATH=../../examples/orchestrator-image-map.yaml
# Host directory where base images are stored (or downloaded to by the orchestrator)
# This will be mounted at /var/lib/solstice/images in the container. Ensure local_path in the YAML points there.
ORCH_IMAGES_DIR=/var/lib/solstice/images
# Host working directory for per-VM overlays and logs; mounted read-write
# The libvirt backend will use /var/lib/solstice-ci inside the container; map it to a persistent host path.
ORCH_WORK_DIR=/var/lib/solstice-ci
# Forge Integration secrets (set per deployment)
# Shared secret used to validate Forgejo/Gitea webhooks (X-Gitea-Signature HMAC-SHA256)
WEBHOOK_SECRET=
# Forgejo API token used to post commit statuses back to the forge
FORGEJO_TOKEN=
# Optional: Forgejo API base URL (not secret, but commonly configured alongside the token)
# Example: https://codeberg.org/api/v1
FORGEJO_BASE_URL=

View file

@ -88,3 +88,66 @@ Troubleshooting
- Check firewall (nftables): allow UDP/TCP 53 from the Podman bridge (e.g., 10.89.0.0/24) to host 10.89.0.1; allow FORWARD for ESTABLISHED,RELATED. - Check firewall (nftables): allow UDP/TCP 53 from the Podman bridge (e.g., 10.89.0.0/24) to host 10.89.0.1; allow FORWARD for ESTABLISHED,RELATED.
- Inspect network: podman network inspect podman; consider creating a custom network with explicit DNS servers: podman network create --dns 1.1.1.1 --dns 8.8.8.8 solstice-net and set networks.core.name to that network in compose.yml. - Inspect network: podman network inspect podman; consider creating a custom network with explicit DNS servers: podman network create --dns 1.1.1.1 --dns 8.8.8.8 solstice-net and set networks.core.name to that network in compose.yml.
- As a last resort, run Traefik with host networking: network_mode: host (then remove ports and ensure only Traefik is exposed), or switch ACME to DNS-01. - As a last resort, run Traefik with host networking: network_mode: host (then remove ports and ensure only Traefik is exposed), or switch ACME to DNS-01.
Ubuntu host setup for libvirt/KVM and image directories
These steps prepare an Ubuntu host so the orchestrator (running in a container) can control KVM/libvirt and manage VM images stored on the host.
1) Install libvirt/KVM and tools
- sudo apt update
- sudo apt install -y qemu-kvm libvirt-daemon-system libvirt-clients virtinst bridge-utils genisoimage
- Ensure the libvirt service is running:
- systemctl status libvirtd
- If inactive: sudo systemctl enable --now libvirtd
2) User permissions (KVM and libvirt sockets)
- Add your deployment user (the one running podman compose) to the required groups:
- sudo usermod -aG libvirt $USER
- sudo usermod -aG kvm $USER
- Log out and back in (or new shell) for group membership to take effect.
3) Default libvirt network
- Make sure the default network exists and is active (compose defaults LIBVIRT_NETWORK=default):
- virsh net-list --all
- If missing, define it from the stock XML or create a new NAT network.
- If present but inactive:
- virsh net-start default
- virsh net-autostart default
4) Prepare host directories for images and work data
- Base images directory (bind-mounted read/write into the orchestrator container):
- sudo mkdir -p /var/lib/solstice/images
- sudo chown "$USER":"$USER" /var/lib/solstice/images
- Orchestrator work directory for overlays and console logs:
- sudo mkdir -p /var/lib/solstice-ci
- sudo chown "$USER":"$USER" /var/lib/solstice-ci
- In deploy/podman/.env(.sample), set:
- ORCH_IMAGES_DIR=/var/lib/solstice/images
- ORCH_WORK_DIR=/var/lib/solstice-ci
5) Map the image list (image map YAML)
- Point ORCH_IMAGE_MAP_PATH at your production image map on the host (kept in git or ops repo):
- ORCH_IMAGE_MAP_PATH=/etc/solstice/orchestrator-image-map.yaml
- The orchestrator looks for /examples/orchestrator-image-map.yaml in the container; compose binds your host file there read-only.
- Ensure each images[*].local_path in the YAML points inside /var/lib/solstice/images (the in-container path is the same via the bind mount). The provided example already uses that prefix.
6) Bring up the stack
- podman compose -f compose.yml up -d --build
- The orchestrator will, on first start, download missing base images as per the YAML into ORCH_IMAGES_DIR. Subsequent starts reuse the same files.
Notes
- Hardware acceleration: compose maps /dev/kvm into the container; verify kvm is available on the host: lsmod | grep kvm and that your CPU virtualization features are enabled in BIOS/UEFI.
- Sockets and configs: compose binds libvirt control sockets and common libvirt directories read-only so the orchestrator can read network definitions and create domains.
- If you change LIBVIRT_URI or LIBVIRT_NETWORK, update deploy/podman/.env and redeploy.
Forge integration configuration
- The forge-integration service warns if WEBHOOK_SECRET is not set; in that case it accepts webhooks without signature validation (dev mode). Set WEBHOOK_SECRET in deploy/podman/.env to enable HMAC validation.
- To enable posting commit statuses back to Forgejo/Gitea, set FORGEJO_TOKEN and FORGEJO_BASE_URL in deploy/podman/.env. If they are not set, the service logs a warning (FORGEJO_* not set) and disables the job result consumer that reports statuses.
- The compose file passes these variables to the container. After editing .env, run: podman compose up -d forge-integration
Traefik ACME CA server note
- If you see a warning about TRAEFIK_ACME_CASERVER being unset, it is harmless. The compose file now defaults this value to empty so Traefik uses the production Let's Encrypt endpoint. To test with staging, set TRAEFIK_ACME_CASERVER=https://acme-staging-v02.api.letsencrypt.org/directory in .env and redeploy Traefik.

View file

@ -42,7 +42,7 @@ services:
- --certificatesresolvers.le.acme.httpchallenge.entrypoint=web - --certificatesresolvers.le.acme.httpchallenge.entrypoint=web
- --serversTransport.insecureSkipVerify=true - --serversTransport.insecureSkipVerify=true
# Optional: override ACME CA server via .env (e.g., staging URL) # Optional: override ACME CA server via .env (e.g., staging URL)
- --certificatesresolvers.le.acme.caserver=${TRAEFIK_ACME_CASERVER} - --certificatesresolvers.le.acme.caserver=${TRAEFIK_ACME_CASERVER:-}
ports: ports:
# Rootless Podman cannot bind privileged ports (<1024). Use high ports via .env (e.g., 8080/4443), # Rootless Podman cannot bind privileged ports (<1024). Use high ports via .env (e.g., 8080/4443),
# or adjust sysctl on the host: net.ipv4.ip_unprivileged_port_start=80 (requires root). # or adjust sysctl on the host: net.ipv4.ip_unprivileged_port_start=80 (requires root).
@ -190,6 +190,9 @@ services:
AMQP_ROUTING_KEY: jobrequest.v1 AMQP_ROUTING_KEY: jobrequest.v1
GRPC_ADDR: 0.0.0.0:50051 GRPC_ADDR: 0.0.0.0:50051
HTTP_ADDR: 0.0.0.0:8081 HTTP_ADDR: 0.0.0.0:8081
# Libvirt configuration for Linux/KVM
LIBVIRT_URI: ${LIBVIRT_URI:-qemu:///system}
LIBVIRT_NETWORK: ${LIBVIRT_NETWORK:-default}
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy
@ -197,6 +200,24 @@ services:
condition: service_completed_successfully condition: service_completed_successfully
rabbitmq: rabbitmq:
condition: service_healthy condition: service_healthy
# Host integrations: libvirt sockets/devices and config + image/work directories
volumes:
# Read-only mount of the image map config into the container path expected by the binary
- ${ORCH_IMAGE_MAP_PATH:-../../examples/orchestrator-image-map.yaml}:/examples/orchestrator-image-map.yaml:ro,Z
# Writable bind for images so the orchestrator can download/retain base images on the host
- ${ORCH_IMAGES_DIR:-/var/lib/solstice/images}:/var/lib/solstice/images:Z
# Writable bind for per-VM overlays and console logs (used by libvirt backend)
- ${ORCH_WORK_DIR:-/var/lib/solstice-ci}:/var/lib/solstice-ci:Z
# Libvirt control sockets (a read-only socket suffices for queries, but write access is required to define/create domains)
- /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock:Z
- /var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro:Z
# Optional: expose host libvirt configs for network XML reads
- /etc/libvirt:/etc/libvirt:ro,Z
- /var/lib/libvirt:/var/lib/libvirt:ro,Z
# KVM device for hardware acceleration
- /dev/kvm:/dev/kvm
devices:
- /dev/kvm
networks: networks:
- core - core
labels: labels:
@ -228,6 +249,10 @@ services:
# HTTP server config for webhooks # HTTP server config for webhooks
HTTP_ADDR: 0.0.0.0:8080 HTTP_ADDR: 0.0.0.0:8080
WEBHOOK_PATH: /webhooks/forgejo WEBHOOK_PATH: /webhooks/forgejo
# Secrets and Forgejo API configuration
WEBHOOK_SECRET: ${WEBHOOK_SECRET}
FORGEJO_TOKEN: ${FORGEJO_TOKEN}
FORGEJO_BASE_URL: ${FORGEJO_BASE_URL}
depends_on: depends_on:
rabbitmq: rabbitmq:
condition: service_healthy condition: service_healthy