diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
new file mode 100644
index 0000000..fd18043
--- /dev/null
+++ b/.idea/dataSources.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="DataSourceManagerImpl" format="xml" multifile-model="true">
+    <data-source source="LOCAL" name="solstice" uuid="00000000-0000-0000-0000-000000000000">
+      <driver-ref>postgresql</driver-ref>
+      <synchronize>true</synchronize>
+      <jdbc-driver>org.postgresql.Driver</jdbc-driver>
+      <jdbc-url>jdbc:postgresql://localhost:5432/solstice</jdbc-url>
+      <jdbc-additional-properties>
+        <property name="com.intellij.clouds.kubernetes.db.host.port" />
+        <property name="com.intellij.clouds.kubernetes.db.enabled" value="false" />
+        <property name="com.intellij.clouds.kubernetes.db.container.port" />
+      </jdbc-additional-properties>
+      <working-dir>$ProjectFileDir$</working-dir>
+    </data-source>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.mise/tasks/ci/local b/.mise/tasks/ci/local
index 9010122..7895286 100755
--- a/.mise/tasks/ci/local
+++ b/.mise/tasks/ci/local
@@ -68,6 +68,9 @@ export SOLSTICE_RUNNER_URL=${SOLSTICE_RUNNER_URL:-http://$HOST_IP:$SOL_RUNNER_PORT}
SERVE_PID=$!
# Start orchestrator in background
+# Enable persistence by default in CI: use local Postgres from docker-compose
+export ORCH_SKIP_PERSIST=${ORCH_SKIP_PERSIST:-false}
+export DATABASE_URL=${DATABASE_URL:-postgres://solstice:solstice@127.0.0.1:5432/solstice}
LOGFILE=${SOL_ORCH_LOG:-"$ROOT_DIR/target/orchestrator.local.log"}
echo "Starting orchestrator... (logs: $LOGFILE)" >&2
(
diff --git a/.mise/tasks/ci/vm-build b/.mise/tasks/ci/vm-build
index 782f1b5..c69ea22 100755
--- a/.mise/tasks/ci/vm-build
+++ b/.mise/tasks/ci/vm-build
@@ -72,8 +72,9 @@ ILLUMOS_URL="http://$HOST_IP:$SOL_RUNNER_PORT_ILLUMOS/solstice-runner-illumos"
export SOLSTICE_RUNNER_URLS="$LINUX_URL $ILLUMOS_URL"
# Start orchestrator in background (inherits env including SOLSTICE_RUNNER_URLS/ORCH_CONTACT_ADDR)
-# Speed up startup by skipping persistence unless explicitly disabled
-export ORCH_SKIP_PERSIST=${ORCH_SKIP_PERSIST:-true}
+# Enable persistence by default in CI: use local Postgres from docker-compose
+export ORCH_SKIP_PERSIST=${ORCH_SKIP_PERSIST:-false}
+export DATABASE_URL=${DATABASE_URL:-postgres://solstice:solstice@127.0.0.1:5432/solstice}
LOGFILE=${SOL_ORCH_LOG:-"$ROOT_DIR/target/orchestrator.vm-build.log"}
echo "Starting orchestrator... (logs: $LOGFILE)" >&2
(
diff --git a/.mise/tasks/dev/up b/.mise/tasks/dev/up
index c0ddb1b..5ae6256 100755
--- a/.mise/tasks/dev/up
+++ b/.mise/tasks/dev/up
@@ -1,11 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail
-# Start local development dependencies (RabbitMQ) via docker compose
+# Start local development dependencies (RabbitMQ + Postgres) via docker compose
if command -v docker >/dev/null 2>&1; then
if command -v docker-compose >/dev/null 2>&1; then
- exec docker-compose up -d rabbitmq
+ exec docker-compose up -d rabbitmq postgres
else
- exec docker compose up -d rabbitmq
+ exec docker compose up -d rabbitmq postgres
fi
elif command -v podman >/dev/null 2>&1; then
echo "Podman detected but this project uses docker-compose file; please use Docker or translate to podman-compose" >&2
diff --git a/.mise/tasks/pkg/build b/.mise/tasks/pkg/build
new file mode 100644
index 0000000..542901b
--- /dev/null
+++ b/.mise/tasks/pkg/build
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+set -euo pipefail
+# Build Arch packages for Solstice CI components.
+# Requires: Arch Linux with base-devel, rust, cargo, makepkg.
+# Outputs: pkg files under packaging/arch/*/*.pkg.tar.*
+
+ROOT_DIR=$(cd "$(dirname "$0")/../../../" && pwd)
+cd "$ROOT_DIR"
+
+# Create a clean source tarball of the repository
+TARBALL="solstice-ci.tar.gz"
+TMPDIR=$(mktemp -d)
+trap 'rm -rf "$TMPDIR"' EXIT
+
+git ls-files -z | tar --null -czf "$TMPDIR/$TARBALL" -T -
+
+for pkg in solstice-orchestrator solstice-forge-integration; do
+ PKG_DIR="$ROOT_DIR/packaging/arch/$pkg"
+ mkdir -p "$PKG_DIR"
+ cp "$TMPDIR/$TARBALL" "$PKG_DIR/$TARBALL"
+ ( cd "$PKG_DIR" && makepkg -fC --noconfirm )
+ echo "Built package(s) in $PKG_DIR:" >&2
+ ls -1 "$PKG_DIR"/*.pkg.tar.* 2>/dev/null || true
+done
diff --git a/.mise/tasks/pkg/install b/.mise/tasks/pkg/install
new file mode 100644
index 0000000..e30ee5b
--- /dev/null
+++ b/.mise/tasks/pkg/install
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+set -euo pipefail
+# Install built Arch packages and enable services
+# Requires: sudo privileges for pacman and systemctl
+
+ROOT_DIR=$(cd "$(dirname "$0")/../../../" && pwd)
+cd "$ROOT_DIR"
+
+shopt -s nullglob
+PKGS=()
+for p in packaging/arch/solstice-orchestrator/*.pkg.tar.* packaging/arch/solstice-forge-integration/*.pkg.tar.*; do
+ PKGS+=("$p")
+done
+
+if [[ ${#PKGS[@]} -eq 0 ]]; then
+ echo "No packages found. Build first: mise run pkg:build" >&2
+ exit 1
+fi
+
+echo "Installing: ${PKGS[*]}" >&2
+sudo pacman -U --noconfirm "${PKGS[@]}"
+
+# Place example env files if not present
+sudo install -d -m 755 /etc/solstice
+if [[ ! -f /etc/solstice/orchestrator.env ]]; then
+ sudo install -m 644 packaging/arch/solstice-orchestrator/orchestrator.env.example /etc/solstice/orchestrator.env
+fi
+if [[ ! -f /etc/solstice/forge-integration.env ]]; then
+ sudo install -m 644 packaging/arch/solstice-forge-integration/forge-integration.env.example /etc/solstice/forge-integration.env
+fi
+# Ship example orchestrator image map if none present
+if [[ ! -f /etc/solstice/orchestrator-image-map.yaml ]]; then
+ sudo install -m 644 examples/orchestrator-image-map.yaml /etc/solstice/orchestrator-image-map.yaml
+fi
+
+# Reload systemd and optionally enable services
+sudo systemctl daemon-reload
+if [[ "${SOL_ENABLE_SERVICES:-1}" == "1" ]]; then
+ sudo systemctl enable --now solstice-orchestrator.service || true
+ sudo systemctl enable --now solstice-forge-integration.service || true
+fi
+
+echo "Done. Adjust /etc/solstice/*.env and /etc/solstice/orchestrator-image-map.yaml as needed." >&2
diff --git a/.mise/tasks/setup/install b/.mise/tasks/setup/install
new file mode 100644
index 0000000..68262ec
--- /dev/null
+++ b/.mise/tasks/setup/install
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+set -euo pipefail
+# Build Solstice CI Arch packages and install them with systemd units
+# Usage:
+# mise run setup:install # build packages, install, and enable services
+# SOL_ENABLE_SERVICES=0 mise run setup:install # build and install without enabling
+
+ROOT_DIR=$(cd "$(dirname "$0")/../../../" && pwd)
+cd "$ROOT_DIR"
+
+"$ROOT_DIR/.mise/tasks/pkg/build"
+"$ROOT_DIR/.mise/tasks/pkg/install"
diff --git a/crates/forge-integration/Cargo.toml b/crates/forge-integration/Cargo.toml
index b643a7c..c919a0b 100644
--- a/crates/forge-integration/Cargo.toml
+++ b/crates/forge-integration/Cargo.toml
@@ -8,12 +8,19 @@ common = { path = "../common" }
clap = { version = "4", features = ["derive", "env"] }
miette = { version = "7", features = ["fancy"] }
tracing = "0.1"
-tokio = { version = "1", features = ["rt-multi-thread", "macros", "signal"] }
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "signal", "fs", "io-util", "time"] }
# HTTP + Webhooks
axum = { version = "0.8", features = ["macros"] }
+reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls-native-roots"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# Signature verification
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"
+# AMQP consumer for results
+lapin = { version = "2" }
+futures-util = "0.3"
+# S3/Garage upload
+aws-config = { version = "1", default-features = false, features = ["behavior-version-latest", "rt-tokio"] }
+aws-sdk-s3 = { version = "1", default-features = false, features = ["rt-tokio", "rustls"] }
diff --git a/crates/forge-integration/src/main.rs b/crates/forge-integration/src/main.rs
index 1b829d2..f3507fc 100644
--- a/crates/forge-integration/src/main.rs
+++ b/crates/forge-integration/src/main.rs
@@ -1,5 +1,6 @@
use std::net::SocketAddr;
use std::sync::Arc;
+use aws_sdk_s3::primitives::ByteStream;
use axum::{
Router,
@@ -11,10 +12,11 @@ use axum::{
};
use clap::{Parser, Subcommand};
use hmac::{Hmac, Mac};
-use miette::Result;
+use miette::{Result, IntoDiagnostic};
use serde::Deserialize;
use sha2::Sha256;
use tracing::{error, info, warn};
+use futures_util::StreamExt;
#[derive(Subcommand, Debug)]
enum Cmd {
@@ -71,6 +73,34 @@ struct Opts {
#[arg(long, env = "OTEL_EXPORTER_OTLP_ENDPOINT")]
     otlp: Option<String>,
+ /// Forgejo API base (e.g., https://codeberg.org/api/v1)
+ #[arg(long, env = "FORGEJO_BASE_URL")]
+    forgejo_base: Option<String>,
+ /// Forgejo token (PAT or app token)
+ #[arg(long, env = "FORGEJO_TOKEN")]
+    forgejo_token: Option<String>,
+ /// Commit status context
+ #[arg(long, env = "FORGE_CONTEXT", default_value = "solstice/ci")]
+ forge_context: String,
+
+ /// Orchestrator HTTP base for logs (e.g., http://localhost:8081)
+ #[arg(long, env = "ORCH_HTTP_BASE")]
+    orch_http_base: Option<String>,
+
+ /// S3-compatible endpoint for Garage/MinIO (e.g., http://localhost:9000)
+ #[arg(long, env = "S3_ENDPOINT")]
+    s3_endpoint: Option<String>,
+ /// Bucket to upload logs into
+ #[arg(long, env = "S3_BUCKET")]
+    s3_bucket: Option<String>,
+
+ /// Default runs_on label to use when not specified via labels or repo map
+ #[arg(long, env = "RUNS_ON_DEFAULT")]
+    runs_on_default: Option<String>,
+ /// Per-repo runs_on overrides: comma-separated owner/repo=label pairs
+ #[arg(long, env = "RUNS_ON_MAP")]
+    runs_on_map: Option<String>,
+
#[command(subcommand)]
     cmd: Option<Cmd>,
}
@@ -79,6 +109,14 @@ struct Opts {
struct AppState {
mq_cfg: common::MqConfig,
     webhook_secret: Option<String>,
+    forgejo_base: Option<String>,
+    forgejo_token: Option<String>,
+    forge_context: String,
+    orch_http_base: Option<String>,
+    s3_endpoint: Option<String>,
+    s3_bucket: Option<String>,
+    runs_on_default: Option<String>,
+    runs_on_map: std::collections::HashMap<String, String>, // key: owner/repo
}
 type HmacSha256 = Hmac<Sha256>;
@@ -125,9 +163,24 @@ async fn main() -> Result<()> {
);
}
+ // Parse runs_on overrides map from CLI/env
+ let runs_on_map = opts
+ .runs_on_map
+ .as_deref()
+ .map(parse_runs_on_map)
+ .unwrap_or_default();
+
let state = Arc::new(AppState {
mq_cfg,
webhook_secret: opts.webhook_secret,
+ forgejo_base: opts.forgejo_base,
+ forgejo_token: opts.forgejo_token,
+ forge_context: opts.forge_context,
+ orch_http_base: opts.orch_http_base,
+ s3_endpoint: opts.s3_endpoint,
+ s3_bucket: opts.s3_bucket,
+ runs_on_default: opts.runs_on_default,
+ runs_on_map,
});
// Leak the path string to satisfy 'static requirement for axum route API
@@ -138,6 +191,14 @@ async fn main() -> Result<()> {
.with_state(state.clone());
let addr: SocketAddr = opts.http_addr.parse().expect("invalid HTTP_ADDR");
+ // Start JobResult consumer in background
+ let state_clone = state.clone();
+ tokio::spawn(async move {
+ if let Err(e) = consume_job_results(state_clone).await {
+ tracing::error!(error = %e, "job result consumer exited");
+ }
+ });
+
axum::serve(
tokio::net::TcpListener::bind(addr).await.expect("bind"),
router,
@@ -148,6 +209,278 @@ async fn main() -> Result<()> {
Ok(())
}
+async fn post_commit_status(
+ base: &str,
+ token: &str,
+ repo_url: &str,
+ sha: &str,
+ context: &str,
+ state: &str,
+ target_url: Option<&str>,
+ description: Option<&str>,
+) -> Result<()> {
+ // Extract owner/repo from repo_url (supports https://.../owner/repo.git and ssh://git@host/owner/repo.git)
+ let (owner, repo) = parse_owner_repo(repo_url).ok_or_else(|| miette::miette!("cannot parse owner/repo from repo_url: {repo_url}"))?;
+ let api = format!("{}/repos/{}/{}/statuses/{}", base.trim_end_matches('/'), owner, repo, sha);
+ let mut body = serde_json::json!({
+ "state": state,
+ "context": context,
+ });
+ if let Some(u) = target_url { body["target_url"] = serde_json::Value::String(u.to_string()); }
+ if let Some(d) = description { body["description"] = serde_json::Value::String(d.to_string()); }
+ let client = reqwest::Client::new();
+ let resp = client.post(&api)
+ .bearer_auth(token)
+ .json(&body)
+ .send()
+ .await
+ .into_diagnostic()?;
+ if !resp.status().is_success() {
+ let status = resp.status();
+ let text = resp.text().await.unwrap_or_default();
+ tracing::warn!(status = ?status, body = %text, "forgejo status post failed");
+ }
+ Ok(())
+}
+
+async fn consume_job_results(state: Arc<AppState>) -> Result<()> {
+ // Only start if we have at least Forgejo base+token to post statuses
+ if state.forgejo_base.is_none() || state.forgejo_token.is_none() {
+ warn!("FORGEJO_* not set; job result consumer disabled");
+ return Ok(());
+ }
+ let url = state.mq_cfg.url.clone();
+ let exchange = state.mq_cfg.exchange.clone();
+ let conn = lapin::Connection::connect(&url, lapin::ConnectionProperties::default())
+ .await
+ .into_diagnostic()?;
+ let channel = conn.create_channel().await.into_diagnostic()?;
+
+ // Ensure exchange exists (direct)
+ channel
+ .exchange_declare(
+ &exchange,
+ lapin::ExchangeKind::Direct,
+ lapin::options::ExchangeDeclareOptions {
+ durable: true,
+ auto_delete: false,
+ internal: false,
+ nowait: false,
+ passive: false,
+ },
+ lapin::types::FieldTable::default(),
+ )
+ .await
+ .into_diagnostic()?;
+
+ // Declare results queue and bind to routing key jobresult.v1
+ let results_queue = std::env::var("RESULTS_QUEUE").unwrap_or_else(|_| "solstice.results.v1".into());
+ channel
+ .queue_declare(
+ &results_queue,
+ lapin::options::QueueDeclareOptions { durable: true, auto_delete: false, exclusive: false, nowait: false, passive: false },
+ lapin::types::FieldTable::default(),
+ )
+ .await
+ .into_diagnostic()?;
+ channel
+ .queue_bind(
+ &results_queue,
+ &exchange,
+ "jobresult.v1",
+ lapin::options::QueueBindOptions { nowait: false },
+ lapin::types::FieldTable::default(),
+ )
+ .await
+ .into_diagnostic()?;
+
+ channel
+ .basic_qos(16, lapin::options::BasicQosOptions { global: false })
+ .await
+ .into_diagnostic()?;
+
+ let mut consumer = channel
+ .basic_consume(
+ &results_queue,
+ "forge-integration",
+ lapin::options::BasicConsumeOptions { no_ack: false, ..Default::default() },
+ lapin::types::FieldTable::default(),
+ )
+ .await
+ .into_diagnostic()?;
+
+ info!(queue = %results_queue, "job results consumer started");
+
+ while let Some(delivery) = consumer.next().await {
+ match delivery {
+ Ok(d) => {
+ let tag = d.delivery_tag;
+                let res: Result<common::messages::JobResult> = serde_json::from_slice(&d.data).into_diagnostic();
+ match res {
+ Ok(jobres) => {
+ if let Err(e) = handle_job_result(&state, &jobres).await {
+ warn!(error = %e, request_id = %jobres.request_id, "failed to handle JobResult; acking to avoid loops");
+ }
+ channel
+ .basic_ack(tag, lapin::options::BasicAckOptions { multiple: false })
+ .await
+ .into_diagnostic()?;
+ }
+ Err(e) => {
+ warn!(error = %e, "failed to parse JobResult; acking");
+ channel
+ .basic_ack(tag, lapin::options::BasicAckOptions { multiple: false })
+ .await
+ .into_diagnostic()?;
+ }
+ }
+ }
+ Err(e) => {
+ warn!(error = %e, "consumer error; sleeping");
+ tokio::time::sleep(std::time::Duration::from_millis(200)).await;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+async fn handle_job_result(state: &AppState, jobres: &common::messages::JobResult) -> Result<()> {
+ // Fetch logs
+    let mut log_text: Option<String> = None;
+ if let Some(base) = state.orch_http_base.as_ref() {
+ let url = format!("{}/jobs/{}/logs", base.trim_end_matches('/'), jobres.request_id);
+ let resp = reqwest::Client::new().get(&url).send().await.into_diagnostic()?;
+ if resp.status().is_success() {
+ let txt = resp.text().await.into_diagnostic()?;
+ log_text = Some(txt);
+ } else {
+ warn!(status = ?resp.status(), "failed to fetch logs from orchestrator HTTP");
+ }
+ }
+
+ // Upload to S3 if configured and we have logs
+    let mut target_url: Option<String> = None;
+ if let (Some(endpoint), Some(bucket), Some(text)) = (state.s3_endpoint.as_ref(), state.s3_bucket.as_ref(), log_text.as_ref()) {
+ if let Ok(url) = upload_to_s3(endpoint, bucket, &format!("logs/{}/{}.txt", jobres.repo_url.replace(':', "/").replace('/', "_"), jobres.request_id), text.as_bytes()).await {
+ target_url = Some(url);
+ }
+ }
+ // Fallback to orchestrator log URL if upload not done
+ if target_url.is_none() {
+ if let Some(base) = state.orch_http_base.as_ref() {
+ target_url = Some(format!("{}/jobs/{}/logs", base.trim_end_matches('/'), jobres.request_id));
+ }
+ }
+
+ // Post final status to Forgejo
+ let state_str = if jobres.success { "success" } else { "failure" };
+ if let (Some(base), Some(token)) = (state.forgejo_base.as_ref(), state.forgejo_token.as_ref()) {
+ let desc = if jobres.success { Some("Job succeeded") } else { Some("Job failed") };
+ let _ = post_commit_status(
+ base,
+ token,
+ &jobres.repo_url,
+ &jobres.commit_sha,
+ &state.forge_context,
+ state_str,
+ target_url.as_deref(),
+ desc,
+ )
+ .await;
+ }
+
+ Ok(())
+}
+
+async fn upload_to_s3(endpoint: &str, bucket: &str, key: &str, bytes: &[u8]) -> Result<String> {
+ let loader = aws_config::defaults(aws_config::BehaviorVersion::latest()).load().await;
+ // Override endpoint and enforce path-style
+ let conf = aws_sdk_s3::config::Builder::from(&loader)
+ .endpoint_url(endpoint)
+ .force_path_style(true)
+ .build();
+ let client = aws_sdk_s3::Client::from_conf(conf);
+ client
+ .put_object()
+ .bucket(bucket)
+ .key(key)
+ .body(ByteStream::from(bytes.to_vec()))
+ .content_type("text/plain; charset=utf-8")
+ .send()
+ .await
+ .into_diagnostic()?;
+ // Construct path-style URL
+ let url = format!("{}/{}/{}", endpoint.trim_end_matches('/'), bucket, key);
+ Ok(url)
+}
+
+fn parse_owner_repo(repo_url: &str) -> Option<(String, String)> {
+ // Strip .git
+ let url = repo_url.trim_end_matches(".git");
+ if let Some(rest) = url.strip_prefix("https://").or_else(|| url.strip_prefix("http://")) {
+ let parts: Vec<&str> = rest.split('/').collect();
+ if parts.len() >= 3 { return Some((parts[1].to_string(), parts[2].to_string())); }
+ } else if let Some(rest) = url.strip_prefix("ssh://") {
+ // ssh://git@host/owner/repo
+ let after_host = rest.splitn(2, '/').nth(1)?;
+ let parts: Vec<&str> = after_host.split('/').collect();
+ if parts.len() >= 2 { return Some((parts[0].to_string(), parts[1].to_string())); }
+ } else if let Some(idx) = url.find(':') {
+ // git@host:owner/repo
+ let after = &url[idx+1..];
+ let parts: Vec<&str> = after.split('/').collect();
+ if parts.len() >= 2 { return Some((parts[0].to_string(), parts[1].to_string())); }
+ }
+ None
+}
+
+fn parse_runs_on_map(s: &str) -> std::collections::HashMap<String, String> {
+ let mut map = std::collections::HashMap::new();
+ for part in s.split(',') {
+ let p = part.trim();
+ if p.is_empty() { continue; }
+ if let Some((k, v)) = p.split_once('=') {
+ let key = k.trim().to_string();
+ let val = v.trim().to_string();
+ if !key.is_empty() && !val.is_empty() {
+ map.insert(key, val);
+ }
+ }
+ }
+ map
+}
+
+fn infer_runs_on(state: &AppState, repo_url: &str, labels: Option<&[String]>) -> Option<String> {
+ // 1) Per-repo override map
+ if let Some((owner, repo)) = parse_owner_repo(repo_url) {
+ let key = format!("{}/{}", owner, repo);
+ if let Some(v) = state.runs_on_map.get(&key) {
+ return Some(v.clone());
+ }
+ }
+ // 2) From PR labels
+ if let Some(ls) = labels {
+ for name in ls {
+ let n = name.trim();
+ let lower = n.to_ascii_lowercase();
+ // patterns: "runs-on: label", "runs-on=label", or "runs-on-label"
+ if let Some(rest) = lower.strip_prefix("runs-on:") {
+ let label = rest.trim();
+ if !label.is_empty() { return Some(label.to_string()); }
+ } else if let Some(rest) = lower.strip_prefix("runs-on=") {
+ let label = rest.trim();
+ if !label.is_empty() { return Some(label.to_string()); }
+ } else if let Some(rest) = lower.strip_prefix("runs-on-") {
+ let label = rest.trim();
+ if !label.is_empty() { return Some(label.to_string()); }
+ }
+ }
+ }
+ // 3) Default
+ state.runs_on_default.clone()
+}
+
async fn handle_webhook(
     State(state): State<Arc<AppState>>,
headers: HeaderMap,
@@ -233,8 +566,27 @@ async fn handle_push(state: Arc<AppState>, body: Bytes) -> StatusCode {
let repo_url = pick_repo_url(&payload.repository);
let sha = payload.after;
- match enqueue_job(&state, repo_url, sha).await {
- Ok(_) => StatusCode::ACCEPTED,
+ match enqueue_job(&state, repo_url.clone(), sha.clone(), None).await {
+ Ok(jr) => {
+ if let (Some(base), Some(token), Some(orch)) = (
+ state.forgejo_base.as_ref(),
+ state.forgejo_token.as_ref(),
+ state.orch_http_base.as_ref(),
+ ) {
+ let _ = post_commit_status(
+ base,
+ token,
+ &jr.repo_url,
+ &jr.commit_sha,
+ &state.forge_context,
+ "pending",
+ Some(&format!("{}/jobs/{}/logs", orch.trim_end_matches('/'), jr.request_id)),
+ Some("Solstice job queued"),
+ )
+ .await;
+ }
+ StatusCode::ACCEPTED
+ }
Err(e) => {
error!(error = %e, "failed to publish job");
StatusCode::INTERNAL_SERVER_ERROR
@@ -256,9 +608,14 @@ struct PrHead {
repo: PrRepoInfo,
}
+#[derive(Debug, Deserialize)]
+struct Label { name: String }
+
#[derive(Debug, Deserialize)]
struct PullRequest {
head: PrHead,
+ #[serde(default)]
+    labels: Vec<Label>,