Mirror of https://codeberg.org/Toasterson/solstice-ci.git, synced 2026-04-10 13:20:41 +00:00
Introduce workflow.jobs support and script path overrides; bump version to 0.1.14
- Add parsing and execution support for `.solstice/workflow.kdl` with job-specific configuration, including `runs_on`, `script path`, and `workflow_job_id`.
- Enable job grouping via `group_id` for cohesive workflow processing.
- Update the orchestrator to pass workflow-specific parameters to `cloud-init` for finer control over execution.
- Refactor the enqueue logic to handle multiple jobs per workflow, falling back to a single job when no workflow is defined.
- Add `base64`, `regex`, and `uuid` dependencies for workflow parsing.
- Increment the orchestrator version to 0.1.14 for release.

(A minimal example of the workflow format the new parser accepts is sketched after the change summary below.)

Signed-off-by: Till Wegmueller <toasterson@gmail.com>
This commit is contained in:
parent
7cc6ff856b
commit
2c73c80619
6 changed files with 199 additions and 31 deletions
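The webhook service gains a deliberately heuristic workflow parser (parse_workflow_jobs / capture_attr, in the handler diff further down): it only looks for `job` nodes with an `id=` attribute, an optional `runs_on=` attribute or child line, and an optional `script path=` child line. A minimal sketch of a workflow it should accept, written as a hypothetical test; the KDL layout, job ids, and runs_on values are assumptions, not taken from this commit:

// Hypothetical test sketch (not part of this commit) for the
// parse_workflow_jobs / capture_attr helpers added below.
#[cfg(test)]
mod workflow_parse_sketch {
    use super::{capture_attr, parse_workflow_jobs};

    #[test]
    fn parses_jobs_with_runs_on_and_script_override() {
        // Assumed workflow layout: two jobs, one with a script override.
        let kdl = r#"
job id="build" runs_on="omnios-r151052" {
    script path=".solstice/build.sh"
}
job id="test" {
    runs_on="debian-12"
}
"#;
        let jobs = parse_workflow_jobs(kdl);
        assert_eq!(jobs.len(), 2);
        assert_eq!(jobs[0].id, "build");
        assert_eq!(jobs[0].script.as_deref(), Some(".solstice/build.sh"));
        assert_eq!(jobs[1].runs_on.as_deref(), Some("debian-12"));
        // Attribute capture works with double- or single-quoted values.
        assert_eq!(capture_attr(r#"job id="build""#, "id").as_deref(), Some("build"));
    }
}
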
@@ -11,6 +11,9 @@ pub struct JobRequest {
     pub schema_version: String, // e.g., "jobrequest.v1"
     /// Unique request identifier for idempotency and tracing correlation.
     pub request_id: Uuid,
+    /// Optional group identifier to correlate multiple job runs for the same workflow request.
+    #[serde(default)]
+    pub group_id: Option<Uuid>,
     /// Source system of this request (forge or manual trigger).
     pub source: SourceSystem,
     /// Repository clone URL (SSH or HTTPS).

@@ -24,11 +27,17 @@ pub struct JobRequest {
     /// Commit SHA to check out.
     pub commit_sha: String,
     /// Optional path to the workflow file within the repo (KDL).
+    #[serde(default)]
     pub workflow_path: Option<String>,
     /// Optional specific job id from the workflow to run.
+    #[serde(default)]
     pub workflow_job_id: Option<String>,
     /// Optional scheduling hint selecting a base image or host group.
+    #[serde(default)]
     pub runs_on: Option<String>,
+    /// Optional script path within the repository to execute instead of the default .solstice/job.sh
+    #[serde(default)]
+    pub script_path: Option<String>,
     /// Submission timestamp (UTC).
     pub submitted_at: OffsetDateTime,
 }

@@ -54,6 +63,7 @@ impl JobRequest {
         Self {
             schema_version: default_jobrequest_schema(),
             request_id: Uuid::new_v4(),
+            group_id: None,
             source,
             repo_url: repo_url.into(),
             repo_owner: None,

@@ -62,6 +72,7 @@ impl JobRequest {
             workflow_path: None,
             workflow_job_id: None,
             runs_on: None,
+            script_path: None,
             submitted_at: OffsetDateTime::now_utc(),
         }
     }
 }

@@ -24,3 +24,7 @@ futures-util = "0.3"
 # S3/Garage upload
 aws-config = { version = "1", default-features = false, features = ["behavior-version-latest", "rt-tokio"] }
 aws-sdk-s3 = { version = "1", default-features = false, features = ["rt-tokio", "rustls"] }
+# Workflow parsing helpers
+base64 = "0.22"
+regex = "1.11"
+uuid = { version = "1", features = ["v4"] }

@@ -584,6 +584,8 @@ struct PushPayload {
     repository: RepoInfo,
 }
 
+use base64::Engine;
+
 async fn handle_push(state: Arc<AppState>, body: Bytes) -> StatusCode {
     let payload: PushPayload = match serde_json::from_slice(&body) {
         Ok(p) => p,

@@ -602,29 +604,31 @@ async fn handle_push(state: Arc<AppState>, body: Bytes) -> StatusCode {
     let repo_url = pick_repo_url(&payload.repository);
     let sha = payload.after;
 
-    match enqueue_job(&state, repo_url.clone(), sha.clone(), None).await {
-        Ok(jr) => {
+    match enqueue_jobs(&state, repo_url.clone(), sha.clone(), None).await {
+        Ok(jobs) => {
             if let (Some(base), Some(token), Some(orch)) = (
                 state.forgejo_base.as_ref(),
                 state.forgejo_token.as_ref(),
                 state.orch_http_base.as_ref(),
             ) {
-                let _ = post_commit_status(
-                    base,
-                    token,
-                    &jr.repo_url,
-                    &jr.commit_sha,
-                    &state.forge_context,
-                    "pending",
-                    Some(&format!("{}/jobs/{}/logs", orch.trim_end_matches('/'), jr.request_id)),
-                    Some("Solstice job queued"),
-                )
-                .await;
+                if let Some(first) = jobs.first() {
+                    let _ = post_commit_status(
+                        base,
+                        token,
+                        &first.repo_url,
+                        &first.commit_sha,
+                        &state.forge_context,
+                        "pending",
+                        Some(&format!("{}/jobs/{}/logs", orch.trim_end_matches('/'), first.request_id)),
+                        Some("Solstice jobs queued"),
+                    )
+                    .await;
+                }
             }
             StatusCode::ACCEPTED
         }
         Err(e) => {
-            error!(error = %e, "failed to publish job");
+            error!(error = %e, "failed to publish jobs");
             StatusCode::INTERNAL_SERVER_ERROR
         }
     }

@@ -685,10 +689,10 @@ async fn handle_pull_request(state: Arc<AppState>, body: Bytes) -> StatusCode {
         .map(|l| l.name)
         .collect();
 
-    match enqueue_job(&state, repo_url, sha, Some(label_names)).await {
+    match enqueue_jobs(&state, repo_url, sha, Some(label_names)).await {
         Ok(_) => StatusCode::ACCEPTED,
         Err(e) => {
-            error!(error = %e, "failed to publish job");
+            error!(error = %e, "failed to publish jobs");
             StatusCode::INTERNAL_SERVER_ERROR
         }
     }

@@ -710,19 +714,134 @@ fn pick_repo_url_pr(repo: &PrRepoInfo) -> String {
         .to_string()
 }
 
-async fn enqueue_job(state: &Arc<AppState>, repo_url: String, commit_sha: String, labels: Option<Vec<String>>) -> Result<common::JobRequest> {
+struct ParsedJob { id: String, runs_on: Option<String>, script: Option<String> }
+
+async fn fetch_workflow_kdl(base: Option<&str>, token: Option<&str>, owner: &str, repo: &str, sha: &str) -> Result<Option<String>> {
+    // Try Forgejo API: GET /repos/{owner}/{repo}/contents/.solstice/workflow.kdl?ref={sha}
+    if let Some(base) = base {
+        let url = format!("{}/repos/{}/{}/contents/.solstice/workflow.kdl?ref={}", base.trim_end_matches('/'), owner, repo, sha);
+        let client = reqwest::Client::new();
+        let mut req = client.get(&url);
+        if let Some(tok) = token { req = req.bearer_auth(tok); }
+        let resp = req.send().await.into_diagnostic()?;
+        if resp.status().is_success() {
+            let v: serde_json::Value = resp.json().await.into_diagnostic()?;
+            if let Some(enc) = v.get("encoding").and_then(|e| e.as_str()) {
+                if enc.eq_ignore_ascii_case("base64") {
+                    if let Some(content) = v.get("content").and_then(|c| c.as_str()) {
+                        let decoded = base64::engine::general_purpose::STANDARD.decode(content.replace('\n', "")).into_diagnostic()?;
+                        let s = String::from_utf8(decoded).into_diagnostic()?;
+                        return Ok(Some(s));
+                    }
+                }
+            }
+        }
+    }
+    Ok(None)
+}
+
+fn parse_workflow_jobs(kdl: &str) -> Vec<ParsedJob> {
+    // Minimal heuristic parser: find lines starting with `job id="..."` and capture runs_on and an optional `script path="..."` line inside the block.
+    // This is not a full KDL parser but should work for our simple workflows.
+    let mut out = Vec::new();
+    let mut lines = kdl.lines().enumerate().peekable();
+    while let Some((i, line)) = lines.next() {
+        let l = line.trim();
+        if l.starts_with("job ") && l.contains("id=") {
+            // capture id and runs_on on the same line if present
+            let id = capture_attr(l, "id");
+            let mut runs_on = capture_attr(l, "runs_on");
+            let mut script: Option<String> = None;
+            // consume block until closing '}' balancing braces
+            let mut depth = if l.ends_with('{') { 1 } else { 0 };
+            while let Some((_j, ln)) = lines.peek().cloned() {
+                let t = ln.trim();
+                if t.ends_with('{') { depth += 1; }
+                if t.starts_with('}') {
+                    if depth == 0 { break; }
+                    depth -= 1;
+                    if depth == 0 { lines.next(); break; }
+                }
+                // within job block: look for step or script lines; allow `script path="..."` or `step name="..." run="..."`
+                if t.starts_with("script ") && t.contains("path=") {
+                    if let Some(p) = capture_attr(t, "path") { script = Some(p); }
+                }
+                // Also allow runs_on within block as override
+                if t.contains("runs_on=") && runs_on.is_none() {
+                    runs_on = capture_attr(t, "runs_on");
+                }
+                lines.next();
+            }
+            if let Some(id_val) = id {
+                out.push(ParsedJob { id: id_val, runs_on, script });
+            }
+        }
+    }
+    out
+}
+
+fn capture_attr(line: &str, key: &str) -> Option<String> {
+    // Accept key="value" or key='value'
+    let pattern1 = format!("{}=\"", key);
+    if let Some(start) = line.find(&pattern1) {
+        let rest = &line[start + pattern1.len()..];
+        if let Some(end) = rest.find('"') { return Some(rest[..end].to_string()); }
+    }
+    let pattern2 = format!("{}='", key);
+    if let Some(start) = line.find(&pattern2) {
+        let rest = &line[start + pattern2.len()..];
+        if let Some(end) = rest.find('\'') { return Some(rest[..end].to_string()); }
+    }
+    None
+}
+
+async fn enqueue_jobs(state: &Arc<AppState>, repo_url: String, commit_sha: String, labels: Option<Vec<String>>) -> Result<Vec<common::JobRequest>> {
+    use uuid::Uuid;
     if repo_url.is_empty() {
         miette::bail!("missing repo_url in webhook payload");
     }
-    let mut jr = common::JobRequest::new(common::SourceSystem::Forgejo, repo_url, commit_sha);
+
+    // Base request (will be cloned per job when a workflow defines multiple jobs)
+    let mut base = common::JobRequest::new(common::SourceSystem::Forgejo, repo_url, commit_sha);
     // Try to populate repo_owner/repo_name from URL for accurate status routing
-    if let Some((owner, name)) = parse_owner_repo(&jr.repo_url) {
-        jr.repo_owner = Some(owner);
-        jr.repo_name = Some(name);
+    if let Some((owner, name)) = parse_owner_repo(&base.repo_url) {
+        base.repo_owner = Some(owner);
+        base.repo_name = Some(name);
     }
-    // Infer runs_on from repo map, labels, or default
-    jr.runs_on = infer_runs_on(state, &jr.repo_url, labels.as_ref().map(|v| v.as_slice()));
-    common::publish_job(&state.mq_cfg, &jr).await?;
-    info!(request_id = %jr.request_id, repo = %jr.repo_url, sha = %jr.commit_sha, runs_on = ?jr.runs_on, "enqueued job from webhook");
-    Ok(jr)
+    // Attempt to fetch and parse .solstice/workflow.kdl at the commit
+    let mut published: Vec<common::JobRequest> = Vec::new();
+    if let (Some(owner), Some(repo)) = (base.repo_owner.clone(), base.repo_name.clone()) {
+        if let Ok(Some(kdl)) = fetch_workflow_kdl(state.forgejo_base.as_deref(), state.forgejo_token.as_deref(), &owner, &repo, &base.commit_sha).await {
+            let jobs = parse_workflow_jobs(&kdl);
+            if !jobs.is_empty() {
+                let gid = Uuid::new_v4();
+                for pj in jobs {
+                    let mut jr = base.clone();
+                    jr.request_id = Uuid::new_v4(); // unique per job
+                    jr.group_id = Some(gid);
+                    jr.workflow_path = Some(".solstice/workflow.kdl".to_string());
+                    jr.workflow_job_id = Some(pj.id);
+                    // runs_on precedence: job-specific -> inferred (labels/map/default)
+                    jr.runs_on = pj
+                        .runs_on
+                        .clone()
+                        .or_else(|| infer_runs_on(state, &jr.repo_url, labels.as_ref().map(|v| v.as_slice())));
+                    jr.script_path = pj.script.clone();
+
+                    common::publish_job(&state.mq_cfg, &jr).await?;
+                    info!(request_id = %jr.request_id, group_id = ?jr.group_id, repo = %jr.repo_url, sha = %jr.commit_sha, job = ?jr.workflow_job_id, runs_on = ?jr.runs_on, "enqueued workflow job");
+                    published.push(jr);
+                }
+                return Ok(published);
+            }
+        }
+    }
+
+    // Fallback: no workflow or no jobs parsed — enqueue a single job
+    base.runs_on = infer_runs_on(state, &base.repo_url, labels.as_ref().map(|v| v.as_slice()));
+    common::publish_job(&state.mq_cfg, &base).await?;
+    info!(request_id = %base.request_id, repo = %base.repo_url, sha = %base.commit_sha, runs_on = ?base.runs_on, "enqueued single job (no workflow)");
+    published.push(base);
+    Ok(published)
 }

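For reference, fetch_workflow_kdl above assumes the forge's contents endpoint answers with base64-encoded file content. A small hypothetical illustration of just the decode step against a made-up response; the field names come from the code above, everything else is assumed:

// Hypothetical, standalone sketch of the decode step (not part of this commit).
use base64::Engine;

fn decode_contents_response(v: &serde_json::Value) -> Option<String> {
    // Only handle the base64 encoding the handler expects.
    let enc = v.get("encoding")?.as_str()?;
    if !enc.eq_ignore_ascii_case("base64") {
        return None;
    }
    let content = v.get("content")?.as_str()?;
    // The API may wrap the payload; strip newlines before decoding.
    let decoded = base64::engine::general_purpose::STANDARD
        .decode(content.replace('\n', ""))
        .ok()?;
    String::from_utf8(decoded).ok()
}

fn main() {
    // Made-up response body; "am9iIC4uLg==" is base64 for "job ...".
    let resp = serde_json::json!({"encoding": "base64", "content": "am9iIC4uLg=="});
    assert_eq!(decode_contents_response(&resp).as_deref(), Some("job ..."));
}
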
@@ -1,6 +1,6 @@
 [package]
 name = "orchestrator"
-version = "0.1.13"
+version = "0.1.14"
 edition = "2024"
 build = "build.rs"

@@ -348,7 +348,15 @@ async fn main() -> Result<()> {
             disk_gb,
             network: None, // libvirt network handled in backend
             nocloud: image.nocloud,
-            user_data: Some(make_cloud_init_userdata(&job.repo_url, &job.commit_sha, job.request_id, &pubkey_text)),
+            user_data: Some(make_cloud_init_userdata(
+                &job.repo_url,
+                &job.commit_sha,
+                job.request_id,
+                &pubkey_text,
+                job.workflow_job_id.as_deref(),
+                job.script_path.as_deref(),
+                job.group_id,
+            )),
         };
         if !spec.nocloud {
             warn!(label = %label_resolved, "image is not marked nocloud=true; cloud-init may not work");

@@ -427,7 +435,14 @@ fn make_cloud_init_userdata(
     commit_sha: &str,
     _request_id: uuid::Uuid,
     ssh_pubkey: &str,
+    workflow_job_id: Option<&str>,
+    script_path: Option<&str>,
+    group_id: Option<uuid::Uuid>,
 ) -> Vec<u8> {
+    let mut extra = String::new();
+    if let Some(j) = workflow_job_id { extra.push_str(&format!(" workflow_job_id: {}\n", j)); }
+    if let Some(s) = script_path { extra.push_str(&format!(" script_path: {}\n", s)); }
+    if let Some(g) = group_id { extra.push_str(&format!(" group_id: {}\n", g)); }
     let s = format!(
         r#"#cloud-config
 users:

@@ -441,10 +456,12 @@ write_files:
       content: |
         repo_url: {repo}
         commit_sha: {sha}
+{extra}
 "#,
         repo = repo_url,
         sha = commit_sha,
         ssh_pubkey = ssh_pubkey.trim(),
+        extra = extra,
     );
     s.into_bytes()
 }

@@ -475,6 +492,9 @@ mod tests {
             "deadbeef",
             req_id,
             "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEfakepubkey user@example",
+            None,
+            None,
+            None,
         );
         let s = String::from_utf8(data).unwrap();
         assert!(s.contains("#cloud-config"));

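With the three new optional parameters, the rendered user-data carries the per-job fields next to repo_url and commit_sha, which is what the runner's extended JobFile (next file) reads back out of job.yaml. A hypothetical addition to the existing test module sketching that; the values are made up, only the key names come from the format strings above:

// Hypothetical test sketch (not part of this commit); assumes it lives in the
// existing orchestrator `mod tests` so make_cloud_init_userdata is in scope.
#[test]
fn extra_fields_appear_in_user_data() {
    let gid = uuid::Uuid::new_v4();
    let data = make_cloud_init_userdata(
        "https://forge.example/owner/repo.git", // made-up repo URL
        "deadbeef",
        uuid::Uuid::new_v4(),
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEfakepubkey user@example",
        Some("build"),              // workflow_job_id
        Some(".solstice/build.sh"), // script_path
        Some(gid),                  // group_id
    );
    let s = String::from_utf8(data).unwrap();
    // The optional fields should be rendered into the job description.
    assert!(s.contains("workflow_job_id: build"));
    assert!(s.contains("script_path: .solstice/build.sh"));
    assert!(s.contains(&format!("group_id: {}", gid)));
}
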
@@ -45,6 +45,12 @@ struct Opts {
 struct JobFile {
     repo_url: String,
     commit_sha: String,
+    #[serde(default)]
+    workflow_job_id: Option<String>,
+    #[serde(default)]
+    script_path: Option<String>,
+    #[serde(default)]
+    group_id: Option<String>,
 }
 
 async fn read_job_file() -> Result<JobFile> {

@@ -326,8 +332,13 @@ async fn ensure_repo(repo: &str, sha: &str, workdir: &str) -> Result<()> {
     }
 }
 
-async fn run_job_script(workdir: &str) -> Result<i32> {
-    let script = format!("{}/.solstice/job.sh", workdir);
+async fn run_job_script(workdir: &str, script_override: Option<&str>) -> Result<i32> {
+    // Determine the script to execute: prefer override from job.yaml, else default .solstice/job.sh
+    let script = if let Some(path) = script_override {
+        if path.starts_with('/') { path.to_string() } else { format!("{}/{}", workdir, path.trim_start_matches("./")) }
+    } else {
+        format!("{}/.solstice/job.sh", workdir)
+    };
     if !fs::try_exists(&script).await.into_diagnostic()? {
         warn!(path = %script, "job script not found");
         eprintln!("{}", ndjson_line("job_run", "error", &format!("job script not found at {}", script), None));

@@ -458,8 +469,11 @@ async fn main() -> Result<()> {
 
     let code = match ensure_repo(&repo, &sha, &workdir).await {
         Ok(_) => {
+            // Read job.yaml to get optional script override
+            let jf = read_job_file().await.ok();
+            let script_override = jf.as_ref().and_then(|j| j.script_path.as_deref());
             // proceed to run job script
-            run_job_script(&workdir).await?
+            run_job_script(&workdir, script_override).await?
         }
         Err(e) => {
             eprintln!("{}", ndjson_line("env_setup", "error", &format!("failed to prepare repo: {}", e), None));

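The override resolution added to run_job_script treats an absolute script_path as-is and joins a relative one, with any leading ./ stripped, onto the workdir. A standalone sketch of that expression with example inputs; the helper name and paths are hypothetical, not part of this commit:

// Hypothetical helper mirroring the resolution expression added above.
fn resolve_script(workdir: &str, script_override: Option<&str>) -> String {
    if let Some(path) = script_override {
        if path.starts_with('/') {
            // Absolute override: use it verbatim.
            path.to_string()
        } else {
            // Relative override: join onto the checkout directory.
            format!("{}/{}", workdir, path.trim_start_matches("./"))
        }
    } else {
        // No override: fall back to the conventional job script.
        format!("{}/.solstice/job.sh", workdir)
    }
}

fn main() {
    assert_eq!(resolve_script("/work/repo", None), "/work/repo/.solstice/job.sh");
    assert_eq!(resolve_script("/work/repo", Some("./ci/build.sh")), "/work/repo/ci/build.sh");
    assert_eq!(resolve_script("/work/repo", Some("/usr/local/bin/ci.sh")), "/usr/local/bin/ci.sh");
}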