use std::sync::Arc;
use std::time::Duration;

use tracing::{debug, warn};
use uuid::Uuid;

use crate::connect::ConnectClient;
use crate::proto::runner::v1::{LogRow, UpdateLogRequest};
use crate::state::RunnerState;

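/// How often we poll logs-service for new lines between flushes.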
const POLL_INTERVAL: Duration = Duration::from_secs(3);

/// Log category summary from logs-service.
#[derive(serde::Deserialize)]
struct LogCategorySummary {
    category: String,
    count: i64,
}
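
// The summary endpoint is assumed to return JSON shaped like
// `[{"category":"setup","count":12},{"category":"build","count":340}]`;
// the category names here are illustrative, not fixed.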

/// Streams logs from logs-service to Forgejo while a job is in-flight.
/// Returns the final log index so the reporter knows where we left off.
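///
/// # Example
///
/// A minimal sketch of how a caller might drive the streamer; the `client`,
/// `state`, and `logs_base` values are placeholders for whatever the runner
/// already holds, not constructors defined in this module:
///
/// ```ignore
/// let (stop_tx, stop_rx) = tokio::sync::watch::channel(false);
/// let streamer = tokio::spawn(stream_logs(
///     client.clone(),
///     state.clone(),
///     request_id,
///     task_id,
///     logs_base.clone(),
///     stop_rx,
/// ));
/// // ... run the job to completion ...
/// stop_tx.send(true).ok();
/// let final_index = streamer.await.unwrap();
/// ```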
pub async fn stream_logs(
    client: Arc<ConnectClient>,
    state: Arc<RunnerState>,
    request_id: Uuid,
    task_id: i64,
    logs_base: String,
    mut stop: tokio::sync::watch::Receiver<bool>,
) -> i64 {
    let http = reqwest::Client::new();
    // Track how many lines we've sent per category to only send new ones.
    let mut sent_per_category: std::collections::HashMap<String, usize> = Default::default();
    let mut log_index: i64 = 0;

    loop {
        if *stop.borrow() {
            // Final flush
            log_index = poll_and_send(
                &client,
                &state,
                &http,
                &logs_base,
                request_id,
                task_id,
                log_index,
                &mut sent_per_category,
            )
            .await;
            break;
        }

        log_index = poll_and_send(
            &client,
            &state,
            &http,
            &logs_base,
            request_id,
            task_id,
            log_index,
            &mut sent_per_category,
        )
        .await;

        tokio::select! {
            _ = tokio::time::sleep(POLL_INTERVAL) => {}
            _ = stop.changed() => {
                // Final flush
                log_index = poll_and_send(
                    &client, &state, &http, &logs_base,
                    request_id, task_id, log_index,
                    &mut sent_per_category,
                ).await;
                break;
            }
        }
    }

    debug!(task_id, log_index, "log streamer stopped");
    log_index
}

async fn poll_and_send(
    client: &ConnectClient,
    state: &RunnerState,
    http: &reqwest::Client,
    logs_base: &str,
    request_id: Uuid,
    task_id: i64,
    current_index: i64,
    sent_per_category: &mut std::collections::HashMap<String, usize>,
) -> i64 {
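    // logs-service is assumed to expose two read endpoints, used below:
    //   GET {base}/jobs/{request_id}/logs            -> per-category line counts
    //   GET {base}/jobs/{request_id}/logs/{category} -> raw text, one line per row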
    let categories_url = format!(
        "{}/jobs/{}/logs",
        logs_base.trim_end_matches('/'),
        request_id
    );

    let categories = match http.get(&categories_url).send().await {
        Ok(resp) if resp.status().is_success() => resp
            .json::<Vec<LogCategorySummary>>()
            .await
            .unwrap_or_default(),
        _ => return current_index,
    };

    // Check if there are any new lines at all
    let has_new = categories.iter().any(|c| {
        let prev = sent_per_category.get(&c.category).copied().unwrap_or(0);
        c.count as usize > prev
    });
    if !has_new {
        return current_index;
    }

    let mut log_index = current_index;

    for cat in &categories {
        let prev_sent = sent_per_category.get(&cat.category).copied().unwrap_or(0);
        if (cat.count as usize) <= prev_sent {
            continue; // No new lines in this category
        }

        // Fetch all lines for this category
        let url = format!(
            "{}/jobs/{}/logs/{}",
            logs_base.trim_end_matches('/'),
            request_id,
            cat.category
        );

        let text = match http.get(&url).send().await {
            Ok(resp) if resp.status().is_success() => match resp.text().await {
                Ok(t) => t,
                Err(_) => continue,
            },
            _ => continue,
        };

        let all_lines: Vec<&str> = text.lines().collect();
        if all_lines.len() <= prev_sent {
            continue;
        }

        // Only send lines we haven't sent yet
        let new_lines = &all_lines[prev_sent..];

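        // All rows in this batch share one second-granularity timestamp taken
        // at send time; the raw text endpoint carries no per-line timestamps.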
        let now = prost_types::Timestamp {
            seconds: time::OffsetDateTime::now_utc().unix_timestamp(),
            nanos: 0,
        };

        let rows: Vec<LogRow> = new_lines
            .iter()
            .map(|line| LogRow {
                time: Some(now.clone()),
                content: line.to_string(),
            })
            .collect();

        let count = rows.len() as i64;

        let req = UpdateLogRequest {
            task_id,
            index: log_index,
            rows,
            no_more: false,
        };

        match client
            .update_log(&req, &state.identity.uuid, &state.identity.token)
            .await
        {
            Ok(resp) => {
                debug!(
                    task_id,
                    category = %cat.category,
                    new_lines = count,
                    ack_index = resp.ack_index,
                    "streamed logs"
                );
                log_index = resp.ack_index;
            }
            Err(e) => {
                warn!(error = %e, task_id, category = %cat.category, "failed to stream logs");
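                // The failed batch still advances the index and is marked as
                // sent below, so these rows are dropped rather than retried.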
                log_index += count;
            }
        }

        sent_per_category.insert(cat.category.clone(), all_lines.len());
    }

    log_index
}