use std::sync::Arc;
use std::time::Duration;

use tracing::{debug, warn};
use uuid::Uuid;

use crate::connect::ConnectClient;
use crate::proto::runner::v1::{LogRow, UpdateLogRequest};
use crate::state::RunnerState;

/// How often to poll logs-service for new output while a job is running.
const POLL_INTERVAL: Duration = Duration::from_secs(3);

/// Streams logs from logs-service to Forgejo while a job is in-flight.
///
/// Runs until the stop signal is received (job completed), performing one
/// final poll after the signal fires so trailing output is not lost.
/// Returns the final log index so the reporter can continue from there.
pub async fn stream_logs(
    client: Arc<ConnectClient>,
    state: Arc<RunnerState>,
    request_id: Uuid,
    task_id: i64,
    logs_base: String,
    mut stop: tokio::sync::watch::Receiver<bool>,
) -> i64 {
    let http = reqwest::Client::new();
    let mut log_index: i64 = 0;
    let mut last_total: i64 = 0;

    loop {
        // Stop already requested (possible even before the first poll):
        // do one final poll to catch any remaining logs, then exit.
        if *stop.borrow() {
            log_index = poll_and_send(
                &client,
                &state,
                &http,
                &logs_base,
                request_id,
                task_id,
                log_index,
                &mut last_total,
            )
            .await;
            break;
        }

        log_index = poll_and_send(
            &client,
            &state,
            &http,
            &logs_base,
            request_id,
            task_id,
            log_index,
            &mut last_total,
        )
        .await;

        // Wait for the next poll interval or the stop signal, whichever
        // comes first.
        tokio::select! {
            _ = tokio::time::sleep(POLL_INTERVAL) => {}
            _ = stop.changed() => {
                // Signal changed — do one more poll then exit so logs
                // produced right before completion are flushed.
                log_index = poll_and_send(
                    &client,
                    &state,
                    &http,
                    &logs_base,
                    request_id,
                    task_id,
                    log_index,
                    &mut last_total,
                )
                .await;
                break;
            }
        }
    }

    debug!(task_id, log_index, "log streamer stopped");
    log_index
}

/// Log category summary from logs-service.
#[derive(serde::Deserialize)] struct LogCategorySummary { category: String, count: i64, } async fn poll_and_send( client: &ConnectClient, state: &RunnerState, http: &reqwest::Client, logs_base: &str, request_id: Uuid, task_id: i64, current_index: i64, last_total: &mut i64, ) -> i64 { // Get total log count across all categories let categories_url = format!( "{}/jobs/{}/logs", logs_base.trim_end_matches('/'), request_id ); let categories = match http.get(&categories_url).send().await { Ok(resp) if resp.status().is_success() => resp .json::>() .await .unwrap_or_default(), _ => return current_index, }; let new_total: i64 = categories.iter().map(|c| c.count).sum(); if new_total <= *last_total { return current_index; // No new logs } *last_total = new_total; // Fetch all logs and send new ones // We re-fetch everything but only send lines from current_index onward. // This is simple but not optimal for large logs — good enough for streaming. let mut all_lines: Vec = Vec::new(); for cat in &categories { let url = format!( "{}/jobs/{}/logs/{}", logs_base.trim_end_matches('/'), request_id, cat.category ); if let Ok(resp) = http.get(&url).send().await { if resp.status().is_success() { if let Ok(text) = resp.text().await { all_lines.push(format!("::group::{}", cat.category)); for line in text.lines() { all_lines.push(line.to_string()); } all_lines.push("::endgroup::".to_string()); } } } } let total_lines = all_lines.len() as i64; if total_lines <= current_index { return current_index; } // Send only new lines let new_lines = &all_lines[current_index as usize..]; if new_lines.is_empty() { return current_index; } let now = prost_types::Timestamp { seconds: time::OffsetDateTime::now_utc().unix_timestamp(), nanos: 0, }; let rows: Vec = new_lines .iter() .map(|line| LogRow { time: Some(now.clone()), content: line.clone(), }) .collect(); let req = UpdateLogRequest { task_id, index: current_index, rows, no_more: false, }; match client .update_log(&req, &state.identity.uuid, 
&state.identity.token) .await { Ok(resp) => { debug!( task_id, new_lines = new_lines.len(), ack_index = resp.ack_index, "streamed logs" ); resp.ack_index } Err(e) => { warn!(error = %e, task_id, "failed to stream logs"); current_index } } }