diff --git a/.idea/solstice-ci.iml b/.idea/solstice-ci.iml
index bdfeb09..928b227 100644
--- a/.idea/solstice-ci.iml
+++ b/.idea/solstice-ci.iml
@@ -11,6 +11,7 @@
+
diff --git a/Cargo.toml b/Cargo.toml
index 8f7efb2..425f993 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,3 +1,6 @@
[workspace]
members = ["crates/*"]
-resolver = "3"
\ No newline at end of file
+resolver = "3"
+
+[patch.crates-io]
+# Ensure single sea-orm version resolution across crates if needed
\ No newline at end of file
diff --git a/crates/common/src/telemetry.rs b/crates/common/src/telemetry.rs
index 1ae6805..e393cf7 100644
--- a/crates/common/src/telemetry.rs
+++ b/crates/common/src/telemetry.rs
@@ -13,7 +13,8 @@ pub fn init_tracing(_service_name: &str) -> miette::Result {
let fmt_layer = fmt::layer()
.with_target(false)
.with_writer(nb_writer)
- .with_ansi(atty::is(atty::Stream::Stderr));
+ // Force-disable ANSI to keep logs plain for serial capture and gRPC forwarding
+ .with_ansi(false);
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
diff --git a/crates/forge-integration/src/main.rs b/crates/forge-integration/src/main.rs
index 9c05703..627f1e7 100644
--- a/crates/forge-integration/src/main.rs
+++ b/crates/forge-integration/src/main.rs
@@ -83,10 +83,14 @@ struct Opts {
#[arg(long, env = "FORGE_CONTEXT", default_value = "solstice/ci")]
forge_context: String,
- /// Orchestrator HTTP base for logs (e.g., http://localhost:8081)
- #[arg(long, env = "ORCH_HTTP_BASE")]
+ /// Orchestrator HTTP base for logs (deprecated; use LOGS_BASE_URL)
+ #[arg(long, env = "ORCH_HTTP_BASE")] // Deprecated
orch_http_base: Option<String>,
+ /// Logs service base URL (e.g., http://logs.local:8082)
+ #[arg(long, env = "LOGS_BASE_URL")]
+ logs_base_url: Option<String>,
+
/// S3-compatible endpoint for Garage/MinIO (e.g., http://localhost:9000)
#[arg(long, env = "S3_ENDPOINT")]
s3_endpoint: Option<String>,
@@ -112,7 +116,8 @@ struct AppState {
forgejo_base: Option<String>,
forgejo_token: Option<String>,
forge_context: String,
- orch_http_base: Option<String>,
+ orch_http_base: Option<String>, // deprecated
+ logs_base_url: Option<String>,
s3_endpoint: Option<String>,
s3_bucket: Option<String>,
runs_on_default: Option<String>,
@@ -179,6 +184,7 @@ async fn main() -> Result<()> {
forgejo_token: opts.forgejo_token,
forge_context: opts.forge_context,
orch_http_base: opts.orch_http_base,
+ logs_base_url: opts.logs_base_url,
s3_endpoint: opts.s3_endpoint,
s3_bucket: opts.s3_bucket,
runs_on_default: opts.runs_on_default,
diff --git a/crates/logs-service/Cargo.toml b/crates/logs-service/Cargo.toml
new file mode 100644
index 0000000..7b6ae46
--- /dev/null
+++ b/crates/logs-service/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "logs-service"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+common = { path = "../common" }
+clap = { version = "4", features = ["derive", "env"] }
+axum = { version = "0.8", features = ["macros"] }
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+miette = { version = "7", features = ["fancy"] }
+tracing = "0.1"
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "signal", "net"] }
+sea-orm = { version = "1.1.17", default-features = false, features = ["sqlx-postgres", "sqlx-sqlite", "runtime-tokio-rustls", "macros", "with-uuid", "with-chrono" ] }
+sea-orm-migration = { version = "1.1.17" }
+migration = { path = "../migration" }
+uuid = { version = "1", features = ["v4", "serde"] }
+chrono = { version = "0.4", default-features = false, features = ["clock", "std", "serde"] }
diff --git a/crates/logs-service/src/main.rs b/crates/logs-service/src/main.rs
new file mode 100644
index 0000000..9055f57
--- /dev/null
+++ b/crates/logs-service/src/main.rs
@@ -0,0 +1,143 @@
+use axum::{extract::Path, http::StatusCode, response::{IntoResponse, Response}, routing::get, Json, Router};
+use clap::Parser;
+use miette::{IntoDiagnostic as _, Result};
+use sea_orm::{entity::prelude::*, Database, DatabaseConnection, QueryOrder, ColumnTrait, QueryFilter, Statement, DatabaseBackend, Value};
+use sea_orm_migration::MigratorTrait;
+use serde::Serialize;
+use std::net::SocketAddr;
+use tracing::{info, warn};
+use uuid::Uuid;
+
+#[derive(Parser, Debug)]
+#[command(name = "solstice-logs", version, about = "Solstice CI — Logs Service")]
+struct Opts {
+ /// HTTP bind address
+ #[arg(long, env = "HTTP_ADDR", default_value = "0.0.0.0:8082")]
+ http_addr: String,
+
+ /// Database URL
+ #[arg(long, env = "DATABASE_URL")]
+ database_url: String,
+
+ /// OTLP endpoint (e.g., http://localhost:4317)
+ #[arg(long, env = "OTEL_EXPORTER_OTLP_ENDPOINT")]
+ otlp: Option<String>,
+}
+
+#[derive(Clone)]
+struct AppState { db: DatabaseConnection }
+
+#[tokio::main(flavor = "multi_thread")]
+async fn main() -> Result<()> {
+ let _t = common::init_tracing("solstice-logs-service")?;
+ let opts = Opts::parse();
+ let db = Database::connect(opts.database_url).await.into_diagnostic()?;
+ migration::Migrator::up(&db, None).await.into_diagnostic()?;
+
+ let state = AppState { db };
+ let router = Router::new()
+ .route("/jobs/{request_id}/logs", get(list_logs))
+ .route("/jobs/{request_id}/logs/{category}", get(get_logs_by_category))
+ .with_state(state);
+
+ let addr: SocketAddr = opts.http_addr.parse().expect("invalid HTTP_ADDR");
+ info!(%addr, "logs-service starting");
+ axum::serve(
+ tokio::net::TcpListener::bind(addr).await.expect("bind"),
+ router,
+ )
+ .await
+ .into_diagnostic()
+}
+
+mod job_logs {
+ use super::*;
+ #[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
+ #[sea_orm(table_name = "job_logs")]
+ pub struct Model {
+ #[sea_orm(primary_key, auto_increment = false)]
+ pub request_id: Uuid,
+ #[sea_orm(primary_key, auto_increment = false)]
+ pub seq: i64,
+ pub ts: chrono::DateTime<chrono::Utc>,
+ pub stderr: bool,
+ pub line: String,
+ pub category: String,
+ pub level: Option<String>,
+ pub fields: Option<String>,
+ pub has_error: bool,
+ }
+ #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+ pub enum Relation {}
+ impl ActiveModelBehavior for ActiveModel {}
+}
+
+#[derive(Serialize)]
+struct LogCategorySummary {
+ category: String,
+ count: i64,
+ has_errors: bool,
+ first_ts: chrono::DateTime<chrono::Utc>,
+ last_ts: chrono::DateTime<chrono::Utc>,
+}
+
+async fn list_logs(Path(request_id): Path<String>, axum::extract::State(state): axum::extract::State<AppState>) -> Response {
+ let Ok(id) = Uuid::parse_str(&request_id) else { return StatusCode::BAD_REQUEST.into_response(); };
+ // Query per-category summaries using backend-agnostic SQL + parameter binding
+ let backend = state.db.get_database_backend();
+ let (sql, vals): (&str, Vec<Value>) = match backend {
+ DatabaseBackend::Postgres => (
+ "SELECT category AS category, COUNT(*) AS count, MIN(ts) AS first_ts, MAX(ts) AS last_ts, MAX(has_error) AS has_errors FROM job_logs WHERE request_id = $1 GROUP BY category ORDER BY category",
+ vec![Value::Uuid(Some(Box::new(id)))]
+ ),
+ _ => (
+ "SELECT category AS category, COUNT(*) AS count, MIN(ts) AS first_ts, MAX(ts) AS last_ts, MAX(has_error) AS has_errors FROM job_logs WHERE request_id = ? GROUP BY category ORDER BY category",
+ vec![Value::Uuid(Some(Box::new(id)))]
+ ),
+ };
+ let stmt = Statement::from_sql_and_values(backend, sql, vals);
+ let rows = match state.db.query_all(stmt).await.into_diagnostic() {
+ Ok(r) => r,
+ Err(e) => { warn!(error = %e, request_id = %id, "failed to query log categories"); return StatusCode::INTERNAL_SERVER_ERROR.into_response(); }
+ };
+ let mut out: Vec<LogCategorySummary> = Vec::new();
+ for row in rows {
+ let category: String = row.try_get_by("category").unwrap_or_else(|_| "default".into());
+ let count: i64 = row.try_get_by("count").unwrap_or(0);
+ let first_ts: chrono::DateTime<chrono::Utc> = row.try_get_by("first_ts").unwrap_or_else(|_| chrono::Utc::now());
+ let last_ts: chrono::DateTime<chrono::Utc> = row.try_get_by("last_ts").unwrap_or_else(|_| chrono::Utc::now());
+ let has_errors: bool = row.try_get_by("has_errors").unwrap_or(false);
+ out.push(LogCategorySummary { category, count, has_errors, first_ts, last_ts });
+ }
+ Json(out).into_response()
+}
+
+async fn get_logs_by_category(Path((request_id, category)): Path<(String, String)>, axum::extract::State(state): axum::extract::State<AppState>) -> Response {
+ let Ok(id) = Uuid::parse_str(&request_id) else { return StatusCode::BAD_REQUEST.into_response(); };
+ let rows = job_logs::Entity::find()
+ .filter(job_logs::Column::RequestId.eq(id))
+ .filter(job_logs::Column::Category.eq(category.clone()))
+ .order_by_asc(job_logs::Column::Seq)
+ .all(&state.db)
+ .await
+ .into_diagnostic();
+ match rows {
+ Ok(items) if items.is_empty() => StatusCode::NOT_FOUND.into_response(),
+ Ok(items) => {
+ let mut text = String::new();
+ for r in items {
+ if r.stderr || r.has_error || r.level.as_deref() == Some("error") {
+ text.push_str("[stderr] ");
+ }
+ text.push_str(&r.line);
+ if !text.ends_with('\n') { text.push('\n'); }
+ }
+ (
+ StatusCode::OK,
+ [(axum::http::header::CONTENT_TYPE, "text/plain; charset=utf-8")],
+ text,
+ ).into_response()
+ }
+ Err(e) => { warn!(error = %e, request_id = %id, "failed to read logs"); StatusCode::INTERNAL_SERVER_ERROR.into_response() }
+ }
+}
diff --git a/crates/migration/src/lib.rs b/crates/migration/src/lib.rs
index 27bd15e..620651c 100644
--- a/crates/migration/src/lib.rs
+++ b/crates/migration/src/lib.rs
@@ -10,6 +10,7 @@ impl MigratorTrait for Migrator {
Box::new(m2025_10_25_000002_create_vms::Migration),
Box::new(m2025_11_02_000003_create_job_logs::Migration),
Box::new(m2025_11_15_000004_create_job_ssh_keys::Migration),
+ Box::new(m2025_11_18_000005_alter_job_logs_add_category_fields::Migration),
]
}
}
@@ -258,3 +259,81 @@ mod m2025_11_15_000004_create_job_ssh_keys {
}
}
}
+
+
+mod m2025_11_18_000005_alter_job_logs_add_category_fields {
+ use super::*;
+
+ pub struct Migration;
+
+ impl sea_orm_migration::prelude::MigrationName for Migration {
+ fn name(&self) -> &str {
+ "m2025_11_18_000005_alter_job_logs_add_category_fields"
+ }
+ }
+
+ #[derive(Iden)]
+ enum JobLogs {
+ Table,
+ RequestId,
+ Seq,
+ Ts,
+ Stderr,
+ Line,
+ Category,
+ Level,
+ Fields,
+ HasError,
+ }
+
+ #[async_trait::async_trait]
+ impl MigrationTrait for Migration {
+ async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+ // Add new columns if they don't exist
+ manager
+ .alter_table(
+ Table::alter()
+ .table(JobLogs::Table)
+ .add_column_if_not_exists(ColumnDef::new(JobLogs::Category).string().not_null().default("default"))
+ .add_column_if_not_exists(ColumnDef::new(JobLogs::Level).string().null())
+ .add_column_if_not_exists(ColumnDef::new(JobLogs::Fields).text().null())
+ .add_column_if_not_exists(ColumnDef::new(JobLogs::HasError).boolean().not_null().default(false))
+ .to_owned(),
+ )
+ .await?;
+
+ // Composite index to speed fetching per category
+ manager
+ .create_index(
+ Index::create()
+ .name("idx_job_logs_req_cat_seq")
+ .table(JobLogs::Table)
+ .col(JobLogs::RequestId)
+ .col(JobLogs::Category)
+ .col(JobLogs::Seq)
+ .to_owned(),
+ )
+ .await
+ }
+
+ async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+ // Drop the composite index
+ manager
+ .drop_index(Index::drop().name("idx_job_logs_req_cat_seq").table(JobLogs::Table).to_owned())
+ .await?;
+
+ // Drop added columns
+ manager
+ .alter_table(
+ Table::alter()
+ .table(JobLogs::Table)
+ .drop_column(JobLogs::Category)
+ .drop_column(JobLogs::Level)
+ .drop_column(JobLogs::Fields)
+ .drop_column(JobLogs::HasError)
+ .to_owned(),
+ )
+ .await
+ }
+ }
+}
diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml
index ee5d988..e7513e9 100644
--- a/crates/orchestrator/Cargo.toml
+++ b/crates/orchestrator/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "orchestrator"
-version = "0.1.11"
+version = "0.1.13"
edition = "2024"
build = "build.rs"
diff --git a/crates/orchestrator/src/http.rs b/crates/orchestrator/src/http.rs
index 16ffc02..c5581ca 100644
--- a/crates/orchestrator/src/http.rs
+++ b/crates/orchestrator/src/http.rs
@@ -14,33 +14,29 @@ pub struct HttpState {
pub fn build_router(persist: Arc) -> Router {
let state = HttpState { persist };
Router::new()
- .route("/jobs/{request_id}/logs", get(get_logs))
+ .route("/jobs/{request_id}/logs", get(get_logs_moved))
.with_state(state)
}
-async fn get_logs(
+async fn get_logs_moved(
Path(request_id): Path<String>,
- axum::extract::State(state): axum::extract::State,
+ _state: axum::extract::State<HttpState>,
) -> Response {
- let Ok(id) = Uuid::parse_str(&request_id) else {
- return StatusCode::BAD_REQUEST.into_response();
+ let base = std::env::var("LOGS_BASE_URL").ok();
+ let msg = if let Some(b) = base.as_ref() {
+ format!("Logs have moved: {}/jobs/{}/logs", b.trim_end_matches('/'), request_id)
+ } else {
+ "Logs endpoint moved to logs-service; set LOGS_BASE_URL to enable 302 redirects".to_string()
};
- if !state.persist.is_enabled() {
- return (StatusCode::SERVICE_UNAVAILABLE, "persistence disabled").into_response();
- }
- match state.persist.get_logs_text(id).await {
- Ok(Some(text)) => (
- StatusCode::OK,
- [(axum::http::header::CONTENT_TYPE, "text/plain; charset=utf-8")],
- text,
- )
- .into_response(),
- Ok(None) => StatusCode::NOT_FOUND.into_response(),
- Err(e) => {
- warn!(error = %e, request_id = %id, "failed to read logs");
- StatusCode::INTERNAL_SERVER_ERROR.into_response()
- }
+ if let Some(b) = base {
+ let loc = format!("{}/jobs/{}/logs", b.trim_end_matches('/'), request_id);
+ return (
+ StatusCode::MOVED_PERMANENTLY,
+ [(axum::http::header::LOCATION, loc.as_str())],
+ msg,
+ ).into_response();
}
+ (StatusCode::GONE, msg).into_response()
}
pub async fn serve(addr: SocketAddr, persist: Arc, shutdown: impl std::future::Future