Add builder VM support for cross-platform and unprivileged builds

Introduce the forge-builder crate that automatically delegates builds to
an ephemeral VM when the host can't build locally (e.g., QCOW2 targets
without root, or OmniOS images on Linux). The builder detects these
conditions, spins up a VM via vm-manager with user-mode networking,
uploads inputs, streams the remote build output, and retrieves artifacts.

Key changes:
- New forge-builder crate with detection, binary resolution, VM lifecycle
  management, file transfer, and miette diagnostic errors
- BuilderNode added to spec-parser schema for per-spec VM config
- --local and --use-builder CLI flags on the build command
- Feature-gated (default on) integration in forger CLI
- Fix ext4 QCOW2 grub-install failure by using absolute paths in chroot
- Improve debootstrap to pass --components and write full sources.list

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Till Wegmueller 2026-02-15 17:17:30 +01:00
parent 3cb982d35c
commit 19c8379fc6
No known key found for this signature in database
19 changed files with 2125 additions and 19 deletions

1019
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -4,6 +4,7 @@ members = [
"crates/spec-parser",
"crates/forge-oci",
"crates/forge-engine",
"crates/forge-builder",
"crates/forger",
]
@ -52,7 +53,16 @@ tempfile = "3"
bytesize = "2"
indicatif = "0.17"
# Builder VM support
vm-manager = { path = "../vm-manager/crates/vm-manager" }
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls-native-roots", "stream"] }
libc = "0.2"
dirs = "6"
ssh2 = "0.9"
ssh-key = { version = "0.6", features = ["ed25519", "rand_core", "getrandom"] }
# Internal crates
spec-parser = { path = "crates/spec-parser" }
forge-oci = { path = "crates/forge-oci" }
forge-engine = { path = "crates/forge-engine" }
forge-builder = { path = "crates/forge-builder" }

View file

@ -0,0 +1,24 @@
[package]
name = "forge-builder"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
[dependencies]
spec-parser = { workspace = true }
vm-manager = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
reqwest = { workspace = true }
ssh2 = { workspace = true }
ssh-key = { workspace = true }
libc = { workspace = true }
dirs = { workspace = true }
tempfile = { workspace = true }
serde_json = { workspace = true }
tar = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }

View file

@ -0,0 +1,192 @@
use std::path::PathBuf;
use spec_parser::schema::DistroFamily;
use tracing::info;
use crate::error::BuilderError;
/// Resolved forger binary for use inside a builder VM.
pub struct ResolvedBinary {
    /// Host-side path to the binary (dev cross-build or cached release download).
    pub path: PathBuf,
}
/// Map a distro family to the Rust target triple needed inside the builder VM.
///
/// The builder VM runs the target distro's OS, so the forger binary uploaded
/// into it must be compiled for that OS rather than for the host.
pub fn target_triple(distro: &DistroFamily) -> &'static str {
    match distro {
        DistroFamily::Ubuntu => "x86_64-unknown-linux-gnu",
        DistroFamily::OmniOS => "x86_64-unknown-illumos",
    }
}
/// Detect whether the current executable is a dev build (running from cargo target dir).
///
/// Heuristic: a `/target/` component in the current exe path indicates a cargo
/// build directory. Returns `false` when the exe path is unavailable or not
/// valid UTF-8.
pub fn is_dev_build() -> bool {
    match std::env::current_exe() {
        Ok(exe) => exe.to_str().is_some_and(|p| p.contains("/target/")),
        Err(_) => false,
    }
}
/// Find the workspace root by walking up from the current exe looking for a workspace Cargo.toml.
fn find_workspace_root() -> Option<PathBuf> {
    let exe = std::env::current_exe().ok()?;
    exe.parent()?
        .ancestors()
        .find(|dir| {
            // A workspace root is a directory whose Cargo.toml declares [workspace].
            // read_to_string fails on a missing Cargo.toml, which counts as "no".
            std::fs::read_to_string(dir.join("Cargo.toml"))
                .map(|manifest| manifest.contains("[workspace]"))
                .unwrap_or(false)
        })
        .map(|dir| dir.to_path_buf())
}
/// Resolve the forger binary path for the given distro.
///
/// In dev mode: looks for cross-compiled binary in the workspace target directory.
/// In release mode: downloads from GitHub releases (cached locally).
///
/// # Errors
///
/// Returns `BuilderError::BinaryNotFound` when the dev cross-build is missing,
/// or `BuilderError::BinaryDownloadFailed` when the release download fails.
pub async fn resolve_forger_binary(distro: &DistroFamily) -> Result<ResolvedBinary, BuilderError> {
    let triple = target_triple(distro);
    if is_dev_build() {
        resolve_dev_binary(triple)
    } else {
        resolve_release_binary(triple).await
    }
}
/// Locate a cross-compiled forger binary at `<workspace>/target/<triple>/release/forger`.
fn resolve_dev_binary(triple: &str) -> Result<ResolvedBinary, BuilderError> {
    let Some(workspace_root) = find_workspace_root() else {
        return Err(BuilderError::BinaryNotFound {
            target_triple: triple.to_string(),
            path: "<workspace root not found>".to_string(),
        });
    };
    let binary_path = workspace_root
        .join("target")
        .join(triple)
        .join("release")
        .join("forger");
    if binary_path.exists() {
        info!(path = %binary_path.display(), triple, "Using dev cross-compiled forger binary");
        Ok(ResolvedBinary { path: binary_path })
    } else {
        Err(BuilderError::BinaryNotFound {
            target_triple: triple.to_string(),
            path: binary_path.display().to_string(),
        })
    }
}
/// Download (or reuse a cached copy of) the released forger binary for `triple`.
///
/// Cached at `<cache_dir>/forger/builder-binaries/forger-<triple>-v<version>`.
/// The version is pinned to this crate's own version so the binary inside the
/// VM matches the host's forger.
///
/// # Errors
///
/// Returns `BuilderError::BinaryDownloadFailed` on any network, HTTP-status,
/// or filesystem failure.
async fn resolve_release_binary(triple: &str) -> Result<ResolvedBinary, BuilderError> {
    let version = env!("CARGO_PKG_VERSION");
    // Fall back to /tmp when the platform has no per-user cache directory.
    let cache_dir = dirs::cache_dir()
        .unwrap_or_else(|| PathBuf::from("/tmp"))
        .join("forger")
        .join("builder-binaries");
    let cached_path = cache_dir.join(format!("forger-{triple}-v{version}"));
    if cached_path.exists() {
        info!(path = %cached_path.display(), "Using cached forger binary");
        return Ok(ResolvedBinary { path: cached_path });
    }
    let url = release_url(version, triple);
    info!(%url, "Downloading forger binary for builder VM");
    let response = reqwest::get(&url).await.map_err(|e| {
        BuilderError::BinaryDownloadFailed {
            url: url.clone(),
            detail: e.to_string(),
        }
    })?;
    if !response.status().is_success() {
        return Err(BuilderError::BinaryDownloadFailed {
            url,
            detail: format!("HTTP {}", response.status()),
        });
    }
    // Buffer the whole body; release binaries are small enough for this.
    let bytes = response.bytes().await.map_err(|e| {
        BuilderError::BinaryDownloadFailed {
            url: url.clone(),
            detail: format!("reading response body: {e}"),
        }
    })?;
    std::fs::create_dir_all(&cache_dir).map_err(|e| BuilderError::BinaryDownloadFailed {
        url: url.clone(),
        detail: format!("creating cache dir: {e}"),
    })?;
    std::fs::write(&cached_path, &bytes).map_err(|e| BuilderError::BinaryDownloadFailed {
        url: url.clone(),
        detail: format!("writing cached binary: {e}"),
    })?;
    // Make executable
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let perms = std::fs::Permissions::from_mode(0o755);
        std::fs::set_permissions(&cached_path, perms).map_err(|e| {
            BuilderError::BinaryDownloadFailed {
                url,
                detail: format!("chmod: {e}"),
            }
        })?;
    }
    Ok(ResolvedBinary { path: cached_path })
}
/// Build the GitHub releases download URL for a forger version and target triple.
fn release_url(version: &str, triple: &str) -> String {
    let base = "https://github.com/CloudNebulaProject/refraction-forger/releases/download";
    format!("{base}/v{version}/forger-{triple}")
}
/// Check if a path looks like a cross-compiled forger binary exists for this target.
pub fn dev_binary_path(triple: &str) -> Option<PathBuf> {
    let candidate = find_workspace_root()?
        .join("target")
        .join(triple)
        .join("release")
        .join("forger");
    if candidate.exists() {
        Some(candidate)
    } else {
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn target_triple_mapping() {
        assert_eq!(target_triple(&DistroFamily::OmniOS), "x86_64-unknown-illumos");
        assert_eq!(target_triple(&DistroFamily::Ubuntu), "x86_64-unknown-linux-gnu");
    }

    #[test]
    fn release_url_construction() {
        let url = release_url("0.1.0", "x86_64-unknown-linux-gnu");
        assert_eq!(
            url,
            "https://github.com/CloudNebulaProject/refraction-forger/releases/download/v0.1.0/forger-x86_64-unknown-linux-gnu"
        );
    }

    #[test]
    fn dev_detection_heuristic() {
        // `cargo test` normally places the test binary under target/, but a
        // custom CARGO_TARGET_DIR can move it elsewhere, so asserting
        // `is_dev_build()` unconditionally would fail spuriously. Compare
        // against the recomputed predicate instead, which holds everywhere.
        let under_target = std::env::current_exe()
            .ok()
            .and_then(|p| p.to_str().map(|s| s.contains("/target/")))
            .unwrap_or(false);
        assert_eq!(is_dev_build(), under_target);
    }
}

View file

@ -0,0 +1,89 @@
use spec_parser::schema::{BuilderNode, DistroFamily};
/// Resolved builder VM configuration with defaults applied.
///
/// Produced by [`BuilderConfig::resolve`]; every field is concrete (no
/// `Option`s), with convention defaults filled in where the spec was silent.
#[derive(Debug, Clone)]
pub struct BuilderConfig {
    /// OCI reference, URL, or local path to the builder VM image.
    pub image: String,
    /// Number of virtual CPUs for the builder VM.
    pub vcpus: u16,
    /// Memory in MB for the builder VM.
    pub memory_mb: u64,
}
impl BuilderConfig {
    /// Resolve a BuilderConfig from the optional spec node and distro family.
    /// Spec values take precedence; convention defaults fill the rest,
    /// merged field by field (a partially-filled node keeps defaults for the
    /// fields it omits).
    pub fn resolve(spec_builder: Option<&BuilderNode>, distro: &DistroFamily) -> Self {
        let image = spec_builder
            .and_then(|node| node.image.clone())
            .unwrap_or_else(|| Self::default_image(distro));
        let vcpus = spec_builder.and_then(|node| node.vcpus).unwrap_or(2);
        let memory_mb = spec_builder.and_then(|node| node.memory).unwrap_or(2048);
        Self {
            image,
            vcpus,
            memory_mb,
        }
    }

    /// Convention default image for each supported distro family.
    fn default_image(distro: &DistroFamily) -> String {
        let repo = match distro {
            DistroFamily::OmniOS => "omnios-builder",
            DistroFamily::Ubuntu => "ubuntu-builder",
        };
        format!("oci://ghcr.io/cloudnebulaproject/{repo}:latest")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn defaults_for_ubuntu() {
        // No spec node: all three fields come from convention defaults.
        let config = BuilderConfig::resolve(None, &DistroFamily::Ubuntu);
        assert!(config.image.contains("ubuntu-builder"));
        assert_eq!(config.vcpus, 2);
        assert_eq!(config.memory_mb, 2048);
    }

    #[test]
    fn defaults_for_omnios() {
        // The default image is distro-specific.
        let config = BuilderConfig::resolve(None, &DistroFamily::OmniOS);
        assert!(config.image.contains("omnios-builder"));
    }

    #[test]
    fn spec_overrides_defaults() {
        // A fully-populated node wins on every field.
        let node = BuilderNode {
            image: Some("oci://custom/image:v1".to_string()),
            vcpus: Some(4),
            memory: Some(4096),
        };
        let config = BuilderConfig::resolve(Some(&node), &DistroFamily::Ubuntu);
        assert_eq!(config.image, "oci://custom/image:v1");
        assert_eq!(config.vcpus, 4);
        assert_eq!(config.memory_mb, 4096);
    }

    #[test]
    fn partial_spec_fills_remaining_with_defaults() {
        // Merge is per-field: only vcpus is specified; image/memory fall back.
        let node = BuilderNode {
            image: None,
            vcpus: Some(8),
            memory: None,
        };
        let config = BuilderConfig::resolve(Some(&node), &DistroFamily::Ubuntu);
        assert!(config.image.contains("ubuntu-builder"));
        assert_eq!(config.vcpus, 8);
        assert_eq!(config.memory_mb, 2048);
    }
}

View file

@ -0,0 +1,92 @@
use spec_parser::schema::{DistroFamily, ImageSpec, TargetKind};
/// Determine whether the current host needs a builder VM to build this spec.
///
/// Returns `true` when:
/// - Any matching target is QCOW2 and we're not running as root (needs losetup/mount/chroot)
/// - The distro is OmniOS and we're not on illumos (needs pkg)
pub fn needs_builder(spec: &ImageSpec, target_name: Option<&str>, force_local: bool) -> bool {
    // An explicit --local request always wins over detection.
    if force_local {
        return false;
    }
    // SAFETY: geteuid() takes no arguments and cannot fail.
    let unprivileged = unsafe { libc::geteuid() } != 0;
    // QCOW2 targets need loop devices, mounts, and chroot — i.e. root.
    // With no target filter every target counts; otherwise only the named one.
    let qcow2_selected = spec.targets.iter().any(|t| {
        let selected = target_name.map_or(true, |n| n == t.name);
        selected && t.kind == TargetKind::Qcow2
    });
    if qcow2_selected && unprivileged {
        return true;
    }
    // OmniOS images need illumos `pkg`, which only exists on illumos hosts.
    let distro = DistroFamily::from_distro_str(spec.distro.as_deref());
    distro == DistroFamily::OmniOS && !cfg!(target_os = "illumos")
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a minimal ImageSpec from KDL with the given distro and (name, kind) targets.
    fn make_spec(distro: Option<&str>, targets: &[(&str, &str)]) -> ImageSpec {
        let mut kdl = String::from("metadata name=\"test\" version=\"0.1.0\"\n");
        if let Some(d) = distro {
            kdl.push_str(&format!("distro \"{d}\"\n"));
        }
        kdl.push_str("repositories {}\n");
        for (name, kind) in targets {
            kdl.push_str(&format!("target \"{name}\" kind=\"{kind}\" {{\n"));
            if *kind == "qcow2" {
                // qcow2 targets require a disk size to parse
                kdl.push_str(" disk-size \"10G\"\n");
            }
            kdl.push_str("}\n");
        }
        spec_parser::parse(&kdl).unwrap()
    }

    #[test]
    fn force_local_always_returns_false() {
        let spec = make_spec(Some("ubuntu-22.04"), &[("vm", "qcow2")]);
        assert!(!needs_builder(&spec, None, true));
    }

    #[test]
    fn no_qcow2_targets_no_builder_needed() {
        // Ubuntu + OCI-only targets: neither detection rule fires.
        let spec = make_spec(Some("ubuntu-22.04"), &[("img", "oci")]);
        assert!(!needs_builder(&spec, None, false));
    }

    #[test]
    fn qcow2_without_root_needs_builder() {
        let spec = make_spec(Some("ubuntu-22.04"), &[("vm", "qcow2")]);
        // Meaningful either way: builder is needed exactly when we're not root.
        let is_root = unsafe { libc::geteuid() == 0 };
        assert_eq!(needs_builder(&spec, None, false), !is_root);
    }

    #[test]
    fn omnios_on_linux_needs_builder() {
        let spec = make_spec(Some("omnios"), &[("img", "artifact")]);
        let is_illumos = cfg!(target_os = "illumos");
        assert_eq!(needs_builder(&spec, None, false), !is_illumos);
    }

    #[test]
    fn target_filter_respected() {
        let spec = make_spec(Some("ubuntu-22.04"), &[("vm", "qcow2"), ("img", "oci")]);
        // Selecting only "img" (an OCI target, Ubuntu distro): the qcow2
        // target is filtered out and the OmniOS rule doesn't apply, so no
        // builder is needed regardless of privileges.
        assert!(!needs_builder(&spec, Some("img"), false));
    }
}

View file

@ -0,0 +1,71 @@
// See note in vm-manager/src/error.rs about thiserror 2 + edition 2024
#![allow(unused_assignments)]
use miette::Diagnostic;
use thiserror::Error;
#[derive(Debug, Error, Diagnostic)]
pub enum BuilderError {
    /// The builder VM image reference could not be turned into a local file.
    #[error("failed to resolve builder image: {detail}")]
    #[diagnostic(
        code(forge_builder::image_resolve_failed),
        help("ensure the builder image reference is valid and the registry is reachable — for OCI images, check that GITHUB_TOKEN is set")
    )]
    ImageResolveFailed { detail: String },
    /// A VM lifecycle step failed; `phase` names the step (prepare/start/ssh_connect/destroy).
    #[error("builder VM lifecycle error at {phase}: {detail}")]
    #[diagnostic(
        code(forge_builder::vm_lifecycle),
        help("ensure QEMU and KVM are available on this host — install qemu-system-x86_64 and check /dev/kvm permissions")
    )]
    VmLifecycle { phase: String, detail: String },
    /// Dev mode only: the cross-compiled forger binary was not found on disk.
    #[error("forger binary for target {target_triple} not found at {path}")]
    #[diagnostic(
        code(forge_builder::binary_not_found),
        help("build the cross-compiled binary with:\n cargo build --target {target_triple} --release -p forger")
    )]
    BinaryNotFound {
        target_triple: String,
        path: String,
    },
    /// Release mode only: downloading the forger binary from GitHub failed.
    #[error("failed to download forger binary from {url}: {detail}")]
    #[diagnostic(
        code(forge_builder::binary_download_failed),
        help("check network connectivity and that the release exists at the given URL")
    )]
    BinaryDownloadFailed { url: String, detail: String },
    /// Uploading inputs to (or running commands on) the builder VM failed.
    #[error("file transfer to builder VM failed: {detail}")]
    #[diagnostic(
        code(forge_builder::transfer_failed),
        help("check that the builder VM is reachable via SSH and has enough disk space")
    )]
    TransferFailed { detail: String },
    /// The forger process inside the VM exited non-zero.
    #[error("remote build inside builder VM failed with exit code {exit_code}")]
    #[diagnostic(
        code(forge_builder::remote_build_failed),
        help("check the build output above for errors — the forger build ran inside the builder VM")
    )]
    RemoteBuildFailed { exit_code: i32 },
    /// Retrieving artifacts from the VM failed (the build itself may have succeeded).
    #[error("failed to download build artifacts from builder VM: {detail}")]
    #[diagnostic(
        code(forge_builder::download_failed),
        help("the build may have succeeded but artifact retrieval failed — check VM connectivity")
    )]
    DownloadFailed { detail: String },
    /// Ephemeral Ed25519 keypair generation or serialization failed.
    #[error("SSH keypair generation failed: {detail}")]
    #[diagnostic(
        code(forge_builder::keygen_failed),
        help("this is an internal error in Ed25519 key generation — please report it")
    )]
    KeygenFailed { detail: String },
    /// Pass-through for errors raised directly by vm-manager.
    #[error(transparent)]
    #[diagnostic(code(forge_builder::vm_error))]
    VmError(#[from] vm_manager::VmError),
}

View file

@ -0,0 +1,96 @@
pub mod binary;
pub mod config;
pub mod detect;
pub mod error;
pub mod lifecycle;
pub mod transfer;
use std::io::{stderr, stdout};
use std::path::Path;
use spec_parser::schema::{DistroFamily, ImageSpec};
use tracing::info;
use crate::config::BuilderConfig;
use crate::error::BuilderError;
/// Run a forger build inside a builder VM.
///
/// This is the top-level orchestrator that:
/// 1. Resolves builder VM configuration from the spec
/// 2. Resolves the correct forger binary for the target OS
/// 3. Spins up an ephemeral builder VM
/// 4. Uploads inputs (binary, spec, files)
/// 5. Runs the build via SSH
/// 6. Downloads output artifacts
/// 7. Tears down the VM (always, even on error)
///
/// # Errors
///
/// Propagates any `BuilderError` from the steps above. Teardown failures are
/// only logged so they never mask the build result.
pub async fn run_in_builder(
    spec: &ImageSpec,
    spec_path: &Path,
    files_dir: &Path,
    output_dir: &Path,
    target: Option<&str>,
    profiles: &[String],
) -> Result<(), BuilderError> {
    let distro = DistroFamily::from_distro_str(spec.distro.as_deref());
    let config = BuilderConfig::resolve(spec.builder.as_ref(), &distro);
    let binary = binary::resolve_forger_binary(&distro).await?;
    info!("Starting builder VM for remote build");
    let session = lifecycle::BuilderSession::start(&config).await?;
    // Capture the build result before teardown so cleanup always runs.
    let result = run_build_in_session(&session, &binary.path, spec_path, files_dir, output_dir, target, profiles).await;
    // Always teardown, even on error
    info!("Tearing down builder VM");
    if let Err(e) = session.teardown().await {
        tracing::warn!(error = %e, "Builder VM teardown failed (build result preserved)");
    }
    result
}
/// Upload inputs, run the forger build over SSH inside the VM, and download artifacts.
///
/// The remote invocation passes `--local` so the forger inside the VM never
/// tries to delegate to yet another builder VM.
///
/// NOTE(review): `target` and profile names are interpolated into the remote
/// shell command without quoting — a value containing spaces or shell
/// metacharacters would break or alter the command. Confirm these are
/// validated upstream, or add shell quoting here.
async fn run_build_in_session(
    session: &lifecycle::BuilderSession,
    binary_path: &Path,
    spec_path: &Path,
    files_dir: &Path,
    output_dir: &Path,
    target: Option<&str>,
    profiles: &[String],
) -> Result<(), BuilderError> {
    // Upload inputs
    transfer::upload_build_inputs(session, binary_path, spec_path, files_dir)?;
    // Build the remote command
    let mut cmd = String::from(
        "sudo /tmp/forger-build/forger build -s /tmp/forger-build/spec.kdl -o /tmp/forger-build/output/ --local",
    );
    if let Some(t) = target {
        cmd.push_str(&format!(" -t {t}"));
    }
    for p in profiles {
        cmd.push_str(&format!(" -p {p}"));
    }
    info!(cmd = %cmd, "Running build in builder VM");
    // Stream output to the user's terminal
    let (_, _, exit_code) =
        vm_manager::ssh::exec_streaming(&session.ssh_session, &cmd, stdout(), stderr())
            .map_err(|e| BuilderError::TransferFailed {
                detail: format!("remote exec: {e}"),
            })?;
    if exit_code != 0 {
        return Err(BuilderError::RemoteBuildFailed { exit_code });
    }
    // Download artifacts
    transfer::download_artifacts(session, output_dir)?;
    info!(output = %output_dir.display(), "Build artifacts downloaded successfully");
    Ok(())
}

View file

@ -0,0 +1,194 @@
use std::path::PathBuf;
use std::time::Duration;
use ssh2::Session;
use tracing::info;
use vm_manager::image::ImageManager;
use vm_manager::traits::Hypervisor;
use vm_manager::types::{CloudInitConfig, NetworkConfig, SshConfig, VmHandle, VmSpec};
use vm_manager::RouterHypervisor;
use crate::config::BuilderConfig;
use crate::error::BuilderError;
/// An active builder VM session with SSH connectivity.
pub struct BuilderSession {
    /// Hypervisor that owns the VM; needed again at teardown.
    pub hypervisor: RouterHypervisor,
    /// Handle identifying the running VM.
    pub handle: VmHandle,
    /// Established SSH session to the VM's `builder` user.
    pub ssh_session: Session,
    /// Host-side forwarded port the SSH session connected through.
    pub ssh_port: u16,
}
impl BuilderSession {
    /// Start a builder VM: resolve image, generate SSH keys, create + boot VM, connect SSH.
    ///
    /// # Errors
    ///
    /// Returns `ImageResolveFailed`, `KeygenFailed`, or `VmLifecycle` with the
    /// failing phase ("prepare", "start", or "ssh_connect").
    pub async fn start(config: &BuilderConfig) -> Result<Self, BuilderError> {
        info!(image = %config.image, vcpus = config.vcpus, memory_mb = config.memory_mb, "Starting builder VM");
        // 1. Resolve builder image
        let image_path = resolve_builder_image(&config.image).await?;
        // 2. Generate ephemeral SSH keypair
        let (pub_key, priv_pem) = generate_ssh_keypair()?;
        // 3. Build cloud-config with builder user + injected pubkey
        //    (indentation inside the raw string is YAML-significant)
        let cloud_config = format!(
            r#"#cloud-config
users:
  - name: builder
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    ssh_authorized_keys:
      - {pub_key}
"#
        );
        let ssh_config = SshConfig {
            user: "builder".to_string(),
            public_key: Some(pub_key),
            private_key_path: None,
            private_key_pem: Some(priv_pem),
        };
        // 4. Create VmSpec with user-mode networking (no root needed on host)
        let vm_name = format!("forger-builder-{}", uuid_short());
        let spec = VmSpec {
            name: vm_name.clone(),
            image_path: image_path.clone(),
            vcpus: config.vcpus,
            memory_mb: config.memory_mb,
            disk_gb: None,
            network: NetworkConfig::User,
            cloud_init: Some(CloudInitConfig {
                user_data: cloud_config.into_bytes(),
                instance_id: Some(vm_name.clone()),
                hostname: Some("builder".to_string()),
            }),
            ssh: Some(ssh_config.clone()),
        };
        // 5. Prepare + start VM
        let hypervisor = RouterHypervisor::new(None, None);
        let handle = hypervisor.prepare(&spec).await.map_err(|e| {
            BuilderError::VmLifecycle {
                phase: "prepare".into(),
                detail: e.to_string(),
            }
        })?;
        let handle = hypervisor.start(&handle).await.map_err(|e| {
            BuilderError::VmLifecycle {
                phase: "start".into(),
                detail: e.to_string(),
            }
        })?;
        // 6. Connect SSH with retry (user-mode networking uses host port forwarding)
        // NOTE(review): falls back to port 22 when no host port was assigned —
        // confirm the hypervisor always sets ssh_host_port for user networking.
        let ssh_port = handle.ssh_host_port.unwrap_or(22);
        let ssh_ip = "127.0.0.1";
        info!(port = ssh_port, "Waiting for SSH connection to builder VM");
        let ssh_session = vm_manager::ssh::connect_with_retry(
            ssh_ip,
            ssh_port,
            &ssh_config,
            Duration::from_secs(120),
        )
        .await
        .map_err(|e| BuilderError::VmLifecycle {
            phase: "ssh_connect".into(),
            detail: e.to_string(),
        })?;
        info!("Builder VM ready");
        Ok(Self {
            hypervisor,
            handle,
            ssh_session,
            ssh_port,
        })
    }

    /// Tear down the builder VM, destroying all resources.
    ///
    /// Consumes the session; the SSH connection is closed before the VM is
    /// destroyed so teardown isn't racing a live connection.
    pub async fn teardown(self) -> Result<(), BuilderError> {
        info!(name = %self.handle.name, "Tearing down builder VM");
        // Drop SSH session first
        drop(self.ssh_session);
        self.hypervisor
            .destroy(self.handle)
            .await
            .map_err(|e| BuilderError::VmLifecycle {
                phase: "destroy".into(),
                detail: e.to_string(),
            })?;
        Ok(())
    }
}
/// Resolve builder image from OCI reference, URL, or local path.
async fn resolve_builder_image(image: &str) -> Result<PathBuf, BuilderError> {
    let mgr = ImageManager::new();
    // OCI references go through the image manager's registry client.
    if let Some(reference) = image.strip_prefix("oci://") {
        return mgr
            .pull_oci(reference, None)
            .await
            .map_err(|e| BuilderError::ImageResolveFailed {
                detail: format!("OCI pull {reference}: {e}"),
            });
    }
    // Plain HTTP(S) URLs are downloaded directly.
    if image.starts_with("http://") || image.starts_with("https://") {
        return mgr
            .pull(image, None)
            .await
            .map_err(|e| BuilderError::ImageResolveFailed {
                detail: format!("download {image}: {e}"),
            });
    }
    // Anything else is treated as a local filesystem path.
    let path = PathBuf::from(image);
    if path.exists() {
        Ok(path)
    } else {
        Err(BuilderError::ImageResolveFailed {
            detail: format!("local image not found: {}", path.display()),
        })
    }
}
/// Generate an ephemeral Ed25519 SSH keypair for one builder VM session.
///
/// Returns `(public_key_openssh, private_key_openssh_pem)`. The key is held
/// only in memory here; callers decide how it is injected into the VM.
fn generate_ssh_keypair() -> Result<(String, String), BuilderError> {
    use ssh_key::{Algorithm, LineEnding, PrivateKey, rand_core::OsRng};
    let sk = PrivateKey::random(&mut OsRng, Algorithm::Ed25519).map_err(|e| {
        BuilderError::KeygenFailed {
            detail: format!("Ed25519 key generation: {e}"),
        }
    })?;
    let pub_openssh = sk.public_key().to_openssh().map_err(|e| {
        BuilderError::KeygenFailed {
            detail: format!("serialize public key: {e}"),
        }
    })?;
    let priv_pem = sk.to_openssh(LineEnding::LF).map_err(|e| {
        BuilderError::KeygenFailed {
            detail: format!("serialize private key: {e}"),
        }
    })?;
    Ok((pub_openssh, priv_pem.to_string()))
}
/// Short pseudo-unique hex suffix for builder VM names.
///
/// Derived from the low 32 bits of the Unix time in milliseconds — not a real
/// UUID; two builders started in the same millisecond would collide, which is
/// acceptable for naming ephemeral VMs.
fn uuid_short() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let millis = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis())
        .unwrap_or(0);
    format!("{:x}", millis & 0xFFFF_FFFF)
}

View file

@ -0,0 +1,149 @@
use std::path::{Path, PathBuf};
use tracing::info;
use vm_manager::ssh;
use crate::error::BuilderError;
use crate::lifecycle::BuilderSession;
const REMOTE_BUILD_DIR: &str = "/tmp/forger-build";
/// Upload all build inputs to the builder VM.
///
/// Creates `/tmp/forger-build/` (plus `output/`) on the VM, then uploads the
/// forger binary (made executable), the spec as `spec.kdl`, and — when the
/// local directory exists — the `files/` tree.
///
/// # Errors
///
/// Returns `BuilderError::TransferFailed` if any remote command or upload fails.
pub fn upload_build_inputs(
    session: &BuilderSession,
    forger_binary: &Path,
    spec_path: &Path,
    files_dir: &Path,
) -> Result<(), BuilderError> {
    let sess = &session.ssh_session;
    // Create remote build directory
    ssh::exec(sess, &format!("mkdir -p {REMOTE_BUILD_DIR}/output"))
        .map_err(|e| BuilderError::TransferFailed {
            detail: format!("mkdir: {e}"),
        })?;
    // Upload forger binary
    info!("Uploading forger binary to builder VM");
    let remote_forger = PathBuf::from(format!("{REMOTE_BUILD_DIR}/forger"));
    ssh::upload(sess, forger_binary, &remote_forger).map_err(|e| {
        BuilderError::TransferFailed {
            detail: format!("upload forger binary: {e}"),
        }
    })?;
    // Make executable
    ssh::exec(sess, &format!("chmod +x {REMOTE_BUILD_DIR}/forger")).map_err(|e| {
        BuilderError::TransferFailed {
            detail: format!("chmod forger: {e}"),
        }
    })?;
    // Upload spec file
    info!("Uploading spec file");
    let remote_spec = PathBuf::from(format!("{REMOTE_BUILD_DIR}/spec.kdl"));
    ssh::upload(sess, spec_path, &remote_spec).map_err(|e| BuilderError::TransferFailed {
        detail: format!("upload spec: {e}"),
    })?;
    // Upload files/ directory if it exists (tar locally → upload → extract remotely)
    if files_dir.exists() && files_dir.is_dir() {
        upload_directory(sess, files_dir, &format!("{REMOTE_BUILD_DIR}/files"))?;
    }
    Ok(())
}
/// Upload a local directory to the VM by creating a tar, uploading, and extracting.
///
/// The archive is streamed directly into a temp file rather than buffered in a
/// `Vec<u8>` first — `files/` may contain large artifacts, and the old
/// build-in-memory-then-copy approach held the whole archive in RAM twice.
///
/// # Errors
///
/// Returns `BuilderError::TransferFailed` on archiving, upload, or remote
/// extraction failures.
fn upload_directory(
    sess: &ssh2::Session,
    local_dir: &Path,
    remote_dir: &str,
) -> Result<(), BuilderError> {
    info!(local = %local_dir.display(), remote = %remote_dir, "Uploading directory to builder VM");
    let tmp = tempfile::NamedTempFile::new().map_err(|e| BuilderError::TransferFailed {
        detail: format!("tempfile: {e}"),
    })?;
    {
        // Write tar entries straight to the temp file (File is unbuffered
        // writes-through, so finish() leaves the archive fully on disk).
        let mut ar = tar::Builder::new(tmp.as_file());
        ar.append_dir_all(".", local_dir)
            .map_err(|e| BuilderError::TransferFailed {
                detail: format!("tar {}: {e}", local_dir.display()),
            })?;
        ar.finish().map_err(|e| BuilderError::TransferFailed {
            detail: format!("tar finish: {e}"),
        })?;
    }
    let remote_tar = PathBuf::from(format!("{remote_dir}.tar"));
    ssh::upload(sess, tmp.path(), &remote_tar).map_err(|e| BuilderError::TransferFailed {
        detail: format!("upload tar: {e}"),
    })?;
    // Extract on remote, then clean up the archive.
    ssh::exec(
        sess,
        &format!("mkdir -p {remote_dir} && tar xf {remote_dir}.tar -C {remote_dir} && rm {remote_dir}.tar"),
    )
    .map_err(|e| BuilderError::TransferFailed {
        detail: format!("extract tar: {e}"),
    })?;
    Ok(())
}
/// Download build artifacts from the builder VM.
///
/// Lists regular files directly under the remote output directory and copies
/// each one into `output_dir` (created if missing).
///
/// NOTE(review): `find -printf` is a GNU extension — confirm it is available
/// in the OmniOS builder image (illumos `find` may lack it); piping
/// `find -type f` through `basename` would be portable.
///
/// # Errors
///
/// Returns `BuilderError::DownloadFailed` on listing, directory-creation, or
/// transfer failures.
pub fn download_artifacts(
    session: &BuilderSession,
    output_dir: &Path,
) -> Result<(), BuilderError> {
    let sess = &session.ssh_session;
    let remote_output = format!("{REMOTE_BUILD_DIR}/output");
    // List files in remote output directory
    let (stdout, _, exit_code) = ssh::exec(
        sess,
        &format!("find {remote_output} -maxdepth 1 -type f -printf '%f\\n'"),
    )
    .map_err(|e| BuilderError::DownloadFailed {
        detail: format!("list remote files: {e}"),
    })?;
    if exit_code != 0 {
        return Err(BuilderError::DownloadFailed {
            detail: "failed to list remote output directory".to_string(),
        });
    }
    std::fs::create_dir_all(output_dir).map_err(|e| BuilderError::DownloadFailed {
        detail: format!("create output dir: {e}"),
    })?;
    for filename in stdout.lines() {
        let filename = filename.trim();
        if filename.is_empty() {
            continue;
        }
        // Interpolate the actual listed filename into the remote path so each
        // artifact is fetched individually.
        let remote_path = PathBuf::from(format!("{remote_output}/{filename}"));
        let local_path = output_dir.join(filename);
        info!(file = %filename, "Downloading artifact from builder VM");
        ssh::download(sess, &remote_path, &local_path).map_err(|e| {
            BuilderError::DownloadFailed {
                detail: format!("download {filename}: {e}"),
            }
        })?;
    }
    Ok(())
}

View file

@ -119,15 +119,23 @@ async fn execute_apt(
let mirror_url = first_mirror
.map(|m| m.url.as_str())
.unwrap_or("http://archive.ubuntu.com/ubuntu");
let components = first_mirror.and_then(|m| m.components.as_deref());
// Bootstrap the rootfs
crate::tools::apt::debootstrap(runner, suite, root, mirror_url).await?;
crate::tools::apt::debootstrap(runner, suite, root, mirror_url, components).await?;
// Add any additional APT mirror sources (skip the first one used for debootstrap)
for mirror in spec.repositories.apt_mirrors.iter().skip(1) {
let components = mirror.components.as_deref().unwrap_or("main");
let entry = format!("deb {} {} {}", mirror.url, mirror.suite, components);
crate::tools::apt::add_source(runner, root, &entry).await?;
// Write sources.list with full component lists from all apt-mirror entries
let source_entries: Vec<String> = spec
.repositories
.apt_mirrors
.iter()
.map(|m| {
let components = m.components.as_deref().unwrap_or("main");
format!("deb {} {} {}", m.url, m.suite, components)
})
.collect();
if !source_entries.is_empty() {
crate::tools::apt::write_sources_list(root, &source_entries).await?;
}
// Update package lists

View file

@ -90,7 +90,7 @@ pub async fn build_qcow2_ext4(
"chroot",
&[
mount_str,
"grub-install",
"/usr/sbin/grub-install",
"--target=x86_64-efi",
"--efi-directory=/boot/efi",
"--no-nvram",
@ -102,7 +102,7 @@ pub async fn build_qcow2_ext4(
runner
.run(
"chroot",
&[mount_str, "grub-mkconfig", "-o", "/boot/grub/grub.cfg"],
&[mount_str, "/usr/sbin/grub-mkconfig", "-o", "/boot/grub/grub.cfg"],
)
.await?;

View file

@ -10,14 +10,18 @@ pub async fn debootstrap(
suite: &str,
root: &str,
mirror: &str,
components: Option<&str>,
) -> Result<(), ForgeError> {
info!(suite, root, mirror, "Running debootstrap");
runner
.run(
"debootstrap",
&["--arch", "amd64", suite, root, mirror],
)
.await?;
info!(suite, root, mirror, ?components, "Running debootstrap");
let mut args = vec!["--arch", "amd64"];
// Format: --components=main,universe (comma-separated)
let comp_arg;
if let Some(c) = components {
comp_arg = format!("--components={}", c.replace(' ', ","));
args.push(&comp_arg);
}
args.extend_from_slice(&[suite, root, mirror]);
runner.run("debootstrap", &args).await?;
Ok(())
}
@ -47,6 +51,22 @@ pub async fn install(
Ok(())
}
/// Write the primary sources.list in the chroot, replacing the debootstrap default.
///
/// `entries` are complete "deb <url> <suite> <components>" lines; the file is
/// written with a trailing newline.
///
/// NOTE(review): this `async fn` performs a blocking `std::fs::write` — fine
/// for a tiny file, but consider `tokio::fs::write` if this ever runs on a
/// latency-sensitive executor.
pub async fn write_sources_list(
    root: &str,
    entries: &[String],
) -> Result<(), ForgeError> {
    let sources_path = Path::new(root).join("etc/apt/sources.list");
    info!(?sources_path, count = entries.len(), "Writing sources.list");
    // join() only puts separators between entries; append the final newline.
    let content = entries.join("\n") + "\n";
    std::fs::write(&sources_path, content).map_err(|e| ForgeError::Overlay {
        action: "write sources.list".to_string(),
        detail: sources_path.display().to_string(),
        source: e,
    })?;
    Ok(())
}
/// Add an APT source entry to the chroot's sources.list.d/.
pub async fn add_source(
runner: &dyn ToolRunner,
@ -112,7 +132,7 @@ mod tests {
#[tokio::test]
async fn test_debootstrap_args() {
let runner = MockToolRunner::new();
debootstrap(&runner, "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu")
debootstrap(&runner, "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu", None)
.await
.unwrap();
@ -125,6 +145,22 @@ mod tests {
);
}
#[tokio::test]
async fn test_debootstrap_with_components() {
let runner = MockToolRunner::new();
debootstrap(&runner, "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu", Some("main universe"))
.await
.unwrap();
let calls = runner.calls();
assert_eq!(calls.len(), 1);
assert_eq!(calls[0].0, "debootstrap");
assert_eq!(
calls[0].1,
vec!["--arch", "amd64", "--components=main,universe", "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu"]
);
}
#[tokio::test]
async fn test_install_args() {
let runner = MockToolRunner::new();

View file

@ -4,10 +4,15 @@ version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
[features]
default = ["builder"]
builder = ["dep:forge-builder"]
[dependencies]
spec-parser = { workspace = true }
forge-oci = { workspace = true }
forge-engine = { workspace = true }
forge-builder = { workspace = true, optional = true }
clap = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }

View file

@ -11,6 +11,8 @@ pub async fn run(
target: Option<&str>,
profiles: &[String],
output_dir: &PathBuf,
local: bool,
use_builder: bool,
) -> miette::Result<()> {
let kdl_content = std::fs::read_to_string(spec_path)
.into_diagnostic()
@ -33,6 +35,35 @@ pub async fn run(
// Determine files directory (images/files/ relative to spec)
let files_dir = spec_dir.join("files");
// Check if we need a builder VM
#[cfg(feature = "builder")]
{
let needs = forge_builder::detect::needs_builder(&filtered, target, local);
if needs || use_builder {
info!("Delegating build to builder VM");
forge_builder::run_in_builder(
&filtered,
spec_path,
&files_dir,
output_dir,
target,
profiles,
)
.await
.map_err(miette::Report::new)
.wrap_err("Builder VM build failed")?;
println!("Build complete. Output: {}", output_dir.display());
return Ok(());
}
}
// Suppress unused variable warnings when builder feature is disabled
#[cfg(not(feature = "builder"))]
{
let _ = (local, use_builder);
}
let runner = SystemToolRunner;
let ctx = BuildContext {

View file

@ -36,6 +36,14 @@ enum Commands {
/// Output directory for build artifacts
#[arg(short, long, default_value = "./output")]
output_dir: PathBuf,
/// Force local build (skip builder VM detection)
#[arg(long)]
local: bool,
/// Force build inside a builder VM
#[arg(long, conflicts_with = "local")]
use_builder: bool,
},
/// Validate a spec file (parse + resolve includes)
@ -99,8 +107,10 @@ async fn main() -> Result<()> {
target,
profile,
output_dir,
local,
use_builder,
} => {
commands::build::run(&spec, target.as_deref(), &profile, &output_dir).await?;
commands::build::run(&spec, target.as_deref(), &profile, &output_dir, local, use_builder).await?;
}
Commands::Validate { spec } => {
commands::validate::run(&spec)?;

View file

@ -239,4 +239,67 @@ mod tests {
assert_eq!(pool.properties[0].name, "ashift");
assert_eq!(pool.properties[0].value, "12");
}
#[test]
fn test_parse_builder_node_full() {
let kdl = r#"
metadata name="test" version="0.1.0"
repositories {}
builder {
image "oci://ghcr.io/custom/builder:v1"
vcpus 4
memory 4096
}
"#;
let spec = parse(kdl).expect("Failed to parse KDL");
let builder = spec.builder.as_ref().unwrap();
assert_eq!(builder.image.as_deref(), Some("oci://ghcr.io/custom/builder:v1"));
assert_eq!(builder.vcpus, Some(4));
assert_eq!(builder.memory, Some(4096));
}
#[test]
fn test_parse_builder_node_partial() {
let kdl = r#"
metadata name="test" version="0.1.0"
repositories {}
builder {
vcpus 8
}
"#;
let spec = parse(kdl).expect("Failed to parse KDL");
let builder = spec.builder.as_ref().unwrap();
assert_eq!(builder.image, None);
assert_eq!(builder.vcpus, Some(8));
assert_eq!(builder.memory, None);
}
#[test]
fn test_parse_builder_node_empty() {
let kdl = r#"
metadata name="test" version="0.1.0"
repositories {}
builder {
}
"#;
let spec = parse(kdl).expect("Failed to parse KDL");
let builder = spec.builder.as_ref().unwrap();
assert_eq!(builder.image, None);
assert_eq!(builder.vcpus, None);
assert_eq!(builder.memory, None);
}
#[test]
fn test_parse_no_builder_node() {
let kdl = r#"
metadata name="test" version="0.1.0"
repositories {}
"#;
let spec = parse(kdl).expect("Failed to parse KDL");
assert!(spec.builder.is_none());
}
}

View file

@ -210,6 +210,11 @@ fn merge_base(mut base: ImageSpec, child: ImageSpec) -> ImageSpec {
base.targets = child.targets;
}
// builder: child's builder replaces base entirely
if child.builder.is_some() {
base.builder = child.builder;
}
base
}

View file

@ -58,6 +58,22 @@ pub struct ImageSpec {
#[knuffel(children(name = "target"))]
pub targets: Vec<Target>,
#[knuffel(child)]
pub builder: Option<BuilderNode>,
}
/// Configuration for a builder VM used when the host can't build locally.
///
/// All fields are optional in the spec; forge-builder fills in convention
/// defaults for anything omitted.
#[derive(Debug, Decode)]
pub struct BuilderNode {
    /// OCI reference, URL, or local path of the builder VM image.
    #[knuffel(child, unwrap(argument))]
    pub image: Option<String>,
    /// Virtual CPU count for the builder VM.
    #[knuffel(child, unwrap(argument))]
    pub vcpus: Option<u16>,
    /// Memory in MB for the builder VM.
    #[knuffel(child, unwrap(argument))]
    pub memory: Option<u64>,
}
#[derive(Debug, Decode)]