Add Ubuntu/apt support, ext4 QCOW2 builds, and OCI artifact push

- Extend spec-parser schema with distro, AptMirror, filesystem, and
  push-to fields for Ubuntu image support
- Add debootstrap/apt tool wrappers and Phase 1 distro dispatch
  (OmniOS IPS vs Ubuntu apt)
- Add ext4+GPT+EFI QCOW2 build path alongside existing ZFS pipeline
- Add partition tools (sgdisk, mkfs) and loopback partprobe support
- Add ORAS-compatible OCI artifact push/pull for QCOW2 files with
  custom media types (vnd.cloudnebula.qcow2)
- Add --artifact flag to forger push command
- Add auto-push from Phase 2 when target has push-to set
- Add omnios-rust-ci and ubuntu-rust-ci KDL image specs
- Update inspect command to display new fields

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Till Wegmueller 2026-02-15 16:29:12 +01:00
parent 4290439e00
commit 3cb982d35c
No known key found for this signature in database
23 changed files with 1514 additions and 200 deletions

View file

@ -118,6 +118,20 @@ pub enum ForgeError {
)]
TargetNotFound { name: String, available: String },
#[error("Unsupported filesystem '{fs_type}' for target '{target}'")]
#[diagnostic(
help("Supported filesystems are 'zfs' (default for OmniOS) and 'ext4' (for Ubuntu/Linux). Set `filesystem \"zfs\"` or `filesystem \"ext4\"` in the target block."),
code(forge::unsupported_filesystem)
)]
UnsupportedFilesystem { fs_type: String, target: String },
#[error("OCI artifact push failed for {reference}")]
#[diagnostic(
help("Check that the registry is reachable, credentials are valid (GITHUB_TOKEN for ghcr.io), and the reference format is correct.\n{detail}"),
code(forge::artifact_push_failed)
)]
ArtifactPushFailed { reference: String, detail: String },
#[error("IO error")]
Io(#[from] std::io::Error),
}

View file

@ -1,12 +1,13 @@
pub mod customizations;
pub mod overlays;
pub mod packages;
pub mod packages_apt;
pub mod staging;
pub mod variants;
use std::path::{Path, PathBuf};
use spec_parser::schema::ImageSpec;
use spec_parser::schema::{DistroFamily, ImageSpec};
use tracing::info;
use crate::error::ForgeError;
@ -22,19 +23,15 @@ pub struct Phase1Result {
/// Execute Phase 1: assemble a rootfs in a staging directory from the spec.
///
/// Steps:
/// 1. Create staging directory
/// 2. Extract base tarball (if specified)
/// 3. Apply IPS variants
/// 4. Configure package repositories and install packages
/// 5. Apply customizations (users, groups)
/// 6. Apply overlays (files, dirs, symlinks, shadow, devfsadm)
/// Dispatches to the appropriate distro-specific path based on the `distro` field,
/// then applies common customizations and overlays.
pub async fn execute(
spec: &ImageSpec,
files_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<Phase1Result, ForgeError> {
info!(name = %spec.metadata.name, "Starting Phase 1: rootfs assembly");
let distro = DistroFamily::from_distro_str(spec.distro.as_deref());
info!(name = %spec.metadata.name, ?distro, "Starting Phase 1: rootfs assembly");
// 1. Create staging directory
let (staging_dir, staging_root) = staging::create_staging()?;
@ -46,41 +43,18 @@ pub async fn execute(
staging::extract_base_tarball(base, &staging_root)?;
}
// 3. Create IPS image and configure publishers
crate::tools::pkg::image_create(runner, root).await?;
for publisher in &spec.repositories.publishers {
crate::tools::pkg::set_publisher(runner, root, &publisher.name, &publisher.origin).await?;
// 3. Distro-specific package management
match distro {
DistroFamily::OmniOS => execute_ips(spec, root, files_dir, runner).await?,
DistroFamily::Ubuntu => execute_apt(spec, root, runner).await?,
}
// 4. Apply variants
if let Some(ref vars) = spec.variants {
variants::apply_variants(runner, root, vars).await?;
}
// 5. Approve CA certificates
if let Some(ref certs) = spec.certificates {
for ca in &certs.ca {
let certfile_path = files_dir.join(&ca.certfile);
let certfile_str = certfile_path.to_str().unwrap_or(&ca.certfile);
crate::tools::pkg::approve_ca_cert(runner, root, &ca.publisher, certfile_str).await?;
}
}
// 6. Set incorporation
if let Some(ref incorporation) = spec.incorporation {
crate::tools::pkg::set_incorporation(runner, root, incorporation).await?;
}
// 7. Install packages
packages::install_all(runner, root, &spec.packages).await?;
// 8. Apply customizations
// 4. Apply customizations (common)
for customization in &spec.customizations {
customizations::apply(customization, &staging_root)?;
}
// 9. Apply overlays
// 5. Apply overlays (common)
for overlay_block in &spec.overlays {
overlays::apply_overlays(&overlay_block.actions, &staging_root, files_dir, runner).await?;
}
@ -92,3 +66,75 @@ pub async fn execute(
_staging_dir: staging_dir,
})
}
/// IPS/OmniOS-specific Phase 1: pkg image-create, publishers, variants, certs, install.
async fn execute_ips(
spec: &ImageSpec,
root: &str,
files_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
info!("Executing IPS package management path");
crate::tools::pkg::image_create(runner, root).await?;
for publisher in &spec.repositories.publishers {
crate::tools::pkg::set_publisher(runner, root, &publisher.name, &publisher.origin).await?;
}
if let Some(ref vars) = spec.variants {
variants::apply_variants(runner, root, vars).await?;
}
if let Some(ref certs) = spec.certificates {
for ca in &certs.ca {
let certfile_path = files_dir.join(&ca.certfile);
let certfile_str = certfile_path.to_str().unwrap_or(&ca.certfile);
crate::tools::pkg::approve_ca_cert(runner, root, &ca.publisher, certfile_str).await?;
}
}
if let Some(ref incorporation) = spec.incorporation {
crate::tools::pkg::set_incorporation(runner, root, incorporation).await?;
}
packages::install_all(runner, root, &spec.packages).await?;
Ok(())
}
/// APT/Ubuntu-specific Phase 1: debootstrap, add sources, apt update, apt install.
async fn execute_apt(
spec: &ImageSpec,
root: &str,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
info!("Executing APT package management path");
// Determine suite and mirror from apt-mirror entries
let first_mirror = spec.repositories.apt_mirrors.first();
let suite = first_mirror
.map(|m| m.suite.as_str())
.unwrap_or("jammy");
let mirror_url = first_mirror
.map(|m| m.url.as_str())
.unwrap_or("http://archive.ubuntu.com/ubuntu");
// Bootstrap the rootfs
crate::tools::apt::debootstrap(runner, suite, root, mirror_url).await?;
// Add any additional APT mirror sources (skip the first one used for debootstrap)
for mirror in spec.repositories.apt_mirrors.iter().skip(1) {
let components = mirror.components.as_deref().unwrap_or("main");
let entry = format!("deb {} {} {}", mirror.url, mirror.suite, components);
crate::tools::apt::add_source(runner, root, &entry).await?;
}
// Update package lists
crate::tools::apt::update(runner, root).await?;
// Install packages
packages_apt::install_all(runner, root, &spec.packages).await?;
Ok(())
}

View file

@ -0,0 +1,25 @@
use spec_parser::schema::PackageList;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Install all package lists into the staging root via `apt-get install` in chroot.
pub async fn install_all(
runner: &dyn ToolRunner,
root: &str,
package_lists: &[PackageList],
) -> Result<(), ForgeError> {
let all_packages: Vec<String> = package_lists
.iter()
.flat_map(|pl| pl.packages.iter().map(|p| p.name.clone()))
.collect();
if all_packages.is_empty() {
info!("No packages to install");
return Ok(());
}
info!(count = all_packages.len(), "Installing packages via apt");
crate::tools::apt::install(runner, root, &all_packages).await
}

View file

@ -1,6 +1,8 @@
pub mod artifact;
pub mod oci;
pub mod qcow2;
pub mod qcow2_ext4;
pub mod qcow2_zfs;
use std::path::Path;
@ -11,6 +13,9 @@ use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Execute Phase 2: produce the target artifact from the staged rootfs.
///
/// After building the artifact, if a `push_to` reference is set on a QCOW2 target,
/// the QCOW2 file is automatically pushed as an OCI artifact.
pub async fn execute(
target: &Target,
staging_root: &Path,
@ -36,6 +41,42 @@ pub async fn execute(
}
}
// Auto-push QCOW2 to OCI registry if push_to is set
if target.kind == TargetKind::Qcow2 {
if let Some(ref push_ref) = target.push_to {
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
info!(
reference = %push_ref,
path = %qcow2_path.display(),
"Auto-pushing QCOW2 artifact to OCI registry"
);
let qcow2_data = std::fs::read(&qcow2_path).map_err(|e| {
ForgeError::ArtifactPushFailed {
reference: push_ref.clone(),
detail: format!("failed to read QCOW2 file: {e}"),
}
})?;
let metadata = forge_oci::artifact::Qcow2Metadata {
name: target.name.clone(),
version: "latest".to_string(),
architecture: "amd64".to_string(),
os: "linux".to_string(),
description: None,
};
let auth = forge_oci::artifact::resolve_ghcr_auth();
forge_oci::artifact::push_qcow2_artifact(push_ref, qcow2_data, &metadata, &auth, &[])
.await
.map_err(|e| ForgeError::ArtifactPushFailed {
reference: push_ref.clone(),
detail: e.to_string(),
})?;
}
}
info!(target = %target.name, "Phase 2 complete");
Ok(())
}

View file

@ -63,6 +63,8 @@ mod tests {
kind: TargetKind::Oci,
disk_size: None,
bootloader: None,
filesystem: None,
push_to: None,
entrypoint,
environment: env,
pool: None,

View file

@ -1,150 +1,96 @@
use std::path::Path;
use spec_parser::schema::Target;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Build a QCOW2 VM image from the staged rootfs.
/// Build a QCOW2 VM image, dispatching to the appropriate filesystem backend.
///
/// Pipeline:
/// 1. Create raw disk image of specified size
/// 2. Attach loopback device
/// 3. Create ZFS pool with spec properties
/// 4. Create boot environment structure (rpool/ROOT/be-1)
/// 5. Copy staging rootfs into mounted BE
/// 6. Install bootloader via chroot
/// 7. Set bootfs property
/// 8. Export pool, detach loopback
/// 9. Convert raw -> qcow2
/// - `"zfs"` (default): ZFS pool with boot environment
/// - `"ext4"`: GPT+EFI+ext4 with GRUB bootloader
pub async fn build_qcow2(
target: &Target,
staging_root: &Path,
output_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
let disk_size = target
.disk_size
.as_deref()
.ok_or(ForgeError::MissingDiskSize)?;
let bootloader_type = target.bootloader.as_deref().unwrap_or("uefi");
let raw_path = output_dir.join(format!("{}.raw", target.name));
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
let raw_str = raw_path.to_str().unwrap();
let qcow2_str = qcow2_path.to_str().unwrap();
// Collect pool properties
let pool_props: Vec<(&str, &str)> = target
.pool
.as_ref()
.map(|p| {
p.properties
.iter()
.map(|prop| (prop.name.as_str(), prop.value.as_str()))
.collect()
})
.unwrap_or_default();
let pool_name = "rpool";
let be_dataset = format!("{pool_name}/ROOT/be-1");
info!(disk_size, "Step 1: Creating raw disk image");
crate::tools::qemu_img::create_raw(runner, raw_str, disk_size).await?;
info!("Step 2: Attaching loopback device");
let device = crate::tools::loopback::attach(runner, raw_str).await?;
// Wrap the rest in a closure-like structure so we can clean up on error
let result = async {
info!(device = %device, "Step 3: Creating ZFS pool");
crate::tools::zpool::create(runner, pool_name, &device, &pool_props).await?;
info!("Step 4: Creating boot environment structure");
crate::tools::zfs::create(
runner,
&format!("{pool_name}/ROOT"),
&[("canmount", "off"), ("mountpoint", "legacy")],
)
.await?;
let staging_str = staging_root.to_str().unwrap_or(".");
crate::tools::zfs::create(
runner,
&be_dataset,
&[("canmount", "noauto"), ("mountpoint", staging_str)],
)
.await?;
crate::tools::zfs::mount(runner, &be_dataset).await?;
info!("Step 5: Copying staging rootfs into boot environment");
copy_rootfs(staging_root, staging_root)?;
info!("Step 6: Installing bootloader");
crate::tools::bootloader::install(runner, staging_str, pool_name, bootloader_type).await?;
info!("Step 7: Setting bootfs property");
crate::tools::zpool::set(runner, pool_name, "bootfs", &be_dataset).await?;
info!("Step 8: Exporting ZFS pool");
crate::tools::zfs::unmount(runner, &be_dataset).await?;
crate::tools::zpool::export(runner, pool_name).await?;
Ok::<(), ForgeError>(())
match target.filesystem.as_deref().unwrap_or("zfs") {
"zfs" => {
super::qcow2_zfs::build_qcow2_zfs(target, staging_root, output_dir, runner).await
}
"ext4" => {
super::qcow2_ext4::build_qcow2_ext4(target, staging_root, output_dir, runner).await
}
other => Err(ForgeError::UnsupportedFilesystem {
fs_type: other.to_string(),
target: target.name.clone(),
}),
}
.await;
// Always try to detach loopback, even on error
info!("Detaching loopback device");
let detach_result = crate::tools::loopback::detach(runner, &device).await;
// Return the original error if there was one
result?;
detach_result?;
info!("Step 9: Converting raw -> qcow2");
crate::tools::qemu_img::convert_to_qcow2(runner, raw_str, qcow2_str).await?;
// Clean up raw file
std::fs::remove_file(&raw_path).ok();
info!(path = %qcow2_path.display(), "QCOW2 image created");
Ok(())
}
/// Copy the staging rootfs into the mounted BE.
/// Since the BE is mounted at the staging root mountpoint, we use a recursive
/// copy approach for files that need relocation.
fn copy_rootfs(src: &Path, dest: &Path) -> Result<(), ForgeError> {
// In the actual build, the ZFS dataset is mounted at the staging_root path,
// so the files are already in place after package installation. This function
// handles the case where we need to copy from a temp staging dir into the
// mounted ZFS dataset.
if src == dest {
return Ok(());
}
#[cfg(test)]
mod tests {
use super::*;
use spec_parser::schema::{Target, TargetKind};
for entry in walkdir::WalkDir::new(src).follow_links(false) {
let entry = entry.map_err(|e| ForgeError::Qcow2Build {
step: "copy_rootfs".to_string(),
detail: e.to_string(),
})?;
let rel = entry.path().strip_prefix(src).unwrap_or(entry.path());
let target = dest.join(rel);
if entry.path().is_dir() {
std::fs::create_dir_all(&target)?;
} else if entry.path().is_file() {
if let Some(parent) = target.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(entry.path(), &target)?;
fn make_target(fs: Option<&str>) -> Target {
Target {
name: "test".to_string(),
kind: TargetKind::Qcow2,
disk_size: Some("2G".to_string()),
bootloader: Some("uefi".to_string()),
filesystem: fs.map(|s| s.to_string()),
push_to: None,
entrypoint: None,
environment: None,
pool: None,
}
}
Ok(())
#[test]
fn test_unsupported_filesystem_error() {
let target = make_target(Some("btrfs"));
let rt = tokio::runtime::Runtime::new().unwrap();
let result = rt.block_on(async {
// We can't actually run the build, but we can test the dispatcher logic
// by checking the error for an unsupported filesystem
let tmpdir = tempfile::tempdir().unwrap();
let staging = tempfile::tempdir().unwrap();
// Create a mock runner that always succeeds
use crate::tools::{ToolOutput, ToolRunner};
use std::future::Future;
use std::pin::Pin;
struct FailRunner;
impl ToolRunner for FailRunner {
fn run<'a>(
&'a self,
_program: &'a str,
_args: &'a [&'a str],
) -> Pin<Box<dyn Future<Output = Result<ToolOutput, ForgeError>> + Send + 'a>>
{
Box::pin(async {
Err(ForgeError::Qcow2Build {
step: "test".to_string(),
detail: "not expected to be called".to_string(),
})
})
}
}
build_qcow2(&target, staging.path(), tmpdir.path(), &FailRunner).await
});
assert!(result.is_err());
let err = result.unwrap_err();
assert!(matches!(err, ForgeError::UnsupportedFilesystem { .. }));
}
#[test]
fn test_default_filesystem_is_zfs() {
let target = make_target(None);
assert_eq!(target.filesystem.as_deref().unwrap_or("zfs"), "zfs");
}
}

View file

@ -0,0 +1,160 @@
use std::path::Path;
use spec_parser::schema::Target;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Build a QCOW2 VM image from the staged rootfs using ext4+GPT+GRUB.
///
/// Pipeline:
/// 1. Create raw disk image of specified size
/// 2. Attach loopback device + partprobe
/// 3. Create GPT partition table (EFI + root)
/// 4. Format partitions (FAT32 for EFI, ext4 for root)
/// 5. Mount root, copy staging rootfs
/// 6. Mount EFI at /boot/efi
/// 7. Bind-mount /dev, /proc, /sys
/// 8. chroot grub-install
/// 9. chroot grub-mkconfig
/// 10. Unmount all, detach loopback
/// 11. Convert raw -> qcow2
pub async fn build_qcow2_ext4(
target: &Target,
staging_root: &Path,
output_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
let disk_size = target
.disk_size
.as_deref()
.ok_or(ForgeError::MissingDiskSize)?;
let raw_path = output_dir.join(format!("{}.raw", target.name));
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
let raw_str = raw_path.to_str().unwrap();
let qcow2_str = qcow2_path.to_str().unwrap();
info!(disk_size, "Step 1: Creating raw disk image");
crate::tools::qemu_img::create_raw(runner, raw_str, disk_size).await?;
info!("Step 2: Attaching loopback device");
let device = crate::tools::loopback::attach(runner, raw_str).await?;
// Re-read partition table after attaching loopback
let _ = crate::tools::loopback::partprobe(runner, &device).await;
let result = async {
info!(device = %device, "Step 3: Creating GPT partition table");
let (efi_part, root_part) =
crate::tools::partition::create_gpt_efi_root(runner, &device).await?;
// Re-read partition table after creating partitions
crate::tools::loopback::partprobe(runner, &device).await?;
info!("Step 4: Formatting partitions");
crate::tools::partition::mkfs_fat32(runner, &efi_part).await?;
crate::tools::partition::mkfs_ext4(runner, &root_part).await?;
// Create a temporary mountpoint for the root partition
let mount_dir = tempfile::tempdir().map_err(ForgeError::StagingSetup)?;
let mount_str = mount_dir.path().to_str().unwrap();
info!("Step 5: Mounting root partition and copying rootfs");
crate::tools::partition::mount(runner, &root_part, mount_str).await?;
// Copy staging rootfs into mounted root
copy_rootfs(staging_root, mount_dir.path())?;
info!("Step 6: Mounting EFI partition");
let efi_mount = mount_dir.path().join("boot/efi");
std::fs::create_dir_all(&efi_mount)?;
let efi_mount_str = efi_mount.to_str().unwrap();
crate::tools::partition::mount(runner, &efi_part, efi_mount_str).await?;
info!("Step 7: Bind-mounting /dev, /proc, /sys");
let dev_mount = format!("{mount_str}/dev");
let proc_mount = format!("{mount_str}/proc");
let sys_mount = format!("{mount_str}/sys");
std::fs::create_dir_all(&dev_mount)?;
std::fs::create_dir_all(&proc_mount)?;
std::fs::create_dir_all(&sys_mount)?;
crate::tools::partition::bind_mount(runner, "/dev", &dev_mount).await?;
crate::tools::partition::bind_mount(runner, "/proc", &proc_mount).await?;
crate::tools::partition::bind_mount(runner, "/sys", &sys_mount).await?;
info!("Step 8: Installing GRUB bootloader");
runner
.run(
"chroot",
&[
mount_str,
"grub-install",
"--target=x86_64-efi",
"--efi-directory=/boot/efi",
"--no-nvram",
],
)
.await?;
info!("Step 9: Generating GRUB config");
runner
.run(
"chroot",
&[mount_str, "grub-mkconfig", "-o", "/boot/grub/grub.cfg"],
)
.await?;
info!("Step 10: Unmounting");
// Unmount in reverse order: bind mounts, EFI, root
crate::tools::partition::umount(runner, &sys_mount).await?;
crate::tools::partition::umount(runner, &proc_mount).await?;
crate::tools::partition::umount(runner, &dev_mount).await?;
crate::tools::partition::umount(runner, efi_mount_str).await?;
crate::tools::partition::umount(runner, mount_str).await?;
Ok::<(), ForgeError>(())
}
.await;
// Always try to detach loopback, even on error
info!("Detaching loopback device");
let detach_result = crate::tools::loopback::detach(runner, &device).await;
result?;
detach_result?;
info!("Step 11: Converting raw -> qcow2");
crate::tools::qemu_img::convert_to_qcow2(runner, raw_str, qcow2_str).await?;
// Clean up raw file
std::fs::remove_file(&raw_path).ok();
info!(path = %qcow2_path.display(), "QCOW2 (ext4) image created");
Ok(())
}
/// Copy the staging rootfs into the mounted root partition.
fn copy_rootfs(src: &Path, dest: &Path) -> Result<(), ForgeError> {
for entry in walkdir::WalkDir::new(src).follow_links(false) {
let entry = entry.map_err(|e| ForgeError::Qcow2Build {
step: "copy_rootfs".to_string(),
detail: e.to_string(),
})?;
let rel = entry.path().strip_prefix(src).unwrap_or(entry.path());
let target = dest.join(rel);
if entry.path().is_dir() {
std::fs::create_dir_all(&target)?;
} else if entry.path().is_file() {
if let Some(parent) = target.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(entry.path(), &target)?;
}
}
Ok(())
}

View file

@ -0,0 +1,150 @@
use std::path::Path;
use spec_parser::schema::Target;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Build a QCOW2 VM image from the staged rootfs.
///
/// Pipeline:
/// 1. Create raw disk image of specified size
/// 2. Attach loopback device
/// 3. Create ZFS pool with spec properties
/// 4. Create boot environment structure (rpool/ROOT/be-1)
/// 5. Copy staging rootfs into mounted BE
/// 6. Install bootloader via chroot
/// 7. Set bootfs property
/// 8. Export pool, detach loopback
/// 9. Convert raw -> qcow2
pub async fn build_qcow2_zfs(
target: &Target,
staging_root: &Path,
output_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
let disk_size = target
.disk_size
.as_deref()
.ok_or(ForgeError::MissingDiskSize)?;
let bootloader_type = target.bootloader.as_deref().unwrap_or("uefi");
let raw_path = output_dir.join(format!("{}.raw", target.name));
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
let raw_str = raw_path.to_str().unwrap();
let qcow2_str = qcow2_path.to_str().unwrap();
// Collect pool properties
let pool_props: Vec<(&str, &str)> = target
.pool
.as_ref()
.map(|p| {
p.properties
.iter()
.map(|prop| (prop.name.as_str(), prop.value.as_str()))
.collect()
})
.unwrap_or_default();
let pool_name = "rpool";
let be_dataset = format!("{pool_name}/ROOT/be-1");
info!(disk_size, "Step 1: Creating raw disk image");
crate::tools::qemu_img::create_raw(runner, raw_str, disk_size).await?;
info!("Step 2: Attaching loopback device");
let device = crate::tools::loopback::attach(runner, raw_str).await?;
// Wrap the rest in a closure-like structure so we can clean up on error
let result = async {
info!(device = %device, "Step 3: Creating ZFS pool");
crate::tools::zpool::create(runner, pool_name, &device, &pool_props).await?;
info!("Step 4: Creating boot environment structure");
crate::tools::zfs::create(
runner,
&format!("{pool_name}/ROOT"),
&[("canmount", "off"), ("mountpoint", "legacy")],
)
.await?;
let staging_str = staging_root.to_str().unwrap_or(".");
crate::tools::zfs::create(
runner,
&be_dataset,
&[("canmount", "noauto"), ("mountpoint", staging_str)],
)
.await?;
crate::tools::zfs::mount(runner, &be_dataset).await?;
info!("Step 5: Copying staging rootfs into boot environment");
copy_rootfs(staging_root, staging_root)?;
info!("Step 6: Installing bootloader");
crate::tools::bootloader::install(runner, staging_str, pool_name, bootloader_type).await?;
info!("Step 7: Setting bootfs property");
crate::tools::zpool::set(runner, pool_name, "bootfs", &be_dataset).await?;
info!("Step 8: Exporting ZFS pool");
crate::tools::zfs::unmount(runner, &be_dataset).await?;
crate::tools::zpool::export(runner, pool_name).await?;
Ok::<(), ForgeError>(())
}
.await;
// Always try to detach loopback, even on error
info!("Detaching loopback device");
let detach_result = crate::tools::loopback::detach(runner, &device).await;
// Return the original error if there was one
result?;
detach_result?;
info!("Step 9: Converting raw -> qcow2");
crate::tools::qemu_img::convert_to_qcow2(runner, raw_str, qcow2_str).await?;
// Clean up raw file
std::fs::remove_file(&raw_path).ok();
info!(path = %qcow2_path.display(), "QCOW2 image created");
Ok(())
}
/// Copy the staging rootfs into the mounted BE.
/// Since the BE is mounted at the staging root mountpoint, we use a recursive
/// copy approach for files that need relocation.
fn copy_rootfs(src: &Path, dest: &Path) -> Result<(), ForgeError> {
// In the actual build, the ZFS dataset is mounted at the staging_root path,
// so the files are already in place after package installation. This function
// handles the case where we need to copy from a temp staging dir into the
// mounted ZFS dataset.
if src == dest {
return Ok(());
}
for entry in walkdir::WalkDir::new(src).follow_links(false) {
let entry = entry.map_err(|e| ForgeError::Qcow2Build {
step: "copy_rootfs".to_string(),
detail: e.to_string(),
})?;
let rel = entry.path().strip_prefix(src).unwrap_or(entry.path());
let target = dest.join(rel);
if entry.path().is_dir() {
std::fs::create_dir_all(&target)?;
} else if entry.path().is_file() {
if let Some(parent) = target.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(entry.path(), &target)?;
}
}
Ok(())
}

View file

@ -0,0 +1,160 @@
use std::path::Path;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Bootstrap a minimal Debian/Ubuntu rootfs using debootstrap.
pub async fn debootstrap(
runner: &dyn ToolRunner,
suite: &str,
root: &str,
mirror: &str,
) -> Result<(), ForgeError> {
info!(suite, root, mirror, "Running debootstrap");
runner
.run(
"debootstrap",
&["--arch", "amd64", suite, root, mirror],
)
.await?;
Ok(())
}
/// Run `apt-get update` inside the chroot.
pub async fn update(runner: &dyn ToolRunner, root: &str) -> Result<(), ForgeError> {
info!(root, "Running apt-get update in chroot");
runner
.run("chroot", &[root, "apt-get", "update", "-y"])
.await?;
Ok(())
}
/// Install packages inside the chroot using apt-get.
pub async fn install(
runner: &dyn ToolRunner,
root: &str,
packages: &[String],
) -> Result<(), ForgeError> {
if packages.is_empty() {
return Ok(());
}
info!(root, count = packages.len(), "Installing packages via apt-get");
let mut args = vec![root, "apt-get", "install", "-y", "--no-install-recommends"];
let pkg_refs: Vec<&str> = packages.iter().map(|s| s.as_str()).collect();
args.extend(pkg_refs);
runner.run("chroot", &args).await?;
Ok(())
}
/// Add an APT source entry to the chroot's sources.list.d/.
pub async fn add_source(
runner: &dyn ToolRunner,
root: &str,
entry: &str,
) -> Result<(), ForgeError> {
info!(root, entry, "Adding APT source");
let list_path = Path::new(root)
.join("etc/apt/sources.list.d/extra.list");
let list_str = list_path.to_str().unwrap_or("extra.list");
// Append entry to the sources list file
runner
.run("sh", &["-c", &format!("echo '{entry}' >> {list_str}")])
.await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tools::{ToolOutput, ToolRunner};
use std::future::Future;
use std::pin::Pin;
use std::sync::Mutex;
struct MockToolRunner {
calls: Mutex<Vec<(String, Vec<String>)>>,
}
impl MockToolRunner {
fn new() -> Self {
Self {
calls: Mutex::new(Vec::new()),
}
}
fn calls(&self) -> Vec<(String, Vec<String>)> {
self.calls.lock().unwrap().clone()
}
}
impl ToolRunner for MockToolRunner {
fn run<'a>(
&'a self,
program: &'a str,
args: &'a [&'a str],
) -> Pin<Box<dyn Future<Output = Result<ToolOutput, ForgeError>> + Send + 'a>> {
self.calls.lock().unwrap().push((
program.to_string(),
args.iter().map(|s| s.to_string()).collect(),
));
Box::pin(async {
Ok(ToolOutput {
stdout: String::new(),
stderr: String::new(),
exit_code: 0,
})
})
}
}
#[tokio::test]
async fn test_debootstrap_args() {
let runner = MockToolRunner::new();
debootstrap(&runner, "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu")
.await
.unwrap();
let calls = runner.calls();
assert_eq!(calls.len(), 1);
assert_eq!(calls[0].0, "debootstrap");
assert_eq!(
calls[0].1,
vec!["--arch", "amd64", "jammy", "/tmp/root", "http://archive.ubuntu.com/ubuntu"]
);
}
#[tokio::test]
async fn test_install_args() {
let runner = MockToolRunner::new();
let packages = vec!["curl".to_string(), "git".to_string()];
install(&runner, "/tmp/root", &packages).await.unwrap();
let calls = runner.calls();
assert_eq!(calls.len(), 1);
assert_eq!(calls[0].0, "chroot");
assert_eq!(
calls[0].1,
vec!["/tmp/root", "apt-get", "install", "-y", "--no-install-recommends", "curl", "git"]
);
}
#[tokio::test]
async fn test_install_empty() {
let runner = MockToolRunner::new();
install(&runner, "/tmp/root", &[]).await.unwrap();
assert!(runner.calls().is_empty());
}
#[tokio::test]
async fn test_update_args() {
let runner = MockToolRunner::new();
update(&runner, "/tmp/root").await.unwrap();
let calls = runner.calls();
assert_eq!(calls.len(), 1);
assert_eq!(calls[0].0, "chroot");
assert_eq!(calls[0].1, vec!["/tmp/root", "apt-get", "update", "-y"]);
}
}

View file

@ -36,6 +36,25 @@ pub async fn detach(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeEr
Ok(())
}
/// Re-read the partition table of a device.
#[cfg(target_os = "linux")]
pub async fn partprobe(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
info!(device, "Re-reading partition table (partprobe)");
runner.run("partprobe", &[device]).await?;
Ok(())
}
#[cfg(target_os = "illumos")]
pub async fn partprobe(_runner: &dyn ToolRunner, _device: &str) -> Result<(), ForgeError> {
// illumos doesn't need partprobe for lofi devices
Ok(())
}
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
pub async fn partprobe(_runner: &dyn ToolRunner, _device: &str) -> Result<(), ForgeError> {
Ok(())
}
// Stub for unsupported platforms (compile-time guard)
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
pub async fn attach(_runner: &dyn ToolRunner, file_path: &str) -> Result<String, ForgeError> {

View file

@ -1,6 +1,8 @@
pub mod apt;
pub mod bootloader;
pub mod devfsadm;
pub mod loopback;
pub mod partition;
pub mod pkg;
pub mod qemu_img;
pub mod zfs;

View file

@ -0,0 +1,151 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Create a GPT partition table with an EFI system partition and a root partition.
///
/// Returns the partition device paths as (efi_part, root_part).
/// Assumes the device is a loopback device like `/dev/loopN`.
pub async fn create_gpt_efi_root(
runner: &dyn ToolRunner,
device: &str,
) -> Result<(String, String), ForgeError> {
info!(device, "Creating GPT partition table with EFI + root");
// Zap any existing partition table
runner.run("sgdisk", &["--zap-all", device]).await?;
// Create EFI partition (512M, type EF00) and root partition (remainder, type 8300)
runner
.run(
"sgdisk",
&[
"-n", "1:0:+512M",
"-t", "1:EF00",
"-n", "2:0:0",
"-t", "2:8300",
device,
],
)
.await?;
let efi_part = format!("{device}p1");
let root_part = format!("{device}p2");
Ok((efi_part, root_part))
}
/// Format a partition as FAT32.
pub async fn mkfs_fat32(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
info!(device, "Formatting as FAT32");
runner.run("mkfs.fat", &["-F", "32", device]).await?;
Ok(())
}
/// Format a partition as ext4.
pub async fn mkfs_ext4(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
info!(device, "Formatting as ext4");
runner.run("mkfs.ext4", &["-F", device]).await?;
Ok(())
}
/// Mount a device at the given mountpoint.
pub async fn mount(
runner: &dyn ToolRunner,
device: &str,
mountpoint: &str,
) -> Result<(), ForgeError> {
info!(device, mountpoint, "Mounting");
runner.run("mount", &[device, mountpoint]).await?;
Ok(())
}
/// Unmount a mountpoint.
pub async fn umount(runner: &dyn ToolRunner, mountpoint: &str) -> Result<(), ForgeError> {
info!(mountpoint, "Unmounting");
runner.run("umount", &[mountpoint]).await?;
Ok(())
}
/// Bind-mount a source path into the target.
pub async fn bind_mount(
runner: &dyn ToolRunner,
source: &str,
target: &str,
) -> Result<(), ForgeError> {
info!(source, target, "Bind-mounting");
runner.run("mount", &["--bind", source, target]).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tools::{ToolOutput, ToolRunner};
use std::future::Future;
use std::pin::Pin;
use std::sync::Mutex;
struct MockToolRunner {
calls: Mutex<Vec<(String, Vec<String>)>>,
}
impl MockToolRunner {
fn new() -> Self {
Self {
calls: Mutex::new(Vec::new()),
}
}
fn calls(&self) -> Vec<(String, Vec<String>)> {
self.calls.lock().unwrap().clone()
}
}
impl ToolRunner for MockToolRunner {
fn run<'a>(
&'a self,
program: &'a str,
args: &'a [&'a str],
) -> Pin<Box<dyn Future<Output = Result<ToolOutput, ForgeError>> + Send + 'a>> {
self.calls.lock().unwrap().push((
program.to_string(),
args.iter().map(|s| s.to_string()).collect(),
));
Box::pin(async {
Ok(ToolOutput {
stdout: String::new(),
stderr: String::new(),
exit_code: 0,
})
})
}
}
#[tokio::test]
async fn test_create_gpt_efi_root_args() {
let runner = MockToolRunner::new();
let (efi, root) = create_gpt_efi_root(&runner, "/dev/loop0").await.unwrap();
assert_eq!(efi, "/dev/loop0p1");
assert_eq!(root, "/dev/loop0p2");
let calls = runner.calls();
assert_eq!(calls.len(), 2);
assert_eq!(calls[0].0, "sgdisk");
assert_eq!(calls[0].1, vec!["--zap-all", "/dev/loop0"]);
assert_eq!(calls[1].0, "sgdisk");
assert!(calls[1].1.contains(&"-n".to_string()));
assert!(calls[1].1.contains(&"1:0:+512M".to_string()));
}
#[tokio::test]
async fn test_mkfs_ext4() {
let runner = MockToolRunner::new();
mkfs_ext4(&runner, "/dev/loop0p2").await.unwrap();
let calls = runner.calls();
assert_eq!(calls.len(), 1);
assert_eq!(calls[0].0, "mkfs.ext4");
assert_eq!(calls[0].1, vec!["-F", "/dev/loop0p2"]);
}
}

View file

@ -0,0 +1,236 @@
use miette::Diagnostic;
use oci_client::client::{ClientConfig, ClientProtocol, Config, ImageLayer};
use oci_client::{Client, Reference};
use thiserror::Error;
use tracing::info;
use crate::registry::AuthConfig;
pub const QCOW2_CONFIG_MEDIA_TYPE: &str = "application/vnd.cloudnebula.qcow2.config.v1+json";
pub const QCOW2_LAYER_MEDIA_TYPE: &str = "application/vnd.cloudnebula.qcow2.layer.v1";
#[derive(Debug, Error, Diagnostic)]
pub enum ArtifactError {
#[error("Invalid OCI reference: {reference}")]
#[diagnostic(help(
"Use the format <registry>/<repository>:<tag>, e.g. ghcr.io/org/image:v1"
))]
InvalidReference {
reference: String,
#[source]
source: oci_client::ParseError,
},
#[error("Failed to push QCOW2 artifact to registry: {detail}")]
#[diagnostic(help(
"Check registry URL, credentials, and network connectivity. For ghcr.io, ensure GITHUB_TOKEN is set."
))]
PushFailed { detail: String },
#[error("Failed to pull QCOW2 artifact from registry: {detail}")]
#[diagnostic(help(
"Check registry URL, credentials, and network connectivity. For ghcr.io, ensure GITHUB_TOKEN is set."
))]
PullFailed { detail: String },
}
/// Metadata for a QCOW2 artifact pushed as an OCI artifact.
#[derive(Debug, Clone, serde::Serialize)]
pub struct Qcow2Metadata {
pub name: String,
pub version: String,
pub architecture: String,
pub os: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
/// Push a QCOW2 file as an OCI artifact to a registry.
///
/// Uses ORAS-compatible custom media types so the artifact can be pulled
/// with `oras pull` or our own `pull_qcow2_artifact`.
pub async fn push_qcow2_artifact(
reference_str: &str,
qcow2_data: Vec<u8>,
metadata: &Qcow2Metadata,
auth: &AuthConfig,
insecure_registries: &[String],
) -> Result<String, ArtifactError> {
let reference: Reference = reference_str
.parse()
.map_err(|e| ArtifactError::InvalidReference {
reference: reference_str.to_string(),
source: e,
})?;
let client_config = ClientConfig {
protocol: if insecure_registries.is_empty() {
ClientProtocol::Https
} else {
ClientProtocol::HttpsExcept(insecure_registries.to_vec())
},
..Default::default()
};
let client = Client::new(client_config);
let registry_auth = auth.to_registry_auth();
// Config blob is the JSON metadata
let config_json =
serde_json::to_vec(metadata).map_err(|e| ArtifactError::PushFailed {
detail: format!("failed to serialize metadata: {e}"),
})?;
// Build the OCI layer with annotations
let mut annotations = std::collections::BTreeMap::new();
annotations.insert(
"org.opencontainers.image.title".to_string(),
format!("{}.qcow2", metadata.name),
);
let layer = ImageLayer::new(qcow2_data, QCOW2_LAYER_MEDIA_TYPE.to_string(), Some(annotations));
let config = Config::new(config_json, QCOW2_CONFIG_MEDIA_TYPE.to_string(), None);
let image_manifest =
oci_client::manifest::OciImageManifest::build(&[layer.clone()], &config, None);
info!(
reference = %reference,
name = %metadata.name,
"Pushing QCOW2 artifact to registry"
);
let response = client
.push(
&reference,
&[layer],
config,
&registry_auth,
Some(image_manifest),
)
.await
.map_err(|e| ArtifactError::PushFailed {
detail: e.to_string(),
})?;
info!(
manifest_url = %response.manifest_url,
"QCOW2 artifact pushed successfully"
);
Ok(response.manifest_url)
}
/// Pull a QCOW2 file from an OCI artifact registry.
pub async fn pull_qcow2_artifact(
reference_str: &str,
auth: &AuthConfig,
insecure_registries: &[String],
) -> Result<Vec<u8>, ArtifactError> {
let reference: Reference = reference_str
.parse()
.map_err(|e| ArtifactError::InvalidReference {
reference: reference_str.to_string(),
source: e,
})?;
let client_config = ClientConfig {
protocol: if insecure_registries.is_empty() {
ClientProtocol::Https
} else {
ClientProtocol::HttpsExcept(insecure_registries.to_vec())
},
..Default::default()
};
let client = Client::new(client_config);
let registry_auth = auth.to_registry_auth();
info!(reference = %reference, "Pulling QCOW2 artifact from registry");
let image_data = client
.pull(
&reference,
&registry_auth,
vec![QCOW2_LAYER_MEDIA_TYPE, "application/octet-stream"],
)
.await
.map_err(|e| ArtifactError::PullFailed {
detail: e.to_string(),
})?;
let layer = image_data
.layers
.into_iter()
.next()
.ok_or_else(|| ArtifactError::PullFailed {
detail: "artifact contains no layers".to_string(),
})?;
info!(
reference = %reference,
size_bytes = layer.data.len(),
"QCOW2 artifact pulled successfully"
);
Ok(layer.data)
}
/// Resolve authentication for ghcr.io from GITHUB_TOKEN environment variable.
pub fn resolve_ghcr_auth() -> AuthConfig {
match std::env::var("GITHUB_TOKEN") {
Ok(token) => AuthConfig::Basic {
username: "_token".to_string(),
password: token,
},
Err(_) => AuthConfig::Anonymous,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_qcow2_metadata_serialization() {
let metadata = Qcow2Metadata {
name: "ubuntu-rust-ci".to_string(),
version: "0.1.0".to_string(),
architecture: "amd64".to_string(),
os: "linux".to_string(),
description: Some("Ubuntu CI image with Rust".to_string()),
};
let json = serde_json::to_string(&metadata).unwrap();
assert!(json.contains("ubuntu-rust-ci"));
assert!(json.contains("amd64"));
assert!(json.contains("Ubuntu CI image with Rust"));
}
#[test]
fn test_qcow2_metadata_without_description() {
let metadata = Qcow2Metadata {
name: "test".to_string(),
version: "1.0".to_string(),
architecture: "amd64".to_string(),
os: "linux".to_string(),
description: None,
};
let json = serde_json::to_string(&metadata).unwrap();
assert!(!json.contains("description"));
}
#[test]
fn test_media_type_constants() {
assert_eq!(
QCOW2_CONFIG_MEDIA_TYPE,
"application/vnd.cloudnebula.qcow2.config.v1+json"
);
assert_eq!(
QCOW2_LAYER_MEDIA_TYPE,
"application/vnd.cloudnebula.qcow2.layer.v1"
);
}
}

View file

@ -1,6 +1,7 @@
// thiserror/miette derive macros generate code that triggers false-positive unused_assignments
#![allow(unused_assignments)]
pub mod artifact;
pub mod layout;
pub mod manifest;
pub mod registry;

View file

@ -41,7 +41,7 @@ pub enum AuthConfig {
}
impl AuthConfig {
fn to_registry_auth(&self) -> RegistryAuth {
pub fn to_registry_auth(&self) -> RegistryAuth {
match self {
AuthConfig::Anonymous => RegistryAuth::Anonymous,
AuthConfig::Basic { username, password } => {

View file

@ -28,10 +28,21 @@ pub fn run(spec_path: &PathBuf, profiles: &[String]) -> miette::Result<()> {
println!("Description: {desc}");
}
if let Some(ref distro) = filtered.distro {
println!("Distro: {distro}");
}
println!("\nRepositories:");
for pub_entry in &filtered.repositories.publishers {
println!(" {} -> {}", pub_entry.name, pub_entry.origin);
}
for mirror in &filtered.repositories.apt_mirrors {
print!(" apt-mirror: {} suite={}", mirror.url, mirror.suite);
if let Some(ref components) = mirror.components {
print!(" components={components}");
}
println!();
}
if let Some(ref inc) = filtered.incorporation {
println!("\nIncorporation: {inc}");
@ -124,6 +135,12 @@ pub fn run(spec_path: &PathBuf, profiles: &[String]) -> miette::Result<()> {
if let Some(ref bl) = target.bootloader {
print!(" bootloader={bl}");
}
if let Some(ref fs) = target.filesystem {
print!(" filesystem={fs}");
}
if let Some(ref push) = target.push_to {
print!(" push-to={push}");
}
println!();
}
}

View file

@ -3,11 +3,73 @@ use std::path::PathBuf;
use miette::{Context, IntoDiagnostic};
use tracing::info;
/// Push an OCI Image Layout to a registry.
/// Push an OCI Image Layout or QCOW2 artifact to a registry.
pub async fn run(
image_dir: &PathBuf,
reference: &str,
auth_file: Option<&PathBuf>,
artifact: bool,
) -> miette::Result<()> {
let auth = resolve_auth(auth_file)?;
// Determine if we need insecure registries (localhost)
let insecure = if reference.starts_with("localhost") || reference.starts_with("127.0.0.1") {
let host_port = reference.split('/').next().unwrap_or("");
vec![host_port.to_string()]
} else {
vec![]
};
if artifact {
return push_artifact(image_dir, reference, &auth, &insecure).await;
}
push_oci_layout(image_dir, reference, &auth, &insecure).await
}
/// Push a QCOW2 file directly as an OCI artifact.
async fn push_artifact(
qcow2_path: &PathBuf,
reference: &str,
auth: &forge_oci::registry::AuthConfig,
insecure: &[String],
) -> miette::Result<()> {
info!(reference, path = %qcow2_path.display(), "Pushing QCOW2 artifact");
let qcow2_data = std::fs::read(qcow2_path)
.into_diagnostic()
.wrap_err_with(|| format!("Failed to read QCOW2 file: {}", qcow2_path.display()))?;
let name = qcow2_path
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or("image")
.to_string();
let metadata = forge_oci::artifact::Qcow2Metadata {
name,
version: "latest".to_string(),
architecture: "amd64".to_string(),
os: "linux".to_string(),
description: None,
};
let manifest_url =
forge_oci::artifact::push_qcow2_artifact(reference, qcow2_data, &metadata, auth, insecure)
.await
.map_err(miette::Report::new)
.wrap_err("Artifact push failed")?;
println!("Pushed artifact: {manifest_url}");
Ok(())
}
/// Push an OCI Image Layout to a registry.
async fn push_oci_layout(
image_dir: &PathBuf,
reference: &str,
auth: &forge_oci::registry::AuthConfig,
insecure: &[String],
) -> miette::Result<()> {
// Read the OCI Image Layout index.json
let index_path = image_dir.join("index.json");
@ -73,12 +135,25 @@ pub async fn run(
layers.push(forge_oci::tar_layer::LayerBlob {
data: layer_data,
digest: layer_digest.to_string(),
uncompressed_size: 0, // Not tracked in layout
uncompressed_size: 0,
});
}
// Determine auth
let auth = if let Some(auth_path) = auth_file {
info!(reference, "Pushing OCI image to registry");
let manifest_url =
forge_oci::registry::push_image(reference, layers, config_json, auth, insecure)
.await
.map_err(miette::Report::new)
.wrap_err("Push failed")?;
println!("Pushed: {manifest_url}");
Ok(())
}
/// Resolve authentication from an auth file or environment.
fn resolve_auth(auth_file: Option<&PathBuf>) -> miette::Result<forge_oci::registry::AuthConfig> {
if let Some(auth_path) = auth_file {
let auth_content = std::fs::read_to_string(auth_path)
.into_diagnostic()
.wrap_err_with(|| format!("Failed to read auth file: {}", auth_path.display()))?;
@ -87,40 +162,22 @@ pub async fn run(
serde_json::from_str(&auth_content).into_diagnostic()?;
if let Some(token) = auth_json["token"].as_str() {
forge_oci::registry::AuthConfig::Bearer {
Ok(forge_oci::registry::AuthConfig::Bearer {
token: token.to_string(),
}
})
} else if let (Some(user), Some(pass)) = (
auth_json["username"].as_str(),
auth_json["password"].as_str(),
) {
forge_oci::registry::AuthConfig::Basic {
Ok(forge_oci::registry::AuthConfig::Basic {
username: user.to_string(),
password: pass.to_string(),
}
})
} else {
forge_oci::registry::AuthConfig::Anonymous
Ok(forge_oci::registry::AuthConfig::Anonymous)
}
} else {
forge_oci::registry::AuthConfig::Anonymous
};
// Determine if we need insecure registries (localhost)
let insecure = if reference.starts_with("localhost") || reference.starts_with("127.0.0.1") {
let host_port = reference.split('/').next().unwrap_or("");
vec![host_port.to_string()]
} else {
vec![]
};
info!(reference, "Pushing OCI image to registry");
let manifest_url =
forge_oci::registry::push_image(reference, layers, config_json, &auth, &insecure)
.await
.map_err(miette::Report::new)
.wrap_err("Push failed")?;
println!("Pushed: {manifest_url}");
Ok(())
// Try GITHUB_TOKEN for ghcr.io
Ok(forge_oci::artifact::resolve_ghcr_auth())
}
}

View file

@ -56,9 +56,9 @@ enum Commands {
profile: Vec<String>,
},
/// Push an OCI Image Layout to a registry
/// Push an OCI Image Layout or QCOW2 artifact to a registry
Push {
/// Path to the OCI Image Layout directory
/// Path to the OCI Image Layout directory (or QCOW2 file with --artifact)
#[arg(short, long)]
image: PathBuf,
@ -69,6 +69,10 @@ enum Commands {
/// Path to auth file (JSON with username/password or token)
#[arg(short, long)]
auth_file: Option<PathBuf>,
/// Push as a QCOW2 OCI artifact instead of an OCI Image Layout
#[arg(long)]
artifact: bool,
},
/// List available targets from a spec file
@ -108,8 +112,9 @@ async fn main() -> Result<()> {
image,
reference,
auth_file,
artifact,
} => {
commands::push::run(&image, &reference, auth_file.as_ref()).await?;
commands::push::run(&image, &reference, auth_file.as_ref(), artifact).await?;
}
Commands::Targets { spec } => {
commands::targets::run(&spec)?;

View file

@ -129,6 +129,95 @@ mod tests {
assert_eq!(certs.ca[0].publisher, "omnios");
}
#[test]
fn test_parse_ubuntu_spec() {
let kdl = r#"
metadata name="ubuntu-ci" version="0.1.0"
distro "ubuntu-22.04"
repositories {
apt-mirror "http://archive.ubuntu.com/ubuntu" suite="jammy" components="main universe"
}
packages {
package "build-essential"
package "curl"
}
target "qcow2" kind="qcow2" {
disk-size "8G"
bootloader "grub"
filesystem "ext4"
push-to "ghcr.io/cloudnebulaproject/ubuntu-rust:latest"
}
"#;
let spec = parse(kdl).expect("Failed to parse Ubuntu spec");
assert_eq!(spec.distro, Some("ubuntu-22.04".to_string()));
assert_eq!(spec.repositories.apt_mirrors.len(), 1);
let mirror = &spec.repositories.apt_mirrors[0];
assert_eq!(mirror.url, "http://archive.ubuntu.com/ubuntu");
assert_eq!(mirror.suite, "jammy");
assert_eq!(mirror.components, Some("main universe".to_string()));
let target = &spec.targets[0];
assert_eq!(target.filesystem, Some("ext4".to_string()));
assert_eq!(
target.push_to,
Some("ghcr.io/cloudnebulaproject/ubuntu-rust:latest".to_string())
);
}
#[test]
fn test_parse_omnios_spec_unchanged() {
// Existing OmniOS specs should parse without errors (backward compat)
let kdl = r#"
metadata name="omnios-disk" version="0.0.1"
repositories {
publisher name="omnios" origin="https://pkg.omnios.org/bloody/core/"
}
packages {
package "system/kernel"
}
target "vm" kind="qcow2" {
disk-size "2000M"
bootloader "uefi"
pool {
property name="ashift" value="12"
}
}
"#;
let spec = parse(kdl).expect("Failed to parse OmniOS spec");
assert_eq!(spec.distro, None);
assert!(spec.repositories.apt_mirrors.is_empty());
assert_eq!(spec.targets[0].filesystem, None);
assert_eq!(spec.targets[0].push_to, None);
// DistroFamily should default to OmniOS
assert_eq!(
schema::DistroFamily::from_distro_str(spec.distro.as_deref()),
schema::DistroFamily::OmniOS
);
}
#[test]
fn test_distro_family_detection() {
assert_eq!(
schema::DistroFamily::from_distro_str(None),
schema::DistroFamily::OmniOS
);
assert_eq!(
schema::DistroFamily::from_distro_str(Some("omnios")),
schema::DistroFamily::OmniOS
);
assert_eq!(
schema::DistroFamily::from_distro_str(Some("ubuntu-22.04")),
schema::DistroFamily::Ubuntu
);
assert_eq!(
schema::DistroFamily::from_distro_str(Some("ubuntu-24.04")),
schema::DistroFamily::Ubuntu
);
}
#[test]
fn test_parse_pool_properties() {
let kdl = r#"

View file

@ -134,6 +134,11 @@ fn merge_base(mut base: ImageSpec, child: ImageSpec) -> ImageSpec {
// Metadata comes from the child
base.metadata = child.metadata;
// distro: child overrides
if child.distro.is_some() {
base.distro = child.distro;
}
// build_host: child overrides
if child.build_host.is_some() {
base.build_host = child.build_host;
@ -151,6 +156,18 @@ fn merge_base(mut base: ImageSpec, child: ImageSpec) -> ImageSpec {
}
}
// repositories: merge apt_mirrors from child into base (dedup by URL)
for mirror in child.repositories.apt_mirrors {
if !base
.repositories
.apt_mirrors
.iter()
.any(|m| m.url == mirror.url)
{
base.repositories.apt_mirrors.push(mirror);
}
}
// incorporation: child overrides
if child.incorporation.is_some() {
base.incorporation = child.incorporation;
@ -296,6 +313,71 @@ mod tests {
assert_eq!(resolved.targets.len(), 1);
}
#[test]
fn test_merge_base_with_apt_mirrors() {
let tmp = TempDir::new().unwrap();
let base_kdl = r#"
metadata name="base" version="0.0.1"
distro "ubuntu-22.04"
repositories {
apt-mirror "http://archive.ubuntu.com/ubuntu" suite="jammy" components="main universe"
}
packages {
package "base-pkg"
}
"#;
fs::write(tmp.path().join("base.kdl"), base_kdl).unwrap();
let child_kdl = r#"
metadata name="child" version="1.0.0"
base "base.kdl"
repositories {
apt-mirror "http://ppa.launchpad.net/extra" suite="jammy" components="main"
}
packages {
package "child-pkg"
}
"#;
let spec = crate::parse(child_kdl).unwrap();
let resolved = resolve(spec, tmp.path()).unwrap();
assert_eq!(resolved.distro, Some("ubuntu-22.04".to_string()));
assert_eq!(resolved.repositories.apt_mirrors.len(), 2);
assert_eq!(
resolved.repositories.apt_mirrors[0].url,
"http://archive.ubuntu.com/ubuntu"
);
assert_eq!(
resolved.repositories.apt_mirrors[1].url,
"http://ppa.launchpad.net/extra"
);
}
#[test]
fn test_merge_base_distro_child_overrides() {
let tmp = TempDir::new().unwrap();
let base_kdl = r#"
metadata name="base" version="0.0.1"
repositories {}
"#;
fs::write(tmp.path().join("base.kdl"), base_kdl).unwrap();
let child_kdl = r#"
metadata name="child" version="1.0.0"
base "base.kdl"
distro "ubuntu-22.04"
repositories {}
"#;
let spec = crate::parse(child_kdl).unwrap();
let resolved = resolve(spec, tmp.path()).unwrap();
assert_eq!(resolved.distro, Some("ubuntu-22.04".to_string()));
}
#[test]
fn test_circular_include_detected() {
let tmp = TempDir::new().unwrap();

View file

@ -1,10 +1,31 @@
use knuffel::Decode;
/// Distro family derived from the `distro` string in a spec.
/// Not KDL-decoded directly — computed via `from_distro_str`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum DistroFamily {
#[default]
OmniOS,
Ubuntu,
}
impl DistroFamily {
pub fn from_distro_str(s: Option<&str>) -> Self {
match s {
Some(d) if d.starts_with("ubuntu") => DistroFamily::Ubuntu,
_ => DistroFamily::OmniOS,
}
}
}
#[derive(Debug, Decode)]
pub struct ImageSpec {
#[knuffel(child)]
pub metadata: Metadata,
#[knuffel(child, unwrap(argument))]
pub distro: Option<String>,
#[knuffel(child, unwrap(argument))]
pub base: Option<String>,
@ -53,6 +74,19 @@ pub struct Metadata {
pub struct Repositories {
#[knuffel(children(name = "publisher"))]
pub publishers: Vec<Publisher>,
#[knuffel(children(name = "apt-mirror"))]
pub apt_mirrors: Vec<AptMirror>,
}
#[derive(Debug, Decode)]
pub struct AptMirror {
#[knuffel(argument)]
pub url: String,
#[knuffel(property)]
pub suite: String,
#[knuffel(property)]
pub components: Option<String>,
}
#[derive(Debug, Decode)]
@ -187,6 +221,12 @@ pub struct Target {
#[knuffel(child, unwrap(argument))]
pub bootloader: Option<String>,
#[knuffel(child, unwrap(argument))]
pub filesystem: Option<String>,
#[knuffel(child, unwrap(argument))]
pub push_to: Option<String>,
#[knuffel(child)]
pub entrypoint: Option<Entrypoint>,

33
images/omnios-rust-ci.kdl Normal file
View file

@ -0,0 +1,33 @@
metadata name="omnios-rust-ci" version="0.1.0" description="OmniOS bloody CI image with Rust"
distro "omnios"
base "omnios-bloody-base.kdl"
include "devfs.kdl"
include "common.kdl"
repositories {}
packages {
package "/system/management/cloud-init"
package "/driver/crypto/viorand"
package "/driver/network/vioif"
package "/driver/storage/vioblk"
package "/developer/build-essential"
package "/developer/lang/rust"
package "/developer/versioning/git"
}
overlays {
shadow username="root" password="$5$kr1VgdIt$OUiUAyZCDogH/uaxH71rMeQxvpDEY2yX.x0ZQRnmeb9"
file destination="/etc/default/init" source="default_init.utc" owner="root" group="root" mode="644"
}
target "qcow2" kind="qcow2" {
disk-size "4000M"
bootloader "uefi"
filesystem "zfs"
push-to "ghcr.io/cloudnebulaproject/omnios-rust:latest"
pool {
property name="ashift" value="12"
}
}

38
images/ubuntu-rust-ci.kdl Normal file
View file

@ -0,0 +1,38 @@
metadata name="ubuntu-rust-ci" version="0.1.0" description="Ubuntu 22.04 CI image with Rust"
distro "ubuntu-22.04"
repositories {
apt-mirror "http://archive.ubuntu.com/ubuntu" suite="jammy" components="main universe"
}
packages {
package "build-essential"
package "pkg-config"
package "curl"
package "git"
package "ca-certificates"
package "rustc"
package "cargo"
package "libssl-dev"
package "openssh-server"
package "cloud-init"
package "grub-efi-amd64"
package "linux-image-generic"
}
customization {
user "ci"
}
overlays {
shadow username="root" password="$5$kr1VgdIt$OUiUAyZCDogH/uaxH71rMeQxvpDEY2yX.x0ZQRnmeb9"
ensure-dir "/home/ci" owner="ci" group="ci" mode="755"
}
target "qcow2" kind="qcow2" {
disk-size "8G"
bootloader "grub"
filesystem "ext4"
push-to "ghcr.io/cloudnebulaproject/ubuntu-rust:latest"
}