Initial implementation of refraction-forger

Standalone workspace with 4 crates for building optimized OS images
and publishing to OCI registries:

- spec-parser: KDL image spec parsing with include resolution and
  profile-based conditional filtering
- forge-oci: OCI image creation (tar layers, manifests, Image Layout)
  and registry push via oci-client
- forge-engine: Build pipeline with Phase 1 (rootfs assembly via native
  package managers with -R) and Phase 2 (QCOW2/OCI/artifact targets),
  plus dyn-compatible ToolRunner trait for external tool execution
- forger: CLI binary with build, validate, inspect, push, and targets
  commands

Ported KDL specs and overlay files from the vm-manager prototype.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Till Wegmueller 2026-02-15 15:30:22 +01:00
commit 48f8db1236
No known key found for this signature in database
57 changed files with 6927 additions and 0 deletions

1
.gitignore vendored Normal file
View file

@@ -0,0 +1 @@
/target

2916
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

58
Cargo.toml Normal file
View file

@@ -0,0 +1,58 @@
[workspace]
resolver = "3"
members = [
"crates/spec-parser",
"crates/forge-oci",
"crates/forge-engine",
"crates/forger",
]
[workspace.package]
edition = "2024"
rust-version = "1.85"
[workspace.dependencies]
# Parsing
knuffel = "3.2"
# Error handling & diagnostics
miette = { version = "7", features = ["fancy"] }
thiserror = "2"
# CLI
clap = { version = "4.5", features = ["derive", "env"] }
# Serialization
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# Async runtime
tokio = { version = "1", features = ["rt-multi-thread", "macros", "fs", "process", "time"] }
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# OCI
oci-spec = "0.8"
oci-client = "0.15"
# Crypto & encoding
sha2 = "0.10"
hex = "0.4"
# IO
bytes = "1"
tar = "0.4"
flate2 = "1"
walkdir = "2"
tempfile = "3"
# Misc
bytesize = "2"
indicatif = "0.17"
# Internal crates
spec-parser = { path = "crates/spec-parser" }
forge-oci = { path = "crates/forge-oci" }
forge-engine = { path = "crates/forge-engine" }

View file

@@ -0,0 +1,22 @@
[package]
name = "forge-engine"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
[dependencies]
spec-parser = { workspace = true }
forge-oci = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
tempfile = { workspace = true }
walkdir = { workspace = true }
bytesize = { workspace = true }
tar = { workspace = true }
flate2 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }

View file

@ -0,0 +1,123 @@
use miette::Diagnostic;
use thiserror::Error;
#[derive(Debug, Error, Diagnostic)]
pub enum ForgeError {
#[error("Staging directory setup failed")]
#[diagnostic(
help("Ensure sufficient disk space and write permissions for temporary directories"),
code(forge::staging_failed)
)]
StagingSetup(#[source] std::io::Error),
#[error("Base tarball extraction failed: {path}")]
#[diagnostic(
help("Verify the base tarball exists and is a valid tar.gz or tar archive"),
code(forge::base_extract_failed)
)]
BaseExtract {
path: String,
#[source]
source: std::io::Error,
},
#[error("Package manager operation failed: {operation}")]
#[diagnostic(
help("Check that the package manager is available on the build host and the repositories are reachable.\nCommand: {command}"),
code(forge::pkg_failed)
)]
PackageManager {
operation: String,
command: String,
detail: String,
},
#[error("Tool execution failed: `{tool} {args}`")]
#[diagnostic(
help("Ensure '{tool}' is installed and available in PATH.\nStderr: {stderr}"),
code(forge::tool_failed)
)]
ToolExecution {
tool: String,
args: String,
stderr: String,
#[source]
source: std::io::Error,
},
#[error("Tool returned non-zero exit code: `{tool} {args}` (exit code {exit_code})")]
#[diagnostic(
help("The command failed. Check stderr output below for details.\nStderr: {stderr}"),
code(forge::tool_exit_code)
)]
ToolNonZero {
tool: String,
args: String,
exit_code: i32,
stderr: String,
},
#[error("Overlay application failed: {action}")]
#[diagnostic(
help("Check that the source file exists and the destination path is valid.\n{detail}"),
code(forge::overlay_failed)
)]
Overlay {
action: String,
detail: String,
#[source]
source: std::io::Error,
},
#[error("Overlay source file not found: {path}")]
#[diagnostic(
help("Ensure the file exists relative to the images/files/ directory"),
code(forge::overlay_source_missing)
)]
OverlaySourceMissing { path: String },
#[error("Customization failed: {operation}")]
#[diagnostic(
help("{detail}"),
code(forge::customization_failed)
)]
Customization {
operation: String,
detail: String,
},
#[error("QCOW2 image creation failed: {step}")]
#[diagnostic(
help("{detail}"),
code(forge::qcow2_failed)
)]
Qcow2Build { step: String, detail: String },
#[error("OCI image creation failed")]
#[diagnostic(code(forge::oci_failed))]
OciBuild(String),
#[error("Disk size not specified for qcow2 target")]
#[diagnostic(
help("Add a `disk_size \"2000M\"` child node to your qcow2 target block"),
code(forge::missing_disk_size)
)]
MissingDiskSize,
#[error("Invalid disk size: {value}")]
#[diagnostic(
help("Use a value like \"2000M\" or \"20G\""),
code(forge::invalid_disk_size)
)]
InvalidDiskSize { value: String },
#[error("No target named '{name}' found in spec")]
#[diagnostic(
help("Available targets: {available}. Use `forger targets` to list them."),
code(forge::target_not_found)
)]
TargetNotFound { name: String, available: String },
#[error("IO error")]
Io(#[from] std::io::Error),
}

View file

@ -0,0 +1,89 @@
pub mod error;
pub mod phase1;
pub mod phase2;
pub mod tools;
use std::path::Path;
use error::ForgeError;
use spec_parser::schema::{ImageSpec, Target, TargetKind};
use tools::ToolRunner;
use tracing::info;
/// Context for running a build.
pub struct BuildContext<'a> {
/// The resolved and profile-filtered image spec.
pub spec: &'a ImageSpec,
/// Directory containing overlay source files (images/files/).
pub files_dir: &'a Path,
/// Output directory for build artifacts.
pub output_dir: &'a Path,
/// Tool runner for executing external commands.
pub runner: &'a dyn ToolRunner,
}
impl<'a> BuildContext<'a> {
/// Build a specific target by name, or all targets if name is None.
pub async fn build(&self, target_name: Option<&str>) -> Result<(), ForgeError> {
let targets = self.select_targets(target_name)?;
std::fs::create_dir_all(self.output_dir)?;
for target in targets {
info!(target = %target.name, kind = %target.kind, "Building target");
// Phase 1: Assemble rootfs
let phase1_result =
phase1::execute(self.spec, self.files_dir, self.runner).await?;
// Phase 2: Produce target artifact
phase2::execute(
target,
&phase1_result.staging_root,
self.files_dir,
self.output_dir,
self.runner,
)
.await?;
info!(target = %target.name, "Target built successfully");
}
Ok(())
}
fn select_targets(&self, target_name: Option<&str>) -> Result<Vec<&Target>, ForgeError> {
match target_name {
Some(name) => {
let target = self
.spec
.targets
.iter()
.find(|t| t.name == name)
.ok_or_else(|| {
let available = self
.spec
.targets
.iter()
.map(|t| t.name.as_str())
.collect::<Vec<_>>()
.join(", ");
ForgeError::TargetNotFound {
name: name.to_string(),
available,
}
})?;
Ok(vec![target])
}
None => Ok(self.spec.targets.iter().collect()),
}
}
}
/// List available targets from a spec.
pub fn list_targets(spec: &ImageSpec) -> Vec<(&str, &TargetKind)> {
spec.targets
.iter()
.map(|t| (t.name.as_str(), &t.kind))
.collect()
}

View file

@ -0,0 +1,63 @@
use std::path::Path;
use spec_parser::schema::Customization;
use tracing::info;
use crate::error::ForgeError;
/// Apply customizations (user/group creation) by editing files in the staging root.
pub fn apply(customization: &Customization, staging_root: &Path) -> Result<(), ForgeError> {
for user in &customization.users {
create_user(&user.name, staging_root)?;
}
Ok(())
}
/// Create a user by appending entries to passwd, shadow, and group files in
/// the staging root. This does not use `useradd` since we're operating on a
/// staged filesystem, not the running system.
fn create_user(username: &str, staging_root: &Path) -> Result<(), ForgeError> {
info!(username, "Creating user in staging root");
let etc_dir = staging_root.join("etc");
std::fs::create_dir_all(&etc_dir).map_err(|e| ForgeError::Customization {
operation: format!("create /etc directory for user {username}"),
detail: e.to_string(),
})?;
// Append to /etc/passwd
let passwd_path = etc_dir.join("passwd");
let passwd_entry = format!("{username}:x:1000:1000::/home/{username}:/bin/sh\n");
append_or_create(&passwd_path, &passwd_entry).map_err(|e| ForgeError::Customization {
operation: format!("add user {username} to /etc/passwd"),
detail: e.to_string(),
})?;
// Append to /etc/shadow
let shadow_path = etc_dir.join("shadow");
let shadow_entry = format!("{username}:*LK*:::::::\n");
append_or_create(&shadow_path, &shadow_entry).map_err(|e| ForgeError::Customization {
operation: format!("add user {username} to /etc/shadow"),
detail: e.to_string(),
})?;
// Append to /etc/group
let group_path = etc_dir.join("group");
let group_entry = format!("{username}::1000:\n");
append_or_create(&group_path, &group_entry).map_err(|e| ForgeError::Customization {
operation: format!("add group {username} to /etc/group"),
detail: e.to_string(),
})?;
Ok(())
}
fn append_or_create(path: &Path, content: &str) -> Result<(), std::io::Error> {
use std::io::Write;
let mut file = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)?;
file.write_all(content.as_bytes())?;
Ok(())
}

View file

@ -0,0 +1,94 @@
pub mod customizations;
pub mod overlays;
pub mod packages;
pub mod staging;
pub mod variants;
use std::path::{Path, PathBuf};
use spec_parser::schema::ImageSpec;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Result of Phase 1: a populated staging directory ready for Phase 2.
pub struct Phase1Result {
/// Path to the staging root containing the assembled rootfs.
pub staging_root: PathBuf,
/// The tempdir handle -- dropping it cleans up the staging dir.
pub _staging_dir: tempfile::TempDir,
}
/// Execute Phase 1: assemble a rootfs in a staging directory from the spec.
///
/// Steps:
/// 1. Create staging directory
/// 2. Extract base tarball (if specified)
/// 3. Apply IPS variants
/// 4. Configure package repositories and install packages
/// 5. Apply customizations (users, groups)
/// 6. Apply overlays (files, dirs, symlinks, shadow, devfsadm)
pub async fn execute(
spec: &ImageSpec,
files_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<Phase1Result, ForgeError> {
info!(name = %spec.metadata.name, "Starting Phase 1: rootfs assembly");
// 1. Create staging directory
let (staging_dir, staging_root) = staging::create_staging()?;
let root = staging_root.to_str().unwrap();
info!(root, "Staging directory created");
// 2. Extract base tarball
if let Some(ref base) = spec.base {
staging::extract_base_tarball(base, &staging_root)?;
}
// 3. Create IPS image and configure publishers
crate::tools::pkg::image_create(runner, root).await?;
for publisher in &spec.repositories.publishers {
crate::tools::pkg::set_publisher(runner, root, &publisher.name, &publisher.origin).await?;
}
// 4. Apply variants
if let Some(ref vars) = spec.variants {
variants::apply_variants(runner, root, vars).await?;
}
// 5. Approve CA certificates
if let Some(ref certs) = spec.certificates {
for ca in &certs.ca {
let certfile_path = files_dir.join(&ca.certfile);
let certfile_str = certfile_path.to_str().unwrap_or(&ca.certfile);
crate::tools::pkg::approve_ca_cert(runner, root, &ca.publisher, certfile_str).await?;
}
}
// 6. Set incorporation
if let Some(ref incorporation) = spec.incorporation {
crate::tools::pkg::set_incorporation(runner, root, incorporation).await?;
}
// 7. Install packages
packages::install_all(runner, root, &spec.packages).await?;
// 8. Apply customizations
for customization in &spec.customizations {
customizations::apply(customization, &staging_root)?;
}
// 9. Apply overlays
for overlay_block in &spec.overlays {
overlays::apply_overlays(&overlay_block.actions, &staging_root, files_dir, runner).await?;
}
info!("Phase 1 complete: rootfs assembled");
Ok(Phase1Result {
staging_root,
_staging_dir: staging_dir,
})
}

View file

@ -0,0 +1,323 @@
use std::path::Path;
use spec_parser::schema::OverlayAction;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Apply a list of overlay actions to the staging root.
pub async fn apply_overlays(
actions: &[OverlayAction],
staging_root: &Path,
files_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
for action in actions {
apply_action(action, staging_root, files_dir, runner).await?;
}
Ok(())
}
async fn apply_action(
action: &OverlayAction,
staging_root: &Path,
files_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
match action {
OverlayAction::File(file_overlay) => {
let dest = staging_root.join(
file_overlay
.destination
.strip_prefix('/')
.unwrap_or(&file_overlay.destination),
);
// Ensure parent directory exists
if let Some(parent) = dest.parent() {
std::fs::create_dir_all(parent).map_err(|e| ForgeError::Overlay {
action: format!("create parent dir for {}", file_overlay.destination),
detail: parent.display().to_string(),
source: e,
})?;
}
if let Some(ref source) = file_overlay.source {
// Copy source file to destination
let src_path = files_dir.join(source);
if !src_path.exists() {
return Err(ForgeError::OverlaySourceMissing {
path: src_path.display().to_string(),
});
}
info!(
source = %src_path.display(),
destination = %dest.display(),
"Copying overlay file"
);
std::fs::copy(&src_path, &dest).map_err(|e| ForgeError::Overlay {
action: format!("copy {} -> {}", source, file_overlay.destination),
detail: String::new(),
source: e,
})?;
} else {
// Create empty file
info!(destination = %dest.display(), "Creating empty overlay file");
std::fs::write(&dest, b"").map_err(|e| ForgeError::Overlay {
action: format!("create empty file {}", file_overlay.destination),
detail: String::new(),
source: e,
})?;
}
// Set permissions if specified
#[cfg(unix)]
if let Some(ref mode) = file_overlay.mode {
use std::os::unix::fs::PermissionsExt;
if let Ok(mode_val) = u32::from_str_radix(mode, 8) {
std::fs::set_permissions(&dest, std::fs::Permissions::from_mode(mode_val))
.map_err(|e| ForgeError::Overlay {
action: format!("set mode {} on {}", mode, file_overlay.destination),
detail: String::new(),
source: e,
})?;
}
}
}
OverlayAction::Devfsadm(_) => {
let root_str = staging_root.to_str().unwrap_or(".");
crate::tools::devfsadm::run_devfsadm(runner, root_str).await?;
}
OverlayAction::EnsureDir(ensure_dir) => {
let dir_path = staging_root.join(
ensure_dir
.path
.strip_prefix('/')
.unwrap_or(&ensure_dir.path),
);
info!(path = %dir_path.display(), "Ensuring directory exists");
std::fs::create_dir_all(&dir_path).map_err(|e| ForgeError::Overlay {
action: format!("ensure directory {}", ensure_dir.path),
detail: String::new(),
source: e,
})?;
#[cfg(unix)]
if let Some(ref mode) = ensure_dir.mode {
use std::os::unix::fs::PermissionsExt;
if let Ok(mode_val) = u32::from_str_radix(mode, 8) {
std::fs::set_permissions(
&dir_path,
std::fs::Permissions::from_mode(mode_val),
)
.map_err(|e| ForgeError::Overlay {
action: format!("set mode {} on {}", mode, ensure_dir.path),
detail: String::new(),
source: e,
})?;
}
}
}
OverlayAction::RemoveFiles(remove) => {
if let Some(ref file) = remove.file {
let path = staging_root.join(file.strip_prefix('/').unwrap_or(file));
if path.exists() {
info!(path = %path.display(), "Removing file");
std::fs::remove_file(&path).map_err(|e| ForgeError::Overlay {
action: format!("remove file {file}"),
detail: String::new(),
source: e,
})?;
}
}
if let Some(ref dir) = remove.dir {
let path = staging_root.join(dir.strip_prefix('/').unwrap_or(dir));
if path.exists() {
info!(path = %path.display(), "Removing directory contents");
// Remove directory contents but not the directory itself
for entry in std::fs::read_dir(&path).map_err(|e| ForgeError::Overlay {
action: format!("read directory {dir}"),
detail: String::new(),
source: e,
})? {
let entry = entry.map_err(|e| ForgeError::Overlay {
action: format!("read entry in {dir}"),
detail: String::new(),
source: e,
})?;
let entry_path = entry.path();
if entry_path.is_dir() {
std::fs::remove_dir_all(&entry_path).map_err(|e| {
ForgeError::Overlay {
action: format!(
"remove dir {}",
entry_path.display()
),
detail: String::new(),
source: e,
}
})?;
} else {
std::fs::remove_file(&entry_path).map_err(|e| {
ForgeError::Overlay {
action: format!(
"remove file {}",
entry_path.display()
),
detail: String::new(),
source: e,
}
})?;
}
}
}
}
if let Some(ref pattern) = remove.pattern {
info!(pattern, "Removing files matching pattern");
// Simple glob-like pattern matching: only supports trailing *
let base = staging_root.join(
pattern
.trim_end_matches('*')
.strip_prefix('/')
.unwrap_or(pattern.trim_end_matches('*')),
);
if let Some(parent) = base.parent() {
if parent.exists() {
for entry in std::fs::read_dir(parent).map_err(|e| {
ForgeError::Overlay {
action: format!("read directory for pattern {pattern}"),
detail: String::new(),
source: e,
}
})? {
let entry = entry.map_err(|e| ForgeError::Overlay {
action: format!("read entry for pattern {pattern}"),
detail: String::new(),
source: e,
})?;
let name = entry.file_name().to_string_lossy().to_string();
let base_name = base
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_default();
if name.starts_with(&base_name) {
let entry_path = entry.path();
if entry_path.is_file() {
std::fs::remove_file(&entry_path).ok();
}
}
}
}
}
}
}
OverlayAction::EnsureSymlink(symlink) => {
let link_path = staging_root.join(
symlink
.path
.strip_prefix('/')
.unwrap_or(&symlink.path),
);
// Ensure parent directory exists
if let Some(parent) = link_path.parent() {
std::fs::create_dir_all(parent).map_err(|e| ForgeError::Overlay {
action: format!("create parent dir for symlink {}", symlink.path),
detail: String::new(),
source: e,
})?;
}
// Remove existing file/symlink if present
if link_path.exists() || link_path.symlink_metadata().is_ok() {
std::fs::remove_file(&link_path).ok();
}
info!(
link = %link_path.display(),
target = %symlink.target,
"Creating symlink"
);
#[cfg(unix)]
std::os::unix::fs::symlink(&symlink.target, &link_path).map_err(|e| {
ForgeError::Overlay {
action: format!("create symlink {} -> {}", symlink.path, symlink.target),
detail: String::new(),
source: e,
}
})?;
#[cfg(not(unix))]
return Err(ForgeError::Overlay {
action: format!("create symlink {} -> {}", symlink.path, symlink.target),
detail: "Symlinks are only supported on Unix platforms".to_string(),
source: std::io::Error::new(std::io::ErrorKind::Unsupported, "not unix"),
});
}
OverlayAction::Shadow(shadow) => {
let shadow_path = staging_root.join("etc/shadow");
if shadow_path.exists() {
info!(username = %shadow.username, "Updating shadow password");
let content = std::fs::read_to_string(&shadow_path).map_err(|e| {
ForgeError::Overlay {
action: format!("read /etc/shadow for user {}", shadow.username),
detail: String::new(),
source: e,
}
})?;
let mut found = false;
let new_content: String = content
.lines()
.map(|line| {
let parts: Vec<&str> = line.splitn(9, ':').collect();
if parts.len() >= 2 && parts[0] == shadow.username {
found = true;
let mut new_parts = parts.clone();
new_parts[1] = &shadow.password;
new_parts.join(":")
} else {
line.to_string()
}
})
.collect::<Vec<_>>()
.join("\n");
let final_content = if found {
new_content
} else {
format!("{new_content}\n{}:{}:::::::\n", shadow.username, shadow.password)
};
std::fs::write(&shadow_path, final_content).map_err(|e| ForgeError::Overlay {
action: format!("write /etc/shadow for user {}", shadow.username),
detail: String::new(),
source: e,
})?;
} else {
// Create shadow file with this entry
let content = format!("{}:{}:::::::\n", shadow.username, shadow.password);
let etc_dir = staging_root.join("etc");
std::fs::create_dir_all(&etc_dir).map_err(|e| ForgeError::Overlay {
action: "create /etc for shadow".to_string(),
detail: String::new(),
source: e,
})?;
std::fs::write(&shadow_path, content).map_err(|e| ForgeError::Overlay {
action: format!("create /etc/shadow for user {}", shadow.username),
detail: String::new(),
source: e,
})?;
}
}
}
Ok(())
}

View file

@ -0,0 +1,25 @@
use spec_parser::schema::PackageList;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Install all package lists into the staging root via `pkg -R`.
pub async fn install_all(
runner: &dyn ToolRunner,
root: &str,
package_lists: &[PackageList],
) -> Result<(), ForgeError> {
let all_packages: Vec<String> = package_lists
.iter()
.flat_map(|pl| pl.packages.iter().map(|p| p.name.clone()))
.collect();
if all_packages.is_empty() {
info!("No packages to install");
return Ok(());
}
info!(count = all_packages.len(), "Installing packages");
crate::tools::pkg::install(runner, root, &all_packages).await
}

View file

@ -0,0 +1,47 @@
use std::path::PathBuf;
use tracing::info;
use crate::error::ForgeError;
/// Create a temporary staging directory for rootfs assembly.
pub fn create_staging() -> Result<(tempfile::TempDir, PathBuf), ForgeError> {
let staging_dir = tempfile::TempDir::new().map_err(ForgeError::StagingSetup)?;
let staging_root = staging_dir.path().to_path_buf();
Ok((staging_dir, staging_root))
}
/// Extract a base tarball into the staging directory.
pub fn extract_base_tarball(tarball_path: &str, staging_root: &PathBuf) -> Result<(), ForgeError> {
info!(tarball_path, "Extracting base tarball");
let file =
std::fs::File::open(tarball_path).map_err(|e| ForgeError::BaseExtract {
path: tarball_path.to_string(),
source: e,
})?;
// Try gzip first, fall back to plain tar
let reader = std::io::BufReader::new(file);
if tarball_path.ends_with(".gz") || tarball_path.ends_with(".tgz") {
let decoder = flate2::read::GzDecoder::new(reader);
let mut archive = tar::Archive::new(decoder);
archive
.unpack(staging_root)
.map_err(|e| ForgeError::BaseExtract {
path: tarball_path.to_string(),
source: e,
})?;
} else {
let mut archive = tar::Archive::new(reader);
archive
.unpack(staging_root)
.map_err(|e| ForgeError::BaseExtract {
path: tarball_path.to_string(),
source: e,
})?;
}
info!("Base tarball extracted");
Ok(())
}

View file

@ -0,0 +1,18 @@
use spec_parser::schema::Variants;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Apply IPS variants via `pkg -R change-variant`.
pub async fn apply_variants(
runner: &dyn ToolRunner,
root: &str,
variants: &Variants,
) -> Result<(), ForgeError> {
for var in &variants.vars {
info!(name = %var.name, value = %var.value, "Applying IPS variant");
crate::tools::pkg::change_variant(runner, root, &var.name, &var.value).await?;
}
Ok(())
}

View file

@ -0,0 +1,67 @@
use std::path::Path;
use flate2::write::GzEncoder;
use flate2::Compression;
use spec_parser::schema::Target;
use tracing::info;
use walkdir::WalkDir;
use crate::error::ForgeError;
/// Build a tarball artifact from the staged rootfs.
pub fn build_artifact(
target: &Target,
staging_root: &Path,
output_dir: &Path,
_files_dir: &Path,
) -> Result<(), ForgeError> {
let output_path = output_dir.join(format!("{}.tar.gz", target.name));
info!(path = %output_path.display(), "Creating artifact tarball");
let file = std::fs::File::create(&output_path)?;
let encoder = GzEncoder::new(file, Compression::default());
let mut tar = tar::Builder::new(encoder);
for entry in WalkDir::new(staging_root).follow_links(false) {
let entry = entry.map_err(|e| ForgeError::Qcow2Build {
step: "artifact_walk".to_string(),
detail: e.to_string(),
})?;
let full_path = entry.path();
let rel_path = full_path
.strip_prefix(staging_root)
.unwrap_or(full_path);
if rel_path.as_os_str().is_empty() {
continue;
}
if full_path.is_file() {
tar.append_path_with_name(full_path, rel_path)
.map_err(|e| ForgeError::Qcow2Build {
step: "artifact_tar".to_string(),
detail: e.to_string(),
})?;
} else if full_path.is_dir() {
tar.append_dir(rel_path, full_path)
.map_err(|e| ForgeError::Qcow2Build {
step: "artifact_tar_dir".to_string(),
detail: e.to_string(),
})?;
}
}
let encoder = tar.into_inner().map_err(|e| ForgeError::Qcow2Build {
step: "artifact_tar_finish".to_string(),
detail: e.to_string(),
})?;
encoder.finish().map_err(|e| ForgeError::Qcow2Build {
step: "artifact_gz_finish".to_string(),
detail: e.to_string(),
})?;
info!(path = %output_path.display(), "Artifact tarball created");
Ok(())
}

View file

@ -0,0 +1,41 @@
pub mod artifact;
pub mod oci;
pub mod qcow2;
use std::path::Path;
use spec_parser::schema::{Target, TargetKind};
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Execute Phase 2: produce the target artifact from the staged rootfs.
pub async fn execute(
target: &Target,
staging_root: &Path,
files_dir: &Path,
output_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
info!(
target = %target.name,
kind = %target.kind,
"Starting Phase 2: target production"
);
match target.kind {
TargetKind::Oci => {
oci::build_oci(target, staging_root, output_dir)?;
}
TargetKind::Qcow2 => {
qcow2::build_qcow2(target, staging_root, output_dir, runner).await?;
}
TargetKind::Artifact => {
artifact::build_artifact(target, staging_root, output_dir, files_dir)?;
}
}
info!(target = %target.name, "Phase 2 complete");
Ok(())
}

View file

@ -0,0 +1,52 @@
use std::path::Path;
use spec_parser::schema::Target;
use tracing::info;
use crate::error::ForgeError;
/// Build an OCI container image from the staged rootfs.
pub fn build_oci(
target: &Target,
staging_root: &Path,
output_dir: &Path,
) -> Result<(), ForgeError> {
info!("Building OCI container image");
// Create the tar.gz layer from staging
let layer =
forge_oci::tar_layer::create_layer(staging_root).map_err(|e| ForgeError::OciBuild(e.to_string()))?;
info!(
digest = %layer.digest,
size = layer.data.len(),
"Layer created"
);
// Build image options from target spec
let mut options = forge_oci::manifest::ImageOptions::default();
if let Some(ref ep) = target.entrypoint {
options.entrypoint = Some(vec![ep.command.clone()]);
}
if let Some(ref env) = target.environment {
options.env = env
.vars
.iter()
.map(|v| format!("{}={}", v.key, v.value))
.collect();
}
// Build manifest and config
let (config_json, manifest_json) = forge_oci::manifest::build_manifest(&[layer.clone()], &options)
.map_err(|e| ForgeError::OciBuild(e.to_string()))?;
// Write OCI Image Layout
let oci_output = output_dir.join(format!("{}-oci", target.name));
forge_oci::layout::write_oci_layout(&oci_output, &[layer], &config_json, &manifest_json)
.map_err(|e| ForgeError::OciBuild(e.to_string()))?;
info!(path = %oci_output.display(), "OCI Image Layout written");
Ok(())
}

View file

@ -0,0 +1,150 @@
use std::path::Path;
use spec_parser::schema::Target;
use tracing::info;
use crate::error::ForgeError;
use crate::tools::ToolRunner;
/// Build a QCOW2 VM image from the staged rootfs.
///
/// Pipeline:
/// 1. Create raw disk image of specified size
/// 2. Attach loopback device
/// 3. Create ZFS pool with spec properties
/// 4. Create boot environment structure (rpool/ROOT/be-1)
/// 5. Copy staging rootfs into mounted BE
/// 6. Install bootloader via chroot
/// 7. Set bootfs property
/// 8. Export pool, detach loopback
/// 9. Convert raw -> qcow2
pub async fn build_qcow2(
target: &Target,
staging_root: &Path,
output_dir: &Path,
runner: &dyn ToolRunner,
) -> Result<(), ForgeError> {
let disk_size = target
.disk_size
.as_deref()
.ok_or(ForgeError::MissingDiskSize)?;
let bootloader_type = target.bootloader.as_deref().unwrap_or("uefi");
let raw_path = output_dir.join(format!("{}.raw", target.name));
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
let raw_str = raw_path.to_str().unwrap();
let qcow2_str = qcow2_path.to_str().unwrap();
// Collect pool properties
let pool_props: Vec<(&str, &str)> = target
.pool
.as_ref()
.map(|p| {
p.properties
.iter()
.map(|prop| (prop.name.as_str(), prop.value.as_str()))
.collect()
})
.unwrap_or_default();
let pool_name = "rpool";
let be_dataset = format!("{pool_name}/ROOT/be-1");
info!(disk_size, "Step 1: Creating raw disk image");
crate::tools::qemu_img::create_raw(runner, raw_str, disk_size).await?;
info!("Step 2: Attaching loopback device");
let device = crate::tools::loopback::attach(runner, raw_str).await?;
// Wrap the rest in a closure-like structure so we can clean up on error
let result = async {
info!(device = %device, "Step 3: Creating ZFS pool");
crate::tools::zpool::create(runner, pool_name, &device, &pool_props).await?;
info!("Step 4: Creating boot environment structure");
crate::tools::zfs::create(
runner,
&format!("{pool_name}/ROOT"),
&[("canmount", "off"), ("mountpoint", "legacy")],
)
.await?;
let staging_str = staging_root.to_str().unwrap_or(".");
crate::tools::zfs::create(
runner,
&be_dataset,
&[("canmount", "noauto"), ("mountpoint", staging_str)],
)
.await?;
crate::tools::zfs::mount(runner, &be_dataset).await?;
info!("Step 5: Copying staging rootfs into boot environment");
copy_rootfs(staging_root, staging_root)?;
info!("Step 6: Installing bootloader");
crate::tools::bootloader::install(runner, staging_str, pool_name, bootloader_type).await?;
info!("Step 7: Setting bootfs property");
crate::tools::zpool::set(runner, pool_name, "bootfs", &be_dataset).await?;
info!("Step 8: Exporting ZFS pool");
crate::tools::zfs::unmount(runner, &be_dataset).await?;
crate::tools::zpool::export(runner, pool_name).await?;
Ok::<(), ForgeError>(())
}
.await;
// Always try to detach loopback, even on error
info!("Detaching loopback device");
let detach_result = crate::tools::loopback::detach(runner, &device).await;
// Return the original error if there was one
result?;
detach_result?;
info!("Step 9: Converting raw -> qcow2");
crate::tools::qemu_img::convert_to_qcow2(runner, raw_str, qcow2_str).await?;
// Clean up raw file
std::fs::remove_file(&raw_path).ok();
info!(path = %qcow2_path.display(), "QCOW2 image created");
Ok(())
}
/// Copy the staging rootfs into the mounted BE.
/// Since the BE is mounted at the staging root mountpoint, we use a recursive
/// copy approach for files that need relocation.
fn copy_rootfs(src: &Path, dest: &Path) -> Result<(), ForgeError> {
// In the actual build, the ZFS dataset is mounted at the staging_root path,
// so the files are already in place after package installation. This function
// handles the case where we need to copy from a temp staging dir into the
// mounted ZFS dataset.
if src == dest {
return Ok(());
}
for entry in walkdir::WalkDir::new(src).follow_links(false) {
let entry = entry.map_err(|e| ForgeError::Qcow2Build {
step: "copy_rootfs".to_string(),
detail: e.to_string(),
})?;
let rel = entry.path().strip_prefix(src).unwrap_or(entry.path());
let target = dest.join(rel);
if entry.path().is_dir() {
std::fs::create_dir_all(&target)?;
} else if entry.path().is_file() {
if let Some(parent) = target.parent() {
std::fs::create_dir_all(parent)?;
}
std::fs::copy(entry.path(), &target)?;
}
}
Ok(())
}

View file

@ -0,0 +1,78 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Install the bootloader into the staging root.
///
/// For illumos: uses `bootadm` to install the boot archive and UEFI loader.
/// For Linux: uses `grub-install` with chroot.
#[cfg(target_os = "illumos")]
pub async fn install(
    runner: &dyn ToolRunner,
    staging_root: &str,
    pool_name: &str,
    bootloader_type: &str,
) -> Result<(), ForgeError> {
    info!(staging_root, pool_name, bootloader_type, "Installing bootloader (illumos)");
    // Regenerate the boot archive inside the staging root.
    let archive_args = ["update-archive", "-R", staging_root];
    runner.run("bootadm", &archive_args).await?;
    // The UEFI loader additionally has to be written for the pool.
    if bootloader_type == "uefi" {
        let loader_args = [
            "install-bootloader",
            "-M",
            "-f",
            "-P",
            pool_name,
            "-R",
            staging_root,
        ];
        runner.run("bootadm", &loader_args).await?;
    }
    Ok(())
}
/// Install the bootloader on a Linux staging root via `grub-install` run
/// inside a chroot.
///
/// Both `"grub"` and `"uefi"` map to the same x86_64-efi install; any other
/// type is rejected with a `Qcow2Build` error.
///
/// NOTE(review): grub-install with `--target=x86_64-efi` normally also needs
/// `--efi-directory` and a mounted ESP inside the chroot -- confirm against
/// the Phase 2 partitioning code. Also confirm that "grub" is intended to
/// mean EFI here rather than BIOS (`--target=i386-pc`).
#[cfg(target_os = "linux")]
pub async fn install(
    runner: &dyn ToolRunner,
    staging_root: &str,
    _pool_name: &str,
    bootloader_type: &str,
) -> Result<(), ForgeError> {
    info!(staging_root, bootloader_type, "Installing bootloader (Linux)");
    match bootloader_type {
        "grub" | "uefi" => {
            runner
                .run(
                    "chroot",
                    &[staging_root, "grub-install", "--target=x86_64-efi"],
                )
                .await?;
        }
        other => {
            return Err(ForgeError::Qcow2Build {
                step: "bootloader_install".to_string(),
                detail: format!("Unsupported bootloader type: {other}"),
            });
        }
    }
    Ok(())
}
/// Compile-time fallback: bootloader installation only exists for the two
/// supported build hosts (Linux and illumos); everywhere else it is an error.
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
pub async fn install(
    _runner: &dyn ToolRunner,
    _staging_root: &str,
    _pool_name: &str,
    bootloader_type: &str,
) -> Result<(), ForgeError> {
    let detail = format!(
        "Bootloader installation is not supported on this platform (type: {bootloader_type})"
    );
    Err(ForgeError::Qcow2Build {
        step: "bootloader_install".to_string(),
        detail,
    })
}

View file

@ -0,0 +1,18 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Run devfsadm in the staging root to populate /dev.
/// This is illumos-specific and is a no-op on other platforms.
#[cfg(target_os = "illumos")]
pub async fn run_devfsadm(runner: &dyn ToolRunner, root: &str) -> Result<(), ForgeError> {
    info!(root, "Running devfsadm");
    let args = ["-r", root];
    runner.run("devfsadm", &args).await?;
    Ok(())
}

/// Non-illumos fallback: log and succeed without doing anything.
#[cfg(not(target_os = "illumos"))]
pub async fn run_devfsadm(_runner: &dyn ToolRunner, root: &str) -> Result<(), ForgeError> {
    info!(root, "Skipping devfsadm (not on illumos)");
    Ok(())
}

View file

@ -0,0 +1,56 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Attach a file to a loopback device and return the device path.
#[cfg(target_os = "linux")]
pub async fn attach(runner: &dyn ToolRunner, file_path: &str) -> Result<String, ForgeError> {
    info!(file_path, "Attaching loopback device (Linux)");
    // `--find --show` picks the first free /dev/loopN and prints it.
    let result = runner
        .run("losetup", &["--find", "--show", file_path])
        .await?;
    let device = result.stdout.trim().to_string();
    Ok(device)
}

/// Detach a loopback device.
#[cfg(target_os = "linux")]
pub async fn detach(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
    info!(device, "Detaching loopback device (Linux)");
    let args = ["--detach", device];
    runner.run("losetup", &args).await?;
    Ok(())
}

/// Attach a file to a loopback device and return the device path.
#[cfg(target_os = "illumos")]
pub async fn attach(runner: &dyn ToolRunner, file_path: &str) -> Result<String, ForgeError> {
    info!(file_path, "Attaching loopback device (illumos)");
    // lofiadm prints the newly created lofi device on stdout.
    let result = runner.run("lofiadm", &["-a", file_path]).await?;
    Ok(result.stdout.trim().to_string())
}

/// Detach a loopback device.
#[cfg(target_os = "illumos")]
pub async fn detach(runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
    info!(device, "Detaching loopback device (illumos)");
    let args = ["-d", device];
    runner.run("lofiadm", &args).await?;
    Ok(())
}

// Stub for unsupported platforms (compile-time guard)
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
pub async fn attach(_runner: &dyn ToolRunner, file_path: &str) -> Result<String, ForgeError> {
    let detail =
        format!("Loopback devices are not supported on this platform (file: {file_path})");
    Err(ForgeError::Qcow2Build {
        step: "loopback_attach".to_string(),
        detail,
    })
}

#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
pub async fn detach(_runner: &dyn ToolRunner, device: &str) -> Result<(), ForgeError> {
    let detail = format!(
        "Loopback devices are not supported on this platform (device: {device})"
    );
    Err(ForgeError::Qcow2Build {
        step: "loopback_detach".to_string(),
        detail,
    })
}

View file

@ -0,0 +1,70 @@
pub mod bootloader;
pub mod devfsadm;
pub mod loopback;
pub mod pkg;
pub mod qemu_img;
pub mod zfs;
pub mod zpool;
use std::future::Future;
use std::pin::Pin;
use crate::error::ForgeError;
/// Output from a tool execution.
#[derive(Debug)]
pub struct ToolOutput {
    /// Captured standard output, lossily decoded as UTF-8.
    pub stdout: String,
    /// Captured standard error, lossily decoded as UTF-8.
    pub stderr: String,
    /// Process exit code; `-1` when no code is available (e.g. the process
    /// was killed by a signal -- see `SystemToolRunner`).
    pub exit_code: i32,
}
/// Trait for executing external tools. Allows mocking in tests.
///
/// The manually boxed future keeps the trait dyn-compatible (object safe),
/// which `async fn` in a trait would not be -- call sites take
/// `&dyn ToolRunner`.
pub trait ToolRunner: Send + Sync {
    /// Run `program` with `args` and return its captured output.
    ///
    /// The reference implementation (`SystemToolRunner`) returns an error
    /// both when the process cannot be spawned and when it exits non-zero.
    fn run<'a>(
        &'a self,
        program: &'a str,
        args: &'a [&'a str],
    ) -> Pin<Box<dyn Future<Output = Result<ToolOutput, ForgeError>> + Send + 'a>>;
}
/// Real tool runner that uses `tokio::process::Command`.
pub struct SystemToolRunner;

impl ToolRunner for SystemToolRunner {
    fn run<'a>(
        &'a self,
        program: &'a str,
        args: &'a [&'a str],
    ) -> Pin<Box<dyn Future<Output = Result<ToolOutput, ForgeError>> + Send + 'a>> {
        Box::pin(async move {
            // Spawn failures (missing binary, permissions) map to ToolExecution.
            let raw = tokio::process::Command::new(program)
                .args(args)
                .output()
                .await
                .map_err(|e| ForgeError::ToolExecution {
                    tool: program.to_string(),
                    args: args.join(" "),
                    stderr: String::new(),
                    source: e,
                })?;
            let stdout = String::from_utf8_lossy(&raw.stdout).to_string();
            let stderr = String::from_utf8_lossy(&raw.stderr).to_string();
            // No exit code means the process died to a signal; use -1.
            let exit_code = raw.status.code().unwrap_or(-1);
            if raw.status.success() {
                Ok(ToolOutput {
                    stdout,
                    stderr,
                    exit_code,
                })
            } else {
                // A non-zero exit is surfaced as its own error variant so
                // callers never have to inspect exit codes themselves.
                Err(ForgeError::ToolNonZero {
                    tool: program.to_string(),
                    args: args.join(" "),
                    exit_code,
                    stderr,
                })
            }
        })
    }
}

View file

@ -0,0 +1,93 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Create a new IPS image at the given root path.
///
/// Runs `pkg image-create -F <root>` to create a full image. No publisher is
/// configured here: that happens separately via [`set_publisher`]. The
/// previous invocation passed `-p root`, but `-p`/`--publisher` expects a
/// `[name=]repo_uri` publisher spec, so the root path was consumed as the
/// publisher argument and the command was left without its directory operand.
pub async fn image_create(runner: &dyn ToolRunner, root: &str) -> Result<(), ForgeError> {
    info!(root, "Creating IPS image");
    runner.run("pkg", &["image-create", "-F", root]).await?;
    Ok(())
}
/// Set a publisher in the IPS image at the given root.
pub async fn set_publisher(
    runner: &dyn ToolRunner,
    root: &str,
    name: &str,
    origin: &str,
) -> Result<(), ForgeError> {
    info!(root, name, origin, "Setting publisher");
    // -O replaces the publisher's origin list with the given URI.
    let args = ["-R", root, "set-publisher", "-O", origin, name];
    runner.run("pkg", &args).await?;
    Ok(())
}

/// Install packages into the IPS image at the given root.
pub async fn install(
    runner: &dyn ToolRunner,
    root: &str,
    packages: &[String],
) -> Result<(), ForgeError> {
    // Nothing to do for an empty package list.
    if packages.is_empty() {
        return Ok(());
    }
    info!(root, count = packages.len(), "Installing packages");
    let mut args: Vec<&str> = vec!["-R", root, "install", "--accept"];
    args.extend(packages.iter().map(String::as_str));
    runner.run("pkg", &args).await?;
    Ok(())
}

/// Change an IPS variant in the image at the given root.
pub async fn change_variant(
    runner: &dyn ToolRunner,
    root: &str,
    name: &str,
    value: &str,
) -> Result<(), ForgeError> {
    info!(root, name, value, "Changing variant");
    let variant_arg = format!("{name}={value}");
    let args = ["-R", root, "change-variant", variant_arg.as_str()];
    runner.run("pkg", &args).await?;
    Ok(())
}

/// Approve a CA certificate for a publisher in the IPS image.
pub async fn approve_ca_cert(
    runner: &dyn ToolRunner,
    root: &str,
    publisher: &str,
    certfile: &str,
) -> Result<(), ForgeError> {
    info!(root, publisher, certfile, "Approving CA certificate");
    let args = [
        "-R",
        root,
        "set-publisher",
        "--approve-ca-cert",
        certfile,
        publisher,
    ];
    runner.run("pkg", &args).await?;
    Ok(())
}

/// Set the incorporation package.
pub async fn set_incorporation(
    runner: &dyn ToolRunner,
    root: &str,
    incorporation: &str,
) -> Result<(), ForgeError> {
    info!(root, incorporation, "Setting incorporation");
    // An incorporation is installed like any other package; it then
    // constrains the versions of everything it incorporates.
    let args = ["-R", root, "install", "--accept", incorporation];
    runner.run("pkg", &args).await?;
    Ok(())
}

View file

@ -0,0 +1,40 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Create a raw disk image of the given size.
pub async fn create_raw(
    runner: &dyn ToolRunner,
    path: &str,
    size: &str,
) -> Result<(), ForgeError> {
    info!(path, size, "Creating raw disk image");
    let args = ["create", "-f", "raw", path, size];
    runner.run("qemu-img", &args).await?;
    Ok(())
}

/// Convert a raw image to qcow2 format.
pub async fn convert_to_qcow2(
    runner: &dyn ToolRunner,
    input: &str,
    output: &str,
) -> Result<(), ForgeError> {
    info!(input, output, "Converting raw image to qcow2");
    // -f = source format, -O = destination format.
    let args = ["convert", "-f", "raw", "-O", "qcow2", input, output];
    runner.run("qemu-img", &args).await?;
    Ok(())
}

/// Get info about a disk image (JSON output).
pub async fn info(runner: &dyn ToolRunner, path: &str) -> Result<String, ForgeError> {
    let result = runner
        .run("qemu-img", &["info", "--output=json", path])
        .await?;
    Ok(result.stdout)
}

View file

@ -0,0 +1,54 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Create a ZFS dataset.
pub async fn create(
    runner: &dyn ToolRunner,
    dataset: &str,
    properties: &[(&str, &str)],
) -> Result<(), ForgeError> {
    info!(dataset, "Creating ZFS dataset");
    // Properties are passed as repeated `-o key=value` pairs; the formatted
    // strings must outlive the borrowed argument vector.
    let formatted: Vec<String> = properties
        .iter()
        .map(|(key, value)| format!("{key}={value}"))
        .collect();
    let mut args: Vec<&str> = Vec::with_capacity(2 + 2 * formatted.len());
    args.push("create");
    for pair in &formatted {
        args.extend(["-o", pair.as_str()]);
    }
    // -p creates any missing parent datasets.
    args.extend(["-p", dataset]);
    runner.run("zfs", &args).await?;
    Ok(())
}

/// Mount a ZFS dataset at a given mountpoint.
pub async fn mount(runner: &dyn ToolRunner, dataset: &str) -> Result<(), ForgeError> {
    info!(dataset, "Mounting ZFS dataset");
    let args = ["mount", dataset];
    runner.run("zfs", &args).await?;
    Ok(())
}

/// Set a property on a ZFS dataset.
pub async fn set(
    runner: &dyn ToolRunner,
    dataset: &str,
    property: &str,
    value: &str,
) -> Result<(), ForgeError> {
    let assignment = format!("{property}={value}");
    let args = ["set", assignment.as_str(), dataset];
    runner.run("zfs", &args).await?;
    Ok(())
}

/// Unmount a ZFS dataset.
pub async fn unmount(runner: &dyn ToolRunner, dataset: &str) -> Result<(), ForgeError> {
    let args = ["unmount", dataset];
    runner.run("zfs", &args).await?;
    Ok(())
}

View file

@ -0,0 +1,56 @@
use crate::error::ForgeError;
use crate::tools::ToolRunner;
use tracing::info;
/// Create a ZFS pool on the given device.
pub async fn create(
    runner: &dyn ToolRunner,
    pool_name: &str,
    device: &str,
    properties: &[(&str, &str)],
) -> Result<(), ForgeError> {
    info!(pool_name, device, "Creating ZFS pool");
    // Pool properties become repeated `-o key=value` flags; keep the owned
    // strings alive for the duration of the borrowed argument list.
    let formatted: Vec<String> = properties
        .iter()
        .map(|(key, value)| format!("{key}={value}"))
        .collect();
    let mut args: Vec<&str> = Vec::with_capacity(3 + 2 * formatted.len());
    args.push("create");
    for pair in &formatted {
        args.extend(["-o", pair.as_str()]);
    }
    args.extend([pool_name, device]);
    runner.run("zpool", &args).await?;
    Ok(())
}

/// Export a ZFS pool.
pub async fn export(runner: &dyn ToolRunner, pool_name: &str) -> Result<(), ForgeError> {
    info!(pool_name, "Exporting ZFS pool");
    let args = ["export", pool_name];
    runner.run("zpool", &args).await?;
    Ok(())
}

/// Destroy a ZFS pool (force).
pub async fn destroy(runner: &dyn ToolRunner, pool_name: &str) -> Result<(), ForgeError> {
    info!(pool_name, "Destroying ZFS pool");
    let args = ["destroy", "-f", pool_name];
    runner.run("zpool", &args).await?;
    Ok(())
}

/// Set a property on a ZFS pool.
pub async fn set(
    runner: &dyn ToolRunner,
    pool_name: &str,
    property: &str,
    value: &str,
) -> Result<(), ForgeError> {
    let assignment = format!("{property}={value}");
    let args = ["set", assignment.as_str(), pool_name];
    runner.run("zpool", &args).await?;
    Ok(())
}

View file

@ -0,0 +1,21 @@
[package]
name = "forge-oci"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
[dependencies]
miette = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
oci-spec = { workspace = true }
oci-client = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
bytes = { workspace = true }
tar = { workspace = true }
flate2 = { workspace = true }
walkdir = { workspace = true }

View file

@ -0,0 +1,113 @@
use std::path::Path;
use miette::Diagnostic;
use sha2::{Digest, Sha256};
use thiserror::Error;
use crate::tar_layer::LayerBlob;
/// Errors produced while writing an OCI Image Layout directory to disk.
#[derive(Debug, Error, Diagnostic)]
pub enum LayoutError {
    /// Creating `blobs/sha256/` (or one of its parents) failed.
    #[error("Failed to create OCI layout directory: {path}")]
    #[diagnostic(help("Ensure the parent directory exists and is writable"))]
    CreateDir {
        path: String,
        #[source]
        source: std::io::Error,
    },
    /// Writing one of the layout files (`oci-layout`, `index.json`, a blob)
    /// failed.
    #[error("Failed to write OCI layout file: {path}")]
    #[diagnostic(help("Check disk space and permissions"))]
    WriteFile {
        path: String,
        #[source]
        source: std::io::Error,
    },
    /// A manifest-building error, forwarded from the `manifest` module.
    #[error("Failed to build OCI manifest")]
    ManifestError(#[from] crate::manifest::ManifestError),
    /// Serializing `oci-layout` or `index.json` to JSON failed.
    #[error("Failed to serialize OCI layout JSON")]
    Serialize(#[from] serde_json::Error),
}
/// Write an OCI Image Layout directory at `output_dir`.
///
/// Structure:
/// ```text
/// output_dir/
///   oci-layout
///   index.json
///   blobs/
///     sha256/
///       <config-digest>
///       <layer-digest>...
///       <manifest-digest>
/// ```
pub fn write_oci_layout(
    output_dir: &Path,
    layers: &[LayerBlob],
    config_json: &[u8],
    manifest_json: &[u8],
) -> Result<(), LayoutError> {
    // Content-address a blob: hex-encoded SHA-256 of the raw bytes.
    fn sha256_hex(data: &[u8]) -> String {
        let mut hasher = Sha256::new();
        hasher.update(data);
        hex::encode(hasher.finalize())
    }

    let blobs_dir = output_dir.join("blobs").join("sha256");
    std::fs::create_dir_all(&blobs_dir).map_err(|e| LayoutError::CreateDir {
        path: blobs_dir.display().to_string(),
        source: e,
    })?;

    // `oci-layout` version marker file.
    let oci_layout = serde_json::json!({
        "imageLayoutVersion": "1.0.0"
    });
    write_file(
        &output_dir.join("oci-layout"),
        &serde_json::to_vec_pretty(&oci_layout)?,
    )?;

    // Layer blobs, stored under their (compressed) digest.
    for layer in layers {
        let digest_hex = layer
            .digest
            .strip_prefix("sha256:")
            .unwrap_or(&layer.digest);
        write_file(&blobs_dir.join(digest_hex), &layer.data)?;
    }

    // Config and manifest blobs, digests computed here.
    let config_digest_hex = sha256_hex(config_json);
    write_file(&blobs_dir.join(&config_digest_hex), config_json)?;
    let manifest_digest_hex = sha256_hex(manifest_json);
    write_file(&blobs_dir.join(&manifest_digest_hex), manifest_json)?;

    // Top-level index pointing at the single image manifest.
    let index = serde_json::json!({
        "schemaVersion": 2,
        "manifests": [
            {
                "mediaType": "application/vnd.oci.image.manifest.v1+json",
                "digest": format!("sha256:{manifest_digest_hex}"),
                "size": manifest_json.len()
            }
        ]
    });
    write_file(
        &output_dir.join("index.json"),
        &serde_json::to_vec_pretty(&index)?,
    )?;
    Ok(())
}
/// Write `data` to `path`, mapping I/O failures to [`LayoutError::WriteFile`].
fn write_file(path: &Path, data: &[u8]) -> Result<(), LayoutError> {
    match std::fs::write(path, data) {
        Ok(()) => Ok(()),
        Err(source) => Err(LayoutError::WriteFile {
            path: path.display().to_string(),
            source,
        }),
    }
}

View file

@ -0,0 +1,4 @@
pub mod layout;
pub mod manifest;
pub mod registry;
pub mod tar_layer;

View file

@ -0,0 +1,136 @@
use miette::Diagnostic;
use oci_spec::image::{
ConfigBuilder, DescriptorBuilder, ImageConfigurationBuilder, ImageManifestBuilder,
MediaType, RootFsBuilder, Sha256Digest,
};
use sha2::{Digest, Sha256};
use thiserror::Error;
use crate::tar_layer::LayerBlob;
/// Errors produced while building the OCI image configuration and manifest.
#[derive(Debug, Error, Diagnostic)]
pub enum ManifestError {
    /// An `oci_spec` builder rejected the image configuration inputs.
    #[error("Failed to build OCI image configuration")]
    #[diagnostic(help("This is likely a bug in the manifest builder"))]
    ConfigBuild(String),
    /// An `oci_spec` builder rejected the manifest or a layer descriptor.
    #[error("Failed to build OCI image manifest")]
    #[diagnostic(help("This is likely a bug in the manifest builder"))]
    ManifestBuild(String),
    /// JSON serialization of the config or manifest failed.
    #[error("Failed to serialize OCI manifest to JSON")]
    Serialize(#[source] serde_json::Error),
}
/// Options for building an OCI image configuration.
pub struct ImageOptions {
    /// Target operating system (OCI `os` field).
    pub os: String,
    /// Target CPU architecture (OCI `architecture` field).
    pub architecture: String,
    /// Optional container entrypoint.
    pub entrypoint: Option<Vec<String>>,
    /// Environment variables, as `KEY=value` strings.
    pub env: Vec<String>,
}

impl Default for ImageOptions {
    /// Defaults target an illumos (`solaris`/`amd64`) image with no
    /// entrypoint and an empty environment.
    fn default() -> Self {
        ImageOptions {
            os: String::from("solaris"),
            architecture: String::from("amd64"),
            entrypoint: None,
            env: vec![],
        }
    }
}
/// Build the OCI image configuration JSON and image manifest from a set of layers.
///
/// Returns `(config_json, manifest_json)`, both pretty-printed. The config
/// descriptor's digest is computed here from the serialized config; layer
/// descriptors reuse the compressed digests recorded on each [`LayerBlob`].
///
/// # Errors
/// Returns [`ManifestError`] when an `oci_spec` builder rejects its input or
/// when JSON serialization fails.
pub fn build_manifest(
    layers: &[LayerBlob],
    options: &ImageOptions,
) -> Result<(Vec<u8>, Vec<u8>), ManifestError> {
    // Build the diff_ids for the rootfs (uncompressed layer digests aren't tracked here,
    // so we use the compressed digest -- in a full implementation you'd track both)
    // NOTE(review): runtimes that verify diff_ids against unpacked layer content
    // will reject this image -- confirm whether intended consumers check diff_ids.
    let diff_ids: Vec<String> = layers.iter().map(|l| l.digest.clone()).collect();
    let rootfs = RootFsBuilder::default()
        .typ("layers")
        .diff_ids(diff_ids)
        .build()
        .map_err(|e| ManifestError::ConfigBuild(e.to_string()))?;
    let mut config_builder = ImageConfigurationBuilder::default()
        .os(options.os.as_str())
        .architecture(options.architecture.as_str())
        .rootfs(rootfs);
    // Build a config block with optional entrypoint/env
    let mut inner_config_builder = ConfigBuilder::default();
    if let Some(ref ep) = options.entrypoint {
        inner_config_builder = inner_config_builder.entrypoint(ep.clone());
    }
    if !options.env.is_empty() {
        inner_config_builder = inner_config_builder.env(options.env.clone());
    }
    let inner_config = inner_config_builder
        .build()
        .map_err(|e| ManifestError::ConfigBuild(e.to_string()))?;
    config_builder = config_builder.config(inner_config);
    let image_config = config_builder
        .build()
        .map_err(|e| ManifestError::ConfigBuild(e.to_string()))?;
    let config_json =
        serde_json::to_vec_pretty(&image_config).map_err(ManifestError::Serialize)?;
    // Digest the serialized config so the manifest can reference it.
    let mut config_hasher = Sha256::new();
    config_hasher.update(&config_json);
    let config_digest = format!("sha256:{}", hex::encode(config_hasher.finalize()));
    // oci_spec's Sha256Digest parses the bare hex (no "sha256:" prefix).
    let config_sha_digest: Sha256Digest = config_digest
        .strip_prefix("sha256:")
        .unwrap_or(&config_digest)
        .parse()
        .map_err(|e: oci_spec::OciSpecError| ManifestError::ConfigBuild(e.to_string()))?;
    let config_descriptor = DescriptorBuilder::default()
        .media_type(MediaType::ImageConfig)
        .size(config_json.len() as u64)
        .digest(config_sha_digest)
        .build()
        .map_err(|e| ManifestError::ConfigBuild(e.to_string()))?;
    // Build layer descriptors
    let layer_descriptors: Vec<_> = layers
        .iter()
        .map(|layer| {
            let layer_sha: Sha256Digest = layer
                .digest
                .strip_prefix("sha256:")
                .unwrap_or(&layer.digest)
                .parse()
                .map_err(|e: oci_spec::OciSpecError| {
                    ManifestError::ManifestBuild(e.to_string())
                })?;
            DescriptorBuilder::default()
                .media_type(MediaType::ImageLayerGzip)
                .size(layer.data.len() as u64)
                .digest(layer_sha)
                .build()
                .map_err(|e| ManifestError::ManifestBuild(e.to_string()))
        })
        .collect::<Result<_, _>>()?;
    let manifest = ImageManifestBuilder::default()
        .schema_version(2u32)
        .media_type(MediaType::ImageManifest)
        .config(config_descriptor)
        .layers(layer_descriptors)
        .build()
        .map_err(|e| ManifestError::ManifestBuild(e.to_string()))?;
    let manifest_json = serde_json::to_vec_pretty(&manifest).map_err(ManifestError::Serialize)?;
    Ok((config_json, manifest_json))
}

View file

@ -0,0 +1,121 @@
use miette::Diagnostic;
use oci_client::client::{ClientConfig, ClientProtocol, Config, ImageLayer};
use oci_client::manifest;
use oci_client::secrets::RegistryAuth;
use oci_client::{Client, Reference};
use thiserror::Error;
use tracing::info;
use crate::tar_layer::LayerBlob;
/// Errors produced while pushing an image to an OCI registry.
#[derive(Debug, Error, Diagnostic)]
pub enum RegistryError {
    /// The image reference string could not be parsed by `oci-client`.
    #[error("Invalid image reference: {reference}")]
    #[diagnostic(help(
        "Use the format <registry>/<repository>:<tag>, e.g. ghcr.io/org/image:v1"
    ))]
    InvalidReference {
        reference: String,
        #[source]
        source: oci_client::ParseError,
    },
    /// The registry rejected the push (stringified `oci-client` error).
    #[error("Failed to push image to registry")]
    #[diagnostic(help("Check registry URL, credentials, and network connectivity"))]
    PushFailed(String),
    /// The credentials file could not be read.
    /// NOTE(review): currently unused within this view -- the `push` command
    /// reads the auth file itself; confirm before removing.
    #[error("Failed to read auth file: {path}")]
    #[diagnostic(help("Ensure the auth file exists and contains valid JSON"))]
    AuthFileRead {
        path: String,
        #[source]
        source: std::io::Error,
    },
}
/// Authentication configuration for registry operations.
pub enum AuthConfig {
    Anonymous,
    Basic { username: String, password: String },
    Bearer { token: String },
}

impl AuthConfig {
    /// Translate into the `oci-client` credential representation.
    fn to_registry_auth(&self) -> RegistryAuth {
        match self {
            Self::Anonymous => RegistryAuth::Anonymous,
            Self::Bearer { token } => RegistryAuth::Bearer(token.clone()),
            Self::Basic { username, password } => {
                RegistryAuth::Basic(username.clone(), password.clone())
            }
        }
    }
}
/// Push an OCI image (layers + config + manifest) to a registry.
///
/// Rebuilds the manifest with `oci-client` from the supplied layers and
/// config (rather than reusing a caller-provided manifest), then pushes
/// everything. Returns the manifest URL reported by the registry.
///
/// # Errors
/// [`RegistryError::InvalidReference`] when `reference_str` does not parse;
/// [`RegistryError::PushFailed`] when the upload fails.
pub async fn push_image(
    reference_str: &str,
    layers: Vec<LayerBlob>,
    config_json: Vec<u8>,
    auth: &AuthConfig,
    insecure_registries: &[String],
) -> Result<String, RegistryError> {
    let reference: Reference = reference_str
        .parse()
        .map_err(|e| RegistryError::InvalidReference {
            reference: reference_str.to_string(),
            source: e,
        })?;
    // HTTPS everywhere, except for registries explicitly listed as insecure
    // (plain HTTP), e.g. a localhost test registry.
    let client_config = ClientConfig {
        protocol: if insecure_registries.is_empty() {
            ClientProtocol::Https
        } else {
            ClientProtocol::HttpsExcept(insecure_registries.to_vec())
        },
        ..Default::default()
    };
    let client = Client::new(client_config);
    let registry_auth = auth.to_registry_auth();
    // Wrap the raw gzip blobs in oci-client layer objects; `None` lets the
    // client compute each blob's digest itself.
    let oci_layers: Vec<ImageLayer> = layers
        .into_iter()
        .map(|layer| {
            ImageLayer::new(
                layer.data,
                manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE.to_string(),
                None,
            )
        })
        .collect();
    let config = Config::new(
        config_json,
        manifest::IMAGE_CONFIG_MEDIA_TYPE.to_string(),
        None,
    );
    // Build a fresh manifest from the layers/config (no annotations).
    let image_manifest =
        oci_client::manifest::OciImageManifest::build(&oci_layers, &config, None);
    info!(reference = %reference, "Pushing image to registry");
    let response = client
        .push(
            &reference,
            &oci_layers,
            config,
            &registry_auth,
            Some(image_manifest),
        )
        .await
        .map_err(|e| RegistryError::PushFailed(e.to_string()))?;
    info!(
        manifest_url = %response.manifest_url,
        "Image pushed successfully"
    );
    Ok(response.manifest_url)
}

View file

@ -0,0 +1,126 @@
use std::path::Path;
use flate2::write::GzEncoder;
use flate2::Compression;
use miette::Diagnostic;
use sha2::{Digest, Sha256};
use thiserror::Error;
use walkdir::WalkDir;
/// Errors produced while creating a tar.gz layer from a staging directory.
#[derive(Debug, Error, Diagnostic)]
pub enum TarLayerError {
    /// Directory traversal failed (unreadable entry or missing directory).
    #[error("Failed to walk staging directory: {path}")]
    #[diagnostic(help("Ensure the staging directory exists and is readable"))]
    WalkDir {
        path: String,
        #[source]
        source: walkdir::Error,
    },
    /// Appending an entry to the tar stream, or finalizing the archive,
    /// failed.
    #[error("Failed to create tar archive")]
    #[diagnostic(help("Check disk space and permissions"))]
    TarCreate(#[source] std::io::Error),
    /// Reading a file's contents (or a symlink's target) failed.
    #[error("Failed to read file for tar: {path}")]
    ReadFile {
        path: String,
        #[source]
        source: std::io::Error,
    },
}
/// Result of creating a tar.gz layer from a staging directory.
//
// `Debug` added so the public type can be logged/inspected; the digest field
// doc is corrected: `create_layer` stores it in `sha256:<hex>` form, not as
// bare hex.
#[derive(Debug, Clone)]
pub struct LayerBlob {
    /// Compressed tar.gz data
    pub data: Vec<u8>,
    /// SHA-256 digest of the compressed data, in `sha256:<hex>` form
    pub digest: String,
    /// Uncompressed size in bytes
    pub uncompressed_size: u64,
}
/// Create a tar.gz layer from a staging directory. All paths in the tar are
/// relative to the staging root.
///
/// Regular files and directories keep their on-disk permission bits on Unix.
/// (Previously every file was forced to 0644 and every directory to 0755,
/// which stripped the executable bit from binaries -- fatal for an OS rootfs
/// layer.) Symlinks are stored as symlink entries pointing at their original
/// target.
///
/// # Errors
/// [`TarLayerError::WalkDir`] on traversal/metadata failures,
/// [`TarLayerError::ReadFile`] when a file or link target cannot be read,
/// [`TarLayerError::TarCreate`] on archive write/finalize failures.
pub fn create_layer(staging_dir: &Path) -> Result<LayerBlob, TarLayerError> {
    let mut uncompressed_size: u64 = 0;
    let buf = Vec::new();
    let encoder = GzEncoder::new(buf, Compression::default());
    let mut tar = tar::Builder::new(encoder);
    for entry in WalkDir::new(staging_dir).follow_links(false) {
        let entry = entry.map_err(|e| TarLayerError::WalkDir {
            path: staging_dir.display().to_string(),
            source: e,
        })?;
        let full_path = entry.path();
        let rel_path = full_path
            .strip_prefix(staging_dir)
            .unwrap_or(full_path);
        // Skip the root directory itself
        if rel_path.as_os_str().is_empty() {
            continue;
        }
        let metadata = entry.metadata().map_err(|e| TarLayerError::WalkDir {
            path: full_path.display().to_string(),
            source: e,
        })?;
        if metadata.is_file() {
            uncompressed_size += metadata.len();
            let mut header = tar::Header::new_gnu();
            header.set_size(metadata.len());
            // Preserve the real mode (notably exec bits) on Unix.
            header.set_mode(entry_mode(&metadata, 0o644));
            header.set_cksum();
            let file_data =
                std::fs::read(full_path).map_err(|e| TarLayerError::ReadFile {
                    path: full_path.display().to_string(),
                    source: e,
                })?;
            tar.append_data(&mut header, rel_path, file_data.as_slice())
                .map_err(TarLayerError::TarCreate)?;
        } else if metadata.is_dir() {
            let mut header = tar::Header::new_gnu();
            header.set_entry_type(tar::EntryType::Directory);
            header.set_size(0);
            header.set_mode(entry_mode(&metadata, 0o755));
            header.set_cksum();
            tar.append_data(&mut header, rel_path, std::io::empty())
                .map_err(TarLayerError::TarCreate)?;
        } else if metadata.is_symlink() {
            let link_target = std::fs::read_link(full_path).map_err(|e| {
                TarLayerError::ReadFile {
                    path: full_path.display().to_string(),
                    source: e,
                }
            })?;
            let mut header = tar::Header::new_gnu();
            header.set_entry_type(tar::EntryType::Symlink);
            header.set_size(0);
            header.set_cksum();
            tar.append_link(&mut header, rel_path, &link_target)
                .map_err(TarLayerError::TarCreate)?;
        }
    }
    let encoder = tar.into_inner().map_err(TarLayerError::TarCreate)?;
    let compressed = encoder.finish().map_err(TarLayerError::TarCreate)?;
    let mut hasher = Sha256::new();
    hasher.update(&compressed);
    let digest = format!("sha256:{}", hex::encode(hasher.finalize()));
    Ok(LayerBlob {
        data: compressed,
        digest,
        uncompressed_size,
    })
}

/// Permission bits to record in a tar header: the actual on-disk mode on
/// Unix, or `fallback` on platforms without Unix permission metadata.
#[cfg(unix)]
fn entry_mode(metadata: &std::fs::Metadata, _fallback: u32) -> u32 {
    use std::os::unix::fs::PermissionsExt;
    metadata.permissions().mode() & 0o7777
}

#[cfg(not(unix))]
fn entry_mode(_metadata: &std::fs::Metadata, fallback: u32) -> u32 {
    fallback
}

19
crates/forger/Cargo.toml Normal file
View file

@ -0,0 +1,19 @@
[package]
name = "forger"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true

[dependencies]
# Local workspace crates are referenced by path: the root
# [workspace.dependencies] table does not declare them, so
# `{ workspace = true }` would fail to resolve.
spec-parser = { path = "../spec-parser" }
forge-oci = { path = "../forge-oci" }
forge-engine = { path = "../forge-engine" }
clap = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
# Also absent from [workspace.dependencies]; pinned directly here.
indicatif = "0.17"

View file

@ -0,0 +1,58 @@
use std::path::PathBuf;
use forge_engine::tools::SystemToolRunner;
use forge_engine::BuildContext;
use miette::{Context, IntoDiagnostic};
use tracing::info;
/// Build an image from a spec file.
///
/// Pipeline: read the KDL text, parse it, resolve includes relative to the
/// spec's directory, apply profile filtering, then hand the result to the
/// engine's `BuildContext`.
pub async fn run(
    spec_path: &PathBuf,
    target: Option<&str>,
    profiles: &[String],
    output_dir: &PathBuf,
) -> miette::Result<()> {
    let raw_spec = std::fs::read_to_string(spec_path)
        .into_diagnostic()
        .wrap_err_with(|| format!("Failed to read spec file: {}", spec_path.display()))?;
    let parsed = spec_parser::parse(&raw_spec)
        .map_err(miette::Report::new)
        .wrap_err("Failed to parse spec")?;
    // Includes are resolved relative to the directory containing the spec.
    let base_dir = spec_path
        .parent()
        .unwrap_or_else(|| std::path::Path::new("."));
    let resolved = spec_parser::resolve::resolve(parsed, base_dir)
        .map_err(miette::Report::new)
        .wrap_err("Failed to resolve includes")?;
    let filtered = spec_parser::profile::apply_profiles(resolved, profiles);

    // Determine files directory (images/files/ relative to spec)
    let files_dir = base_dir.join("files");
    let runner = SystemToolRunner;
    let ctx = BuildContext {
        spec: &filtered,
        files_dir: &files_dir,
        output_dir,
        runner: &runner,
    };
    info!(
        spec = %spec_path.display(),
        output = %output_dir.display(),
        "Starting build"
    );
    ctx.build(target)
        .await
        .map_err(miette::Report::new)
        .wrap_err("Build failed")?;
    println!("Build complete. Output: {}", output_dir.display());
    Ok(())
}

View file

@ -0,0 +1,132 @@
use std::path::PathBuf;
use miette::{Context, IntoDiagnostic};
/// Inspect a spec file: parse, resolve includes, apply profiles, and print the result.
///
/// Prints a human-readable summary of the effective (post-profile) spec:
/// metadata, repositories, incorporation, variants, certificates, packages,
/// customizations, overlay actions, and targets. Output order and format are
/// part of the CLI's observable behavior.
pub fn run(spec_path: &PathBuf, profiles: &[String]) -> miette::Result<()> {
    // Same parse -> resolve -> filter pipeline the build command uses.
    let kdl_content = std::fs::read_to_string(spec_path)
        .into_diagnostic()
        .wrap_err_with(|| format!("Failed to read spec file: {}", spec_path.display()))?;
    let spec = spec_parser::parse(&kdl_content)
        .map_err(miette::Report::new)
        .wrap_err("Failed to parse spec")?;
    let spec_dir = spec_path
        .parent()
        .unwrap_or_else(|| std::path::Path::new("."));
    let resolved = spec_parser::resolve::resolve(spec, spec_dir)
        .map_err(miette::Report::new)
        .wrap_err("Failed to resolve includes")?;
    let filtered = spec_parser::profile::apply_profiles(resolved, profiles);
    println!("Image: {} v{}", filtered.metadata.name, filtered.metadata.version);
    if let Some(ref desc) = filtered.metadata.description {
        println!("Description: {desc}");
    }
    println!("\nRepositories:");
    for pub_entry in &filtered.repositories.publishers {
        println!("  {} -> {}", pub_entry.name, pub_entry.origin);
    }
    if let Some(ref inc) = filtered.incorporation {
        println!("\nIncorporation: {inc}");
    }
    if let Some(ref variants) = filtered.variants {
        println!("\nVariants:");
        for var in &variants.vars {
            println!("  {} = {}", var.name, var.value);
        }
    }
    if let Some(ref certs) = filtered.certificates {
        println!("\nCertificates:");
        for ca in &certs.ca {
            println!("  CA: publisher={} certfile={}", ca.publisher, ca.certfile);
        }
    }
    // Package lists may carry an `if` condition; surviving conditional blocks
    // are labeled so the user can see why a package is present.
    let total_packages: usize = filtered.packages.iter().map(|pl| pl.packages.len()).sum();
    println!("\nPackages ({total_packages} total):");
    for pl in &filtered.packages {
        if let Some(ref cond) = pl.r#if {
            println!("  [if={cond}]");
        }
        for pkg in &pl.packages {
            println!("  {}", pkg.name);
        }
    }
    if !filtered.customizations.is_empty() {
        println!("\nCustomizations:");
        for c in &filtered.customizations {
            if let Some(ref cond) = c.r#if {
                println!("  [if={cond}]");
            }
            for user in &c.users {
                println!("  user: {}", user.name);
            }
        }
    }
    // Overlay actions are an enum; each variant has its own one-line format.
    let total_overlays: usize = filtered.overlays.iter().map(|o| o.actions.len()).sum();
    println!("\nOverlays ({total_overlays} actions):");
    for overlay_block in &filtered.overlays {
        if let Some(ref cond) = overlay_block.r#if {
            println!("  [if={cond}]");
        }
        for action in &overlay_block.actions {
            match action {
                spec_parser::schema::OverlayAction::File(f) => {
                    println!(
                        "  file: {} <- {}",
                        f.destination,
                        f.source.as_deref().unwrap_or("(empty)")
                    );
                }
                spec_parser::schema::OverlayAction::Devfsadm(_) => {
                    println!("  devfsadm");
                }
                spec_parser::schema::OverlayAction::EnsureDir(d) => {
                    println!("  ensure-dir: {}", d.path);
                }
                spec_parser::schema::OverlayAction::RemoveFiles(r) => {
                    // A remove action targets exactly one of file/dir/pattern;
                    // fall back to "?" if none is set.
                    let target = r
                        .file
                        .as_deref()
                        .or(r.dir.as_deref())
                        .or(r.pattern.as_deref())
                        .unwrap_or("?");
                    println!("  remove: {target}");
                }
                spec_parser::schema::OverlayAction::EnsureSymlink(s) => {
                    println!("  symlink: {} -> {}", s.path, s.target);
                }
                spec_parser::schema::OverlayAction::Shadow(s) => {
                    println!("  shadow: {} (hash set)", s.username);
                }
            }
        }
    }
    if !filtered.targets.is_empty() {
        println!("\nTargets:");
        for target in &filtered.targets {
            print!("  {} ({})", target.name, target.kind);
            if let Some(ref size) = target.disk_size {
                print!(" disk_size={size}");
            }
            if let Some(ref bl) = target.bootloader {
                print!(" bootloader={bl}");
            }
            println!();
        }
    }
    Ok(())
}

View file

@ -0,0 +1,5 @@
pub mod build;
pub mod inspect;
pub mod push;
pub mod targets;
pub mod validate;

View file

@ -0,0 +1,126 @@
use std::path::PathBuf;
use miette::{Context, IntoDiagnostic};
use tracing::info;
/// Push an OCI Image Layout to a registry.
///
/// Reads `index.json`, the referenced manifest blob, the config blob, and
/// every layer blob from `image_dir`, then pushes them via `forge_oci`.
/// Credentials come from an optional JSON auth file (either `{"token": ...}`
/// or `{"username": ..., "password": ...}`); otherwise the push is anonymous.
/// Only the FIRST manifest in the index is pushed.
pub async fn run(
    image_dir: &PathBuf,
    reference: &str,
    auth_file: Option<&PathBuf>,
) -> miette::Result<()> {
    // Read the OCI Image Layout index.json
    let index_path = image_dir.join("index.json");
    let index_content = std::fs::read_to_string(&index_path)
        .into_diagnostic()
        .wrap_err_with(|| format!("Failed to read OCI index: {}", index_path.display()))?;
    let index: serde_json::Value =
        serde_json::from_str(&index_content).into_diagnostic()?;
    let manifests = index["manifests"]
        .as_array()
        .ok_or_else(|| miette::miette!("Invalid OCI index: missing manifests array"))?;
    if manifests.is_empty() {
        return Err(miette::miette!("OCI index contains no manifests"));
    }
    // Read the manifest
    let manifest_digest = manifests[0]["digest"]
        .as_str()
        .ok_or_else(|| miette::miette!("Invalid manifest entry: missing digest"))?;
    // Blobs are stored under blobs/sha256/<hex>; only sha256 is supported.
    let digest_hex = manifest_digest
        .strip_prefix("sha256:")
        .ok_or_else(|| miette::miette!("Unsupported digest algorithm: {manifest_digest}"))?;
    let manifest_path = image_dir.join("blobs/sha256").join(digest_hex);
    let manifest_json: serde_json::Value = serde_json::from_str(
        &std::fs::read_to_string(&manifest_path)
            .into_diagnostic()
            .wrap_err("Failed to read manifest blob")?,
    )
    .into_diagnostic()?;
    // Read config blob
    let config_digest = manifest_json["config"]["digest"]
        .as_str()
        .ok_or_else(|| miette::miette!("Missing config digest in manifest"))?;
    let config_hex = config_digest.strip_prefix("sha256:").unwrap_or(config_digest);
    let config_json = std::fs::read(image_dir.join("blobs/sha256").join(config_hex))
        .into_diagnostic()
        .wrap_err("Failed to read config blob")?;
    // Read layer blobs
    let layers_json = manifest_json["layers"]
        .as_array()
        .ok_or_else(|| miette::miette!("Missing layers in manifest"))?;
    let mut layers = Vec::new();
    for layer_desc in layers_json {
        let layer_digest = layer_desc["digest"]
            .as_str()
            .ok_or_else(|| miette::miette!("Missing layer digest"))?;
        let layer_hex = layer_digest
            .strip_prefix("sha256:")
            .unwrap_or(layer_digest);
        let layer_data = std::fs::read(image_dir.join("blobs/sha256").join(layer_hex))
            .into_diagnostic()
            .wrap_err_with(|| format!("Failed to read layer blob: {layer_digest}"))?;
        layers.push(forge_oci::tar_layer::LayerBlob {
            data: layer_data,
            digest: layer_digest.to_string(),
            uncompressed_size: 0, // Not tracked in layout
        });
    }
    // Determine auth
    let auth = if let Some(auth_path) = auth_file {
        let auth_content = std::fs::read_to_string(auth_path)
            .into_diagnostic()
            .wrap_err_with(|| format!("Failed to read auth file: {}", auth_path.display()))?;
        let auth_json: serde_json::Value =
            serde_json::from_str(&auth_content).into_diagnostic()?;
        // A "token" key wins over username/password; anything else falls
        // back to anonymous rather than erroring.
        if let Some(token) = auth_json["token"].as_str() {
            forge_oci::registry::AuthConfig::Bearer {
                token: token.to_string(),
            }
        } else if let (Some(user), Some(pass)) = (
            auth_json["username"].as_str(),
            auth_json["password"].as_str(),
        ) {
            forge_oci::registry::AuthConfig::Basic {
                username: user.to_string(),
                password: pass.to_string(),
            }
        } else {
            forge_oci::registry::AuthConfig::Anonymous
        }
    } else {
        forge_oci::registry::AuthConfig::Anonymous
    };
    // Determine if we need insecure registries (localhost)
    let insecure = if reference.starts_with("localhost") || reference.starts_with("127.0.0.1") {
        // The host[:port] part of the reference is what the client matches
        // against its insecure-registry list.
        let host_port = reference.split('/').next().unwrap_or("");
        vec![host_port.to_string()]
    } else {
        vec![]
    };
    info!(reference, "Pushing OCI image to registry");
    let manifest_url =
        forge_oci::registry::push_image(reference, layers, config_json, &auth, &insecure)
            .await
            .map_err(miette::Report::new)
            .wrap_err("Push failed")?;
    println!("Pushed: {manifest_url}");
    Ok(())
}

View file

@ -0,0 +1,36 @@
use std::path::PathBuf;
use miette::{Context, IntoDiagnostic};
/// List available targets from a spec file.
pub fn run(spec_path: &PathBuf) -> miette::Result<()> {
    let kdl_content = std::fs::read_to_string(spec_path)
        .into_diagnostic()
        .wrap_err_with(|| format!("Failed to read spec file: {}", spec_path.display()))?;
    // Parse the spec, then resolve includes relative to its directory.
    let parsed = spec_parser::parse(&kdl_content)
        .map_err(miette::Report::new)
        .wrap_err("Failed to parse spec")?;
    let base_dir = spec_path
        .parent()
        .unwrap_or_else(|| std::path::Path::new("."));
    let resolved = spec_parser::resolve::resolve(parsed, base_dir)
        .map_err(miette::Report::new)
        .wrap_err("Failed to resolve includes")?;
    // Print one line per target, or a notice when the spec defines none.
    let targets = forge_engine::list_targets(&resolved);
    if targets.is_empty() {
        println!("No targets defined in spec.");
        return Ok(());
    }
    println!("Available targets:");
    for (name, kind) in targets {
        println!("  {name} ({kind})");
    }
    Ok(())
}

View file

@ -0,0 +1,25 @@
use std::path::PathBuf;
use miette::{Context, IntoDiagnostic};
/// Validate a spec file by parsing it and resolving all includes.
pub fn run(spec_path: &PathBuf) -> miette::Result<()> {
    let kdl_content = std::fs::read_to_string(spec_path)
        .into_diagnostic()
        .wrap_err_with(|| format!("Failed to read spec file: {}", spec_path.display()))?;
    // A spec is considered valid when it parses and every include resolves.
    let parsed = spec_parser::parse(&kdl_content)
        .map_err(miette::Report::new)
        .wrap_err("Failed to parse spec")?;
    let base_dir = spec_path
        .parent()
        .unwrap_or_else(|| std::path::Path::new("."));
    spec_parser::resolve::resolve(parsed, base_dir)
        .map_err(miette::Report::new)
        .wrap_err("Failed to resolve includes")?;
    println!("Spec is valid: {}", spec_path.display());
    Ok(())
}

120
crates/forger/src/main.rs Normal file
View file

@ -0,0 +1,120 @@
mod commands;
use std::path::PathBuf;
use clap::{Parser, Subcommand};
use miette::Result;
use tracing_subscriber::EnvFilter;
// Top-level CLI definition. NOTE: the `///` doc comments below double as
// clap-generated help text, so they are user-visible strings.
#[derive(Parser, Debug)]
#[command(
    name = "forger",
    version,
    about = "Build optimized OS images and publish to OCI registries"
)]
struct Args {
    #[command(subcommand)]
    command: Commands,
}

// One variant per subcommand; dispatched in `main`.
#[derive(Subcommand, Debug)]
enum Commands {
    /// Build an image from a spec file
    Build {
        /// Path to the spec file
        #[arg(short, long)]
        spec: PathBuf,
        /// Target name to build (builds all if omitted)
        #[arg(short, long)]
        target: Option<String>,
        /// Active profiles for conditional blocks
        #[arg(short, long)]
        profile: Vec<String>,
        /// Output directory for build artifacts
        #[arg(short, long, default_value = "./output")]
        output_dir: PathBuf,
    },
    /// Validate a spec file (parse + resolve includes)
    Validate {
        /// Path to the spec file
        #[arg(short, long)]
        spec: PathBuf,
    },
    /// Inspect a resolved spec (parse + resolve + apply profiles)
    Inspect {
        /// Path to the spec file
        #[arg(short, long)]
        spec: PathBuf,
        /// Active profiles for conditional blocks
        #[arg(short, long)]
        profile: Vec<String>,
    },
    /// Push an OCI Image Layout to a registry
    Push {
        /// Path to the OCI Image Layout directory
        #[arg(short, long)]
        image: PathBuf,
        /// Registry reference (e.g., ghcr.io/org/image:tag)
        #[arg(short, long)]
        reference: String,
        /// Path to auth file (JSON with username/password or token)
        #[arg(short, long)]
        auth_file: Option<PathBuf>,
    },
    /// List available targets from a spec file
    Targets {
        /// Path to the spec file
        #[arg(short, long)]
        spec: PathBuf,
    },
}
#[tokio::main]
async fn main() -> Result<()> {
    // Honor RUST_LOG when set; default to info-level logging otherwise.
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt().with_env_filter(filter).init();

    // Dispatch to the matching subcommand implementation; each arm's
    // Result is the process result.
    match Args::parse().command {
        Commands::Build {
            spec,
            target,
            profile,
            output_dir,
        } => commands::build::run(&spec, target.as_deref(), &profile, &output_dir).await,
        Commands::Validate { spec } => commands::validate::run(&spec),
        Commands::Inspect { spec, profile } => commands::inspect::run(&spec, &profile),
        Commands::Push {
            image,
            reference,
            auth_file,
        } => commands::push::run(&image, &reference, auth_file.as_ref()).await,
        Commands::Targets { spec } => commands::targets::run(&spec),
    }
}

View file

@ -0,0 +1,14 @@
[package]
name = "spec-parser"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true

[dependencies]
knuffel = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }

[dev-dependencies]
# `tempfile` is not declared in [workspace.dependencies] in the root
# manifest, so `{ workspace = true }` fails to resolve; pin it directly.
tempfile = "3"

View file

@ -0,0 +1,152 @@
pub mod profile;
pub mod resolve;
pub mod schema;
use miette::Diagnostic;
use thiserror::Error;
#[derive(Debug, Error, Diagnostic)]
pub enum ParseError {
#[error("Failed to parse KDL spec")]
#[diagnostic(
help("Check the KDL syntax in your spec file"),
code(spec_parser::kdl_parse)
)]
KdlError {
detail: String,
},
}
impl From<knuffel::Error> for ParseError {
fn from(err: knuffel::Error) -> Self {
ParseError::KdlError {
detail: err.to_string(),
}
}
}
/// Parse a KDL document into an [`schema::ImageSpec`].
pub fn parse(kdl: &str) -> Result<schema::ImageSpec, ParseError> {
    // The file name "image.kdl" is only used for diagnostics.
    knuffel::parse("image.kdl", kdl).map_err(Into::into)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises the full schema surface: metadata, base/build-host,
    // repositories, conditional package lists, customization, overlays,
    // and both a qcow2 and an oci target.
    #[test]
    fn test_parse_example() {
        let kdl = r#"
            metadata name="my-image" version="1.0.0" description="A test image"
            base "path/to/base.tar.gz"
            build-host "path/to/build-vm.qcow2"
            repositories {
                publisher name="test-pub" origin="http://pkg.test.com"
            }
            incorporation "pkg:/test/incorporation"
            packages {
                package "system/kernel"
            }
            packages if="desktop" {
                package "desktop/gnome"
            }
            customization {
                user "admin"
            }
            overlays {
                file source="local/file" destination="/remote/file"
            }
            target "vm" kind="qcow2" {
                disk-size "20G"
                bootloader "grub"
            }
            target "container" kind="oci" {
                entrypoint command="/bin/sh"
                environment {
                    set "PATH" "/bin:/usr/bin"
                }
            }
        "#;
        let spec = parse(kdl).expect("Failed to parse KDL");
        assert_eq!(spec.metadata.name, "my-image");
        assert_eq!(spec.base, Some("path/to/base.tar.gz".to_string()));
        assert_eq!(
            spec.build_host,
            Some("path/to/build-vm.qcow2".to_string())
        );
        assert_eq!(spec.repositories.publishers.len(), 1);
        // One unconditional and one profile-gated packages block.
        assert_eq!(spec.packages.len(), 2);
        assert_eq!(spec.targets.len(), 2);
        let vm_target = &spec.targets[0];
        assert_eq!(vm_target.name, "vm");
        assert_eq!(vm_target.kind, schema::TargetKind::Qcow2);
        assert_eq!(vm_target.disk_size, Some("20G".to_string()));
        assert_eq!(vm_target.bootloader, Some("grub".to_string()));
        let container_target = &spec.targets[1];
        assert_eq!(container_target.name, "container");
        assert_eq!(container_target.kind, schema::TargetKind::Oci);
        assert_eq!(
            container_target.entrypoint.as_ref().unwrap().command,
            "/bin/sh"
        );
    }

    // Optional top-level `variants` and `certificates` blocks decode
    // into their dedicated schema types.
    #[test]
    fn test_parse_variants_and_certificates() {
        let kdl = r#"
            metadata name="test" version="0.1.0"
            repositories {
                publisher name="omnios" origin="https://pkg.omnios.org/bloody/core/"
            }
            variants {
                set name="opensolaris.zone" value="global"
            }
            certificates {
                ca publisher="omnios" certfile="omniosce-ca.cert.pem"
            }
        "#;
        let spec = parse(kdl).expect("Failed to parse KDL");
        let variants = spec.variants.unwrap();
        assert_eq!(variants.vars.len(), 1);
        assert_eq!(variants.vars[0].name, "opensolaris.zone");
        assert_eq!(variants.vars[0].value, "global");
        let certs = spec.certificates.unwrap();
        assert_eq!(certs.ca.len(), 1);
        assert_eq!(certs.ca[0].publisher, "omnios");
    }

    // `pool { property ... }` blocks inside a target decode into
    // name/value pairs.
    #[test]
    fn test_parse_pool_properties() {
        let kdl = r#"
            metadata name="test" version="0.1.0"
            repositories {}
            target "disk" kind="qcow2" {
                disk-size "2000M"
                bootloader "uefi"
                pool {
                    property name="ashift" value="12"
                }
            }
        "#;
        let spec = parse(kdl).expect("Failed to parse KDL");
        let target = &spec.targets[0];
        let pool = target.pool.as_ref().unwrap();
        assert_eq!(pool.properties.len(), 1);
        assert_eq!(pool.properties[0].name, "ashift");
        assert_eq!(pool.properties[0].value, "12");
    }
}

View file

@ -0,0 +1,107 @@
use crate::schema::ImageSpec;
/// Filter an `ImageSpec` so that only blocks matching the active profiles are
/// retained. Blocks with no `if` condition are always included. Blocks whose
/// `if` value matches one of the active profile names are included. All others
/// are removed.
pub fn apply_profiles(mut spec: ImageSpec, active_profiles: &[String]) -> ImageSpec {
    // All three conditional section types use the same keep-predicate.
    let keep = |cond: Option<&str>| profile_matches(cond, active_profiles);
    spec.packages.retain(|p| keep(p.r#if.as_deref()));
    spec.customizations.retain(|c| keep(c.r#if.as_deref()));
    spec.overlays.retain(|o| keep(o.r#if.as_deref()));
    spec
}
/// A block is kept when it has no condition at all, or when its condition
/// names one of the currently active profiles.
fn profile_matches(condition: Option<&str>, active_profiles: &[String]) -> bool {
    condition.map_or(true, |name| active_profiles.iter().any(|p| p == name))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::parse;

    // With no active profiles, only unconditional blocks survive filtering.
    #[test]
    fn test_unconditional_blocks_always_included() {
        let kdl = r#"
            metadata name="test" version="1.0.0"
            repositories {
                publisher name="main" origin="http://example.com"
            }
            packages {
                package "always/included"
            }
            packages if="desktop" {
                package "desktop/only"
            }
        "#;
        let spec = parse(kdl).unwrap();
        let filtered = apply_profiles(spec, &[]);
        assert_eq!(filtered.packages.len(), 1);
        assert_eq!(filtered.packages[0].packages[0].name, "always/included");
    }

    // Activating one profile keeps its blocks plus unconditional ones,
    // while blocks gated on other profiles are dropped.
    #[test]
    fn test_matching_profile_included() {
        let kdl = r#"
            metadata name="test" version="1.0.0"
            repositories {
                publisher name="main" origin="http://example.com"
            }
            packages {
                package "always/included"
            }
            packages if="desktop" {
                package "desktop/only"
            }
            packages if="server" {
                package "server/only"
            }
        "#;
        let spec = parse(kdl).unwrap();
        let filtered = apply_profiles(spec, &["desktop".to_string()]);
        assert_eq!(filtered.packages.len(), 2);
        assert_eq!(filtered.packages[0].packages[0].name, "always/included");
        assert_eq!(filtered.packages[1].packages[0].name, "desktop/only");
    }

    // Filtering applies to overlays and customization blocks, not just
    // package lists.
    #[test]
    fn test_overlays_and_customizations_filtered() {
        let kdl = r#"
            metadata name="test" version="1.0.0"
            repositories {
                publisher name="main" origin="http://example.com"
            }
            overlays {
                ensure-dir "/always" owner="root" group="root" mode="755"
            }
            overlays if="dev" {
                ensure-dir "/dev-only" owner="root" group="root" mode="755"
            }
            customization {
                user "admin"
            }
            customization if="dev" {
                user "developer"
            }
        "#;
        let spec = parse(kdl).unwrap();
        let filtered = apply_profiles(spec, &[]);
        assert_eq!(filtered.overlays.len(), 1);
        assert_eq!(filtered.customizations.len(), 1);
    }
}

View file

@ -0,0 +1,322 @@
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use miette::Diagnostic;
use thiserror::Error;
use crate::schema::ImageSpec;
/// Errors raised while resolving `base` and `include` references in a spec.
#[derive(Debug, Error, Diagnostic)]
pub enum ResolveError {
    /// A referenced spec file could not be read or canonicalized.
    #[error("Failed to read spec file: {path}")]
    #[diagnostic(help("Ensure the file exists and is readable"))]
    ReadFile {
        path: String,
        #[source]
        source: std::io::Error,
    },
    /// A referenced spec file was read but failed KDL parsing.
    #[error("Failed to parse included spec: {path}")]
    #[diagnostic(help("Check the KDL syntax in the included file"))]
    ParseInclude {
        path: String,
        #[source]
        source: crate::ParseError,
    },
    /// The same file appeared twice along a base/include chain.
    #[error("Circular include detected: {path}")]
    #[diagnostic(
        help("The include chain forms a cycle. Remove the circular reference."),
        code(spec_parser::circular_include)
    )]
    CircularInclude { path: String },
    /// Recursively resolving a parent (`base`) spec failed.
    #[error("Failed to resolve base spec: {path}")]
    #[diagnostic(help("Ensure the base spec path is correct and the file exists"))]
    ResolveBase {
        path: String,
        // Boxed because the variant is recursive.
        #[source]
        source: Box<ResolveError>,
    },
}
/// Resolve all includes and base references in an `ImageSpec`, producing a
/// fully merged spec. The `spec_dir` is the directory containing the root spec
/// file, used to resolve relative paths.
pub fn resolve(spec: ImageSpec, spec_dir: &Path) -> Result<ImageSpec, ResolveError> {
    // Fresh visited-set: canonical paths seen so far, for cycle detection.
    resolve_inner(spec, spec_dir, &mut HashSet::new())
}
fn resolve_inner(
mut spec: ImageSpec,
spec_dir: &Path,
visited: &mut HashSet<PathBuf>,
) -> Result<ImageSpec, ResolveError> {
// Resolve base spec first (base is the "parent" we inherit from)
if let Some(base_path) = spec.base.take() {
let base_abs = resolve_path(spec_dir, &base_path);
let canonical = base_abs
.canonicalize()
.map_err(|e| ResolveError::ReadFile {
path: base_abs.display().to_string(),
source: e,
})?;
if !visited.insert(canonical.clone()) {
return Err(ResolveError::CircularInclude {
path: canonical.display().to_string(),
});
}
let base_content =
std::fs::read_to_string(&canonical).map_err(|e| ResolveError::ReadFile {
path: canonical.display().to_string(),
source: e,
})?;
let base_spec = crate::parse(&base_content).map_err(|e| ResolveError::ParseInclude {
path: canonical.display().to_string(),
source: e,
})?;
let base_dir = canonical.parent().unwrap_or(spec_dir);
let resolved_base = resolve_inner(base_spec, base_dir, visited).map_err(|e| {
ResolveError::ResolveBase {
path: canonical.display().to_string(),
source: Box::new(e),
}
})?;
spec = merge_base(resolved_base, spec);
}
// Resolve includes (siblings that contribute packages/overlays/customizations)
let includes = std::mem::take(&mut spec.includes);
for include in includes {
let inc_abs = resolve_path(spec_dir, &include.path);
let canonical = inc_abs
.canonicalize()
.map_err(|e| ResolveError::ReadFile {
path: inc_abs.display().to_string(),
source: e,
})?;
if !visited.insert(canonical.clone()) {
return Err(ResolveError::CircularInclude {
path: canonical.display().to_string(),
});
}
let inc_content =
std::fs::read_to_string(&canonical).map_err(|e| ResolveError::ReadFile {
path: canonical.display().to_string(),
source: e,
})?;
let inc_spec = crate::parse(&inc_content).map_err(|e| ResolveError::ParseInclude {
path: canonical.display().to_string(),
source: e,
})?;
let inc_dir = canonical.parent().unwrap_or(spec_dir);
let resolved_inc = resolve_inner(inc_spec, inc_dir, visited)?;
merge_include(&mut spec, resolved_inc);
}
Ok(spec)
}
/// Merge a base (parent) spec with the child. The child's values take
/// precedence; the base provides defaults.
fn merge_base(mut base: ImageSpec, child: ImageSpec) -> ImageSpec {
    // Metadata always comes from the child.
    base.metadata = child.metadata;

    // Scalar fields: the child overrides only when it provides a value.
    if child.build_host.is_some() {
        base.build_host = child.build_host;
    }
    if child.incorporation.is_some() {
        base.incorporation = child.incorporation;
    }

    // Publishers: append the child's, skipping names the base already has.
    for publisher in child.repositories.publishers {
        let already_known = base
            .repositories
            .publishers
            .iter()
            .any(|p| p.name == publisher.name);
        if !already_known {
            base.repositories.publishers.push(publisher);
        }
    }

    // Variants: child values replace same-named base entries; new names append.
    if let Some(child_variants) = child.variants {
        match base.variants {
            Some(ref mut base_variants) => {
                for var in child_variants.vars {
                    match base_variants.vars.iter_mut().find(|v| v.name == var.name) {
                        Some(existing) => existing.value = var.value,
                        None => base_variants.vars.push(var),
                    }
                }
            }
            None => base.variants = Some(child_variants),
        }
    }

    // Certificates: child CA entries are concatenated onto the base's.
    if let Some(child_certs) = child.certificates {
        match base.certificates {
            Some(ref mut base_certs) => base_certs.ca.extend(child_certs.ca),
            None => base.certificates = Some(child_certs),
        }
    }

    // List sections: base entries first, child entries appended after.
    base.packages.extend(child.packages);
    base.customizations.extend(child.customizations);
    base.overlays.extend(child.overlays);

    // Includes were already resolved before merging; don't carry them forward.
    base.includes = Vec::new();

    // Targets: a child that defines any targets replaces the base's entirely.
    if !child.targets.is_empty() {
        base.targets = child.targets;
    }
    base
}
/// Merge an included spec into the current spec. Includes contribute
/// packages, customizations, and overlays but not metadata/targets.
fn merge_include(spec: &mut ImageSpec, included: ImageSpec) {
spec.packages.extend(included.packages);
spec.customizations.extend(included.customizations);
spec.overlays.extend(included.overlays);
}
/// Interpret `relative` against `base_dir`; absolute paths pass through.
fn resolve_path(base_dir: &Path, relative: &str) -> PathBuf {
    let candidate = Path::new(relative);
    if candidate.is_absolute() {
        candidate.to_owned()
    } else {
        base_dir.join(candidate)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use tempfile::TempDir;

    // An `include` contributes packages/overlays while the root spec
    // keeps its own metadata.
    #[test]
    fn test_resolve_with_include() {
        let tmp = TempDir::new().unwrap();
        let included_kdl = r#"
            metadata name="included" version="0.0.1"
            repositories {
                publisher name="extra" origin="http://extra.example.com"
            }
            packages {
                package "extra/pkg"
            }
            overlays {
                ensure-dir "/extra/dir" owner="root" group="root" mode="755"
            }
        "#;
        fs::write(tmp.path().join("included.kdl"), included_kdl).unwrap();
        let root_kdl = r#"
            metadata name="root" version="1.0.0"
            repositories {
                publisher name="main" origin="http://main.example.com"
            }
            include "included.kdl"
            packages {
                package "main/pkg"
            }
        "#;
        let spec = crate::parse(root_kdl).unwrap();
        let resolved = resolve(spec, tmp.path()).unwrap();
        assert_eq!(resolved.metadata.name, "root");
        assert_eq!(resolved.packages.len(), 2);
        assert_eq!(resolved.overlays.len(), 1);
    }

    // A `base` spec provides defaults: its packages/publishers come first,
    // the child's metadata and targets win.
    #[test]
    fn test_resolve_with_base() {
        let tmp = TempDir::new().unwrap();
        let base_kdl = r#"
            metadata name="base" version="0.0.1"
            repositories {
                publisher name="core" origin="http://core.example.com"
            }
            packages {
                package "base/pkg"
            }
        "#;
        fs::write(tmp.path().join("base.kdl"), base_kdl).unwrap();
        let child_kdl = r#"
            metadata name="child" version="1.0.0"
            base "base.kdl"
            repositories {
                publisher name="extra" origin="http://extra.example.com"
            }
            packages {
                package "child/pkg"
            }
            target "vm" kind="qcow2" {
                disk-size "10G"
            }
        "#;
        let spec = crate::parse(child_kdl).unwrap();
        let resolved = resolve(spec, tmp.path()).unwrap();
        assert_eq!(resolved.metadata.name, "child");
        assert_eq!(resolved.repositories.publishers.len(), 2);
        assert_eq!(resolved.packages.len(), 2);
        // Base packages precede the child's in the merged order.
        assert_eq!(resolved.packages[0].packages[0].name, "base/pkg");
        assert_eq!(resolved.packages[1].packages[0].name, "child/pkg");
        assert_eq!(resolved.targets.len(), 1);
    }

    // Two specs including each other must fail with CircularInclude
    // rather than recursing forever.
    #[test]
    fn test_circular_include_detected() {
        let tmp = TempDir::new().unwrap();
        let a_kdl = r#"
            metadata name="a" version="0.0.1"
            repositories {}
            include "b.kdl"
        "#;
        let b_kdl = r#"
            metadata name="b" version="0.0.1"
            repositories {}
            include "a.kdl"
        "#;
        fs::write(tmp.path().join("a.kdl"), a_kdl).unwrap();
        fs::write(tmp.path().join("b.kdl"), b_kdl).unwrap();
        let spec = crate::parse(a_kdl).unwrap();
        let result = resolve(spec, tmp.path());
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(matches!(err, ResolveError::CircularInclude { .. }));
    }
}

View file

@ -0,0 +1,297 @@
use knuffel::Decode;
/// Root document of a KDL image spec.
#[derive(Debug, Decode)]
pub struct ImageSpec {
    /// Required `metadata` node (name/version/description).
    #[knuffel(child)]
    pub metadata: Metadata,
    /// Optional path to a parent spec; merged in by the resolver.
    #[knuffel(child, unwrap(argument))]
    pub base: Option<String>,
    #[knuffel(child, unwrap(argument))]
    pub build_host: Option<String>,
    #[knuffel(child)]
    pub repositories: Repositories,
    #[knuffel(child, unwrap(argument))]
    pub incorporation: Option<String>,
    #[knuffel(child)]
    pub variants: Option<Variants>,
    #[knuffel(child)]
    pub certificates: Option<Certificates>,
    // Repeated blocks; each may carry an `if` profile condition.
    #[knuffel(children(name = "packages"))]
    pub packages: Vec<PackageList>,
    #[knuffel(children(name = "customization"))]
    pub customizations: Vec<Customization>,
    #[knuffel(children(name = "overlays"))]
    pub overlays: Vec<Overlays>,
    // Sibling specs merged in by the resolver; emptied after resolution.
    #[knuffel(children(name = "include"))]
    pub includes: Vec<Include>,
    #[knuffel(children(name = "target"))]
    pub targets: Vec<Target>,
}
/// `metadata` node: identifying information for the image.
#[derive(Debug, Decode)]
pub struct Metadata {
    #[knuffel(property)]
    pub name: String,
    #[knuffel(property)]
    pub version: String,
    #[knuffel(property)]
    pub description: Option<String>,
}

/// `repositories` node: the set of package publishers.
#[derive(Debug, Decode)]
pub struct Repositories {
    #[knuffel(children(name = "publisher"))]
    pub publishers: Vec<Publisher>,
}

/// A single `publisher` entry (name plus origin URL).
#[derive(Debug, Decode)]
pub struct Publisher {
    #[knuffel(property)]
    pub name: String,
    #[knuffel(property)]
    pub origin: String,
}

/// A `packages` block; may be gated on a profile via `if`.
#[derive(Debug, Decode)]
pub struct PackageList {
    // Profile name gating this block; `None` means always active.
    #[knuffel(property)]
    pub r#if: Option<String>,
    #[knuffel(children(name = "package"))]
    pub packages: Vec<Package>,
}

/// A single `package` entry identified by name.
#[derive(Debug, Decode)]
pub struct Package {
    #[knuffel(argument)]
    pub name: String,
}
/// A `customization` block; may be gated on a profile via `if`.
#[derive(Debug, Decode)]
pub struct Customization {
    #[knuffel(property)]
    pub r#if: Option<String>,
    #[knuffel(children(name = "user"))]
    pub users: Vec<User>,
}

/// A `user` entry inside a customization block.
#[derive(Debug, Decode)]
pub struct User {
    #[knuffel(argument)]
    pub name: String,
}

/// An `overlays` block of filesystem actions; may be gated via `if`.
#[derive(Debug, Decode)]
pub struct Overlays {
    #[knuffel(property)]
    pub r#if: Option<String>,
    // Child node names select the enum variant below.
    #[knuffel(children)]
    pub actions: Vec<OverlayAction>,
}

/// One filesystem action inside an overlays block.
#[derive(Debug, Decode)]
pub enum OverlayAction {
    File(FileOverlay),
    Devfsadm(Devfsadm),
    EnsureDir(EnsureDir),
    RemoveFiles(RemoveFiles),
    EnsureSymlink(EnsureSymlink),
    Shadow(ShadowOverlay),
}

/// `file` action: place a file at `destination`, optionally copied from
/// `source` (no source implies an empty file in the shipped KDL specs).
#[derive(Debug, Decode)]
pub struct FileOverlay {
    #[knuffel(property)]
    pub destination: String,
    #[knuffel(property)]
    pub source: Option<String>,
    #[knuffel(property)]
    pub owner: Option<String>,
    #[knuffel(property)]
    pub group: Option<String>,
    #[knuffel(property)]
    pub mode: Option<String>,
}

/// `devfsadm` action: no parameters.
#[derive(Debug, Decode)]
pub struct Devfsadm {}

/// `ensure-dir` action: create a directory with optional ownership/mode.
#[derive(Debug, Decode)]
pub struct EnsureDir {
    #[knuffel(argument)]
    pub path: String,
    #[knuffel(property)]
    pub owner: Option<String>,
    #[knuffel(property)]
    pub group: Option<String>,
    #[knuffel(property)]
    pub mode: Option<String>,
}

/// `remove-files` action: delete by file, directory, or pattern.
#[derive(Debug, Decode)]
pub struct RemoveFiles {
    #[knuffel(property)]
    pub file: Option<String>,
    #[knuffel(property)]
    pub dir: Option<String>,
    #[knuffel(property)]
    pub pattern: Option<String>,
}

/// `ensure-symlink` action: create `path` pointing at `target`.
#[derive(Debug, Decode)]
pub struct EnsureSymlink {
    #[knuffel(argument)]
    pub path: String,
    #[knuffel(property)]
    pub target: String,
    #[knuffel(property)]
    pub owner: Option<String>,
    #[knuffel(property)]
    pub group: Option<String>,
}

/// `shadow` action: set a user's password entry.
#[derive(Debug, Decode)]
pub struct ShadowOverlay {
    #[knuffel(property)]
    pub username: String,
    #[knuffel(property)]
    pub password: String,
}
/// A `target` node describing one build output.
#[derive(Debug, Decode)]
pub struct Target {
    #[knuffel(argument)]
    pub name: String,
    // Parsed through `TargetKind::from_str` because of the `str` modifier.
    #[knuffel(property, str)]
    pub kind: TargetKind,
    // Used by qcow2 targets in the example specs (disk-size, bootloader, pool).
    #[knuffel(child, unwrap(argument))]
    pub disk_size: Option<String>,
    #[knuffel(child, unwrap(argument))]
    pub bootloader: Option<String>,
    // Used by oci targets in the example specs (entrypoint, environment).
    #[knuffel(child)]
    pub entrypoint: Option<Entrypoint>,
    #[knuffel(child)]
    pub environment: Option<Environment>,
    #[knuffel(child)]
    pub pool: Option<Pool>,
}
/// The artifact format a `target` block produces.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub enum TargetKind {
    Qcow2,
    Oci,
    #[default]
    Artifact,
}

impl std::str::FromStr for TargetKind {
    type Err = String;

    /// Case-insensitive parse; accepts the aliases "qcow" and "tar".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let normalized = s.to_ascii_lowercase();
        match normalized.as_str() {
            "qcow2" | "qcow" => Ok(Self::Qcow2),
            "oci" => Ok(Self::Oci),
            "artifact" | "tar" => Ok(Self::Artifact),
            other => Err(format!("invalid target kind: {other}")),
        }
    }
}

impl std::fmt::Display for TargetKind {
    /// Canonical lowercase name (round-trips through `FromStr`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Qcow2 => "qcow2",
            Self::Oci => "oci",
            Self::Artifact => "artifact",
        })
    }
}
/// `entrypoint` node of an oci target.
#[derive(Debug, Decode)]
pub struct Entrypoint {
    #[knuffel(property)]
    pub command: String,
}

/// `environment` node of an oci target: a list of `set` pairs.
#[derive(Debug, Decode)]
pub struct Environment {
    #[knuffel(children(name = "set"))]
    pub vars: Vec<EnvVar>,
}

/// One `set "KEY" "VALUE"` entry.
#[derive(Debug, Decode)]
pub struct EnvVar {
    #[knuffel(argument)]
    pub key: String,
    #[knuffel(argument)]
    pub value: String,
}

/// `pool` node of a target: named property pairs.
#[derive(Debug, Decode)]
pub struct Pool {
    #[knuffel(children(name = "property"))]
    pub properties: Vec<PoolProperty>,
}

/// One `property name="…" value="…"` entry inside a pool.
#[derive(Debug, Decode)]
pub struct PoolProperty {
    #[knuffel(property)]
    pub name: String,
    #[knuffel(property)]
    pub value: String,
}

/// Top-level `variants` block: a list of `set` pairs.
#[derive(Debug, Decode)]
pub struct Variants {
    #[knuffel(children(name = "set"))]
    pub vars: Vec<VariantPair>,
}

/// One `set name="…" value="…"` variant entry.
#[derive(Debug, Decode)]
pub struct VariantPair {
    #[knuffel(property)]
    pub name: String,
    #[knuffel(property)]
    pub value: String,
}

/// Top-level `certificates` block: CA certificates per publisher.
#[derive(Debug, Decode)]
pub struct Certificates {
    #[knuffel(children(name = "ca"))]
    pub ca: Vec<CaCertificate>,
}

/// One `ca publisher="…" certfile="…"` entry.
#[derive(Debug, Decode)]
pub struct CaCertificate {
    #[knuffel(property)]
    pub publisher: String,
    #[knuffel(property)]
    pub certfile: String,
}

/// An `include "path"` reference to a sibling spec file.
#[derive(Debug, Decode)]
pub struct Include {
    #[knuffel(argument)]
    pub path: String,
}

20
images/common.kdl Normal file
View file

@ -0,0 +1,20 @@
// SMF profiles and basic network/name service configuration ported from image-builder JSON
overlays {
// SMF default profiles
ensure-symlink "/etc/svc/profile/generic.xml" target="generic_limited_net.xml" owner="root" group="root"
ensure-symlink "/etc/svc/profile/inetd_services.xml" target="inetd_generic.xml" owner="root" group="root"
ensure-symlink "/etc/svc/profile/platform.xml" target="platform_none.xml" owner="root" group="root"
// Name service profile
ensure-symlink "/etc/svc/profile/name_service.xml" target="ns_dns.xml" owner="root" group="root"
// nsswitch: use the dns profile file; symlink to keep parity with imagesrc copy
ensure-symlink "/etc/nsswitch.conf" target="nsswitch.dns" owner="root" group="root"
// Network basics and identity
file destination="/etc/inet/hosts" source="etc/hosts" owner="root" group="root" mode="644"
file destination="/etc/nodename" source="etc/nodename" owner="root" group="root" mode="644"
// Empty resolv.conf; can be populated by DHCP or later config
file destination="/etc/resolv.conf" owner="root" group="root" mode="644"
}

23
images/devfs.kdl Normal file
View file

@ -0,0 +1,23 @@
overlays {
devfsadm
ensure-dir "/dev/cfg" owner="root" group="root" mode="755"
ensure-dir "/dev/dsk" owner="root" group="sys" mode="755"
ensure-dir "/dev/rdsk" owner="root" group="sys" mode="755"
ensure-dir "/dev/usb" owner="root" group="root" mode="755"
remove-files dir="/dev/cfg"
remove-files dir="/dev/dsk"
remove-files dir="/dev/rdsk"
remove-files dir="/dev/usb"
// Re-create dirs
ensure-dir "/dev/cfg" owner="root" group="root" mode="755"
ensure-dir "/dev/dsk" owner="root" group="sys" mode="755"
ensure-dir "/dev/rdsk" owner="root" group="sys" mode="755"
ensure-dir "/dev/usb" owner="root" group="root" mode="755"
ensure-symlink "/dev/msglog" target="../devices/pseudo/sysmsg@0:msglog" owner="root" group="root"
// Empty file (implied by missing source)
file destination="/reconfigure" owner="root" group="root" mode="644"
}

View file

@ -0,0 +1,4 @@
autoboot_delay="8"
console="ttya"
os_console="ttya"
ttya-mode="115200,8,n,1,-"

View file

@ -0,0 +1,4 @@
# OmniOS default /etc/default/init file (UTC timezone)
# Upstream reference: omnios-image-builder/templates/files/default_init.utc
CMASK=022
TZ=UTC

5
images/files/etc/hosts Normal file
View file

@ -0,0 +1,5 @@
#
# Internet host table
#
::1 unknown unknown.local localhost loghost
127.0.0.1 unknown unknown.local localhost loghost

View file

@ -0,0 +1 @@
unknown

View file

@ -0,0 +1,122 @@
# $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/ccs/bin:/usr/bin:/bin:/usr/sbin:/sbin
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Use the client's locale/language settings
#AcceptEnv LANG LC_ALL LC_CTYPE LC_COLLATE LC_TIME LC_NUMERIC
#AcceptEnv LC_MONETARY LC_MESSAGES
# Authentication:
#LoginGraceTime 2m
PermitRootLogin without-password
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile .ssh/authorized_keys
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
#PermitEmptyPasswords no
# Change to no to disable s/key passwords
#ChallengeResponseAuthentication yes
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
#UsePAM no
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
#X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# override default of no subsystems
Subsystem sftp /usr/libexec/amd64/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server

View file

@ -0,0 +1,2 @@
nameserver 1.1.1.1
nameserver 8.8.8.8

View file

@ -0,0 +1,77 @@
#
# Configuration file for sshd(1m) (see also sshd_config(4))
#
Protocol 2
Port 22
# If port forwarding is enabled (default), specify if the server can bind to
# INADDR_ANY.
# This allows the local port forwarding to work when connections are received
# from any remote host.
GatewayPorts no
# X11 tunneling options
X11Forwarding yes
X11DisplayOffset 10
X11UseLocalhost yes
# The maximum number of concurrent unauthenticated connections to sshd.
# start:rate:full see sshd(1) for more information.
# The default is 10 unauthenticated clients.
#MaxStartups 10:30:60
# Banner to be printed before authentication starts.
#Banner /etc/issue
# Should sshd print the /etc/motd file and check for mail.
# On Solaris it is assumed that the login shell will do these (e.g. /etc/profile).
PrintMotd no
# KeepAlive specifies whether keep alive messages are sent to the client.
# See sshd(1) for detailed description of what this means.
# Note that the client may also be sending keep alive messages to the server.
KeepAlive yes
# Syslog facility and level
SyslogFacility auth
LogLevel info
#
# Authentication configuration
#
# Host private key files
# Must be on a local disk and readable only by the root user (root:sys 600).
# HostKey /etc/ssh/ssh_host_rsa_key
# HostKey /etc/ssh/ssh_host_dsa_key
# Ensure secure permissions on users' .ssh directory.
StrictModes yes
# Length of time in seconds before a client that hasn't completed
# authentication is disconnected.
# Default is 600 seconds. 0 means no time limit.
LoginGraceTime 600
# Maximum number of retries for authentication
MaxAuthTries 6
# Are logins to accounts with empty passwords allowed.
# If PermitEmptyPasswords is no, pass PAM_DISALLOW_NULL_AUTHTOK
# to pam_authenticate(3PAM).
PermitEmptyPasswords no
# To disable tunneled clear text passwords, change PasswordAuthentication to no.
# You probably also need to disable ChallengeResponseAuthentication.
PasswordAuthentication yes
# Change to no to disable s/key passwords
#ChallengeResponseAuthentication yes
PermitRootLogin without-password
# sftp subsystem
Subsystem sftp internal-sftp
IgnoreRhosts yes

2
images/files/etc/system Normal file
View file

@ -0,0 +1,2 @@
set zfs:zfs_arc_max = 0x40000000
set noexec_user_stack = 1

View file

View file

@ -0,0 +1,62 @@
# VERSION=1
460800:460800 hupcl:460800 hupcl::307200
307200:307200 hupcl:307200 hupcl::230400
230400:230400 hupcl:230400 hupcl::153600
153600:153600 hupcl:153600 hupcl::115200
115200:115200 hupcl:115200 hupcl::76800
76800:76800 hupcl:76800 hupcl::57600
57600:57600 hupcl:57600 hupcl::38400
38400:38400 hupcl:38400 hupcl::19200
19200:19200 hupcl:19200 hupcl::9600
9600:9600 hupcl:9600 hupcl::4800
4800:4800 hupcl:4800 hupcl::2400
2400:2400 hupcl:2400 hupcl::1200
1200:1200 hupcl:1200 hupcl::300
300:300 hupcl:300 hupcl::460800
460800E:460800 hupcl evenp:460800 evenp::307200
307200E:307200 hupcl evenp:307200 evenp::230400
230400E:230400 hupcl evenp:230400 evenp::153600
153600E:153600 hupcl evenp:153600 evenp::115200
115200E:115200 hupcl evenp:115200 evenp::76800
76800E:76800 hupcl evenp:76800 evenp::57600
57600E:57600 hupcl evenp:57600 evenp::38400
38400E:38400 hupcl evenp:38400 evenp::19200
19200E:19200 hupcl evenp:19200 evenp::9600
9600E:9600 hupcl evenp:9600 evenp::4800
4800E:4800 hupcl evenp:4800 evenp::2400
2400E:2400 hupcl evenp:2400 evenp::1200
1200E:1200 hupcl evenp:1200 evenp::300
300E:300 hupcl evenp:300 evenp::19200
auto:hupcl:sane hupcl:A:9600
console:115200 hupcl opost onlcr:115200::console
console1:1200 hupcl opost onlcr:1200::console2
console2:300 hupcl opost onlcr:300::console3
console3:2400 hupcl opost onlcr:2400::console4
console4:4800 hupcl opost onlcr:4800::console5
console5:19200 hupcl opost onlcr:19200::console
contty:9600 hupcl opost onlcr:9600 sane::contty1
contty1:1200 hupcl opost onlcr:1200 sane::contty2
contty2:300 hupcl opost onlcr:300 sane::contty3
contty3:2400 hupcl opost onlcr:2400 sane::contty4
contty4:4800 hupcl opost onlcr:4800 sane::contty5
contty5:19200 hupcl opost onlcr:19200 sane::contty
4800H:4800:4800 sane hupcl::9600H
9600H:9600:9600 sane hupcl::19200H
19200H:19200:19200 sane hupcl::38400H
38400H:38400:38400 sane hupcl::2400H
2400H:2400:2400 sane hupcl::1200H
1200H:1200:1200 sane hupcl::300H
300H:300:300 sane hupcl::4800H
conttyH:9600 opost onlcr:9600 hupcl sane::contty1H
contty1H:1200 opost onlcr:1200 hupcl sane::contty2H
contty2H:300 opost onlcr:300 hupcl sane::contty3H
contty3H:2400 opost onlcr:2400 hupcl sane::contty4H
contty4H:4800 opost onlcr:4800 hupcl sane::contty5H
contty5H:19200 opost onlcr:19200 hupcl sane::conttyH

View file

@ -0,0 +1,47 @@
// OmniOS bloody base configuration (ported from image-builder JSON)
metadata name="omnios-bloody-base" version="0.0.1" description="OmniOS bloody: core + extra publishers; base incorporation 'entire'"
repositories {
// Core publisher
publisher name="omnios" origin="https://pkg.omnios.org/bloody/core/"
// Extra publisher (enable via features in consumers if applicable)
publisher name="extra.omnios" origin="https://pkg.omnios.org/bloody/extra/"
}
// Prefer the standard OmniOS incorporation umbrella
incorporation "entire"
// Approve IPS CA certs used for mTLS when contacting publishers
certificates {
ca publisher="omnios" certfile="omniosce-ca.cert.pem"
}
// IPS variants to set inside the target image
variants {
// OmniOS global zone
set name="opensolaris.zone" value="global"
}
// Packages from the artifact phase JSON (finalization steps like pkg_purge_history
// and seed_smf are intentionally omitted here)
packages {
package "/editor/vim"
package "/network/rsync"
package "/system/library/gcc-runtime"
package "/system/library/g++-runtime"
package "/network/ftp"
package "/network/openssh-server"
package "/network/telnet"
package "/service/network/ntpsec"
package "/web/curl"
package "/web/wget"
package "/system/management/mdata-client"
}
// Build-only tools
packages if="build" {
package "/developer/build-essential"
package "/developer/omnios-build-tools"
}

View file

@ -0,0 +1,70 @@
// OmniOS bloody final disk image target (ported from image-builder JSON)
// Source JSON reference (abridged):
// {
// "pool": { "name": "rpool", "ashift": 12, "uefi": true, "size": 2000 },
// "steps": [
// { "t": "create_be" },
// { "t": "unpack_tar", "name": "omnios-stable-r${release}.tar" },
// { "t": "include", "name": "devfs" },
// { "t": "assemble_files", "dir": "/etc/versions", "output": "/etc/versions/build", "prefix": "build." },
// { "t": "make_bootable" },
// { "t": "include", "name": "common" },
// { "t": "ensure_file", "file": "/boot/conf.d/console", "src": "boot_console.${console}", "owner": "root", "group": "root", "mode": "644" },
// { "t": "ensure_file", "file": "/etc/ttydefs", "src": "ttydefs.115200", "owner": "root", "group": "sys", "mode": "644" },
// { "t": "ensure_file", "file": "/etc/default/init", "src": "default_init.utc", "owner": "root", "group": "root", "mode": "644" },
// { "t": "shadow", "username": "root", "password": "$5$kr1VgdIt$OUiUAyZCDogH/uaxH71rMeQxvpDEY2yX.x0ZQRnmeb9" },
// { "t": "include", "name": "finalise" }
// ]
// }
// Notes:
// - The refraction KDL schema doesn't model the step DSL directly; we express the
// relevant parts via includes, overlays, and target settings.
// - UEFI + disk size are mapped to the target stanza. Pool properties are now
// modeled via a pool { property name=".." value=".." } block under target.
// - Finalizers are implicit per the toolchain and don't need to be listed.
metadata name="omnios-bloody-disk" version="0.0.1" description="OmniOS bloody: bootable qcow2 target from base spec"
// Derive from the base publisher/variant/cert configuration and base packages
base "images/omnios-bloody-base.kdl"
// Device filesystem overlays and common system files
include "images/devfs.kdl"
include "images/common.kdl"
packages {
package "/system/management/cloud-init"
package "/driver/crypto/viorand"
package "/driver/network/vioif"
package "/driver/storage/vio9p"
package "/driver/storage/vioblk"
package "/driver/storage/vioscsi"
}
// Files that the original JSON ensured
overlays {
// Console configuration (115200 by default)
file destination="/boot/conf.d/console" source="boot_console.115200" owner="root" group="root" mode="644"
// TTY speed definitions
file destination="/etc/ttydefs" source="ttydefs.115200" owner="root" group="sys" mode="644"
// Init defaults (UTC timezone)
file destination="/etc/default/init" source="default_init.utc" owner="root" group="root" mode="644"
// Fallback local login: set a default root password in /etc/shadow
// This is used only if cloud-init (or metadata) fails to provision credentials
shadow username="root" password="$5$kr1VgdIt$OUiUAyZCDogH/uaxH71rMeQxvpDEY2yX.x0ZQRnmeb9"
}
// Bootable qcow2 disk image; size is 2000 MB; use UEFI bootloader
target "qcow2" kind="qcow2" {
disk-size "2000M"
bootloader "uefi"
// ZFS pool properties for the created rpool
pool {
// Match original JSON: ashift=12
property name="ashift" value="12"
}
}