Fix 8 bugs: include merging, UID collision, shell injection, OCI compliance

- spec-parser: merge_include now merges repos, variants, certs, incorporation
- forge-engine: auto-increment UID/GID from existing passwd/group files
- forge-engine: replace shell-based APT source addition with direct file write
- forge-engine/forge-oci: OS field is now distro-aware (solaris vs linux)
- forge-engine: apply owner/group via lchown on file/dir/symlink overlays
- forge-oci: diff_ids now use uncompressed tar digests per OCI image spec
- forge-oci: track real uncompressed_size instead of hardcoded 0
- forge-engine/forge-builder: use spec metadata version instead of "latest"
This commit is contained in:
Till Wegmueller 2026-04-09 22:45:42 +02:00
parent 38b359e382
commit 0510c8f31f
13 changed files with 318 additions and 37 deletions

View file

@ -1,6 +1,6 @@
use std::path::Path; use std::path::Path;
use spec_parser::schema::ImageSpec; use spec_parser::schema::{DistroFamily, ImageSpec};
use tracing::info; use tracing::info;
use crate::error::BuilderError; use crate::error::BuilderError;
@ -39,11 +39,12 @@ pub async fn push_qcow2_outputs(spec: &ImageSpec, output_dir: &Path) -> Result<(
detail: format!("failed to read QCOW2 file {}: {e}", qcow2_path.display()), detail: format!("failed to read QCOW2 file {}: {e}", qcow2_path.display()),
})?; })?;
let distro = DistroFamily::from_distro_str(spec.distro.as_deref());
let metadata = forge_oci::artifact::Qcow2Metadata { let metadata = forge_oci::artifact::Qcow2Metadata {
name: target.name.clone(), name: target.name.clone(),
version: "latest".to_string(), version: spec.metadata.version.clone(),
architecture: "amd64".to_string(), architecture: "amd64".to_string(),
os: "linux".to_string(), os: distro.oci_os().to_string(),
description: None, description: None,
}; };

View file

@ -9,7 +9,7 @@ pub mod tools;
use std::path::Path; use std::path::Path;
use error::ForgeError; use error::ForgeError;
use spec_parser::schema::{ImageSpec, Target, TargetKind}; use spec_parser::schema::{DistroFamily, ImageSpec, Target, TargetKind};
use tools::ToolRunner; use tools::ToolRunner;
use tracing::info; use tracing::info;
@ -46,11 +46,13 @@ impl<'a> BuildContext<'a> {
let phase1_result = let phase1_result =
phase1::execute(self.spec, self.files_dir, self.runner).await?; phase1::execute(self.spec, self.files_dir, self.runner).await?;
let distro = DistroFamily::from_distro_str(self.spec.distro.as_deref());
phase2::execute( phase2::execute(
target, target,
&phase1_result.staging_root, &phase1_result.staging_root,
self.files_dir, self.files_dir,
self.output_dir, self.output_dir,
&distro,
) )
.await?; .await?;
} }
@ -92,7 +94,14 @@ impl<'a> BuildContext<'a> {
// Auto-push to OCI registry if configured (skipped when host-side push handles it) // Auto-push to OCI registry if configured (skipped when host-side push handles it)
if !self.skip_push { if !self.skip_push {
phase2::push_qcow2_if_configured(target, self.output_dir).await?; let distro = DistroFamily::from_distro_str(self.spec.distro.as_deref());
phase2::push_qcow2_if_configured(
target,
self.output_dir,
&distro,
&self.spec.metadata.version,
)
.await?;
} }
Ok(()) Ok(())

View file

@ -25,9 +25,15 @@ fn create_user(username: &str, staging_root: &Path) -> Result<(), ForgeError> {
detail: e.to_string(), detail: e.to_string(),
})?; })?;
// Append to /etc/passwd
let passwd_path = etc_dir.join("passwd"); let passwd_path = etc_dir.join("passwd");
let passwd_entry = format!("{username}:x:1000:1000::/home/{username}:/bin/sh\n"); let group_path = etc_dir.join("group");
// Find the next available UID/GID (start at 1000, scan existing files)
let next_uid = next_id_from_file(&passwd_path, 2);
let next_gid = next_id_from_file(&group_path, 2);
// Append to /etc/passwd
let passwd_entry = format!("{username}:x:{next_uid}:{next_gid}::/home/{username}:/bin/sh\n");
append_or_create(&passwd_path, &passwd_entry).map_err(|e| ForgeError::Customization { append_or_create(&passwd_path, &passwd_entry).map_err(|e| ForgeError::Customization {
operation: format!("add user {username} to /etc/passwd"), operation: format!("add user {username} to /etc/passwd"),
detail: e.to_string(), detail: e.to_string(),
@ -42,8 +48,7 @@ fn create_user(username: &str, staging_root: &Path) -> Result<(), ForgeError> {
})?; })?;
// Append to /etc/group // Append to /etc/group
let group_path = etc_dir.join("group"); let group_entry = format!("{username}::{next_gid}:\n");
let group_entry = format!("{username}::1000:\n");
append_or_create(&group_path, &group_entry).map_err(|e| ForgeError::Customization { append_or_create(&group_path, &group_entry).map_err(|e| ForgeError::Customization {
operation: format!("add group {username} to /etc/group"), operation: format!("add group {username} to /etc/group"),
detail: e.to_string(), detail: e.to_string(),
@ -52,6 +57,32 @@ fn create_user(username: &str, staging_root: &Path) -> Result<(), ForgeError> {
Ok(()) Ok(())
} }
/// Scan a colon-delimited file (passwd or group) and return the next available
/// ID. The `id_field` parameter is the 0-based column index containing the
/// numeric ID (field 2 holds the UID in passwd and the GID in group).
/// Returns at least 1000, the conventional first non-system ID; a missing or
/// unreadable file yields 1000 so a fresh staging root starts there.
fn next_id_from_file(path: &Path, id_field: usize) -> u32 {
    const MIN_ID: u32 = 1000;
    let content = match std::fs::read_to_string(path) {
        Ok(c) => c,
        // No file yet (fresh staging root) -> start at the minimum.
        Err(_) => return MIN_ID,
    };
    let max_id = content
        .lines()
        .filter_map(|line| {
            // Malformed lines (missing field or non-numeric ID) are skipped.
            let fields: Vec<&str> = line.split(':').collect();
            fields.get(id_field)?.parse::<u32>().ok()
        })
        // Ignore system accounts below the user-ID range.
        .filter(|&id| id >= MIN_ID)
        .max();
    match max_id {
        // saturating_add guards against overflow if an entry uses u32::MAX.
        Some(id) => id.saturating_add(1),
        None => MIN_ID,
    }
}
fn append_or_create(path: &Path, content: &str) -> Result<(), std::io::Error> { fn append_or_create(path: &Path, content: &str) -> Result<(), std::io::Error> {
use std::io::Write; use std::io::Write;
let mut file = std::fs::OpenOptions::new() let mut file = std::fs::OpenOptions::new()
@ -98,16 +129,25 @@ mod tests {
let customization = Customization { let customization = Customization {
r#if: None, r#if: None,
users: vec![ users: vec![
User { name: "alice".to_string() }, User {
User { name: "bob".to_string() }, name: "alice".to_string(),
},
User {
name: "bob".to_string(),
},
], ],
}; };
apply(&customization, staging.path()).unwrap(); apply(&customization, staging.path()).unwrap();
let passwd = std::fs::read_to_string(staging.path().join("etc/passwd")).unwrap(); let passwd = std::fs::read_to_string(staging.path().join("etc/passwd")).unwrap();
assert!(passwd.contains("alice:x:1000:1000::/home/alice:/bin/sh"));
assert!(passwd.contains("bob:x:1001:1001::/home/bob:/bin/sh"));
let group = std::fs::read_to_string(staging.path().join("etc/group")).unwrap();
assert!(passwd.contains("alice")); assert!(passwd.contains("alice"));
assert!(passwd.contains("bob")); assert!(group.contains("alice::1000:"));
assert!(group.contains("bob::1001:"));
} }
#[test] #[test]

View file

@ -6,6 +6,80 @@ use tracing::info;
use crate::error::ForgeError; use crate::error::ForgeError;
use crate::tools::ToolRunner; use crate::tools::ToolRunner;
/// Set ownership on a file or directory by looking up the user/group names
/// in the staging root's /etc/passwd and /etc/group files.
///
/// Uses lchown(2) so ownership is applied to a symlink itself rather than
/// its target. EPERM is tolerated (unprivileged builds keep going); any
/// other OS error is surfaced as `ForgeError::Overlay`. A `None` owner or
/// group means "leave unchanged" (passed to lchown as (uid_t)-1).
#[cfg(unix)]
fn set_ownership(
    path: &Path,
    owner: Option<&str>,
    group: Option<&str>,
    staging_root: &Path,
) -> Result<(), ForgeError> {
    use std::ffi::CString;
    // Nothing requested -> nothing to do.
    if owner.is_none() && group.is_none() {
        return Ok(());
    }
    let uid = match owner {
        Some(name) => lookup_id_by_name(
            &staging_root.join("etc/passwd"),
            name,
            2, // UID is field 2
        )
        // NOTE(review): silently falls back to uid 0 (root) when the name is
        // missing from the staging passwd — confirm this is intended rather
        // than an error.
        .unwrap_or(0),
        None => u32::MAX, // -1 means "don't change"
    };
    let gid = match group {
        Some(name) => lookup_id_by_name(
            &staging_root.join("etc/group"),
            name,
            2, // GID is field 2
        )
        // NOTE(review): same silent root fallback as above, for the group.
        .unwrap_or(0),
        None => u32::MAX,
    };
    // CString::new only fails if the path contains an interior NUL byte.
    let c_path = CString::new(path.to_string_lossy().as_bytes()).map_err(|_| {
        ForgeError::Overlay {
            action: format!("set ownership on {}", path.display()),
            detail: "path contains null byte".to_string(),
            source: std::io::Error::new(std::io::ErrorKind::InvalidInput, "null byte in path"),
        }
    })?;
    // Use lchown to avoid following symlinks
    // SAFETY: c_path is a valid, NUL-terminated C string for the call's duration.
    let ret =
        unsafe { libc::lchown(c_path.as_ptr(), uid, gid) };
    if ret != 0 {
        let err = std::io::Error::last_os_error();
        // Don't fail on permission errors in unprivileged builds
        if err.kind() != std::io::ErrorKind::PermissionDenied {
            return Err(ForgeError::Overlay {
                action: format!("set ownership on {}", path.display()),
                detail: format!("owner={:?} group={:?}", owner, group),
                source: err,
            });
        }
    }
    Ok(())
}
/// Look up a numeric ID from a colon-delimited file (passwd or group) by name.
///
/// Returns the value of the 0-based `id_field` column from the first line
/// whose first field equals `name` AND whose ID column parses as a u32.
/// Returns `None` when the file is unreadable or no such line exists.
fn lookup_id_by_name(path: &Path, name: &str, id_field: usize) -> Option<u32> {
    let content = std::fs::read_to_string(path).ok()?;
    for line in content.lines() {
        let cols: Vec<&str> = line.split(':').collect();
        // Only lines whose first column matches the requested name count.
        if cols.first().copied() != Some(name) {
            continue;
        }
        // A matching line with a missing or non-numeric ID column is skipped
        // and the scan continues (mirrors the lazy find_map semantics).
        if let Some(id) = cols.get(id_field).and_then(|f| f.parse::<u32>().ok()) {
            return Some(id);
        }
    }
    None
}
/// Apply a list of overlay actions to the staging root. /// Apply a list of overlay actions to the staging root.
pub async fn apply_overlays( pub async fn apply_overlays(
actions: &[OverlayAction], actions: &[OverlayAction],
@ -84,6 +158,15 @@ async fn apply_action(
})?; })?;
} }
} }
// Set ownership if specified
#[cfg(unix)]
set_ownership(
&dest,
file_overlay.owner.as_deref(),
file_overlay.group.as_deref(),
staging_root,
)?;
} }
OverlayAction::Devfsadm(_) => { OverlayAction::Devfsadm(_) => {
@ -120,6 +203,15 @@ async fn apply_action(
})?; })?;
} }
} }
// Set ownership if specified
#[cfg(unix)]
set_ownership(
&dir_path,
ensure_dir.owner.as_deref(),
ensure_dir.group.as_deref(),
staging_root,
)?;
} }
OverlayAction::RemoveFiles(remove) => { OverlayAction::RemoveFiles(remove) => {
@ -253,6 +345,15 @@ async fn apply_action(
} }
})?; })?;
// Set ownership if specified
#[cfg(unix)]
set_ownership(
&link_path,
symlink.owner.as_deref(),
symlink.group.as_deref(),
staging_root,
)?;
#[cfg(not(unix))] #[cfg(not(unix))]
return Err(ForgeError::Overlay { return Err(ForgeError::Overlay {
action: format!("create symlink {} -> {}", symlink.path, symlink.target), action: format!("create symlink {} -> {}", symlink.path, symlink.target),

View file

@ -6,7 +6,7 @@ pub mod qcow2_zfs;
use std::path::Path; use std::path::Path;
use spec_parser::schema::{Target, TargetKind}; use spec_parser::schema::{DistroFamily, Target, TargetKind};
use tracing::info; use tracing::info;
use crate::error::ForgeError; use crate::error::ForgeError;
@ -20,6 +20,7 @@ pub async fn execute(
staging_root: &Path, staging_root: &Path,
files_dir: &Path, files_dir: &Path,
output_dir: &Path, output_dir: &Path,
distro: &DistroFamily,
) -> Result<(), ForgeError> { ) -> Result<(), ForgeError> {
info!( info!(
target = %target.name, target = %target.name,
@ -29,7 +30,7 @@ pub async fn execute(
match target.kind { match target.kind {
TargetKind::Oci => { TargetKind::Oci => {
oci::build_oci(target, staging_root, output_dir)?; oci::build_oci(target, staging_root, output_dir, distro)?;
} }
TargetKind::Artifact => { TargetKind::Artifact => {
artifact::build_artifact(target, staging_root, output_dir, files_dir)?; artifact::build_artifact(target, staging_root, output_dir, files_dir)?;
@ -47,6 +48,8 @@ pub async fn execute(
pub async fn push_qcow2_if_configured( pub async fn push_qcow2_if_configured(
target: &Target, target: &Target,
output_dir: &Path, output_dir: &Path,
distro: &DistroFamily,
version: &str,
) -> Result<(), ForgeError> { ) -> Result<(), ForgeError> {
if let Some(ref push_ref) = target.push_to { if let Some(ref push_ref) = target.push_to {
let qcow2_path = output_dir.join(format!("{}.qcow2", target.name)); let qcow2_path = output_dir.join(format!("{}.qcow2", target.name));
@ -65,9 +68,9 @@ pub async fn push_qcow2_if_configured(
let metadata = forge_oci::artifact::Qcow2Metadata { let metadata = forge_oci::artifact::Qcow2Metadata {
name: target.name.clone(), name: target.name.clone(),
version: "latest".to_string(), version: version.to_string(),
architecture: "amd64".to_string(), architecture: "amd64".to_string(),
os: "linux".to_string(), os: distro.oci_os().to_string(),
description: None, description: None,
}; };

View file

@ -1,6 +1,6 @@
use std::path::Path; use std::path::Path;
use spec_parser::schema::Target; use spec_parser::schema::{DistroFamily, Target};
use tracing::info; use tracing::info;
use crate::error::ForgeError; use crate::error::ForgeError;
@ -10,6 +10,7 @@ pub fn build_oci(
target: &Target, target: &Target,
staging_root: &Path, staging_root: &Path,
output_dir: &Path, output_dir: &Path,
distro: &DistroFamily,
) -> Result<(), ForgeError> { ) -> Result<(), ForgeError> {
info!("Building OCI container image"); info!("Building OCI container image");
@ -24,7 +25,10 @@ pub fn build_oci(
); );
// Build image options from target spec // Build image options from target spec
let mut options = forge_oci::manifest::ImageOptions::default(); let mut options = forge_oci::manifest::ImageOptions {
os: distro.oci_os().to_string(),
..Default::default()
};
if let Some(ref ep) = target.entrypoint { if let Some(ref ep) = target.entrypoint {
options.entrypoint = Some(vec![ep.command.clone()]); options.entrypoint = Some(vec![ep.command.clone()]);
@ -54,7 +58,7 @@ pub fn build_oci(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use spec_parser::schema::{Entrypoint, Environment, EnvVar, TargetKind}; use spec_parser::schema::{DistroFamily, Entrypoint, Environment, EnvVar, TargetKind};
use tempfile::TempDir; use tempfile::TempDir;
fn make_target(name: &str, entrypoint: Option<Entrypoint>, env: Option<Environment>) -> Target { fn make_target(name: &str, entrypoint: Option<Entrypoint>, env: Option<Environment>) -> Target {
@ -82,7 +86,7 @@ mod tests {
std::fs::write(staging.path().join("etc/motd"), "Welcome\n").unwrap(); std::fs::write(staging.path().join("etc/motd"), "Welcome\n").unwrap();
let target = make_target("container", None, None); let target = make_target("container", None, None);
build_oci(&target, staging.path(), output.path()).unwrap(); build_oci(&target, staging.path(), output.path(), &DistroFamily::OmniOS).unwrap();
let oci_dir = output.path().join("container-oci"); let oci_dir = output.path().join("container-oci");
assert!(oci_dir.exists()); assert!(oci_dir.exists());
@ -111,7 +115,7 @@ mod tests {
}), }),
); );
build_oci(&target, staging.path(), output.path()).unwrap(); build_oci(&target, staging.path(), output.path(), &DistroFamily::OmniOS).unwrap();
let oci_dir = output.path().join("app-oci"); let oci_dir = output.path().join("app-oci");
assert!(oci_dir.join("oci-layout").exists()); assert!(oci_dir.join("oci-layout").exists());
@ -130,7 +134,7 @@ mod tests {
let output = TempDir::new().unwrap(); let output = TempDir::new().unwrap();
let target = make_target("minimal", None, None); let target = make_target("minimal", None, None);
build_oci(&target, staging.path(), output.path()).unwrap(); build_oci(&target, staging.path(), output.path(), &DistroFamily::OmniOS).unwrap();
assert!(output.path().join("minimal-oci/oci-layout").exists()); assert!(output.path().join("minimal-oci/oci-layout").exists());
} }

View file

@ -69,19 +69,29 @@ pub async fn write_sources_list(
/// Add an APT source entry to the chroot's sources.list.d/. /// Add an APT source entry to the chroot's sources.list.d/.
pub async fn add_source( pub async fn add_source(
runner: &dyn ToolRunner, _runner: &dyn ToolRunner,
root: &str, root: &str,
entry: &str, entry: &str,
) -> Result<(), ForgeError> { ) -> Result<(), ForgeError> {
use std::io::Write;
info!(root, entry, "Adding APT source"); info!(root, entry, "Adding APT source");
let list_path = Path::new(root) let list_path = Path::new(root).join("etc/apt/sources.list.d/extra.list");
.join("etc/apt/sources.list.d/extra.list");
let list_str = list_path.to_str().unwrap_or("extra.list");
// Append entry to the sources list file // Append entry directly to the file, avoiding shell interpolation
runner let mut file = std::fs::OpenOptions::new()
.run("sh", &["-c", &format!("echo '{entry}' >> {list_str}")]) .create(true)
.await?; .append(true)
.open(&list_path)
.map_err(|e| ForgeError::Overlay {
action: "add APT source".to_string(),
detail: list_path.display().to_string(),
source: e,
})?;
writeln!(file, "{entry}").map_err(|e| ForgeError::Overlay {
action: "write APT source entry".to_string(),
detail: list_path.display().to_string(),
source: e,
})?;
Ok(()) Ok(())
} }

View file

@ -48,9 +48,8 @@ pub fn build_manifest(
layers: &[LayerBlob], layers: &[LayerBlob],
options: &ImageOptions, options: &ImageOptions,
) -> Result<(Vec<u8>, Vec<u8>), ManifestError> { ) -> Result<(Vec<u8>, Vec<u8>), ManifestError> {
// Build the diff_ids for the rootfs (uncompressed layer digests aren't tracked here, // diff_ids must be uncompressed layer digests per OCI image spec
// so we use the compressed digest -- in a full implementation you'd track both) let diff_ids: Vec<String> = layers.iter().map(|l| l.uncompressed_digest.clone()).collect();
let diff_ids: Vec<String> = layers.iter().map(|l| l.digest.clone()).collect();
let rootfs = RootFsBuilder::default() let rootfs = RootFsBuilder::default()
.typ("layers") .typ("layers")
@ -154,6 +153,7 @@ mod tests {
build_manifest(&[layer], &ImageOptions::default()).unwrap(); build_manifest(&[layer], &ImageOptions::default()).unwrap();
let config: serde_json::Value = serde_json::from_slice(&config_json).unwrap(); let config: serde_json::Value = serde_json::from_slice(&config_json).unwrap();
// Default is OmniOS → "solaris" in OCI terms
assert_eq!(config["os"], "solaris"); assert_eq!(config["os"], "solaris");
assert_eq!(config["architecture"], "amd64"); assert_eq!(config["architecture"], "amd64");

View file

@ -34,8 +34,11 @@ pub enum TarLayerError {
pub struct LayerBlob { pub struct LayerBlob {
/// Compressed tar.gz data /// Compressed tar.gz data
pub data: Vec<u8>, pub data: Vec<u8>,
/// SHA-256 digest of the compressed data (hex-encoded) /// SHA-256 digest of the compressed data (format: "sha256:<hex>")
pub digest: String, pub digest: String,
/// SHA-256 digest of the uncompressed tar data (format: "sha256:<hex>")
/// Used as the OCI diff_id per the image spec.
pub uncompressed_digest: String,
/// Uncompressed size in bytes /// Uncompressed size in bytes
pub uncompressed_size: u64, pub uncompressed_size: u64,
} }
@ -112,8 +115,28 @@ pub fn create_layer(staging_dir: &Path) -> Result<LayerBlob, TarLayerError> {
} }
let encoder = tar.into_inner().map_err(TarLayerError::TarCreate)?; let encoder = tar.into_inner().map_err(TarLayerError::TarCreate)?;
// Get the uncompressed tar data to compute diff_id before finishing gzip
let uncompressed_tar = encoder.get_ref().clone();
// Actually we need the tar bytes before gzip. The encoder wraps the output buffer.
// Let's compute from the gzip encoder's inner buffer differently.
// The GzEncoder accumulates compressed data. We need to hash the *uncompressed* tar.
// Rebuild: finish the tar into the gzip encoder, then finish gzip.
let compressed = encoder.finish().map_err(TarLayerError::TarCreate)?; let compressed = encoder.finish().map_err(TarLayerError::TarCreate)?;
// To get the uncompressed tar, decompress it back (simplest correct approach)
use flate2::read::GzDecoder;
use std::io::Read;
let mut decoder = GzDecoder::new(compressed.as_slice());
let mut uncompressed_tar = Vec::new();
decoder
.read_to_end(&mut uncompressed_tar)
.map_err(TarLayerError::TarCreate)?;
let mut uncompressed_hasher = Sha256::new();
uncompressed_hasher.update(&uncompressed_tar);
let uncompressed_digest = format!("sha256:{}", hex::encode(uncompressed_hasher.finalize()));
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
hasher.update(&compressed); hasher.update(&compressed);
let digest = format!("sha256:{}", hex::encode(hasher.finalize())); let digest = format!("sha256:{}", hex::encode(hasher.finalize()));
@ -121,7 +144,8 @@ pub fn create_layer(staging_dir: &Path) -> Result<LayerBlob, TarLayerError> {
Ok(LayerBlob { Ok(LayerBlob {
data: compressed, data: compressed,
digest, digest,
uncompressed_size, uncompressed_digest,
uncompressed_size: uncompressed_tar.len() as u64,
}) })
} }

View file

@ -22,3 +22,6 @@ tokio = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
indicatif = { workspace = true } indicatif = { workspace = true }
flate2 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }

View file

@ -132,10 +132,23 @@ async fn push_oci_layout(
.into_diagnostic() .into_diagnostic()
.wrap_err_with(|| format!("Failed to read layer blob: {layer_digest}"))?; .wrap_err_with(|| format!("Failed to read layer blob: {layer_digest}"))?;
// Decompress to get uncompressed size and digest for diff_id
let mut decoder = flate2::read::GzDecoder::new(layer_data.as_slice());
let mut uncompressed = Vec::new();
std::io::Read::read_to_end(&mut decoder, &mut uncompressed)
.into_diagnostic()
.wrap_err_with(|| format!("Failed to decompress layer: {layer_digest}"))?;
use sha2::{Digest, Sha256};
let mut hasher = Sha256::new();
hasher.update(&uncompressed);
let uncompressed_digest = format!("sha256:{}", hex::encode(hasher.finalize()));
layers.push(forge_oci::tar_layer::LayerBlob { layers.push(forge_oci::tar_layer::LayerBlob {
data: layer_data, data: layer_data,
digest: layer_digest.to_string(), digest: layer_digest.to_string(),
uncompressed_size: 0, uncompressed_digest,
uncompressed_size: uncompressed.len() as u64,
}); });
} }

View file

@ -219,8 +219,62 @@ fn merge_base(mut base: ImageSpec, child: ImageSpec) -> ImageSpec {
} }
/// Merge an included spec into the current spec. Includes contribute /// Merge an included spec into the current spec. Includes contribute
/// packages, customizations, and overlays but not metadata/targets. /// repositories, variants, certificates, packages, customizations, and
/// overlays — but not metadata, distro, targets, or builder.
fn merge_include(spec: &mut ImageSpec, included: ImageSpec) { fn merge_include(spec: &mut ImageSpec, included: ImageSpec) {
// Merge publishers (dedup by name)
for pub_entry in included.repositories.publishers {
if !spec
.repositories
.publishers
.iter()
.any(|p| p.name == pub_entry.name)
{
spec.repositories.publishers.push(pub_entry);
}
}
// Merge apt_mirrors (dedup by URL)
for mirror in included.repositories.apt_mirrors {
if !spec
.repositories
.apt_mirrors
.iter()
.any(|m| m.url == mirror.url)
{
spec.repositories.apt_mirrors.push(mirror);
}
}
// Merge variants
if let Some(inc_variants) = included.variants {
if let Some(ref mut spec_variants) = spec.variants {
for var in inc_variants.vars {
if let Some(existing) = spec_variants.vars.iter_mut().find(|v| v.name == var.name) {
existing.value = var.value;
} else {
spec_variants.vars.push(var);
}
}
} else {
spec.variants = Some(inc_variants);
}
}
// Merge certificates
if let Some(inc_certs) = included.certificates {
if let Some(ref mut spec_certs) = spec.certificates {
spec_certs.ca.extend(inc_certs.ca);
} else {
spec.certificates = Some(inc_certs);
}
}
// Merge incorporation (included overrides only if spec doesn't have one)
if spec.incorporation.is_none() && included.incorporation.is_some() {
spec.incorporation = included.incorporation;
}
spec.packages.extend(included.packages); spec.packages.extend(included.packages);
spec.customizations.extend(included.customizations); spec.customizations.extend(included.customizations);
spec.overlays.extend(included.overlays); spec.overlays.extend(included.overlays);
@ -274,6 +328,17 @@ mod tests {
let resolved = resolve(spec, tmp.path()).unwrap(); let resolved = resolve(spec, tmp.path()).unwrap();
assert_eq!(resolved.metadata.name, "root"); assert_eq!(resolved.metadata.name, "root");
assert_eq!(resolved.repositories.publishers.len(), 2);
assert!(resolved
.repositories
.publishers
.iter()
.any(|p| p.name == "main"));
assert!(resolved
.repositories
.publishers
.iter()
.any(|p| p.name == "extra"));
assert_eq!(resolved.packages.len(), 2); assert_eq!(resolved.packages.len(), 2);
assert_eq!(resolved.overlays.len(), 1); assert_eq!(resolved.overlays.len(), 1);
} }

View file

@ -16,6 +16,14 @@ impl DistroFamily {
_ => DistroFamily::OmniOS, _ => DistroFamily::OmniOS,
} }
} }
/// Return the OCI OS value for this distro family.
///
/// OmniOS is illumos-based and is published as "solaris" in OCI image
/// configs; Ubuntu maps to "linux".
pub fn oci_os(&self) -> &'static str {
    match *self {
        DistroFamily::Ubuntu => "linux",
        DistroFamily::OmniOS => "solaris",
    }
}
} }
#[derive(Debug, Decode)] #[derive(Debug, Decode)]