Refactor ImageCatalog and apply manifest logic

- Added support for obsoleted package database with separate handling in `ImageCatalog`.
- Enhanced apply manifest functionality with progress callback support and processing statistics.
- Introduced LZ4 compression for manifest storage in `ImageCatalog`.
- Removed debugging eprintln statements, replaced with structured logging.
- Updated `pkg6` image creation and installation logic to improve user feedback and error handling.
- Updated database initialization and build processes to handle new obsoleted logic.
This commit is contained in:
Till Wegmueller 2025-08-19 22:43:50 +02:00
parent 77147999b3
commit e4bd9a748a
No known key found for this signature in database
8 changed files with 523 additions and 231 deletions

View file

@ -37,7 +37,7 @@ diff-struct = "0.5.3"
chrono = "0.4.41" chrono = "0.4.41"
tempfile = "3.20.0" tempfile = "3.20.0"
walkdir = "2.4.0" walkdir = "2.4.0"
redb = "3" redb = { version = "3" }
bincode = { version = "2", features = ["serde"] } bincode = { version = "2", features = ["serde"] }
rust-ini = "0.21" rust-ini = "0.21"
reqwest = { version = "0.12", features = ["blocking", "json"] } reqwest = { version = "0.12", features = ["blocking", "json"] }

View file

@ -3,6 +3,7 @@ use std::io::{self, Write};
use std::os::unix::fs as unix_fs; use std::os::unix::fs as unix_fs;
use std::os::unix::fs::PermissionsExt; use std::os::unix::fs::PermissionsExt;
use std::path::{Component, Path, PathBuf}; use std::path::{Component, Path, PathBuf};
use std::sync::Arc;
use miette::Diagnostic; use miette::Diagnostic;
use thiserror::Error; use thiserror::Error;
@ -93,28 +94,86 @@ pub enum ActionOrder {
Other = 3, Other = 3,
} }
#[derive(Debug, Default, Clone)] #[derive(Clone)]
pub struct ApplyOptions { pub struct ApplyOptions {
pub dry_run: bool, pub dry_run: bool,
/// Optional progress callback. If set, library will emit coarse-grained progress events.
pub progress: Option<ProgressCallback>,
/// Emit numeric progress every N items per phase. 0 disables periodic progress.
pub progress_interval: usize,
} }
/// Manual `Debug`: the `progress` callback is a trait object and cannot
/// derive `Debug`, so it is rendered as an opaque presence marker instead.
impl std::fmt::Debug for ApplyOptions {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Show only whether a callback is installed, never the callback itself.
        let progress_repr = self.progress.as_ref().map(|_| "Some(callback)");
        f.debug_struct("ApplyOptions")
            .field("dry_run", &self.dry_run)
            .field("progress", &progress_repr)
            .field("progress_interval", &self.progress_interval)
            .finish()
    }
}
impl Default for ApplyOptions {
fn default() -> Self {
Self { dry_run: false, progress: None, progress_interval: 0 }
}
}
/// Progress event emitted by apply_manifest when a callback is provided.
#[derive(Debug, Clone, Copy)]
pub enum ProgressEvent {
    /// A phase ("directories", "files", or "links") is about to begin;
    /// `total` is the number of items the phase will process.
    StartingPhase { phase: &'static str, total: usize },
    /// Periodic progress inside a phase: `current` of `total` items done.
    /// Emitted every `progress_interval` items (and on the final item).
    Progress { phase: &'static str, current: usize, total: usize },
    /// A phase finished; `total` items were processed.
    FinishedPhase { phase: &'static str, total: usize },
}
pub type ProgressCallback = Arc<dyn Fn(ProgressEvent) + Send + Sync + 'static>;
/// Apply a manifest to the filesystem rooted at image_root. /// Apply a manifest to the filesystem rooted at image_root.
/// This function enforces ordering: directories, then files, then links, then others (no-ops for now). /// This function enforces ordering: directories, then files, then links, then others (no-ops for now).
pub fn apply_manifest(image_root: &Path, manifest: &Manifest, opts: &ApplyOptions) -> Result<(), InstallerError> { pub fn apply_manifest(image_root: &Path, manifest: &Manifest, opts: &ApplyOptions) -> Result<(), InstallerError> {
let emit = |evt: ProgressEvent, cb: &Option<ProgressCallback>| {
if let Some(cb) = cb.as_ref() { (cb)(evt); }
};
// Directories first // Directories first
let total_dirs = manifest.directories.len();
if total_dirs > 0 { emit(ProgressEvent::StartingPhase { phase: "directories", total: total_dirs }, &opts.progress); }
let mut i = 0usize;
for d in &manifest.directories { for d in &manifest.directories {
apply_dir(image_root, d, opts)?; apply_dir(image_root, d, opts)?;
i += 1;
if opts.progress_interval > 0 && (i % opts.progress_interval == 0 || i == total_dirs) {
emit(ProgressEvent::Progress { phase: "directories", current: i, total: total_dirs }, &opts.progress);
}
} }
if total_dirs > 0 { emit(ProgressEvent::FinishedPhase { phase: "directories", total: total_dirs }, &opts.progress); }
// Files next // Files next
for f in &manifest.files { let total_files = manifest.files.len();
apply_file(image_root, f, opts)?; if total_files > 0 { emit(ProgressEvent::StartingPhase { phase: "files", total: total_files }, &opts.progress); }
i = 0;
for f_action in &manifest.files {
apply_file(image_root, f_action, opts)?;
i += 1;
if opts.progress_interval > 0 && (i % opts.progress_interval == 0 || i == total_files) {
emit(ProgressEvent::Progress { phase: "files", current: i, total: total_files }, &opts.progress);
}
} }
if total_files > 0 { emit(ProgressEvent::FinishedPhase { phase: "files", total: total_files }, &opts.progress); }
// Links // Links
let total_links = manifest.links.len();
if total_links > 0 { emit(ProgressEvent::StartingPhase { phase: "links", total: total_links }, &opts.progress); }
i = 0;
for l in &manifest.links { for l in &manifest.links {
apply_link(image_root, l, opts)?; apply_link(image_root, l, opts)?;
i += 1;
if opts.progress_interval > 0 && (i % opts.progress_interval == 0 || i == total_links) {
emit(ProgressEvent::Progress { phase: "links", current: i, total: total_links }, &opts.progress);
}
} }
if total_links > 0 { emit(ProgressEvent::FinishedPhase { phase: "links", total: total_links }, &opts.progress); }
// Other action kinds are ignored for now and left for future extension. // Other action kinds are ignored for now and left for future extension.
Ok(()) Ok(())

View file

@ -55,7 +55,7 @@ mod tests {
assert!(ap.manifest.directories.is_empty()); assert!(ap.manifest.directories.is_empty());
assert!(ap.manifest.files.is_empty()); assert!(ap.manifest.files.is_empty());
assert!(ap.manifest.links.is_empty()); assert!(ap.manifest.links.is_empty());
let opts = ApplyOptions { dry_run: true }; let opts = ApplyOptions { dry_run: true, ..Default::default() };
let root = Path::new("/tmp/ips_image_test_nonexistent_root"); let root = Path::new("/tmp/ips_image_test_nonexistent_root");
// Even if root doesn't exist, dry_run should not perform any IO and succeed. // Even if root doesn't exist, dry_run should not perform any IO and succeed.
let res = ap.apply(root, &opts); let res = ap.apply(root, &opts);

View file

@ -8,6 +8,8 @@ use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use thiserror::Error; use thiserror::Error;
use tracing::{info, warn, trace}; use tracing::{info, warn, trace};
use std::io::{Cursor, Read, Write};
use lz4::{Decoder as Lz4Decoder, EncoderBuilder as Lz4EncoderBuilder};
/// Table definition for the catalog database /// Table definition for the catalog database
/// Key: stem@version /// Key: stem@version
@ -24,10 +26,6 @@ pub const OBSOLETED_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("
/// Value: version string as bytes (same format as Fmri::version()) /// Value: version string as bytes (same format as Fmri::version())
pub const INCORPORATE_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("incorporate"); pub const INCORPORATE_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("incorporate");
/// Table definition for the installed packages database
/// Key: full FMRI including publisher (pkg://publisher/stem@version)
/// Value: serialized Manifest
pub const INSTALLED_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("installed");
/// Errors that can occur when working with the image catalog /// Errors that can occur when working with the image catalog
#[derive(Error, Debug, Diagnostic)] #[derive(Error, Debug, Diagnostic)]
@ -64,6 +62,48 @@ pub enum CatalogError {
/// Result type for catalog operations /// Result type for catalog operations
pub type Result<T> = std::result::Result<T, CatalogError>; pub type Result<T> = std::result::Result<T, CatalogError>;
// Internal helpers for (de)compressing manifest JSON payloads stored in redb
/// Heuristically detect an uncompressed JSON payload: after skipping leading
/// ASCII whitespace (space, `\n`, `\r`, `\t`), the first byte must open a JSON
/// object or array. Empty/all-whitespace input is not JSON.
fn is_likely_json(bytes: &[u8]) -> bool {
    bytes
        .iter()
        .find(|b| !matches!(b, b' ' | b'\n' | b'\r' | b'\t'))
        .map_or(false, |&b| b == b'{' || b == b'[')
}
/// Compress a JSON payload into an LZ4 frame for storage in redb.
///
/// Returns the compressed bytes, or `CatalogError::Database` if the encoder
/// cannot be created, written to, or finalized.
fn compress_json_lz4(bytes: &[u8]) -> Result<Vec<u8>> {
    // Pre-size at roughly half the input; JSON typically compresses well.
    let mut buf = Vec::with_capacity(bytes.len() / 2 + 32);
    let mut encoder = Lz4EncoderBuilder::new()
        .level(4)
        .build(Cursor::new(&mut buf))
        .map_err(|e| CatalogError::Database(format!("Failed to create LZ4 encoder: {}", e)))?;
    encoder
        .write_all(bytes)
        .map_err(|e| CatalogError::Database(format!("Failed to write to LZ4 encoder: {}", e)))?;
    // finish() hands back the inner writer plus the final result; the frame is
    // only complete if that result is Ok.
    let (_writer, outcome) = encoder.finish();
    outcome.map_err(|e| CatalogError::Database(format!("Failed to finish LZ4 encoding: {}", e)))?;
    Ok(buf)
}
/// Decode a stored manifest payload, accepting both formats found in the
/// database: legacy uncompressed JSON and LZ4-compressed JSON.
///
/// Strategy: if the bytes look like raw JSON, parse directly; otherwise try
/// an LZ4 frame decode and parse the decompressed bytes. Any LZ4 failure
/// (decoder construction or read) falls back to parsing the raw bytes as
/// JSON, so the final error reported is always a JSON parse error.
fn decode_manifest_bytes(bytes: &[u8]) -> Result<Manifest> {
    // Fast path for legacy entries written before compression was introduced.
    if is_likely_json(bytes) {
        return Ok(serde_json::from_slice::<Manifest>(bytes)?);
    }
    // Attempt LZ4 frame decompression; None signals "fall back to raw JSON".
    let decompressed: Option<Vec<u8>> = Lz4Decoder::new(Cursor::new(bytes))
        .ok()
        .and_then(|mut decoder| {
            let mut buf = Vec::new();
            decoder.read_to_end(&mut buf).ok().map(|_| buf)
        });
    match decompressed {
        Some(buf) => Ok(serde_json::from_slice::<Manifest>(&buf)?),
        None => Ok(serde_json::from_slice::<Manifest>(bytes)?),
    }
}
/// Information about a package in the catalog /// Information about a package in the catalog
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PackageInfo { pub struct PackageInfo {
@ -79,8 +119,10 @@ pub struct PackageInfo {
/// The image catalog, which merges catalogs from all publishers /// The image catalog, which merges catalogs from all publishers
pub struct ImageCatalog { pub struct ImageCatalog {
/// Path to the catalog database /// Path to the catalog database (non-obsolete manifests)
db_path: PathBuf, db_path: PathBuf,
/// Path to the separate obsoleted database
obsoleted_db_path: PathBuf,
/// Path to the catalog directory /// Path to the catalog directory
catalog_dir: PathBuf, catalog_dir: PathBuf,
@ -88,29 +130,37 @@ pub struct ImageCatalog {
impl ImageCatalog { impl ImageCatalog {
/// Create a new image catalog /// Create a new image catalog
pub fn new<P: AsRef<Path>>(catalog_dir: P, db_path: P) -> Self { pub fn new<P: AsRef<Path>>(catalog_dir: P, db_path: P, obsoleted_db_path: P) -> Self {
ImageCatalog { ImageCatalog {
db_path: db_path.as_ref().to_path_buf(), db_path: db_path.as_ref().to_path_buf(),
obsoleted_db_path: obsoleted_db_path.as_ref().to_path_buf(),
catalog_dir: catalog_dir.as_ref().to_path_buf(), catalog_dir: catalog_dir.as_ref().to_path_buf(),
} }
} }
/// Dump the contents of a specific table to stdout for debugging /// Dump the contents of a specific table to stdout for debugging
pub fn dump_table(&self, table_name: &str) -> Result<()> { pub fn dump_table(&self, table_name: &str) -> Result<()> {
// Open the database // Determine which table to dump and open the appropriate database
let db = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?;
// Begin a read transaction
let tx = db.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Determine which table to dump
match table_name { match table_name {
"catalog" => self.dump_catalog_table(&tx)?, "catalog" => {
"obsoleted" => self.dump_obsoleted_table(&tx)?, let db = Database::open(&self.db_path)
"installed" => self.dump_installed_table(&tx)?, .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
let tx = db.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
self.dump_catalog_table(&tx)?;
}
"obsoleted" => {
let db = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
let tx = db.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
self.dump_obsoleted_table(&tx)?;
}
"incorporate" => { "incorporate" => {
let db = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
let tx = db.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Simple dump of incorporate locks // Simple dump of incorporate locks
if let Ok(table) = tx.open_table(INCORPORATE_TABLE) { if let Ok(table) = tx.open_table(INCORPORATE_TABLE) {
for entry in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate incorporate table: {}", e)))? { for entry in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate incorporate table: {}", e)))? {
@ -129,22 +179,21 @@ impl ImageCatalog {
/// Dump the contents of all tables to stdout for debugging /// Dump the contents of all tables to stdout for debugging
pub fn dump_all_tables(&self) -> Result<()> { pub fn dump_all_tables(&self) -> Result<()> {
// Open the database // Catalog DB
let db = Database::open(&self.db_path) let db_cat = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
let tx_cat = db_cat.begin_read()
// Begin a read transaction
let tx = db.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
println!("=== CATALOG TABLE ==="); println!("=== CATALOG TABLE ===");
let _ = self.dump_catalog_table(&tx); let _ = self.dump_catalog_table(&tx_cat);
// Obsoleted DB
let db_obs = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
let tx_obs = db_obs.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
println!("\n=== OBSOLETED TABLE ==="); println!("\n=== OBSOLETED TABLE ===");
let _ = self.dump_obsoleted_table(&tx); let _ = self.dump_obsoleted_table(&tx_obs);
println!("\n=== INSTALLED TABLE ===");
let _ = self.dump_installed_table(&tx);
Ok(()) Ok(())
} }
@ -158,8 +207,8 @@ impl ImageCatalog {
let (key, value) = entry_result.map_err(|e| CatalogError::Database(format!("Failed to get entry from catalog table: {}", e)))?; let (key, value) = entry_result.map_err(|e| CatalogError::Database(format!("Failed to get entry from catalog table: {}", e)))?;
let key_str = key.value(); let key_str = key.value();
// Try to deserialize the manifest // Try to deserialize the manifest (supports JSON or LZ4-compressed JSON)
match serde_json::from_slice::<Manifest>(value.value()) { match decode_manifest_bytes(value.value()) {
Ok(manifest) => { Ok(manifest) => {
// Extract the publisher from the FMRI attribute // Extract the publisher from the FMRI attribute
let publisher = manifest.attributes.iter() let publisher = manifest.attributes.iter()
@ -213,125 +262,80 @@ impl ImageCatalog {
} }
} }
/// Dump the contents of the installed table
fn dump_installed_table(&self, tx: &redb::ReadTransaction) -> Result<()> {
match tx.open_table(INSTALLED_TABLE) {
Ok(table) => {
let mut count = 0;
for entry_result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate installed table: {}", e)))? {
let (key, value) = entry_result.map_err(|e| CatalogError::Database(format!("Failed to get entry from installed table: {}", e)))?;
let key_str = key.value();
// Try to deserialize the manifest
match serde_json::from_slice::<Manifest>(value.value()) {
Ok(manifest) => {
// Extract the publisher from the FMRI attribute
let publisher = manifest.attributes.iter()
.find(|attr| attr.key == "pkg.fmri")
.and_then(|attr| attr.values.get(0).cloned())
.unwrap_or_else(|| "unknown".to_string());
println!("Key: {}", key_str);
println!(" FMRI: {}", publisher);
println!(" Attributes: {}", manifest.attributes.len());
println!(" Files: {}", manifest.files.len());
println!(" Directories: {}", manifest.directories.len());
println!(" Dependencies: {}", manifest.dependencies.len());
},
Err(e) => {
println!("Key: {}", key_str);
println!(" Error deserializing manifest: {}", e);
}
}
count += 1;
}
println!("Total entries in installed table: {}", count);
Ok(())
},
Err(e) => {
println!("Error opening installed table: {}", e);
Err(CatalogError::Database(format!("Failed to open installed table: {}", e)))
}
}
}
/// Get database statistics /// Get database statistics
pub fn get_db_stats(&self) -> Result<()> { pub fn get_db_stats(&self) -> Result<()> {
// Open the database // Open the catalog database
let db = Database::open(&self.db_path) let db_cat = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
let tx_cat = db_cat.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Begin a read transaction // Open the obsoleted database
let tx = db.begin_read() let db_obs = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
let tx_obs = db_obs.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Get table statistics // Get table statistics
let mut catalog_count = 0; let mut catalog_count = 0;
let mut obsoleted_count = 0; let mut obsoleted_count = 0;
let mut installed_count = 0;
// Count catalog entries // Count catalog entries
if let Ok(table) = tx.open_table(CATALOG_TABLE) { if let Ok(table) = tx_cat.open_table(CATALOG_TABLE) {
for result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate catalog table: {}", e)))? { for result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate catalog table: {}", e)))? {
let _ = result.map_err(|e| CatalogError::Database(format!("Failed to get entry from catalog table: {}", e)))?; let _ = result.map_err(|e| CatalogError::Database(format!("Failed to get entry from catalog table: {}", e)))?;
catalog_count += 1; catalog_count += 1;
} }
} }
// Count obsoleted entries // Count obsoleted entries (separate DB)
if let Ok(table) = tx.open_table(OBSOLETED_TABLE) { if let Ok(table) = tx_obs.open_table(OBSOLETED_TABLE) {
for result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate obsoleted table: {}", e)))? { for result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate obsoleted table: {}", e)))? {
let _ = result.map_err(|e| CatalogError::Database(format!("Failed to get entry from obsoleted table: {}", e)))?; let _ = result.map_err(|e| CatalogError::Database(format!("Failed to get entry from obsoleted table: {}", e)))?;
obsoleted_count += 1; obsoleted_count += 1;
} }
} }
// Count installed entries
if let Ok(table) = tx.open_table(INSTALLED_TABLE) {
for result in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate installed table: {}", e)))? {
let _ = result.map_err(|e| CatalogError::Database(format!("Failed to get entry from installed table: {}", e)))?;
installed_count += 1;
}
}
// Print statistics // Print statistics
println!("Database path: {}", self.db_path.display()); println!("Catalog database path: {}", self.db_path.display());
println!("Obsoleted database path: {}", self.obsoleted_db_path.display());
println!("Catalog directory: {}", self.catalog_dir.display()); println!("Catalog directory: {}", self.catalog_dir.display());
println!("Table statistics:"); println!("Table statistics:");
println!(" Catalog table: {} entries", catalog_count); println!(" Catalog table: {} entries", catalog_count);
println!(" Obsoleted table: {} entries", obsoleted_count); println!(" Obsoleted table: {} entries", obsoleted_count);
println!(" Installed table: {} entries", installed_count); println!("Total entries: {}", catalog_count + obsoleted_count);
println!("Total entries: {}", catalog_count + obsoleted_count + installed_count);
Ok(()) Ok(())
} }
/// Initialize the catalog database /// Initialize the catalog database
pub fn init_db(&self) -> Result<()> { pub fn init_db(&self) -> Result<()> {
// Create a parent directory if it doesn't exist // Ensure parent directories exist
if let Some(parent) = self.db_path.parent() { if let Some(parent) = self.db_path.parent() { fs::create_dir_all(parent)?; }
fs::create_dir_all(parent)?; if let Some(parent) = self.obsoleted_db_path.parent() { fs::create_dir_all(parent)?; }
}
// Open or create the database // Create/open catalog database and tables
let db = Database::create(&self.db_path) let db_cat = Database::create(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to create database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to create catalog database: {}", e)))?;
let tx_cat = db_cat.begin_write()
// Create tables
let tx = db.begin_write()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
tx_cat.open_table(CATALOG_TABLE)
tx.open_table(CATALOG_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create catalog table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to create catalog table: {}", e)))?;
tx_cat.open_table(INCORPORATE_TABLE)
tx.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create obsoleted table: {}", e)))?;
tx.open_table(INCORPORATE_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create incorporate table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to create incorporate table: {}", e)))?;
tx_cat.commit()
.map_err(|e| CatalogError::Database(format!("Failed to commit catalog transaction: {}", e)))?;
tx.commit() // Create/open obsoleted database and table
.map_err(|e| CatalogError::Database(format!("Failed to commit transaction: {}", e)))?; let db_obs = Database::create(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to create obsoleted database: {}", e)))?;
let tx_obs = db_obs.begin_write()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
tx_obs.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create obsoleted table: {}", e)))?;
tx_obs.commit()
.map_err(|e| CatalogError::Database(format!("Failed to commit obsoleted transaction: {}", e)))?;
Ok(()) Ok(())
} }
@ -346,24 +350,28 @@ impl ImageCatalog {
return Err(CatalogError::NoPublishers); return Err(CatalogError::NoPublishers);
} }
// Open the database // Open the databases
trace!("Opening database at {:?}", self.db_path); trace!("Opening databases at {:?} and {:?}", self.db_path, self.obsoleted_db_path);
let db = Database::open(&self.db_path) let db_cat = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
let db_obs = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
// Begin a writing transaction // Begin writing transactions
trace!("Beginning write transaction"); trace!("Beginning write transactions");
let tx = db.begin_write() let tx_cat = db_cat.begin_write()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin catalog transaction: {}", e)))?;
let tx_obs = db_obs.begin_write()
.map_err(|e| CatalogError::Database(format!("Failed to begin obsoleted transaction: {}", e)))?;
// Open the catalog table // Open the catalog table
trace!("Opening catalog table"); trace!("Opening catalog table");
let mut catalog_table = tx.open_table(CATALOG_TABLE) let mut catalog_table = tx_cat.open_table(CATALOG_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?;
// Open the obsoleted table // Open the obsoleted table
trace!("Opening obsoleted table"); trace!("Opening obsoleted table");
let mut obsoleted_table = tx.open_table(OBSOLETED_TABLE) let mut obsoleted_table = tx_obs.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?;
// Process each publisher // Process each publisher
@ -409,13 +417,20 @@ impl ImageCatalog {
.map_err(|e| CatalogError::Repository(crate::repository::RepositoryError::Other(format!("Failed to load catalog part: {}", e))))?; .map_err(|e| CatalogError::Repository(crate::repository::RepositoryError::Other(format!("Failed to load catalog part: {}", e))))?;
} }
// Process each catalog part // Process each catalog part in a deterministic order: base, dependency, summary, others
for (part_name, _) in parts { let mut part_names: Vec<String> = parts.keys().cloned().collect();
part_names.sort_by_key(|name| {
if name.contains(".base") { 1 }
else if name.contains(".dependency") { 0 }
else if name.contains(".summary") { 2 }
else { 3 }
});
for part_name in part_names {
trace!("Processing catalog part: {}", part_name); trace!("Processing catalog part: {}", part_name);
if let Some(part) = catalog_manager.get_part(&part_name) { if let Some(part) = catalog_manager.get_part(&part_name) {
trace!("Found catalog part: {}", part_name); trace!("Found catalog part: {}", part_name);
trace!("Packages in part: {:?}", part.packages.keys().collect::<Vec<_>>()); trace!("Packages in part: {:?}", part.packages.keys().collect::<Vec<_>>());
self.process_catalog_part(&mut catalog_table, &mut obsoleted_table, part, publisher)?; self.process_catalog_part(&mut catalog_table, &mut obsoleted_table, &part_name, part, publisher)?;
} else { } else {
trace!("Catalog part not found: {}", part_name); trace!("Catalog part not found: {}", part_name);
} }
@ -426,9 +441,11 @@ impl ImageCatalog {
drop(catalog_table); drop(catalog_table);
drop(obsoleted_table); drop(obsoleted_table);
// Commit the transaction // Commit the transactions
tx.commit() tx_cat.commit()
.map_err(|e| CatalogError::Database(format!("Failed to commit transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to commit catalog transaction: {}", e)))?;
tx_obs.commit()
.map_err(|e| CatalogError::Database(format!("Failed to commit obsoleted transaction: {}", e)))?;
info!("Catalog built successfully"); info!("Catalog built successfully");
Ok(()) Ok(())
@ -439,6 +456,7 @@ impl ImageCatalog {
&self, &self,
catalog_table: &mut redb::Table<&str, &[u8]>, catalog_table: &mut redb::Table<&str, &[u8]>,
obsoleted_table: &mut redb::Table<&str, &[u8]>, obsoleted_table: &mut redb::Table<&str, &[u8]>,
part_name: &str,
part: &CatalogPart, part: &CatalogPart,
publisher: &str, publisher: &str,
) -> Result<()> { ) -> Result<()> {
@ -448,7 +466,9 @@ impl ImageCatalog {
if let Some(publisher_packages) = part.packages.get(publisher) { if let Some(publisher_packages) = part.packages.get(publisher) {
let total_versions: usize = publisher_packages.values().map(|v| v.len()).sum(); let total_versions: usize = publisher_packages.values().map(|v| v.len()).sum();
let mut processed: usize = 0; let mut processed: usize = 0;
let mut obsolete_count: usize = 0; // Count of packages marked obsolete in this part, including those skipped because they were already marked obsolete in earlier parts.
let mut obsolete_count_incl_skipped: usize = 0;
let mut skipped_obsolete: usize = 0;
let progress_step: usize = 500; // report every N packages let progress_step: usize = 500; // report every N packages
trace!( trace!(
@ -483,9 +503,30 @@ impl ImageCatalog {
let catalog_key = format!("{}@{}", stem, version_entry.version); let catalog_key = format!("{}@{}", stem, version_entry.version);
let obsoleted_key = fmri.to_string(); let obsoleted_key = fmri.to_string();
// If this is not the base part and this package/version was already marked
// obsolete in an earlier part (present in obsoleted_table) and is NOT present
// in the catalog_table, skip importing it from this part.
if !part_name.contains(".base") {
let has_catalog = matches!(catalog_table.get(catalog_key.as_str()), Ok(Some(_)));
if !has_catalog {
let was_obsoleted = matches!(obsoleted_table.get(obsoleted_key.as_str()), Ok(Some(_)));
if was_obsoleted {
// Count as obsolete for progress accounting, even though we skip processing
obsolete_count_incl_skipped += 1;
skipped_obsolete += 1;
trace!(
"Skipping {} from part {} because it is marked obsolete and not present in catalog",
obsoleted_key,
part_name
);
continue;
}
}
}
// Check if we already have this package in the catalog // Check if we already have this package in the catalog
let existing_manifest = match catalog_table.get(catalog_key.as_str()) { let existing_manifest = match catalog_table.get(catalog_key.as_str()) {
Ok(Some(bytes)) => Some(serde_json::from_slice::<Manifest>(bytes.value())?), Ok(Some(bytes)) => Some(decode_manifest_bytes(bytes.value())?),
_ => None, _ => None,
}; };
@ -494,7 +535,7 @@ impl ImageCatalog {
// Check if the package is obsolete // Check if the package is obsolete
let is_obsolete = self.is_package_obsolete(&manifest); let is_obsolete = self.is_package_obsolete(&manifest);
if is_obsolete { obsolete_count += 1; } if is_obsolete { obsolete_count_incl_skipped += 1; }
// Serialize the manifest // Serialize the manifest
let manifest_bytes = serde_json::to_vec(&manifest)?; let manifest_bytes = serde_json::to_vec(&manifest)?;
@ -508,19 +549,22 @@ impl ImageCatalog {
.map_err(|e| CatalogError::Database(format!("Failed to insert into obsoleted table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to insert into obsoleted table: {}", e)))?;
} else { } else {
// Store non-obsolete packages in the catalog table with stem@version as a key // Store non-obsolete packages in the catalog table with stem@version as a key
let compressed = compress_json_lz4(&manifest_bytes)?;
catalog_table catalog_table
.insert(catalog_key.as_str(), manifest_bytes.as_slice()) .insert(catalog_key.as_str(), compressed.as_slice())
.map_err(|e| CatalogError::Database(format!("Failed to insert into catalog table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to insert into catalog table: {}", e)))?;
} }
processed += 1; processed += 1;
if processed % progress_step == 0 { if processed % progress_step == 0 {
info!( info!(
"Import progress (publisher {}): {}/{} packages ({} obsolete)", "Import progress (publisher {}, part {}): {}/{} versions processed ({} obsolete incl. skipped, {} skipped)",
publisher, publisher,
part_name,
processed, processed,
total_versions, total_versions,
obsolete_count obsolete_count_incl_skipped,
skipped_obsolete
); );
} }
} }
@ -528,10 +572,12 @@ impl ImageCatalog {
// Final summary for this part/publisher // Final summary for this part/publisher
info!( info!(
"Finished import for publisher {}: {} packages processed ({} obsolete)", "Finished import for publisher {}, part {}: {} versions processed ({} obsolete incl. skipped, {} skipped)",
publisher, publisher,
part_name,
processed, processed,
obsolete_count obsolete_count_incl_skipped,
skipped_obsolete
); );
} else { } else {
trace!("No packages found for publisher: {}", publisher); trace!("No packages found for publisher: {}", publisher);
@ -673,20 +719,23 @@ impl ImageCatalog {
/// Query the catalog for packages matching a pattern /// Query the catalog for packages matching a pattern
pub fn query_packages(&self, pattern: Option<&str>) -> Result<Vec<PackageInfo>> { pub fn query_packages(&self, pattern: Option<&str>) -> Result<Vec<PackageInfo>> {
// Open the database // Open the catalog database
let db = Database::open(&self.db_path) let db_cat = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
// Begin a read transaction // Begin a read transaction
let tx = db.begin_read() let tx_cat = db_cat.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Open the catalog table // Open the catalog table
let catalog_table = tx.open_table(CATALOG_TABLE) let catalog_table = tx_cat.open_table(CATALOG_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?;
// Open the obsoleted table // Open the obsoleted database
let obsoleted_table = tx.open_table(OBSOLETED_TABLE) let db_obs = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
let tx_obs = db_obs.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
let obsoleted_table = tx_obs.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?;
let mut results = Vec::new(); let mut results = Vec::new();
@ -715,7 +764,7 @@ impl ImageCatalog {
let version = parts[1]; let version = parts[1];
// Deserialize the manifest // Deserialize the manifest
let manifest: Manifest = serde_json::from_slice(value.value())?; let manifest: Manifest = decode_manifest_bytes(value.value())?;
// Extract the publisher from the FMRI attribute // Extract the publisher from the FMRI attribute
let publisher = manifest.attributes.iter() let publisher = manifest.attributes.iter()
@ -792,55 +841,45 @@ impl ImageCatalog {
/// Get a manifest from the catalog /// Get a manifest from the catalog
pub fn get_manifest(&self, fmri: &Fmri) -> Result<Option<Manifest>> { pub fn get_manifest(&self, fmri: &Fmri) -> Result<Option<Manifest>> {
// Open the database // Open the catalog database
let db = Database::open(&self.db_path) let db_cat = Database::open(&self.db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open database: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog database: {}", e)))?;
// Begin a read transaction // Begin a read transaction
let tx = db.begin_read() let tx_cat = db_cat.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
// Open the catalog table // Open the catalog table
let catalog_table = tx.open_table(CATALOG_TABLE) let catalog_table = tx_cat.open_table(CATALOG_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?; .map_err(|e| CatalogError::Database(format!("Failed to open catalog table: {}", e)))?;
// Open the obsoleted table
let obsoleted_table = tx.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?;
// Create the key for the catalog table (stem@version) // Create the key for the catalog table (stem@version)
let catalog_key = format!("{}@{}", fmri.stem(), fmri.version()); let catalog_key = format!("{}@{}", fmri.stem(), fmri.version());
// Create the key for the obsoleted table (full FMRI including publisher)
let obsoleted_key = fmri.to_string();
// Try to get the manifest from the catalog table // Try to get the manifest from the catalog table
if let Ok(Some(bytes)) = catalog_table.get(catalog_key.as_str()) { if let Ok(Some(bytes)) = catalog_table.get(catalog_key.as_str()) {
return Ok(Some(serde_json::from_slice(bytes.value())?)); return Ok(Some(decode_manifest_bytes(bytes.value())?));
} }
// Check if the package is in the obsoleted table // If not found in catalog DB, check obsoleted DB
let db_obs = Database::open(&self.obsoleted_db_path)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted database: {}", e)))?;
let tx_obs = db_obs.begin_read()
.map_err(|e| CatalogError::Database(format!("Failed to begin transaction: {}", e)))?;
let obsoleted_table = tx_obs.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to open obsoleted table: {}", e)))?;
let obsoleted_key = fmri.to_string();
if let Ok(Some(_)) = obsoleted_table.get(obsoleted_key.as_str()) { if let Ok(Some(_)) = obsoleted_table.get(obsoleted_key.as_str()) {
// The package is obsolete, but we don't store the manifest in the obsoleted table
// We could return a minimal manifest with just the FMRI and obsolete flag
let mut manifest = Manifest::new(); let mut manifest = Manifest::new();
// Add the FMRI attribute
let mut attr = crate::actions::Attr::default(); let mut attr = crate::actions::Attr::default();
attr.key = "pkg.fmri".to_string(); attr.key = "pkg.fmri".to_string();
attr.values = vec![fmri.to_string()]; attr.values = vec![fmri.to_string()];
manifest.attributes.push(attr); manifest.attributes.push(attr);
// Add the obsolete attribute
let mut attr = crate::actions::Attr::default(); let mut attr = crate::actions::Attr::default();
attr.key = "pkg.obsolete".to_string(); attr.key = "pkg.obsolete".to_string();
attr.values = vec!["true".to_string()]; attr.values = vec!["true".to_string()];
manifest.attributes.push(attr); manifest.attributes.push(attr);
return Ok(Some(manifest)); return Ok(Some(manifest));
} }
// Manifest not found
Ok(None) Ok(None)
} }
} }

View file

@ -293,6 +293,11 @@ impl Image {
self.metadata_dir().join("catalog.redb") self.metadata_dir().join("catalog.redb")
} }
/// Returns the path to the obsoleted packages database (separate DB)
pub fn obsoleted_db_path(&self) -> PathBuf {
self.metadata_dir().join("obsoleted.redb")
}
/// Creates the metadata directory if it doesn't exist /// Creates the metadata directory if it doesn't exist
pub fn create_metadata_dir(&self) -> Result<()> { pub fn create_metadata_dir(&self) -> Result<()> {
let metadata_dir = self.metadata_dir(); let metadata_dir = self.metadata_dir();
@ -479,7 +484,7 @@ impl Image {
/// Initialize the catalog database /// Initialize the catalog database
pub fn init_catalog_db(&self) -> Result<()> { pub fn init_catalog_db(&self) -> Result<()> {
let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path()); let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path(), self.obsoleted_db_path());
catalog.init_db().map_err(|e| { catalog.init_db().map_err(|e| {
ImageError::Database(format!("Failed to initialize catalog database: {}", e)) ImageError::Database(format!("Failed to initialize catalog database: {}", e))
}) })
@ -574,7 +579,7 @@ impl Image {
.collect(); .collect();
// Create the catalog and build it // Create the catalog and build it
let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path()); let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path(), self.obsoleted_db_path());
catalog.build_catalog(&publisher_names).map_err(|e| { catalog.build_catalog(&publisher_names).map_err(|e| {
ImageError::Database(format!("Failed to build catalog: {}", e)) ImageError::Database(format!("Failed to build catalog: {}", e))
}) })
@ -582,7 +587,7 @@ impl Image {
/// Query the catalog for packages matching a pattern /// Query the catalog for packages matching a pattern
pub fn query_catalog(&self, pattern: Option<&str>) -> Result<Vec<PackageInfo>> { pub fn query_catalog(&self, pattern: Option<&str>) -> Result<Vec<PackageInfo>> {
let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path()); let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path(), self.obsoleted_db_path());
catalog.query_packages(pattern).map_err(|e| { catalog.query_packages(pattern).map_err(|e| {
ImageError::Database(format!("Failed to query catalog: {}", e)) ImageError::Database(format!("Failed to query catalog: {}", e))
}) })
@ -631,7 +636,7 @@ impl Image {
/// Get a manifest from the catalog /// Get a manifest from the catalog
pub fn get_manifest_from_catalog(&self, fmri: &crate::fmri::Fmri) -> Result<Option<crate::actions::Manifest>> { pub fn get_manifest_from_catalog(&self, fmri: &crate::fmri::Fmri) -> Result<Option<crate::actions::Manifest>> {
let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path()); let catalog = ImageCatalog::new(self.catalog_dir(), self.catalog_db_path(), self.obsoleted_db_path());
catalog.get_manifest(fmri).map_err(|e| { catalog.get_manifest(fmri).map_err(|e| {
ImageError::Database(format!("Failed to get manifest from catalog: {}", e)) ImageError::Database(format!("Failed to get manifest from catalog: {}", e))
}) })

View file

@ -15,13 +15,13 @@
//! resolve_install builds a resolvo Problem from user constraints, runs the //! resolve_install builds a resolvo Problem from user constraints, runs the
//! solver, and assembles an InstallPlan from the chosen solvables. //! solver, and assembles an InstallPlan from the chosen solvables.
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use miette::Diagnostic; use miette::Diagnostic;
// Begin resolvo wiring imports (names discovered by compiler) // Begin resolvo wiring imports (names discovered by compiler)
// We start broad and refine with compiler guidance. // We start broad and refine with compiler guidance.
use resolvo::{self, Candidates, Condition, ConditionId, ConditionalRequirement, Dependencies as RDependencies, DependencyProvider, HintDependenciesAvailable, Interner, KnownDependencies, Mapping, NameId, Problem as RProblem, Requirement as RRequirement, SolvableId, Solver as RSolver, SolverCache, StringId, UnsolvableOrCancelled, VersionSetId, VersionSetUnionId}; use resolvo::{self, Candidates, Condition, ConditionId, ConditionalRequirement, Dependencies as RDependencies, DependencyProvider, HintDependenciesAvailable, Interner, KnownDependencies, Mapping, NameId, Problem as RProblem, SolvableId, Solver as RSolver, SolverCache, StringId, UnsolvableOrCancelled, VersionSetId, VersionSetUnionId};
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use thiserror::Error; use thiserror::Error;
use crate::actions::Manifest; use crate::actions::Manifest;
@ -81,17 +81,12 @@ impl<'a> IpsProvider<'a> {
} }
fn build_index(&mut self) { fn build_index(&mut self) {
// Take a snapshot of the catalog to avoid borrow conflicts while interning // Move the catalog cache out temporarily to avoid borrow conflicts and expensive cloning
let snapshot: Vec<(String, Vec<PackageInfo>)> = self let cache = std::mem::take(&mut self.catalog.cache);
.catalog for (stem, list) in cache.iter() {
.cache let name_id = self.intern_name(stem);
.iter() let mut ids: Vec<SolvableId> = Vec::with_capacity(list.len());
.map(|(k, v)| (k.clone(), v.clone())) for pkg in list {
.collect();
for (stem, list) in snapshot {
let name_id = self.intern_name(&stem);
let mut ids: Vec<SolvableId> = Vec::new();
for pkg in &list {
// allocate next solvable id based on current len // allocate next solvable id based on current len
let sid = SolvableId(self.solvables.len() as u32); let sid = SolvableId(self.solvables.len() as u32);
self.solvables.insert( self.solvables.insert(
@ -112,6 +107,8 @@ impl<'a> IpsProvider<'a> {
}); });
self.cands_by_name.insert(name_id, ids); self.cands_by_name.insert(name_id, ids);
} }
// Restore the cache
self.catalog.cache = cache;
} }
fn intern_name(&mut self, name: &str) -> NameId { fn intern_name(&mut self, name: &str) -> NameId {
@ -139,11 +136,17 @@ impl<'a> Interner for IpsProvider<'a> {
} }
fn display_solvable_name(&self, solvable: SolvableId) -> impl Display + '_ { fn display_solvable_name(&self, solvable: SolvableId) -> impl Display + '_ {
todo!() let name_id = self.solvable_name(solvable);
self.display_name(name_id).to_string()
} }
fn display_merged_solvables(&self, solvables: &[SolvableId]) -> impl Display + '_ { fn display_merged_solvables(&self, solvables: &[SolvableId]) -> impl Display + '_ {
todo!() let joined = solvables
.iter()
.map(|s| self.display_solvable(*s).to_string())
.collect::<Vec<_>>()
.join(" | ");
joined
} }
fn display_name(&self, name: NameId) -> impl std::fmt::Display + '_ { fn display_name(&self, name: NameId) -> impl std::fmt::Display + '_ {
@ -186,7 +189,11 @@ impl<'a> Interner for IpsProvider<'a> {
} }
fn resolve_condition(&self, condition: ConditionId) -> Condition { fn resolve_condition(&self, condition: ConditionId) -> Condition {
todo!() // Interpret ConditionId as referencing a VersionSetId directly.
// This supports simple conditions of the form "requirement holds if
// version set X is selected". Complex boolean conditions are not
// generated by this provider at present.
Condition::Requirement(VersionSetId(condition.as_u32()))
} }
} }
@ -241,6 +248,18 @@ impl<'a> DependencyProvider for IpsProvider<'a> {
version_set: VersionSetId, version_set: VersionSetId,
inverse: bool, inverse: bool,
) -> Vec<SolvableId> { ) -> Vec<SolvableId> {
// If an incorporation lock exists for this name, we intentionally ignore
// the incoming version_set constraint so that incorporation can override
// transitive dependency version requirements. The base candidate set
// returned by get_candidates is already restricted to the locked version(s).
let name = self.version_set_name(version_set);
let stem = self.display_name(name).to_string();
if let Ok(Some(_locked_ver)) = self.image.get_incorporated_release(&stem) {
// Treat all candidates as matching the requirement; the solver's inverse
// queries should see an empty set to avoid excluding the locked candidate.
return if inverse { vec![] } else { candidates.to_vec() };
}
let kind = self let kind = self
.version_sets .version_sets
.borrow() .borrow()
@ -525,7 +544,7 @@ pub fn resolve_install(image: &Image, constraints: &[Constraint]) -> Result<Inst
let mut provider = IpsProvider::new(image)?; let mut provider = IpsProvider::new(image)?;
// Construct problem requirements from top-level constraints // Construct problem requirements from top-level constraints
let mut problem = RProblem::default(); let problem = RProblem::default();
// Augment publisher preferences for roots and create version sets // Augment publisher preferences for roots and create version sets
let image_pub_order: Vec<String> = image.publishers().iter().map(|p| p.name.clone()).collect(); let image_pub_order: Vec<String> = image.publishers().iter().map(|p| p.name.clone()).collect();
@ -747,7 +766,7 @@ mod solver_integration_tests {
} }
fn mark_obsolete(image: &Image, fmri: &Fmri) { fn mark_obsolete(image: &Image, fmri: &Fmri) {
let db = Database::open(image.catalog_db_path()).expect("open catalog db"); let db = Database::open(image.obsoleted_db_path()).expect("open obsoleted db");
let tx = db.begin_write().expect("begin write"); let tx = db.begin_write().expect("begin write");
{ {
let mut table = tx.open_table(OBSOLETED_TABLE).expect("open obsoleted table"); let mut table = tx.open_table(OBSOLETED_TABLE).expect("open obsoleted table");
@ -1009,9 +1028,18 @@ mod solver_error_message_tests {
let c = Constraint { stem: "pkg/root".to_string(), version_req: None, preferred_publishers: vec![], branch: None }; let c = Constraint { stem: "pkg/root".to_string(), version_req: None, preferred_publishers: vec![], branch: None };
let err = resolve_install(&img, &[c]).err().expect("expected solver error"); let err = resolve_install(&img, &[c]).err().expect("expected solver error");
let msg = err.message; let msg = err.message;
assert!(!msg.contains("ClauseId("), "message should not include ClauseId identifiers: {}", msg); let lower = msg.to_lowercase();
assert!(msg.to_lowercase().contains("rejected because"), "expected rejection explanation in message: {}", msg); assert!(!lower.contains("clauseid("), "message should not include ClauseId identifiers: {}", msg);
assert!(msg.to_lowercase().contains("unsatisfied dependency"), "expected unsatisfied dependency in message: {}", msg); assert!(
lower.contains("cannot be installed") || lower.contains("rejected because"),
"expected a clear rejection explanation in message: {}",
msg
);
assert!(
lower.contains("unsatisfied dependency") || lower.contains("no candidates"),
"expected explanation about missing candidates or unsatisfied dependency in message: {}",
msg
);
} }
} }
@ -1019,9 +1047,10 @@ mod solver_error_message_tests {
#[cfg(test)] #[cfg(test)]
mod incorporate_lock_tests { mod incorporate_lock_tests {
use super::*; use super::*;
use crate::actions::Dependency;
use crate::fmri::Version; use crate::fmri::Version;
use crate::image::ImageType;
use crate::image::catalog::CATALOG_TABLE; use crate::image::catalog::CATALOG_TABLE;
use crate::image::ImageType;
use redb::Database; use redb::Database;
use tempfile::tempdir; use tempfile::tempdir;
@ -1089,6 +1118,71 @@ mod incorporate_lock_tests {
assert_eq!(plan.add.len(), 1); assert_eq!(plan.add.len(), 1);
assert_eq!(plan.add[0].fmri.version(), v_new.version()); assert_eq!(plan.add[0].fmri.version(), v_new.version());
} }
#[test]
fn incorporation_overrides_transitive_requirement() {
let img = make_image_with_publishers(&[("pubA", true)]);
// Build package chain: gzip -> system/library -> system/library/mozilla-nss -> database/sqlite-3@3.46
let gzip = mk_fmri("pubA", "compress/gzip", mk_version("1.14", None, Some("20250411T052732Z")));
let slib = mk_fmri("pubA", "system/library", mk_version("0.5.11", None, Some("20240101T000000Z")));
let nss = mk_fmri("pubA", "system/library/mozilla-nss", mk_version("3.98", None, Some("20240102T000000Z")));
// sqlite candidates
let sqlite_old = mk_fmri("pubA", "database/sqlite-3", Version::new("3.46"));
let sqlite_new = mk_fmri("pubA", "database/sqlite-3", Version::parse("3.50.4-2025.0.0.0").unwrap());
// gzip requires system/library (no version)
let mut man_gzip = Manifest::new();
let mut attr = crate::actions::Attr::default();
attr.key = "pkg.fmri".to_string();
attr.values = vec![gzip.to_string()];
man_gzip.attributes.push(attr);
let mut d = Dependency::default();
d.fmri = Some(Fmri::with_publisher("pubA", "system/library", None));
d.dependency_type = "require".to_string();
man_gzip.dependencies.push(d);
write_manifest_to_catalog(&img, &gzip, &man_gzip);
// system/library requires mozilla-nss (no version)
let mut man_slib = Manifest::new();
let mut attr = crate::actions::Attr::default();
attr.key = "pkg.fmri".to_string();
attr.values = vec![slib.to_string()];
man_slib.attributes.push(attr);
let mut d = Dependency::default();
d.fmri = Some(Fmri::with_publisher("pubA", "system/library/mozilla-nss", None));
d.dependency_type = "require".to_string();
man_slib.dependencies.push(d);
write_manifest_to_catalog(&img, &slib, &man_slib);
// mozilla-nss requires sqlite-3@3.46
let mut man_nss = Manifest::new();
let mut attr = crate::actions::Attr::default();
attr.key = "pkg.fmri".to_string();
attr.values = vec![nss.to_string()];
man_nss.attributes.push(attr);
let mut d = Dependency::default();
d.fmri = Some(Fmri::with_version("database/sqlite-3", Version::new("3.46")));
d.dependency_type = "require".to_string();
man_nss.dependencies.push(d);
write_manifest_to_catalog(&img, &nss, &man_nss);
// Add sqlite candidates to catalog (empty manifests)
write_manifest_to_catalog(&img, &sqlite_old, &Manifest::new());
write_manifest_to_catalog(&img, &sqlite_new, &Manifest::new());
// Add incorporation lock to newer sqlite
img.add_incorporation_lock("database/sqlite-3", &sqlite_new.version()).expect("add sqlite lock");
// Resolve from top-level gzip; expect sqlite_new to be chosen, overriding 3.46 requirement
let c = Constraint { stem: "compress/gzip".to_string(), version_req: None, preferred_publishers: vec![], branch: None };
let plan = resolve_install(&img, &[c]).expect("resolve");
let picked_sqlite = plan.add.iter().find(|p| p.fmri.stem() == "database/sqlite-3").expect("sqlite present");
let v = picked_sqlite.fmri.version.as_ref().unwrap();
assert_eq!(v.release, "3.50.4");
assert_eq!(v.build.as_deref(), Some("2025.0.0.0"));
}
} }
#[cfg(test)] #[cfg(test)]
@ -1181,3 +1275,93 @@ mod composite_release_tests {
assert!(err.message.contains("No candidates") || err.message.contains("dependency solving failed")); assert!(err.message.contains("No candidates") || err.message.contains("dependency solving failed"));
} }
} }
#[cfg(test)]
mod circular_dependency_tests {
use super::*;
use crate::actions::Dependency;
use crate::fmri::{Fmri, Version};
use crate::image::catalog::CATALOG_TABLE;
use crate::image::ImageType;
use redb::Database;
use std::collections::HashSet;
fn mk_version(release: &str, branch: Option<&str>, timestamp: Option<&str>) -> Version {
let mut v = Version::new(release);
if let Some(b) = branch { v.branch = Some(b.to_string()); }
if let Some(t) = timestamp { v.timestamp = Some(t.to_string()); }
v
}
fn mk_fmri(publisher: &str, name: &str, v: Version) -> Fmri {
Fmri::with_publisher(publisher, name, Some(v))
}
fn mk_manifest_with_reqs(parent: &Fmri, reqs: &[Fmri]) -> Manifest {
let mut m = Manifest::new();
// pkg.fmri attribute
let mut attr = crate::actions::Attr::default();
attr.key = "pkg.fmri".to_string();
attr.values = vec![parent.to_string()];
m.attributes.push(attr);
// require dependencies
for df in reqs {
let mut d = Dependency::default();
d.fmri = Some(df.clone());
d.dependency_type = "require".to_string();
m.dependencies.push(d);
}
m
}
fn write_manifest_to_catalog(image: &Image, fmri: &Fmri, manifest: &Manifest) {
let db = Database::open(image.catalog_db_path()).expect("open catalog db");
let tx = db.begin_write().expect("begin write");
{
let mut table = tx.open_table(CATALOG_TABLE).expect("open catalog table");
let key = format!("{}@{}", fmri.stem(), fmri.version());
let val = serde_json::to_vec(manifest).expect("serialize manifest");
table.insert(key.as_str(), val.as_slice()).expect("insert manifest");
}
tx.commit().expect("commit");
}
fn make_image_with_publishers(pubs: &[(&str, bool)]) -> Image {
let td = tempfile::tempdir().expect("tempdir");
// Persist the directory for the duration of the test
let path = td.keep();
let mut img = Image::create_image(&path, ImageType::Partial).expect("create image");
for (name, is_default) in pubs.iter().copied() {
img.add_publisher(name, &format!("https://example.com/{name}"), vec![], is_default)
.expect("add publisher");
}
img
}
#[test]
fn two_node_cycle_resolves_once_each() {
let img = make_image_with_publishers(&[("pubA", true)]);
let a = mk_fmri("pubA", "pkg/a", mk_version("1.0", None, Some("20200101T000000Z")));
let b = mk_fmri("pubA", "pkg/b", mk_version("1.0", None, Some("20200101T000000Z")));
let a_req_b = Fmri::with_version("pkg/b", Version::new("1.0"));
let b_req_a = Fmri::with_version("pkg/a", Version::new("1.0"));
let man_a = mk_manifest_with_reqs(&a, &[a_req_b]);
let man_b = mk_manifest_with_reqs(&b, &[b_req_a]);
write_manifest_to_catalog(&img, &a, &man_a);
write_manifest_to_catalog(&img, &b, &man_b);
let c = Constraint { stem: "pkg/a".to_string(), version_req: None, preferred_publishers: vec![], branch: None };
let plan = resolve_install(&img, &[c]).expect("resolve");
// Ensure both packages are present and no duplicates
let stems: HashSet<String> = plan.add.iter().map(|p| p.fmri.stem().to_string()).collect();
assert_eq!(stems.len(), plan.add.len(), "no duplicates in plan");
assert!(stems.contains("pkg/a"));
assert!(stems.contains("pkg/b"));
}
}

View file

@ -5,6 +5,7 @@ use clap::{Parser, Subcommand};
use serde::Serialize; use serde::Serialize;
use std::path::PathBuf; use std::path::PathBuf;
use std::io::Write; use std::io::Write;
use std::sync::Arc;
use tracing::{debug, error, info}; use tracing::{debug, error, info};
use tracing_subscriber::filter::LevelFilter; use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::{EnvFilter, fmt}; use tracing_subscriber::{EnvFilter, fmt};
@ -485,9 +486,6 @@ fn determine_image_path(image_path: Option<PathBuf>) -> PathBuf {
} }
fn main() -> Result<()> { fn main() -> Result<()> {
// Add debug statement at the very beginning
eprintln!("MAIN: Starting pkg6 command");
// Initialize the tracing subscriber with the default log level as debug and no decorations // Initialize the tracing subscriber with the default log level as debug and no decorations
// Parse the environment filter first, handling any errors with our custom error type // Parse the environment filter first, handling any errors with our custom error type
let env_filter = EnvFilter::builder() let env_filter = EnvFilter::builder()
@ -506,16 +504,8 @@ fn main() -> Result<()> {
.with_writer(std::io::stderr) .with_writer(std::io::stderr)
.init(); .init();
eprintln!("MAIN: Parsing command line arguments");
let cli = App::parse(); let cli = App::parse();
// Print the command that was parsed
match &cli.command {
Commands::Publisher { .. } => eprintln!("MAIN: Publisher command detected"),
Commands::DebugDb { .. } => eprintln!("MAIN: Debug database command detected"),
_ => eprintln!("MAIN: Other command detected: {:?}", cli.command),
};
match &cli.command { match &cli.command {
Commands::Refresh { full, quiet, publishers } => { Commands::Refresh { full, quiet, publishers } => {
info!("Refreshing package catalog"); info!("Refreshing package catalog");
@ -615,6 +605,7 @@ fn main() -> Result<()> {
} }
// Resolve install plan // Resolve install plan
if !quiet { println!("Resolving dependencies..."); }
let plan = match libips::solver::resolve_install(&image, &constraints) { let plan = match libips::solver::resolve_install(&image, &constraints) {
Ok(p) => p, Ok(p) => p,
Err(e) => { Err(e) => {
@ -626,15 +617,38 @@ fn main() -> Result<()> {
if !quiet { println!("Resolved {} package(s) to install", plan.add.len()); } if !quiet { println!("Resolved {} package(s) to install", plan.add.len()); }
// Build and apply action plan // Build and apply action plan
if !quiet { println!("Building action plan..."); }
let ap = libips::image::action_plan::ActionPlan::from_install_plan(&plan); let ap = libips::image::action_plan::ActionPlan::from_install_plan(&plan);
let apply_opts = libips::actions::executors::ApplyOptions { dry_run: *dry_run }; let quiet_mode = *quiet;
let progress_cb: libips::actions::executors::ProgressCallback = Arc::new(move |evt| {
if quiet_mode { return; }
match evt {
libips::actions::executors::ProgressEvent::StartingPhase { phase, total } => {
println!("Applying: {} (total {})...", phase, total);
}
libips::actions::executors::ProgressEvent::Progress { phase, current, total } => {
println!("Applying: {} {}/{}", phase, current, total);
}
libips::actions::executors::ProgressEvent::FinishedPhase { phase, total } => {
println!("Done: {} (total {})", phase, total);
}
}
});
let apply_opts = libips::actions::executors::ApplyOptions { dry_run: *dry_run, progress: Some(progress_cb), progress_interval: 10 };
if !quiet { println!("Applying action plan (dry-run: {})", dry_run); } if !quiet { println!("Applying action plan (dry-run: {})", dry_run); }
ap.apply(image.path(), &apply_opts)?; ap.apply(image.path(), &apply_opts)?;
// Update installed DB after success (skip on dry-run) // Update installed DB after success (skip on dry-run)
if !*dry_run { if !*dry_run {
if !quiet { println!("Recording installation in image database..."); }
let total_pkgs = plan.add.len();
let mut idx = 0usize;
for rp in &plan.add { for rp in &plan.add {
image.install_package(&rp.fmri, &rp.manifest)?; image.install_package(&rp.fmri, &rp.manifest)?;
idx += 1;
if !quiet && (idx % 5 == 0 || idx == total_pkgs) {
println!("Recorded {}/{} packages", idx, total_pkgs);
}
// Save full manifest into manifests directory for reproducibility // Save full manifest into manifests directory for reproducibility
match image.save_manifest(&rp.fmri, &rp.manifest) { match image.save_manifest(&rp.fmri, &rp.manifest) {
Ok(path) => { Ok(path) => {
@ -1090,18 +1104,15 @@ fn main() -> Result<()> {
let mut image = libips::image::Image::create_image(&full_path, image_type)?; let mut image = libips::image::Image::create_image(&full_path, image_type)?;
info!("Image created successfully at: {}", full_path.display()); info!("Image created successfully at: {}", full_path.display());
// If publisher and origin are provided, add the publisher and download the catalog // If publisher and origin are provided, only add the publisher; do not download/open catalogs here.
if let (Some(publisher_name), Some(origin_url)) = (publisher.as_ref(), origin.as_ref()) { if let (Some(publisher_name), Some(origin_url)) = (publisher.as_ref(), origin.as_ref()) {
info!("Adding publisher {} with origin {}", publisher_name, origin_url); info!("Adding publisher {} with origin {}", publisher_name, origin_url);
// Add the publisher // Add the publisher
image.add_publisher(publisher_name, origin_url, vec![], true)?; image.add_publisher(publisher_name, origin_url, vec![], true)?;
// Download the catalog
image.download_publisher_catalog(publisher_name)?;
info!("Publisher {} configured with origin: {}", publisher_name, origin_url); info!("Publisher {} configured with origin: {}", publisher_name, origin_url);
info!("Catalog downloaded from publisher: {}", publisher_name); info!("Catalogs are not downloaded during image creation. Use 'pkg6 -R {} refresh {}' to download and open catalogs.", full_path.display(), publisher_name);
} else { } else {
info!("No publisher configured. Use 'pkg6 set-publisher' to add a publisher."); info!("No publisher configured. Use 'pkg6 set-publisher' to add a publisher.");
} }
@ -1131,7 +1142,8 @@ fn main() -> Result<()> {
// Create a catalog object for the catalog.redb database // Create a catalog object for the catalog.redb database
let catalog = libips::image::catalog::ImageCatalog::new( let catalog = libips::image::catalog::ImageCatalog::new(
image.catalog_dir(), image.catalog_dir(),
image.catalog_db_path() image.catalog_db_path(),
image.obsoleted_db_path()
); );
// Create an installed packages object for the installed.redb database // Create an installed packages object for the installed.redb database

View file

@ -65,23 +65,16 @@ fi
# 3) Show publishers for confirmation (table output) # 3) Show publishers for confirmation (table output)
"$PKG6_BIN" -R "$IMG_PATH" publisher -o table "$PKG6_BIN" -R "$IMG_PATH" publisher -o table
# 4) Dry-run install # 4) Real install
# clap short flag for --dry-run is -d in this CLI RUST_LOG=trace "$PKG6_BIN" -R "$IMG_PATH" install "pkg://$PUBLISHER/$PKG_NAME" || {
"$PKG6_BIN" -R "$IMG_PATH" install -d "pkg://$PUBLISHER/$PKG_NAME" || {
echo "Dry-run install failed" >&2
exit 1
}
# 5) Real install
"$PKG6_BIN" -R "$IMG_PATH" install "pkg://$PUBLISHER/$PKG_NAME" || {
echo "Real install failed" >&2 echo "Real install failed" >&2
exit 1 exit 1
} }
# 6) Show installed packages # 5) Show installed packages
"$PKG6_BIN" -R "$IMG_PATH" list "$PKG6_BIN" -R "$IMG_PATH" list
# 7) Dump installed database # 6) Dump installed database
"$PKG6_BIN" -R "$IMG_PATH" debug-db --dump-table installed "$PKG6_BIN" -R "$IMG_PATH" debug-db --dump-table installed
echo "Sample installation completed successfully at $IMG_PATH" echo "Sample installation completed successfully at $IMG_PATH"