Add incorporation lock support and update dependencies across the project

- Implemented incorporation lock functionality for packages, including pre-checks, addition, and retrieval methods.
- Enhanced package installation logic to handle incorporation constraints.
- Updated dependency versions in `Cargo.toml` and `Cargo.lock`, aligning with latest compatible releases.
- Introduced new tests to verify incorporation lock behavior, including scenarios with and without locks.
- Refactored code for improved readability and improved error handling.
- Fixed minor typos and inconsistencies in comments and log messages.
This commit is contained in:
Till Wegmueller 2025-08-19 14:30:55 +02:00
parent 39124f9df4
commit 77147999b3
No known key found for this signature in database
12 changed files with 852 additions and 601 deletions

1101
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -17,16 +17,16 @@ keywords.workspace = true
[dependencies]
regex = "1.5.5"
thiserror = "1.0.50"
thiserror = "2"
miette = "7.6.0"
tracing = "0.1.37"
maplit = "0.1.6"
object = "0.23.0"
sha2 = "0.9.3"
sha3 = "0.9.1"
maplit = "1"
object = "0.37"
sha2 = "0.10"
sha3 = "0.10"
pest = "2.1.3"
pest_derive = "2.1.0"
strum = { version = "0.24.1", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
serde = { version = "1.0.207", features = ["derive"] }
serde_json = "1.0.124"
serde_cbor = "0.11.2"
@ -37,11 +37,11 @@ diff-struct = "0.5.3"
chrono = "0.4.41"
tempfile = "3.20.0"
walkdir = "2.4.0"
redb = "1.5.0"
bincode = "1.3.3"
rust-ini = "0.21.2"
reqwest = { version = "0.11", features = ["blocking", "json"] }
resolvo = "0.7"
redb = "3"
bincode = { version = "2", features = ["serde"] }
rust-ini = "0.21"
reqwest = { version = "0.12", features = ["blocking", "json"] }
resolvo = "0.10"
[features]
default = ["redb-index"]

View file

@ -115,7 +115,7 @@ impl Digest {
format!("{:x}", sha2::Sha256::digest(b))
}
DigestAlgorithm::SHA512Half => {
format!("{:x}", sha2::Sha512Trunc256::digest(b))
format!("{:x}", sha2::Sha512_256::digest(b))
}
DigestAlgorithm::SHA512 => {
format!("{:x}", sha2::Sha512::digest(b))

View file

@ -2,7 +2,7 @@ use crate::actions::{Manifest};
use crate::fmri::Fmri;
use crate::repository::catalog::{CatalogManager, CatalogPart, PackageVersionEntry};
use miette::Diagnostic;
use redb::{Database, ReadableTable, TableDefinition};
use redb::{Database, ReadableDatabase, ReadableTable, TableDefinition};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::{Path, PathBuf};
@ -19,6 +19,11 @@ pub const CATALOG_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("ca
/// Value: nothing
pub const OBSOLETED_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("obsoleted");
/// Table definition for the incorporate locks table
/// Key: stem (e.g., "compress/gzip")
/// Value: version string as bytes (same format as Fmri::version())
pub const INCORPORATE_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("incorporate");
/// Table definition for the installed packages database
/// Key: full FMRI including publisher (pkg://publisher/stem@version)
/// Value: serialized Manifest
@ -105,6 +110,17 @@ impl ImageCatalog {
"catalog" => self.dump_catalog_table(&tx)?,
"obsoleted" => self.dump_obsoleted_table(&tx)?,
"installed" => self.dump_installed_table(&tx)?,
"incorporate" => {
// Simple dump of incorporate locks
if let Ok(table) = tx.open_table(INCORPORATE_TABLE) {
for entry in table.iter().map_err(|e| CatalogError::Database(format!("Failed to iterate incorporate table: {}", e)))? {
let (k, v) = entry.map_err(|e| CatalogError::Database(format!("Failed to read incorporate table entry: {}", e)))?;
let stem = k.value();
let ver = String::from_utf8_lossy(v.value());
println!("{} -> {}", stem, ver);
}
}
}
_ => return Err(CatalogError::Database(format!("Unknown table: {}", table_name))),
}
@ -311,6 +327,9 @@ impl ImageCatalog {
tx.open_table(OBSOLETED_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create obsoleted table: {}", e)))?;
tx.open_table(INCORPORATE_TABLE)
.map_err(|e| CatalogError::Database(format!("Failed to create incorporate table: {}", e)))?;
tx.commit()
.map_err(|e| CatalogError::Database(format!("Failed to commit transaction: {}", e)))?;

View file

@ -1,7 +1,7 @@
use crate::actions::Manifest;
use crate::fmri::Fmri;
use miette::Diagnostic;
use redb::{Database, ReadableTable, TableDefinition};
use redb::{Database, ReadableDatabase, ReadableTable, TableDefinition};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::{Path, PathBuf};

View file

@ -9,12 +9,13 @@ use std::collections::HashMap;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use thiserror::Error;
use redb::{Database, ReadableDatabase, ReadableTable};
use crate::repository::{ReadableRepository, RepositoryError, RestBackend, FileBackend};
// Export the catalog module
pub mod catalog;
use catalog::{ImageCatalog, PackageInfo};
use catalog::{ImageCatalog, PackageInfo, INCORPORATE_TABLE};
// Export the installed packages module
pub mod installed;
@ -338,10 +339,41 @@ impl Image {
/// Add a package to the installed packages database
pub fn install_package(&self, fmri: &crate::fmri::Fmri, manifest: &crate::actions::Manifest) -> Result<()> {
// Precheck incorporation dependencies: fail if any stem already has a lock
for d in &manifest.dependencies {
if d.dependency_type == "incorporate" {
if let Some(df) = &d.fmri {
let stem = df.stem();
if let Some(_) = self.get_incorporated_release(stem)? {
return Err(ImageError::Database(format!(
"Incorporation lock already exists for stem {}", stem
)));
}
}
}
}
// Add to installed database
let installed = InstalledPackages::new(self.installed_db_path());
installed.add_package(fmri, manifest).map_err(|e| {
ImageError::Database(format!("Failed to add package to installed database: {}", e))
})
})?;
// Write incorporation locks for any incorporate dependencies
for d in &manifest.dependencies {
if d.dependency_type == "incorporate" {
if let Some(df) = &d.fmri {
let stem = df.stem();
let ver = df.version();
if !ver.is_empty() {
// Store the full version string (release[,branch][-build][:timestamp])
// Ignore errors here? Better to propagate to ensure consistency
self.add_incorporation_lock(stem, &ver)?;
}
}
}
}
Ok(())
}
/// Remove a package from the installed packages database
@ -555,6 +587,47 @@ impl Image {
ImageError::Database(format!("Failed to query catalog: {}", e))
})
}
/// Look up an incorporation lock for a given stem.
/// Returns Some(release) if a lock exists, otherwise None.
pub fn get_incorporated_release(&self, stem: &str) -> Result<Option<String>> {
let db = Database::open(self.catalog_db_path())
.map_err(|e| ImageError::Database(format!("Failed to open catalog database: {}", e)))?;
let tx = db.begin_read()
.map_err(|e| ImageError::Database(format!("Failed to begin read transaction: {}", e)))?;
match tx.open_table(INCORPORATE_TABLE) {
Ok(table) => {
match table.get(stem) {
Ok(Some(val)) => Ok(Some(String::from_utf8_lossy(val.value()).to_string())),
Ok(None) => Ok(None),
Err(e) => Err(ImageError::Database(format!("Failed to read incorporate lock: {}", e))),
}
}
Err(_) => Ok(None),
}
}
/// Add an incorporation lock for a stem to a specific release.
///
/// Fails if a lock already exists for the stem; in that case the write
/// transaction is dropped without committing, so nothing is persisted.
pub fn add_incorporation_lock(&self, stem: &str, release: &str) -> Result<()> {
    let db = Database::open(self.catalog_db_path())
        .map_err(|e| ImageError::Database(format!("Failed to open catalog database: {}", e)))?;
    let tx = db.begin_write()
        .map_err(|e| ImageError::Database(format!("Failed to begin write transaction: {}", e)))?;
    {
        let mut table = tx.open_table(INCORPORATE_TABLE)
            .map_err(|e| ImageError::Database(format!("Failed to open incorporate table: {}", e)))?;
        // Propagate read errors instead of silently treating them as "no lock";
        // otherwise a transient failure here could let a second lock overwrite
        // an existing one. The access guard is a temporary, so the immutable
        // borrow ends before the insert below.
        let lock_exists = table.get(stem)
            .map_err(|e| ImageError::Database(format!("Failed to read incorporate lock: {}", e)))?
            .is_some();
        if lock_exists {
            return Err(ImageError::Database(format!(
                "Incorporation lock already exists for stem {}", stem
            )));
        }
        table.insert(stem, release.as_bytes())
            .map_err(|e| ImageError::Database(format!("Failed to insert incorporate lock: {}", e)))?;
    }
    tx.commit()
        .map_err(|e| ImageError::Database(format!("Failed to commit incorporate lock: {}", e)))?;
    Ok(())
}
/// Get a manifest from the catalog
pub fn get_manifest_from_catalog(&self, fmri: &crate::fmri::Fmri) -> Result<Option<crate::actions::Manifest>> {

View file

@ -192,12 +192,17 @@ impl From<redb::CommitError> for RepositoryError {
}
}
impl From<bincode::Error> for RepositoryError {
fn from(err: bincode::Error) -> Self {
impl From<bincode::error::DecodeError> for RepositoryError {
fn from(err: bincode::error::DecodeError) -> Self {
RepositoryError::Other(format!("Serialization error: {}", err))
}
}
impl From<bincode::error::EncodeError> for RepositoryError {
fn from(err: bincode::error::EncodeError) -> Self {
RepositoryError::Other(format!("Serialization error: {}", err))
}
}
pub mod catalog;
mod file_backend;
mod obsoleted;

View file

@ -3,7 +3,7 @@ use crate::repository::{Result, RepositoryError};
use chrono::{DateTime, Duration as ChronoDuration, Utc};
use miette::Diagnostic;
use regex::Regex;
use redb::{Database, ReadableTable, TableDefinition};
use redb::{Database, ReadableDatabase, ReadableTable, TableDefinition};
use serde::{Deserialize, Serialize};
use serde_json;
use serde_cbor;
@ -212,8 +212,14 @@ impl From<redb::CommitError> for ObsoletedPackageError {
}
}
impl From<bincode::Error> for ObsoletedPackageError {
fn from(err: bincode::Error) -> Self {
impl From<bincode::error::EncodeError> for ObsoletedPackageError {
fn from(err: bincode::error::EncodeError) -> Self {
ObsoletedPackageError::SerializationError(err.to_string())
}
}
impl From<bincode::error::DecodeError> for ObsoletedPackageError {
fn from(err: bincode::error::DecodeError) -> Self {
ObsoletedPackageError::SerializationError(err.to_string())
}
}
@ -792,7 +798,7 @@ impl RedbObsoletedPackageIndex {
/// Clear the index
fn clear(&self) -> Result<()> {
// Begin a write transaction
// Begin a write transaction
let write_txn = self.db.begin_write()?;
{
// Clear all tables by removing all entries
@ -802,7 +808,7 @@ impl RedbObsoletedPackageIndex {
{
let mut hash_to_manifest = write_txn.open_table(FMRI_TO_METADATA_TABLE)?;
let keys_to_remove = {
// First collect all keys in a separate scope
// First, collect all keys in a separate scope
let read_txn = self.db.begin_read()?;
let hash_to_manifest_read = read_txn.open_table(FMRI_TO_METADATA_TABLE)?;
let mut keys = Vec::new();
@ -824,7 +830,7 @@ impl RedbObsoletedPackageIndex {
{
let mut hash_to_manifest = write_txn.open_table(HASH_TO_MANIFEST_TABLE)?;
let keys_to_remove = {
// First collect all keys in a separate scope
// First, collect all keys in a separate scope
let read_txn = self.db.begin_read()?;
let hash_to_manifest_read = read_txn.open_table(HASH_TO_MANIFEST_TABLE)?;
let mut keys = Vec::new();
@ -884,7 +890,7 @@ impl RedbObsoletedPackageIndex {
/// Constant for null hash value, indicating no manifest content is stored
/// When this value is used for content_hash, the original manifest is not stored
/// When this value is used for content_hash, the original manifest is not stored,
/// and a minimal manifest with obsoletion attributes is generated on-the-fly when requested
pub const NULL_HASH: &str = "null";
@ -1325,7 +1331,7 @@ impl ObsoletedPackageManager {
/// * `obsoleted_by` - Optional list of FMRIs that replace this package
/// * `deprecation_message` - Optional message explaining why the package was obsoleted
/// * `store_manifest` - Whether to store the original manifest content
/// If false, a null hash is used and no manifest file is stored
/// If false, a null hash is used, and no manifest file is stored
///
/// # Returns
///
@ -1365,7 +1371,7 @@ impl ObsoletedPackageManager {
)
};
// Construct path for the obsoleted package
// Construct a path for the obsoleted package
let stem = fmri.stem();
let version = fmri.version();
let pkg_dir = publisher_dir.join(stem);
@ -1375,7 +1381,7 @@ impl ObsoletedPackageManager {
let encoded_version = url_encode(&version);
let metadata_path = pkg_dir.join(format!("{}.json", encoded_version));
// Write metadata to file
// Write metadata to a file
let metadata_json = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, metadata_json)?;
@ -1401,7 +1407,7 @@ impl ObsoletedPackageManager {
/// Check if a package is obsoleted
pub fn is_obsoleted(&self, publisher: &str, fmri: &Fmri) -> bool {
// First check the filesystem directly for faster results in tests
// First, check the filesystem directly for faster results in tests
let stem = fmri.stem();
let version = fmri.version();
let encoded_version = url_encode(&version);
@ -1478,7 +1484,7 @@ impl ObsoletedPackageManager {
},
Err(e) => {
warn!("Failed to get entry from index: {}", e);
// Fall back to the filesystem check if there's an error
// Fall back to the filesystem check if there's an error
self.get_obsoleted_package_metadata_from_filesystem(publisher, fmri)
}
}
@ -1583,7 +1589,7 @@ impl ObsoletedPackageManager {
},
Err(e) => {
warn!("Failed to get entry from index: {}", e);
// Fall back to the filesystem check if there's an error
// Fall back to the filesystem check if there's an error
self.get_obsoleted_package_manifest_from_filesystem(publisher, fmri)
}
}
@ -1672,7 +1678,7 @@ impl ObsoletedPackageManager {
/// Get manifest content and remove an obsoleted package
///
/// This method retrieves the manifest content of an obsoleted package and removes it
/// from the obsoleted packages directory. It's used as part of the process to restore
/// from the obsoleted packages' directory. It's used as part of the process to restore
/// an obsoleted package to the main repository.
///
/// # Arguments
@ -1710,7 +1716,7 @@ impl ObsoletedPackageManager {
/// Remove an obsoleted package
///
/// This method removes an obsoleted package from the obsoleted packages directory.
/// This method removes an obsoleted package from the obsoleted packages' directory.
/// It can be used after restoring a package to the main repository.
///
/// # Arguments
@ -1814,7 +1820,7 @@ impl ObsoletedPackageManager {
warn!("Failed to acquire write lock on index, package not removed from index: {}: {}", fmri, e);
// If we can't get a write lock, mark the index as dirty so it will be rebuilt next time
if let Ok(index) = self.index.write() {
// This is a new write attempt, so it might succeed even if the previous one failed
// This is a new write attempt, so it might succeed even if the previous one failed
if let Err(e) = index.clear() {
warn!("Failed to clear index: {}", e);
}
@ -2017,7 +2023,7 @@ impl ObsoletedPackageManager {
// Get packages for the requested page
let packages = if start_idx >= total_count {
// If start index is beyond the total count, return an empty page
// If the start index is beyond the total count, return an empty page
Vec::new()
} else {
all_packages[start_idx..end_idx.min(total_count)].to_vec()
@ -2475,7 +2481,7 @@ impl ObsoletedPackageManager {
/// Batch process multiple obsoleted packages
///
/// This method applies a function to multiple obsoleted packages in batch.
/// This method applies a function to multiple obsoleted packages in a batch.
/// It's useful for operations that need to be performed on many packages at once.
///
/// # Arguments
@ -2636,7 +2642,7 @@ mod tests {
let results = manager.search_obsoleted_packages("test", "package-.*").unwrap();
assert_eq!(results.len(), 2);
// Test search for specific version
// Test search for a specific version
let results = manager.search_obsoleted_packages("test", "2.0").unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].to_string(), fmri2.to_string());
@ -2688,7 +2694,7 @@ mod tests {
assert_eq!(page2.page, 2);
let page4 = manager.list_obsoleted_packages_paginated("test", Some(4), Some(3)).unwrap();
assert_eq!(page4.packages.len(), 1); // Last page has only 1 item
assert_eq!(page4.packages.len(), 1); // The last page has only 1 item
// Test pagination with page beyond total
let empty_page = manager.list_obsoleted_packages_paginated("test", Some(5), Some(3)).unwrap();

View file

@ -17,11 +17,11 @@
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use miette::Diagnostic;
// Begin resolvo wiring imports (names discovered by compiler)
// We start broad and refine with compiler guidance.
use resolvo::{self, Candidates, Dependencies as RDependencies, DependencyProvider, Interner, KnownDependencies, Mapping, NameId, Problem as RProblem, Requirement as RRequirement, SolvableId, Solver as RSolver, SolverCache, StringId, UnsolvableOrCancelled, VersionSetId, VersionSetUnionId};
use resolvo::{self, Candidates, Condition, ConditionId, ConditionalRequirement, Dependencies as RDependencies, DependencyProvider, HintDependenciesAvailable, Interner, KnownDependencies, Mapping, NameId, Problem as RProblem, Requirement as RRequirement, SolvableId, Solver as RSolver, SolverCache, StringId, UnsolvableOrCancelled, VersionSetId, VersionSetUnionId};
use thiserror::Error;
use crate::actions::Manifest;
@ -138,6 +138,14 @@ impl<'a> Interner for IpsProvider<'a> {
fmri.to_string()
}
// NOTE(review): unimplemented stub — the solver will panic here if it ever
// needs to render a solvable's name (e.g. when formatting an unsolvable
// problem). Presumably this should delegate to the stem of the solvable's
// FMRI; confirm against resolvo's Interner contract before shipping.
fn display_solvable_name(&self, solvable: SolvableId) -> impl Display + '_ {
todo!()
}
// NOTE(review): unimplemented stub — panics if resolvo asks to render a set
// of merged solvables while building an error message. A joined list of the
// solvables' FMRIs would likely suffice; TODO confirm and implement.
fn display_merged_solvables(&self, solvables: &[SolvableId]) -> impl Display + '_ {
todo!()
}
/// Render an interned package name; unknown ids display as an empty string.
fn display_name(&self, name: NameId) -> impl std::fmt::Display + '_ {
    match self.names.get(name) {
        Some(stem) => stem.clone(),
        None => String::default(),
    }
}
@ -176,6 +184,10 @@ impl<'a> Interner for IpsProvider<'a> {
.unwrap_or_default()
.into_iter()
}
// NOTE(review): unimplemented stub — panics if any requirement is conditional.
// No conditional requirements appear to be created in this provider (plain
// `ConditionalRequirement::from(vs_id)` is used), so this may be unreachable
// in practice; confirm before relying on it.
fn resolve_condition(&self, condition: ConditionId) -> Condition {
todo!()
}
}
// Helper to evaluate if a candidate FMRI matches a VersionSetKind constraint
@ -248,11 +260,50 @@ impl<'a> DependencyProvider for IpsProvider<'a> {
/// Return the candidate solvables for `name`.
///
/// If the image holds an incorporation lock for the stem, the candidate list
/// is restricted to versions matching the locked version (release/branch/build
/// exactly; timestamp only when the lock specifies one). When no candidate
/// matches the lock — e.g. the locked version is absent from the catalog —
/// the unrestricted list is returned so the name stays solvable.
///
/// Note: the duplicate stale `hint_dependencies_available: vec![]` field left
/// over from the resolvo 0.10 migration is removed; the struct now uses
/// `HintDependenciesAvailable::None` once.
async fn get_candidates(&self, name: NameId) -> Option<Candidates> {
    let list = self.cands_by_name.get(&name)?;

    // Check if an incorporation lock exists for this stem; if so, restrict candidates.
    let stem = self.display_name(name).to_string();
    if let Ok(Some(locked_ver)) = self.image.get_incorporated_release(&stem) {
        // Parse the locked version; if parsed, match by release/branch/build and optionally timestamp.
        let parsed_lock = crate::fmri::Version::parse(&locked_ver).ok();
        let locked_cands: Vec<SolvableId> = list
            .iter()
            .copied()
            .filter(|sid| {
                let fmri = &self.solvables.get(*sid).unwrap().fmri;
                if let Some(cv) = fmri.version.as_ref() {
                    if let Some(lv) = parsed_lock.as_ref() {
                        // Match release/branch/build exactly; timestamp must match only if lock includes it.
                        if cv.release != lv.release { return false; }
                        if cv.branch != lv.branch { return false; }
                        if cv.build != lv.build { return false; }
                        if lv.timestamp.is_some() {
                            return cv.timestamp == lv.timestamp;
                        }
                        true
                    } else {
                        // Fallback: compare stringified version.
                        fmri.version() == locked_ver
                    }
                } else {
                    false
                }
            })
            .collect();
        if !locked_cands.is_empty() {
            return Some(Candidates {
                candidates: locked_cands,
                favored: None,
                locked: None,
                hint_dependencies_available: HintDependenciesAvailable::None,
                excluded: vec![],
            });
        }
    }
    Some(Candidates {
        candidates: list.clone(),
        favored: None,
        locked: None,
        hint_dependencies_available: HintDependenciesAvailable::None,
        excluded: vec![],
    })
}
@ -302,7 +353,7 @@ impl<'a> DependencyProvider for IpsProvider<'a> {
};
// Build requirements for "require" deps
let mut reqs: Vec<RRequirement> = Vec::new();
let mut reqs: Vec<ConditionalRequirement> = Vec::new();
let parent_branch = fmri
.version
.as_ref()
@ -327,7 +378,7 @@ impl<'a> DependencyProvider for IpsProvider<'a> {
(None, None) => VersionSetKind::Any,
};
let vs_id = self.version_set_for(child_name_id, vs_kind);
reqs.push(RRequirement::from(vs_id));
reqs.push(ConditionalRequirement::from(vs_id));
// Set publisher preferences for the child to parent-first, then image order
let order = build_publisher_preference(parent_pub, self.image);
@ -486,6 +537,7 @@ pub fn resolve_install(image: &Image, constraints: &[Constraint]) -> Result<Inst
// Track each root's NameId with the originating constraint for diagnostics
let mut root_names: Vec<(NameId, Constraint)> = Vec::new();
let mut reqs: Vec<ConditionalRequirement> = Vec::new();
for c in constraints.iter().cloned() {
// Intern name
let name_id = provider.intern_name(&c.stem);
@ -512,8 +564,9 @@ pub fn resolve_install(image: &Image, constraints: &[Constraint]) -> Result<Inst
(None, None) => VersionSetKind::Any,
};
let vs_id = provider.version_set_for(name_id, vs_kind);
problem.requirements.push(RRequirement::from(vs_id));
reqs.push(ConditionalRequirement::from(vs_id));
}
let problem = problem.requirements(reqs);
// Early diagnostic: detect roots with zero candidates before invoking solver
let mut missing: Vec<String> = Vec::new();
@ -963,6 +1016,81 @@ mod solver_error_message_tests {
}
#[cfg(test)]
mod incorporate_lock_tests {
    use super::*;
    use crate::fmri::Version;
    use crate::image::ImageType;
    use crate::image::catalog::CATALOG_TABLE;
    use redb::Database;
    use tempfile::tempdir;

    /// Build a `Version` with the given release and optional branch/timestamp.
    fn mk_version(release: &str, branch: Option<&str>, timestamp: Option<&str>) -> Version {
        let mut v = Version::new(release);
        if let Some(b) = branch { v.branch = Some(b.to_string()); }
        if let Some(t) = timestamp { v.timestamp = Some(t.to_string()); }
        v
    }

    /// Shorthand for constructing an FMRI with an explicit publisher.
    fn mk_fmri(publisher: &str, name: &str, v: Version) -> Fmri { Fmri::with_publisher(publisher, name, Some(v)) }

    /// Serialize `manifest` directly into the image's catalog table under the
    /// key "<stem>@<version>", bypassing the normal catalog-refresh path so
    /// tests can seed candidates without a repository.
    fn write_manifest_to_catalog(image: &Image, fmri: &Fmri, manifest: &Manifest) {
        let db = Database::open(image.catalog_db_path()).expect("open catalog db");
        let tx = db.begin_write().expect("begin write");
        {
            let mut table = tx.open_table(CATALOG_TABLE).expect("open catalog table");
            let key = format!("{}@{}", fmri.stem(), fmri.version());
            let val = serde_json::to_vec(manifest).expect("serialize manifest");
            table.insert(key.as_str(), val.as_slice()).expect("insert manifest");
        }
        tx.commit().expect("commit");
    }

    /// Create a throwaway image with the given publishers. `td.keep()`
    /// persists the tempdir so the image outlives this function (the
    /// directory is intentionally not cleaned up by the test).
    fn make_image_with_publishers(pubs: &[(&str, bool)]) -> Image {
        let td = tempdir().expect("tempdir");
        let path = td.keep();
        let mut img = Image::create_image(&path, ImageType::Partial).expect("create image");
        for (name, is_default) in pubs.iter().copied() {
            img.add_publisher(name, &format!("https://example.com/{name}"), vec![], is_default)
                .expect("add publisher");
        }
        img
    }

    /// A lock on a version that exists in the catalog must make the solver
    /// pick that version even when a newer one is available.
    #[test]
    fn incorporate_lock_enforced() {
        let img = make_image_with_publishers(&[("pubA", true)]);
        // Two versions of same stem in catalog
        let v_old = mk_fmri("pubA", "compress/gzip", mk_version("1.0.0", None, Some("20200101T000000Z")));
        let v_new = mk_fmri("pubA", "compress/gzip", mk_version("2.0.0", None, Some("20200201T000000Z")));
        write_manifest_to_catalog(&img, &v_old, &Manifest::new());
        write_manifest_to_catalog(&img, &v_new, &Manifest::new());
        // Add incorporation lock to old version
        img.add_incorporation_lock("compress/gzip", &v_old.version()).expect("add lock");
        // Resolve without version constraints should pick locked version
        let c = Constraint { stem: "compress/gzip".to_string(), version_req: None, preferred_publishers: vec![], branch: None };
        let plan = resolve_install(&img, &[c]).expect("resolve");
        assert_eq!(plan.add.len(), 1);
        assert_eq!(plan.add[0].fmri.version(), v_old.version());
    }

    /// A lock pointing at a version that is absent from the catalog is
    /// ignored, so resolution still succeeds with the available version.
    #[test]
    fn incorporate_lock_ignored_if_missing() {
        let img = make_image_with_publishers(&[("pubA", true)]);
        // Only version 2.0 exists
        let v_new = mk_fmri("pubA", "compress/gzip", mk_version("2.0.0", None, Some("20200201T000000Z")));
        write_manifest_to_catalog(&img, &v_new, &Manifest::new());
        // Add lock to non-existent 1.0.0 -> should be ignored
        img.add_incorporation_lock("compress/gzip", "1.0.0").expect("add lock");
        let c = Constraint { stem: "compress/gzip".to_string(), version_req: None, preferred_publishers: vec![], branch: None };
        let plan = resolve_install(&img, &[c]).expect("resolve");
        assert_eq!(plan.add.len(), 1);
        assert_eq!(plan.add[0].fmri.version(), v_new.version());
    }
}
#[cfg(test)]
mod composite_release_tests {
use super::*;

View file

@ -12,11 +12,11 @@ keywords.workspace = true
[dependencies]
anyhow = "1.0.59"
clap = {version = "3.2.16", features = [ "derive", "env" ] }
clap = {version = "4", features = [ "derive", "env" ] }
specfile = {path = "../specfile"}
shellexpand = "2.1.2"
shellexpand = "3"
url = { version = "2.2.2", features = ["serde"]}
reqwest = { version = "0.11", features = ["blocking"] }
which = "4.3.0"
reqwest = { version = "0.12", features = ["blocking"] }
which = "8"
libips = {path = "../libips"}
thiserror = "*"

View file

@ -2,6 +2,7 @@ mod sources;
#[allow(clippy::result_large_err)]
mod workspace;
use clap::ArgAction;
use crate::workspace::Workspace;
use anyhow::anyhow;
use anyhow::Result;
@ -29,7 +30,7 @@ struct Cli {
#[clap(short, long, env)]
pub config: Option<PathBuf>,
#[clap(short, parse(from_occurrences))]
#[clap(short, action = ArgAction::Count)]
pub verbose: i8,
}

View file

@ -12,19 +12,13 @@ keywords.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.56"
thiserror = "1.0.30"
pest_derive = "2.1.0"
maplit = "1.0.2"
pest = "2.1.0"
reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] }
semver = "1.0.13"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
anyhow = "1"
thiserror = "2"
pest_derive = "2"
pest = "2"
reqwest = { version = "0.12", features = ["blocking", "json", "rustls-tls"] }
semver = "1"
serde = { version = "1", features = ["derive"] }
regex = "1"
lazy_static = "1"
url = { version = "2.2.2", features = ["serde"] }
[dependencies.openssl]
version = "*"
features = ["vendored"]
url = { version = "2", features = ["serde"] }