Add vm-manager library and vmctl CLI

Unified VM management consolidating QEMU-KVM (Linux) and Propolis/bhyve
(illumos) backends behind an async Hypervisor trait, with a vmctl CLI for
direct use and a library API for orchestrators.

- Core library: types, async Hypervisor trait, miette diagnostic errors
- QEMU backend: direct process management, raw QMP client, QCOW2 overlays
- Propolis backend: zone-based VMM with REST API control
- Shared infra: cloud-init NoCloud ISO generation, image download/cache,
  SSH helpers with retry
- vmctl CLI: create, start, stop, destroy, list, status, console, ssh,
  suspend, resume, image pull/list/inspect
- nebula-vm zone brand: lifecycle scripts and platform/config XML for
  illumos zone integration

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Till Wegmueller 2026-02-14 18:25:17 +01:00
commit 9dc492f90f
No known key found for this signature in database
36 changed files with 6078 additions and 0 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/target
other-codes/

2712
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

37
Cargo.toml Normal file
View file

@ -0,0 +1,37 @@
# Workspace manifest: the vm-manager library and the vmctl CLI.
[workspace]
resolver = "3"
members = ["crates/vm-manager", "crates/vmctl"]

# Values inherited by member crates via `key.workspace = true`.
[workspace.package]
edition = "2024"
license = "MPL-2.0"
rust-version = "1.85"

# Single source of truth for dependency versions across the workspace.
[workspace.dependencies]
# Async runtime; features cover process spawning, sockets, timers, signal
# handling, and async fs/IO used by the backends and the CLI.
tokio = { version = "1", features = [
"rt-multi-thread",
"macros",
"signal",
"fs",
"io-util",
"io-std",
"process",
"net",
"time",
] }
# Diagnostic-style error reporting ("fancy" enables colored reports).
miette = { version = "7", features = ["fancy"] }
thiserror = "2"
clap = { version = "4", features = ["derive", "env"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# HTTP client for the Propolis REST API and image downloads; rustls instead
# of the default native-tls stack.
reqwest = { version = "0.12", default-features = false, features = [
"rustls-tls-native-roots",
"stream",
] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
uuid = { version = "1", features = ["v4", "serde"] }
tempfile = "3"
futures-util = "0.3"
zstd = "0.13"
dirs = "6"

52
brand/nebula-vm/boot.ksh Normal file
View file

@ -0,0 +1,52 @@
#!/bin/ksh
#
# nebula-vm brand: boot script
#
# Called by zoneadm(8) when the zone is booted.
# Arguments: %z = zone name, %R = zone root
#
ZONENAME="$1"
ZONEROOT="$2"
if [[ -z "$ZONENAME" || -z "$ZONEROOT" ]]; then
echo "Usage: boot.ksh <zone-name> <zone-root>" >&2
exit 1
fi
echo "nebula-vm: booting zone '${ZONENAME}'"
# Read zone network configuration and create VNIC if needed.
# The link choice mirrors support.ksh: first physical link reported by dladm.
VNIC_NAME="vnic_${ZONENAME}"
PHYSICAL_LINK=$(dladm show-phys -p -o LINK 2>/dev/null | head -1)
if [[ -n "$PHYSICAL_LINK" ]]; then
# Check if VNIC already exists
if ! dladm show-vnic "$VNIC_NAME" >/dev/null 2>&1; then
echo "nebula-vm: creating VNIC ${VNIC_NAME} over ${PHYSICAL_LINK}"
dladm create-vnic -l "$PHYSICAL_LINK" "$VNIC_NAME" || {
echo "nebula-vm: WARNING - failed to create VNIC" >&2
}
fi
fi
# Start propolis-server inside the zone.
# PROPOLIS is the host-side view of the binary (existence check only); the
# zlogin command uses the in-zone paths /opt/propolis/propolis-server and
# /opt/propolis/config.toml written by install.ksh.
PROPOLIS="${ZONEROOT}/root/opt/propolis/propolis-server"
PIDFILE="${ZONEROOT}/root/var/run/propolis.pid"
LOGFILE="${ZONEROOT}/root/var/log/propolis.log"
if [[ -x "$PROPOLIS" ]]; then
echo "nebula-vm: starting propolis-server"
nohup zlogin "$ZONENAME" /opt/propolis/propolis-server \
run /opt/propolis/config.toml \
> "$LOGFILE" 2>&1 &
# NOTE: this records the PID of the background zlogin wrapper on the host,
# not the propolis-server process inside the zone; halt.ksh kills this PID.
echo $! > "$PIDFILE"
echo "nebula-vm: propolis-server started (pid=$(cat "$PIDFILE"))"
else
echo "nebula-vm: ERROR - propolis-server not found at ${PROPOLIS}" >&2
exit 1
fi
echo "nebula-vm: zone '${ZONENAME}' booted"
exit 0

View file

@ -0,0 +1,36 @@
<?xml version="1.0"?>
<!--
nebula-vm zone brand configuration.
Points to lifecycle scripts and defines required privileges.
-->
<!DOCTYPE brand PUBLIC "-//Sun Microsystems Inc//DTD Zones Brand//EN"
"file:///usr/share/lib/xml/dtd/zone_brand.dtd.1">
<brand name="nebula-vm">
<modname>nebula-vm</modname>
<initname>/sbin/init</initname>
<!-- %Z = zone name, %u = user name (substituted by the zones framework) -->
<login_cmd>/usr/bin/login -z %Z %u</login_cmd>
<forcedlogin_cmd>/usr/bin/login -z %Z -f %u</forcedlogin_cmd>
<user_cmd>/usr/bin/getent passwd %u</user_cmd>
<!-- Lifecycle scripts; %z = zone name, %R = zone root -->
<install>/usr/lib/brand/nebula-vm/install.ksh %z %R</install>
<boot>/usr/lib/brand/nebula-vm/boot.ksh %z %R</boot>
<halt>/usr/lib/brand/nebula-vm/halt.ksh %z %R</halt>
<uninstall>/usr/lib/brand/nebula-vm/uninstall.ksh %z %R</uninstall>
<prestatechange>/usr/lib/brand/nebula-vm/support.ksh prestate %z %R</prestatechange>
<poststatechange>/usr/lib/brand/nebula-vm/support.ksh poststate %z %R</poststatechange>
<!-- Privileges granted to the zone (needed for bhyve/propolis operation) -->
<privilege set="default" name="proc_clock_highres" />
<privilege set="default" name="sys_admin" />
<privilege set="default" name="sys_mount" />
<privilege set="default" name="file_dac_read" />
<privilege set="default" name="net_rawaccess" />
</brand>

49
brand/nebula-vm/halt.ksh Normal file
View file

@ -0,0 +1,49 @@
#!/bin/ksh
#
# nebula-vm brand: halt script
#
# Invoked by zoneadm(8) while the zone is being halted.
# Arguments: %z = zone name, %R = zone root
#
ZONENAME="$1"
ZONEROOT="$2"
if [[ -z "$ZONENAME" || -z "$ZONEROOT" ]]; then
echo "Usage: halt.ksh <zone-name> <zone-root>" >&2
exit 1
fi
echo "nebula-vm: halting zone '${ZONENAME}'"
# Tear down propolis-server (PID recorded by boot.ksh).
PIDFILE="${ZONEROOT}/root/var/run/propolis.pid"
if [[ -f "$PIDFILE" ]]; then
PID=$(cat "$PIDFILE")
if [[ -n "$PID" ]] && kill -0 "$PID" 2>/dev/null; then
echo "nebula-vm: stopping propolis-server (pid=${PID})"
kill -TERM "$PID"
# Give it up to 10 seconds to exit gracefully.
WAIT=0
until ! kill -0 "$PID" 2>/dev/null || [[ $WAIT -ge 10 ]]; do
sleep 1
WAIT=$((WAIT + 1))
done
# Escalate to SIGKILL if the grace period ran out.
if kill -0 "$PID" 2>/dev/null; then
echo "nebula-vm: force-killing propolis-server"
kill -KILL "$PID"
fi
fi
rm -f "$PIDFILE"
fi
# Remove the VNIC created at boot, if it is still present.
VNIC_NAME="vnic_${ZONENAME}"
if dladm show-vnic "$VNIC_NAME" >/dev/null 2>&1; then
echo "nebula-vm: removing VNIC ${VNIC_NAME}"
dladm delete-vnic "$VNIC_NAME" || true
fi
echo "nebula-vm: zone '${ZONENAME}' halted"
exit 0

View file

@ -0,0 +1,49 @@
#!/bin/ksh
#
# nebula-vm brand: install script
#
# Called by zoneadm(8) during zone installation.
# Arguments: %z = zone name, %R = zone root
#
ZONENAME="$1"
ZONEROOT="$2"
if [[ -z "$ZONENAME" || -z "$ZONEROOT" ]]; then
echo "Usage: install.ksh <zone-name> <zone-root>" >&2
exit 1
fi
echo "nebula-vm: installing zone '${ZONENAME}' at ${ZONEROOT}"
# Create the minimal zone root structure
# (var/run and var/log are used by boot.ksh for the propolis pidfile/log).
mkdir -p "${ZONEROOT}/root"
mkdir -p "${ZONEROOT}/root/dev"
mkdir -p "${ZONEROOT}/root/etc"
mkdir -p "${ZONEROOT}/root/var/run"
mkdir -p "${ZONEROOT}/root/var/log"
mkdir -p "${ZONEROOT}/root/opt/propolis"
# Copy propolis-server binary into the zone if available on the host
PROPOLIS_BIN="/opt/oxide/propolis-server/bin/propolis-server"
if [[ -f "$PROPOLIS_BIN" ]]; then
cp "$PROPOLIS_BIN" "${ZONEROOT}/root/opt/propolis/propolis-server"
chmod 0755 "${ZONEROOT}/root/opt/propolis/propolis-server"
echo "nebula-vm: propolis-server copied to zone"
else
# Not fatal: the operator can drop the binary in later; boot.ksh will
# fail loudly if it is still missing at boot time.
echo "nebula-vm: WARNING - propolis-server not found at ${PROPOLIS_BIN}"
echo "nebula-vm: you must manually place propolis-server in the zone"
fi
# Write a default propolis configuration.
# NOTE(review): listen_addr 0.0.0.0 exposes the propolis API on every
# interface of this exclusive-IP zone — confirm that is intended, or
# restrict to the zone-internal address.
cat > "${ZONEROOT}/root/opt/propolis/config.toml" <<'EOF'
[main]
listen_addr = "0.0.0.0"
listen_port = 12400
[log]
level = "info"
EOF
echo "nebula-vm: zone '${ZONENAME}' installed successfully"
exit 0

View file

@ -0,0 +1,36 @@
<?xml version="1.0"?>
<!--
nebula-vm zone brand platform configuration.
Grants the zone access to bhyve/propolis device nodes and standard filesystems.
-->
<!DOCTYPE platform PUBLIC "-//Sun Microsystems Inc//DTD Zones Platform//EN"
"file:///usr/share/lib/xml/dtd/zone_platform.dtd.1">
<platform name="nebula-vm" allow-exclusive-ip="true">
<!-- Standard filesystem mounts (%R = zone root, %z = zone name) -->
<global_mount special="/dev" directory="/dev" type="dev"
opt="attrdir=%R/root/dev" />
<global_mount special="proc" directory="/proc" type="proc"
opt="zone=%z" />
<global_mount special="ctfs" directory="/system/contract" type="ctfs" />
<global_mount special="mnttab" directory="/etc/mnttab" type="mntfs" />
<global_mount special="objfs" directory="/system/object" type="objfs" />
<global_mount special="swap" directory="/tmp" type="tmpfs" />
<global_mount special="sharefs" directory="/etc/dfs/sharetab" type="sharefs" />
<!-- bhyve / propolis device access: vmm instances, viona network driver,
and zvol block devices used as guest disks -->
<device match="vmm/*" />
<device match="viona" />
<device match="zvol/dsk/*" />
<device match="zvol/rdsk/*" />
<device match="zfs" />
<device match="null" />
<device match="zero" />
<device match="random" />
<device match="urandom" />
</platform>

View file

@ -0,0 +1,44 @@
#!/bin/ksh
#
# nebula-vm brand: support script
#
# Called for pre/post state change hooks.
# Arguments: prestate|poststate <zone-name> <zone-root>
#
ACTION="$1"
ZONENAME="$2"
ZONEROOT="$3"
case "$ACTION" in
prestate)
# Pre-state-change hook: ensure network resources are available.
# Same VNIC naming and link selection as boot.ksh; creation failures
# are deliberately ignored (boot.ksh warns about them).
VNIC_NAME="vnic_${ZONENAME}"
PHYSICAL_LINK=$(dladm show-phys -p -o LINK 2>/dev/null | head -1)
if [[ -n "$PHYSICAL_LINK" ]]; then
if ! dladm show-vnic "$VNIC_NAME" >/dev/null 2>&1; then
dladm create-vnic -l "$PHYSICAL_LINK" "$VNIC_NAME" 2>/dev/null || true
fi
fi
;;
poststate)
# Post-state-change hook: cleanup if zone is no longer running.
# zoneadm -p output is colon-separated: id:name:state:path:uuid:brand:ip-type
VNIC_NAME="vnic_${ZONENAME}"
ZONE_STATE=$(zoneadm -z "$ZONENAME" list -p 2>/dev/null | cut -d: -f3)
if [[ "$ZONE_STATE" != "running" ]]; then
if dladm show-vnic "$VNIC_NAME" >/dev/null 2>&1; then
dladm delete-vnic "$VNIC_NAME" 2>/dev/null || true
fi
fi
;;
*)
echo "nebula-vm support: unknown action '${ACTION}'" >&2
exit 1
;;
esac
exit 0

View file

@ -0,0 +1,29 @@
#!/bin/ksh
#
# nebula-vm brand: uninstall script
#
# Called by zoneadm(8) during zone uninstallation.
# Arguments: %z = zone name, %R = zone root
#
ZONENAME="${1}"
ZONEROOT="${2}"
if [[ -z "$ZONENAME" ]] || [[ -z "$ZONEROOT" ]]; then
echo "Usage: uninstall.ksh <zone-name> <zone-root>" >&2
exit 1
fi
echo "nebula-vm: uninstalling zone '${ZONENAME}'"
# Wipe everything the install script placed under the zone root.
if [[ -d "${ZONEROOT}/root" ]]; then
rm -rf "${ZONEROOT}/root"
echo "nebula-vm: zone root removed"
fi
# Drop the (now hopefully empty) zonepath directory itself; ignore failure
# if something else still lives there.
rmdir "${ZONEROOT}" 2>/dev/null || true
echo "nebula-vm: zone '${ZONENAME}' uninstalled"
exit 0

View file

@ -0,0 +1,36 @@
[package]
name = "vm-manager"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[features]
default = []
# Pure-Rust NoCloud seed ISO generation (no external mkisofs-style tool).
pure-iso = ["dep:isobemak"]

# Versions come from [workspace.dependencies] in the root manifest.
[dependencies]
tokio.workspace = true
miette.workspace = true
thiserror.workspace = true
serde.workspace = true
serde_json.workspace = true
reqwest.workspace = true
tracing.workspace = true
uuid.workspace = true
tempfile.workspace = true
futures-util.workspace = true
zstd.workspace = true
dirs.workspace = true
# Optional pure-Rust ISO generation
isobemak = { version = "0.2", optional = true }
# SSH
ssh2 = "0.9"
# libc: used by the QEMU backend for kill(2)-based liveness checks.
[target.'cfg(target_os = "linux")'.dependencies]
libc = "0.2"
# tokio-tungstenite: WebSocket client for the propolis serial console.
[target.'cfg(target_os = "illumos")'.dependencies]
tokio-tungstenite = "0.26"

View file

@ -0,0 +1,281 @@
pub mod noop;
#[cfg(target_os = "linux")]
pub mod qemu;
#[cfg(target_os = "linux")]
pub mod qmp;
#[cfg(target_os = "illumos")]
pub mod propolis;
use std::time::Duration;
use crate::error::{Result, VmError};
use crate::traits::{ConsoleEndpoint, Hypervisor};
use crate::types::{BackendTag, VmHandle, VmSpec, VmState};
/// Platform-aware router that delegates to the appropriate backend.
///
/// The noop backend is always present; platform backends are compiled in
/// only for their target OS and may still be `None` at runtime (see
/// `noop_only`).
pub struct RouterHypervisor {
/// Always-available no-op backend (dev/testing fallback).
pub noop: noop::NoopBackend,
/// QEMU-KVM backend; `None` when constructed via `noop_only`.
#[cfg(target_os = "linux")]
pub qemu: Option<qemu::QemuBackend>,
/// Propolis/bhyve zone backend; `None` when constructed via `noop_only`.
#[cfg(target_os = "illumos")]
pub propolis: Option<propolis::PropolisBackend>,
}
impl RouterHypervisor {
/// Build a router with platform defaults.
///
/// On Linux, creates a QemuBackend with the given bridge.
/// On illumos, creates a PropolisBackend with the given ZFS pool.
///
/// Exactly one of the cfg blocks below is compiled for any target, so
/// each one is the function's return value on that platform; on other
/// platforms both parameters are unused (hence the allow).
#[allow(unused_variables)]
pub fn new(bridge: Option<String>, zfs_pool: Option<String>) -> Self {
#[cfg(target_os = "linux")]
{
RouterHypervisor {
noop: noop::NoopBackend,
qemu: Some(qemu::QemuBackend::new(None, None, bridge)),
}
}
#[cfg(target_os = "illumos")]
{
RouterHypervisor {
noop: noop::NoopBackend,
// Default dataset pool is "rpool" when none is supplied.
propolis: Some(propolis::PropolisBackend::new(
None,
zfs_pool.unwrap_or_else(|| "rpool".into()),
)),
}
}
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
{
RouterHypervisor {
noop: noop::NoopBackend,
}
}
}
/// Build a router that only has the noop backend (for dev/testing).
///
/// The platform backend field still exists on its OS but is `None`,
/// so tag-routed calls report BackendNotAvailable instead of touching
/// a real hypervisor.
pub fn noop_only() -> Self {
#[cfg(target_os = "linux")]
{
RouterHypervisor {
noop: noop::NoopBackend,
qemu: None,
}
}
#[cfg(target_os = "illumos")]
{
RouterHypervisor {
noop: noop::NoopBackend,
propolis: None,
}
}
#[cfg(not(any(target_os = "linux", target_os = "illumos")))]
{
RouterHypervisor {
noop: noop::NoopBackend,
}
}
}
}
/// Dispatches each operation to the backend recorded in the handle's
/// `BackendTag`. `prepare` has no handle yet, so it routes by platform
/// availability instead (native backend first, noop as fallback). The
/// `#[allow(unreachable_patterns)]` catch-alls cover tags whose arm was
/// compiled out on the current OS.
impl Hypervisor for RouterHypervisor {
async fn prepare(&self, spec: &VmSpec) -> Result<VmHandle> {
#[cfg(target_os = "linux")]
if let Some(ref qemu) = self.qemu {
return qemu.prepare(spec).await;
}
#[cfg(target_os = "illumos")]
if let Some(ref propolis) = self.propolis {
return propolis.prepare(spec).await;
}
// No platform backend available (or noop_only router): fake it.
self.noop.prepare(spec).await
}
async fn start(&self, vm: &VmHandle) -> Result<()> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.start(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.start(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.start(vm).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
async fn stop(&self, vm: &VmHandle, timeout: Duration) -> Result<()> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.stop(vm, timeout).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.stop(vm, timeout).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.stop(vm, timeout).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
async fn suspend(&self, vm: &VmHandle) -> Result<()> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.suspend(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.suspend(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.suspend(vm).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
async fn resume(&self, vm: &VmHandle) -> Result<()> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.resume(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.resume(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.resume(vm).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
// Takes the handle by value: a destroyed VM's handle must not be reused.
async fn destroy(&self, vm: VmHandle) -> Result<()> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.destroy(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.destroy(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.destroy(vm).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
// Unlike the other methods, a missing backend is reported as Destroyed
// rather than as an error: status queries should not fail hard.
async fn state(&self, vm: &VmHandle) -> Result<VmState> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.state(vm).await,
None => Ok(VmState::Destroyed),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.state(vm).await,
None => Ok(VmState::Destroyed),
},
BackendTag::Noop => self.noop.state(vm).await,
#[allow(unreachable_patterns)]
_ => Ok(VmState::Destroyed),
}
}
async fn guest_ip(&self, vm: &VmHandle) -> Result<String> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.guest_ip(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.guest_ip(vm).await,
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.guest_ip(vm).await,
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
fn console_endpoint(&self, vm: &VmHandle) -> Result<ConsoleEndpoint> {
match vm.backend {
#[cfg(target_os = "linux")]
BackendTag::Qemu => match self.qemu {
Some(ref q) => q.console_endpoint(vm),
None => Err(VmError::BackendNotAvailable {
backend: "qemu".into(),
}),
},
#[cfg(target_os = "illumos")]
BackendTag::Propolis => match self.propolis {
Some(ref p) => p.console_endpoint(vm),
None => Err(VmError::BackendNotAvailable {
backend: "propolis".into(),
}),
},
BackendTag::Noop => self.noop.console_endpoint(vm),
#[allow(unreachable_patterns)]
_ => Err(VmError::BackendNotAvailable {
backend: vm.backend.to_string(),
}),
}
}
}

View file

@ -0,0 +1,116 @@
use std::time::Duration;
use tracing::info;
use crate::error::Result;
use crate::traits::{ConsoleEndpoint, Hypervisor};
use crate::types::{BackendTag, VmHandle, VmSpec, VmState};
/// No-op hypervisor for development and testing on hosts without VM capabilities.
///
/// Zero-sized and stateless: safe to construct freely (hence the derives).
#[derive(Debug, Clone, Default)]
pub struct NoopBackend;
/// Every operation is faked: calls are logged via `tracing` and succeed
/// immediately. `prepare` still creates a real scratch directory so that
/// handle paths are valid, and `destroy` removes it again.
impl Hypervisor for NoopBackend {
    async fn prepare(&self, spec: &VmSpec) -> Result<VmHandle> {
        // Fresh id plus a scratch dir under the system temp dir; no VMM involved.
        let vm_id = format!("noop-{}", uuid::Uuid::new_v4());
        let scratch = std::env::temp_dir().join("vmctl-noop").join(&vm_id);
        tokio::fs::create_dir_all(&scratch).await?;
        info!(id = %vm_id, name = %spec.name, image = ?spec.image_path, "noop: prepare");
        Ok(VmHandle {
            id: vm_id,
            name: spec.name.clone(),
            backend: BackendTag::Noop,
            work_dir: scratch,
            overlay_path: None,
            seed_iso_path: None,
            pid: None,
            qmp_socket: None,
            console_socket: None,
            vnc_addr: None,
        })
    }

    async fn start(&self, vm: &VmHandle) -> Result<()> {
        info!(id = %vm.id, name = %vm.name, "noop: start");
        Ok(())
    }

    async fn stop(&self, vm: &VmHandle, _timeout: Duration) -> Result<()> {
        info!(id = %vm.id, name = %vm.name, "noop: stop");
        Ok(())
    }

    async fn suspend(&self, vm: &VmHandle) -> Result<()> {
        info!(id = %vm.id, name = %vm.name, "noop: suspend");
        Ok(())
    }

    async fn resume(&self, vm: &VmHandle) -> Result<()> {
        info!(id = %vm.id, name = %vm.name, "noop: resume");
        Ok(())
    }

    async fn destroy(&self, vm: VmHandle) -> Result<()> {
        info!(id = %vm.id, name = %vm.name, "noop: destroy");
        // Best-effort cleanup of the scratch directory; errors are ignored.
        let _ = tokio::fs::remove_dir_all(&vm.work_dir).await;
        Ok(())
    }

    async fn state(&self, _vm: &VmHandle) -> Result<VmState> {
        // Nothing ever actually runs, so the backend always reports Prepared.
        Ok(VmState::Prepared)
    }

    async fn guest_ip(&self, _vm: &VmHandle) -> Result<String> {
        Ok(String::from("127.0.0.1"))
    }

    fn console_endpoint(&self, _vm: &VmHandle) -> Result<ConsoleEndpoint> {
        Ok(ConsoleEndpoint::None)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use crate::types::NetworkConfig;
// Minimal single-vCPU spec with no networking/cloud-init/ssh.
fn test_spec() -> VmSpec {
VmSpec {
name: "test-vm".into(),
image_path: PathBuf::from("/tmp/test.qcow2"),
vcpus: 1,
memory_mb: 512,
disk_gb: None,
network: NetworkConfig::None,
cloud_init: None,
ssh: None,
}
}
// Drives the full prepare -> start -> state -> suspend/resume ->
// guest_ip -> console -> stop -> destroy lifecycle and pins the noop
// backend's fixed answers (Prepared state, loopback IP, no console).
#[tokio::test]
async fn noop_lifecycle() {
let backend = NoopBackend;
let spec = test_spec();
let handle = backend.prepare(&spec).await.unwrap();
assert_eq!(handle.backend, BackendTag::Noop);
assert!(handle.id.starts_with("noop-"));
backend.start(&handle).await.unwrap();
assert_eq!(backend.state(&handle).await.unwrap(), VmState::Prepared);
backend.suspend(&handle).await.unwrap();
backend.resume(&handle).await.unwrap();
let ip = backend.guest_ip(&handle).await.unwrap();
assert_eq!(ip, "127.0.0.1");
let endpoint = backend.console_endpoint(&handle).unwrap();
assert!(matches!(endpoint, ConsoleEndpoint::None));
backend.stop(&handle, Duration::from_secs(5)).await.unwrap();
backend.destroy(handle).await.unwrap();
}
}

View file

@ -0,0 +1,292 @@
//! Propolis (Oxide's bhyve VMM) backend for illumos.
//!
//! Manages VMs inside `nebula-vm` branded zones with propolis-server.
use std::path::PathBuf;
use std::time::Duration;
use tracing::{info, warn};
use crate::error::{Result, VmError};
use crate::traits::{ConsoleEndpoint, Hypervisor};
use crate::types::{BackendTag, NetworkConfig, VmHandle, VmSpec, VmState};
/// Propolis backend for illumos zones.
pub struct PropolisBackend {
/// Host directory holding per-VM work dirs (default `/var/lib/vmctl/vms`).
data_dir: PathBuf,
/// ZFS pool containing `<pool>/images/*` bases and `<pool>/vms/*` clones.
zfs_pool: String,
}
impl PropolisBackend {
    /// Create a backend rooted at `data_dir` (default `/var/lib/vmctl/vms`)
    /// that carves VM datasets out of `zfs_pool`.
    pub fn new(data_dir: Option<PathBuf>, zfs_pool: String) -> Self {
        let data_dir = data_dir.unwrap_or_else(|| PathBuf::from("/var/lib/vmctl/vms"));
        Self { data_dir, zfs_pool }
    }

    /// Per-VM working directory under `data_dir`.
    fn work_dir(&self, name: &str) -> PathBuf {
        self.data_dir.join(name)
    }

    /// Run a shell command and return (success, stdout, stderr).
    /// Output is lossily decoded so non-UTF-8 bytes never abort the call.
    async fn run_cmd(cmd: &str, args: &[&str]) -> Result<(bool, String, String)> {
        let output = tokio::process::Command::new(cmd)
            .args(args)
            .output()
            .await?;
        Ok((
            output.status.success(),
            String::from_utf8_lossy(&output.stdout).into_owned(),
            String::from_utf8_lossy(&output.stderr).into_owned(),
        ))
    }

    /// Poll propolis-server until it responds, up to `timeout`.
    ///
    /// A 404 counts as "up": the server is reachable but no instance has
    /// been ensured yet. Each probe carries its own short timeout — the
    /// previous version used an unbounded request, so a connection that
    /// accepted but never answered could block well past the deadline.
    async fn wait_for_propolis(addr: &str, timeout: Duration) -> Result<()> {
        let client = reqwest::Client::new();
        let deadline = tokio::time::Instant::now() + timeout;
        let url = format!("http://{addr}/instance");
        loop {
            let attempt = client
                .get(&url)
                .timeout(Duration::from_secs(2))
                .send()
                .await;
            if let Ok(resp) = attempt {
                if resp.status().is_success() || resp.status().as_u16() == 404 {
                    return Ok(());
                }
            }
            if tokio::time::Instant::now() >= deadline {
                return Err(VmError::PropolisUnreachable {
                    addr: addr.into(),
                    source: Box::new(std::io::Error::new(
                        std::io::ErrorKind::TimedOut,
                        "propolis-server did not become available",
                    )),
                });
            }
            tokio::time::sleep(Duration::from_millis(500)).await;
        }
    }
}
impl Hypervisor for PropolisBackend {
async fn prepare(&self, spec: &VmSpec) -> Result<VmHandle> {
let work_dir = self.work_dir(&spec.name);
tokio::fs::create_dir_all(&work_dir).await?;
// Clone ZFS dataset for the VM disk
let base_dataset = format!("{}/images/{}", self.zfs_pool, spec.name);
let vm_dataset = format!("{}/vms/{}", self.zfs_pool, spec.name);
let (ok, _, stderr) = Self::run_cmd(
"zfs",
&["clone", &format!("{base_dataset}@latest"), &vm_dataset],
)
.await?;
if !ok {
warn!(name = %spec.name, stderr = %stderr, "ZFS clone failed (may already exist)");
}
// Create cloud-init seed ISO if configured
let mut seed_iso_path = None;
if let Some(ref ci) = spec.cloud_init {
let iso_path = work_dir.join("seed.iso");
let instance_id = ci.instance_id.as_deref().unwrap_or(&spec.name);
let hostname = ci.hostname.as_deref().unwrap_or(&spec.name);
let meta_data = format!("instance-id: {instance_id}\nlocal-hostname: {hostname}\n");
crate::cloudinit::create_nocloud_iso_raw(
&ci.user_data,
meta_data.as_bytes(),
&iso_path,
)?;
seed_iso_path = Some(iso_path);
}
// Determine VNIC name
let vnic_name = match &spec.network {
NetworkConfig::Vnic { name } => name.clone(),
_ => format!("vnic_{}", spec.name),
};
// Configure and install zone
let zone_name = &spec.name;
let zonecfg_cmds = format!(
"create -b; set brand=nebula-vm; set zonepath={work_dir}; set ip-type=exclusive; \
add net; set physical={vnic_name}; end; commit",
work_dir = work_dir.display()
);
let (ok, _, stderr) = Self::run_cmd("zonecfg", &["-z", zone_name, &zonecfg_cmds]).await?;
if !ok {
warn!(name = %zone_name, stderr = %stderr, "zonecfg failed (zone may already exist)");
}
let (ok, _, stderr) = Self::run_cmd("zoneadm", &["-z", zone_name, "install"]).await?;
if !ok {
warn!(name = %zone_name, stderr = %stderr, "zone install failed");
}
let handle = VmHandle {
id: format!("propolis-{}", uuid::Uuid::new_v4()),
name: spec.name.clone(),
backend: BackendTag::Propolis,
work_dir,
overlay_path: None,
seed_iso_path,
pid: None,
qmp_socket: None,
console_socket: None,
vnc_addr: None,
};
info!(name = %spec.name, id = %handle.id, "Propolis: prepared");
Ok(handle)
}
async fn start(&self, vm: &VmHandle) -> Result<()> {
// Boot zone
let (ok, _, stderr) = Self::run_cmd("zoneadm", &["-z", &vm.name, "boot"]).await?;
if !ok {
return Err(VmError::QemuSpawnFailed {
source: std::io::Error::other(format!("zone boot failed: {stderr}")),
});
}
// The brand boot script starts propolis-server inside the zone.
// Wait for it to become available.
let propolis_addr = format!("127.0.0.1:12400"); // default propolis port
Self::wait_for_propolis(&propolis_addr, Duration::from_secs(30)).await?;
// PUT /instance with instance spec
let client = reqwest::Client::new();
let instance_spec = serde_json::json!({
"properties": {
"id": vm.id,
"name": vm.name,
"description": "managed by vmctl"
},
"nics": [],
"disks": [],
"boot_settings": {
"order": [{"name": "disk0"}]
}
});
client
.put(format!("http://{propolis_addr}/instance"))
.json(&instance_spec)
.send()
.await
.map_err(|e| VmError::PropolisUnreachable {
addr: propolis_addr.clone(),
source: Box::new(e),
})?;
// PUT /instance/state → Run
client
.put(format!("http://{propolis_addr}/instance/state"))
.json(&serde_json::json!("Run"))
.send()
.await
.map_err(|e| VmError::PropolisUnreachable {
addr: propolis_addr.clone(),
source: Box::new(e),
})?;
info!(name = %vm.name, "Propolis: started");
Ok(())
}
async fn stop(&self, vm: &VmHandle, _timeout: Duration) -> Result<()> {
let propolis_addr = "127.0.0.1:12400";
let client = reqwest::Client::new();
// PUT /instance/state → Stop
let _ = client
.put(format!("http://{propolis_addr}/instance/state"))
.json(&serde_json::json!("Stop"))
.send()
.await;
// Halt the zone
let _ = Self::run_cmd("zoneadm", &["-z", &vm.name, "halt"]).await;
info!(name = %vm.name, "Propolis: stopped");
Ok(())
}
async fn suspend(&self, vm: &VmHandle) -> Result<()> {
info!(name = %vm.name, "Propolis: suspend (not yet implemented)");
Ok(())
}
async fn resume(&self, vm: &VmHandle) -> Result<()> {
info!(name = %vm.name, "Propolis: resume (not yet implemented)");
Ok(())
}
async fn destroy(&self, vm: VmHandle) -> Result<()> {
// Stop first
self.stop(&vm, Duration::from_secs(10)).await?;
// Uninstall and delete zone
let _ = Self::run_cmd("zoneadm", &["-z", &vm.name, "uninstall", "-F"]).await;
let _ = Self::run_cmd("zonecfg", &["-z", &vm.name, "delete", "-F"]).await;
// Destroy ZFS dataset
let vm_dataset = format!("{}/vms/{}", self.zfs_pool, vm.name);
let _ = Self::run_cmd("zfs", &["destroy", "-r", &vm_dataset]).await;
// Remove work directory
let _ = tokio::fs::remove_dir_all(&vm.work_dir).await;
info!(name = %vm.name, "Propolis: destroyed");
Ok(())
}
async fn state(&self, vm: &VmHandle) -> Result<VmState> {
let (ok, stdout, _) = Self::run_cmd("zoneadm", &["-z", &vm.name, "list", "-p"]).await?;
if !ok {
return Ok(VmState::Destroyed);
}
// Output format: zoneid:zonename:state:zonepath:uuid:brand:ip-type
let state_field = stdout.split(':').nth(2).unwrap_or("").trim();
Ok(match state_field {
"running" => VmState::Running,
"installed" => VmState::Prepared,
"configured" => VmState::Prepared,
_ => VmState::Stopped,
})
}
async fn guest_ip(&self, vm: &VmHandle) -> Result<String> {
// For exclusive-IP zones, the IP is configured inside the zone.
// Try to query it via zlogin.
let (ok, stdout, _) = Self::run_cmd(
"zlogin",
&[&vm.name, "ipadm", "show-addr", "-p", "-o", "ADDR"],
)
.await?;
if ok {
for line in stdout.lines() {
let addr = line
.trim()
.trim_end_matches(|c: char| c == '/' || c.is_ascii_digit());
let addr = line.split('/').next().unwrap_or("").trim();
if !addr.is_empty() && addr != "127.0.0.1" && addr.contains('.') {
return Ok(addr.to_string());
}
}
}
Err(VmError::IpDiscoveryTimeout {
name: vm.name.clone(),
})
}
fn console_endpoint(&self, vm: &VmHandle) -> Result<ConsoleEndpoint> {
// Propolis serial console is available via WebSocket
Ok(ConsoleEndpoint::WebSocket(format!(
"ws://127.0.0.1:12400/instance/serial"
)))
}
}

View file

@ -0,0 +1,401 @@
use std::path::{Path, PathBuf};
use std::time::Duration;
use tracing::{debug, info, warn};
use crate::cloudinit;
use crate::error::{Result, VmError};
use crate::image;
use crate::traits::{ConsoleEndpoint, Hypervisor};
use crate::types::{BackendTag, VmHandle, VmSpec, VmState};
use super::qmp::QmpClient;
/// QEMU-KVM backend for Linux.
///
/// Manages VMs as QEMU processes with QMP control sockets.
pub struct QemuBackend {
/// Path to the QEMU binary (default: `qemu-system-x86_64` from PATH).
qemu_binary: PathBuf,
/// Root directory for per-VM work dirs (overlays, sockets, pidfiles).
data_dir: PathBuf,
/// Optional bridge device name used as the networking default.
default_bridge: Option<String>,
}
impl QemuBackend {
/// Create a backend; `None` arguments fall back to defaults:
/// `qemu-system-x86_64` on PATH and `<data_dir>/vmctl/vms` (or /tmp).
pub fn new(
qemu_binary: Option<PathBuf>,
data_dir: Option<PathBuf>,
default_bridge: Option<String>,
) -> Self {
let data_dir = data_dir.unwrap_or_else(|| {
dirs::data_dir()
.unwrap_or_else(|| PathBuf::from("/tmp"))
.join("vmctl")
.join("vms")
});
Self {
qemu_binary: qemu_binary.unwrap_or_else(|| "qemu-system-x86_64".into()),
data_dir,
default_bridge,
}
}
/// Per-VM working directory under `data_dir`.
fn work_dir(&self, name: &str) -> PathBuf {
self.data_dir.join(name)
}
/// Generate a random locally-administered MAC address.
pub fn generate_mac() -> String {
let bytes: [u8; 6] = rand_mac();
format!(
"{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5]
)
}
/// Read PID from the pidfile in the work directory.
/// Returns `None` if the file is missing or unparsable.
async fn read_pid(work_dir: &Path) -> Option<u32> {
let pid_path = work_dir.join("qemu.pid");
tokio::fs::read_to_string(&pid_path)
.await
.ok()
.and_then(|s| s.trim().parse().ok())
}
/// Check if a process with the given PID is alive.
fn pid_alive(pid: u32) -> bool {
// Signal 0 checks if process exists without sending a signal.
// SAFETY: kill(2) with signal 0 performs no action on the target;
// it only validates the PID and permissions, so this call cannot
// corrupt memory or affect the target process.
unsafe { libc::kill(pid as i32, 0) == 0 }
}
}
/// Generate a locally-administered unicast MAC address using random bytes.
///
/// Entropy comes from hashing the current wall-clock nanoseconds with a
/// `RandomState`-seeded hasher (seeded randomly once per process), so two
/// processes hashing the same instant still produce different values.
fn rand_mac() -> [u8; 6] {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};

    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos() as u64;
    let mut hasher = RandomState::new().build_hasher();
    hasher.write_u64(nanos);
    let bits = hasher.finish();

    // 0x52:0x54 is the conventional QEMU prefix: the 0x02 bit of the first
    // octet marks "locally administered", and the 0x01 bit clear = unicast.
    [
        0x52,
        0x54,
        (bits >> 24) as u8,
        (bits >> 16) as u8,
        (bits >> 8) as u8,
        bits as u8,
    ]
}
impl Hypervisor for QemuBackend {
/// Prepare a VM: create the work dir, a QCOW2 overlay on top of the base
/// image, and (optionally) a cloud-init NoCloud seed ISO. Socket paths are
/// chosen here but the sockets themselves are created when QEMU starts.
async fn prepare(&self, spec: &VmSpec) -> Result<VmHandle> {
let work_dir = self.work_dir(&spec.name);
tokio::fs::create_dir_all(&work_dir).await?;
// Create QCOW2 overlay (copy-on-write layer over the base image)
let overlay = work_dir.join("overlay.qcow2");
image::create_overlay(&spec.image_path, &overlay, spec.disk_gb).await?;
// Generate cloud-init seed ISO if configured
let mut seed_iso_path = None;
if let Some(ref ci) = spec.cloud_init {
let iso_path = work_dir.join("seed.iso");
// Fall back to the VM name for instance-id/hostname when unset
let instance_id = ci.instance_id.as_deref().unwrap_or(&spec.name);
let hostname = ci.hostname.as_deref().unwrap_or(&spec.name);
let meta_data = format!("instance-id: {instance_id}\nlocal-hostname: {hostname}\n");
cloudinit::create_nocloud_iso_raw(&ci.user_data, meta_data.as_bytes(), &iso_path)?;
seed_iso_path = Some(iso_path);
}
let qmp_socket = work_dir.join("qmp.sock");
let console_socket = work_dir.join("console.sock");
let handle = VmHandle {
id: format!("qemu-{}", uuid::Uuid::new_v4()),
name: spec.name.clone(),
backend: BackendTag::Qemu,
work_dir,
overlay_path: Some(overlay),
seed_iso_path,
pid: None,
qmp_socket: Some(qmp_socket),
console_socket: Some(console_socket),
vnc_addr: None,
};
info!(
name = %spec.name,
id = %handle.id,
overlay = ?handle.overlay_path,
seed = ?handle.seed_iso_path,
"QEMU: prepared"
);
Ok(handle)
}
async fn start(&self, vm: &VmHandle) -> Result<()> {
let overlay = vm
.overlay_path
.as_ref()
.ok_or_else(|| VmError::InvalidState {
name: vm.name.clone(),
state: "no overlay path".into(),
})?;
// Read the VmSpec vcpus/memory from the overlay's qemu-img info? No — we need
// to reconstruct from VmHandle. For now, use defaults if not stored.
// The CLI will re-read spec and pass to prepare+start in sequence.
let qmp_sock = vm.qmp_socket.as_ref().unwrap();
let console_sock = vm.console_socket.as_ref().unwrap();
let mut args: Vec<String> = vec![
"-enable-kvm".into(),
"-machine".into(),
"q35,accel=kvm".into(),
"-cpu".into(),
"host".into(),
"-nodefaults".into(),
// QMP socket
"-qmp".into(),
format!("unix:{},server,nowait", qmp_sock.display()),
// Serial console socket
"-serial".into(),
format!("unix:{},server,nowait", console_sock.display()),
// VNC on localhost with auto-port
"-vnc".into(),
"127.0.0.1:0".into(),
// Virtio RNG
"-device".into(),
"virtio-rng-pci".into(),
// Main disk
"-drive".into(),
format!(
"file={},format=qcow2,if=none,id=drive0,discard=unmap",
overlay.display()
),
"-device".into(),
"virtio-blk-pci,drive=drive0".into(),
];
// Seed ISO (cloud-init)
if let Some(ref iso) = vm.seed_iso_path {
args.extend([
"-drive".into(),
format!(
"file={},format=raw,if=none,id=seed,readonly=on",
iso.display()
),
"-device".into(),
"virtio-blk-pci,drive=seed".into(),
]);
}
// Daemonize and pidfile
args.extend([
"-daemonize".into(),
"-pidfile".into(),
vm.work_dir.join("qemu.pid").display().to_string(),
]);
info!(
name = %vm.name,
binary = %self.qemu_binary.display(),
"QEMU: starting"
);
debug!(args = ?args, "QEMU command line");
let status = tokio::process::Command::new(&self.qemu_binary)
.args(&args)
.status()
.await
.map_err(|e| VmError::QemuSpawnFailed { source: e })?;
if !status.success() {
return Err(VmError::QemuSpawnFailed {
source: std::io::Error::other(format!("QEMU exited with status {}", status)),
});
}
// Wait for QMP socket and verify connection
let mut qmp = QmpClient::connect(qmp_sock, Duration::from_secs(10)).await?;
let status = qmp.query_status().await?;
info!(name = %vm.name, status = %status, "QEMU: started");
Ok(())
}
async fn stop(&self, vm: &VmHandle, timeout: Duration) -> Result<()> {
// Try ACPI shutdown via QMP first
if let Some(ref qmp_sock) = vm.qmp_socket {
if qmp_sock.exists() {
if let Ok(mut qmp) = QmpClient::connect(qmp_sock, Duration::from_secs(2)).await {
let _ = qmp.system_powerdown().await;
}
}
}
// Wait for process to exit
let start = tokio::time::Instant::now();
loop {
if let Some(pid) = Self::read_pid(&vm.work_dir).await {
if !Self::pid_alive(pid) {
info!(name = %vm.name, "QEMU: process exited after ACPI shutdown");
return Ok(());
}
} else {
// No PID file, process likely already gone
return Ok(());
}
if start.elapsed() >= timeout {
break;
}
tokio::time::sleep(Duration::from_millis(500)).await;
}
// SIGTERM fallback
if let Some(pid) = Self::read_pid(&vm.work_dir).await {
if Self::pid_alive(pid) {
warn!(name = %vm.name, pid, "QEMU: ACPI shutdown timed out, sending SIGTERM");
unsafe {
libc::kill(pid as i32, libc::SIGTERM);
}
tokio::time::sleep(Duration::from_secs(3)).await;
}
// SIGKILL if still alive
if Self::pid_alive(pid) {
warn!(name = %vm.name, pid, "QEMU: SIGTERM failed, sending SIGKILL");
unsafe {
libc::kill(pid as i32, libc::SIGKILL);
}
}
}
Ok(())
}
async fn suspend(&self, vm: &VmHandle) -> Result<()> {
if let Some(ref qmp_sock) = vm.qmp_socket {
let mut qmp = QmpClient::connect(qmp_sock, Duration::from_secs(5)).await?;
qmp.stop().await?;
}
Ok(())
}
async fn resume(&self, vm: &VmHandle) -> Result<()> {
if let Some(ref qmp_sock) = vm.qmp_socket {
let mut qmp = QmpClient::connect(qmp_sock, Duration::from_secs(5)).await?;
qmp.cont().await?;
}
Ok(())
}
async fn destroy(&self, vm: VmHandle) -> Result<()> {
// Stop if running
self.stop(&vm, Duration::from_secs(5)).await?;
// QMP quit to ensure cleanup
if let Some(ref qmp_sock) = vm.qmp_socket {
if qmp_sock.exists() {
if let Ok(mut qmp) = QmpClient::connect(qmp_sock, Duration::from_secs(2)).await {
let _ = qmp.quit().await;
}
}
}
// Remove work directory
let _ = tokio::fs::remove_dir_all(&vm.work_dir).await;
info!(name = %vm.name, "QEMU: destroyed");
Ok(())
}
async fn state(&self, vm: &VmHandle) -> Result<VmState> {
// Check if process is alive
if let Some(pid) = Self::read_pid(&vm.work_dir).await {
if Self::pid_alive(pid) {
// Try QMP for detailed state
if let Some(ref qmp_sock) = vm.qmp_socket {
if let Ok(mut qmp) = QmpClient::connect(qmp_sock, Duration::from_secs(2)).await
{
if let Ok(status) = qmp.query_status().await {
return Ok(match status.as_str() {
"running" => VmState::Running,
"paused" | "suspended" => VmState::Stopped,
_ => VmState::Running,
});
}
}
}
return Ok(VmState::Running);
}
}
// Check if work dir exists (prepared but not running)
if vm.work_dir.exists() {
Ok(VmState::Stopped)
} else {
Ok(VmState::Destroyed)
}
}
async fn guest_ip(&self, vm: &VmHandle) -> Result<String> {
// Parse ARP table (`ip neigh`) looking for IPs on the bridge
let output = tokio::process::Command::new("ip")
.args(["neigh", "show"])
.output()
.await
.map_err(|_| VmError::IpDiscoveryTimeout {
name: vm.name.clone(),
})?;
let text = String::from_utf8_lossy(&output.stdout);
// Try to find an IP from the ARP table. This is a best-effort heuristic:
// look for REACHABLE or STALE entries on common bridge interfaces.
for line in text.lines() {
if line.contains("REACHABLE") || line.contains("STALE") {
if let Some(ip) = line.split_whitespace().next() {
// Basic IPv4 check
if ip.contains('.') && !ip.starts_with("127.") {
return Ok(ip.to_string());
}
}
}
}
// Fallback: check dnsmasq leases if available
if self.default_bridge.is_some() {
let leases_path = "/var/lib/misc/dnsmasq.leases";
if let Ok(content) = tokio::fs::read_to_string(leases_path).await {
// Lease format: epoch MAC IP hostname clientid
if let Some(line) = content.lines().last() {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 3 {
return Ok(parts[2].to_string());
}
}
}
}
Err(VmError::IpDiscoveryTimeout {
name: vm.name.clone(),
})
}
fn console_endpoint(&self, vm: &VmHandle) -> Result<ConsoleEndpoint> {
match vm.console_socket {
Some(ref path) => Ok(ConsoleEndpoint::UnixSocket(path.clone())),
None => Ok(ConsoleEndpoint::None),
}
}
}

View file

@ -0,0 +1,200 @@
//! QMP (QEMU Machine Protocol) client over Unix domain socket.
//!
//! Implements the QMP wire protocol directly using JSON over a tokio `UnixStream`.
//! QMP is a simple line-delimited JSON protocol:
//! 1. Server sends a greeting `{"QMP": {...}}`
//! 2. Client sends `{"execute": "qmp_capabilities"}`
//! 3. Server responds `{"return": {}}`
//! 4. Client sends commands, server sends responses and events.
use std::path::Path;
use std::time::Duration;
use serde_json::Value;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::net::UnixStream;
use tracing::{debug, info, trace};
use crate::error::{Result, VmError};
/// A connected QMP client for a single QEMU instance.
pub struct QmpClient {
    // Buffered read half of the socket: QMP responses are newline-delimited JSON.
    reader: BufReader<tokio::io::ReadHalf<UnixStream>>,
    // Write half used to send serialized commands.
    writer: tokio::io::WriteHalf<UnixStream>,
}
impl QmpClient {
    /// Connect to a QMP Unix socket and negotiate capabilities.
    ///
    /// Retries the connection for up to `timeout` if the socket is not yet available.
    pub async fn connect(socket_path: &Path, timeout: Duration) -> Result<Self> {
        let deadline = tokio::time::Instant::now() + timeout;
        let mut backoff = Duration::from_millis(100);
        // QEMU creates the socket asynchronously after spawn, so retry with
        // capped exponential backoff until the deadline.
        let stream = loop {
            match UnixStream::connect(socket_path).await {
                Ok(s) => break s,
                Err(e) => {
                    if tokio::time::Instant::now() >= deadline {
                        return Err(VmError::QmpConnectionFailed {
                            path: socket_path.into(),
                            source: e,
                        });
                    }
                    let remaining = deadline.duration_since(tokio::time::Instant::now());
                    tokio::time::sleep(backoff.min(remaining)).await;
                    backoff = backoff.saturating_mul(2).min(Duration::from_secs(1));
                }
            }
        };
        let (read_half, write_half) = tokio::io::split(stream);
        let mut client = Self {
            reader: BufReader::new(read_half),
            writer: write_half,
        };
        // Read the QMP greeting
        let greeting = client.read_response().await?;
        debug!(greeting = %greeting, "QMP greeting received");
        // Negotiate capabilities; commands are rejected until this succeeds.
        client.send_command("qmp_capabilities", None).await?;
        let resp = client.read_response().await?;
        if resp.get("error").is_some() {
            return Err(VmError::QmpCommandFailed {
                message: format!("qmp_capabilities failed: {resp}"),
            });
        }
        debug!(path = %socket_path.display(), "QMP connected and negotiated");
        Ok(client)
    }
    /// Serialize and send a QMP command. The response is NOT read here; pair
    /// with `read_response`, or use `execute` for the combined round trip.
    async fn send_command(&mut self, execute: &str, arguments: Option<Value>) -> Result<()> {
        let mut cmd = serde_json::json!({ "execute": execute });
        if let Some(args) = arguments {
            // `cmd` was just built as a JSON object, so as_object_mut cannot fail.
            cmd.as_object_mut()
                .unwrap()
                .insert("arguments".into(), args);
        }
        let mut line = serde_json::to_string(&cmd).unwrap();
        line.push('\n');
        trace!(cmd = %line.trim(), "QMP send");
        self.writer
            .write_all(line.as_bytes())
            .await
            .map_err(|e| VmError::QmpCommandFailed {
                message: format!("write failed: {e}"),
            })?;
        self.writer
            .flush()
            .await
            .map_err(|e| VmError::QmpCommandFailed {
                message: format!("flush failed: {e}"),
            })?;
        Ok(())
    }
    /// Read the next JSON response (skipping asynchronous events).
    async fn read_response(&mut self) -> Result<Value> {
        loop {
            let mut line = String::new();
            let n =
                self.reader
                    .read_line(&mut line)
                    .await
                    .map_err(|e| VmError::QmpCommandFailed {
                        message: format!("read failed: {e}"),
                    })?;
            if n == 0 {
                return Err(VmError::QmpCommandFailed {
                    message: "QMP connection closed".into(),
                });
            }
            let line = line.trim();
            if line.is_empty() {
                continue;
            }
            trace!(resp = %line, "QMP recv");
            let val: Value = serde_json::from_str(line).map_err(|e| VmError::QmpCommandFailed {
                message: format!("JSON parse failed: {e}: {line}"),
            })?;
            // Skip async events (they have an "event" key)
            if val.get("event").is_some() {
                debug!(event = %val, "QMP async event (skipped)");
                continue;
            }
            return Ok(val);
        }
    }
    /// Execute a QMP command and return the raw JSON response.
    async fn execute(&mut self, command: &str, arguments: Option<Value>) -> Result<Value> {
        self.send_command(command, arguments).await?;
        self.read_response().await
    }
    /// Execute an argument-less command and fail if the response contains an
    /// "error" member. The error message has the form `"{command}: {response}"`.
    async fn execute_expect_ok(&mut self, command: &str) -> Result<()> {
        let resp = self.execute(command, None).await?;
        if resp.get("error").is_some() {
            return Err(VmError::QmpCommandFailed {
                message: format!("{command}: {resp}"),
            });
        }
        Ok(())
    }
    /// Send an ACPI system_powerdown event (graceful shutdown).
    pub async fn system_powerdown(&mut self) -> Result<()> {
        self.execute_expect_ok("system_powerdown").await?;
        info!("QMP: system_powerdown sent");
        Ok(())
    }
    /// Immediately terminate the QEMU process.
    pub async fn quit(&mut self) -> Result<()> {
        // quit disconnects before we can read a response, which is expected
        let _ = self.send_command("quit", None).await;
        info!("QMP: quit sent");
        Ok(())
    }
    /// Pause VM execution (freeze vCPUs).
    pub async fn stop(&mut self) -> Result<()> {
        self.execute_expect_ok("stop").await?;
        info!("QMP: stop (pause) sent");
        Ok(())
    }
    /// Resume VM execution.
    pub async fn cont(&mut self) -> Result<()> {
        self.execute_expect_ok("cont").await?;
        info!("QMP: cont (resume) sent");
        Ok(())
    }
    /// Query the current VM status. Returns the "status" string (e.g. "running", "paused").
    pub async fn query_status(&mut self) -> Result<String> {
        let resp = self.execute("query-status", None).await?;
        if let Some(err) = resp.get("error") {
            return Err(VmError::QmpCommandFailed {
                message: format!("query-status: {err}"),
            });
        }
        let status = resp
            .pointer("/return/status")
            .and_then(|v| v.as_str())
            .unwrap_or("unknown")
            .to_string();
        Ok(status)
    }
}

View file

@ -0,0 +1,177 @@
use std::path::Path;
use crate::error::{Result, VmError};
/// Create a NoCloud seed ISO from raw user-data and meta-data byte slices.
///
/// If the `pure-iso` feature is enabled, uses the `isobemak` crate to build the ISO entirely in
/// Rust. Otherwise falls back to external `genisoimage` or `mkisofs`.
///
/// The seed contains exactly two root-level files, `user-data` and `meta-data`,
/// and carries the volume label cloud-init's NoCloud datasource looks for
/// ("cidata"/"CIDATA" — presumably matched case-insensitively; verify for the
/// target distros).
pub fn create_nocloud_iso_raw(user_data: &[u8], meta_data: &[u8], out_iso: &Path) -> Result<()> {
    use std::fs;
    use std::io::Write;
    // Ensure output directory exists
    if let Some(parent) = out_iso.parent() {
        fs::create_dir_all(parent)?;
    }
    #[cfg(feature = "pure-iso")]
    {
        use isobemak::{BootInfo, IsoImage, IsoImageFile, build_iso};
        use std::fs::OpenOptions;
        use std::io::{Seek, SeekFrom};
        use tempfile::NamedTempFile;
        use tracing::info;
        info!(path = %out_iso.display(), "creating cloud-init ISO via isobemak (pure Rust)");
        // isobemak reads source files from disk, so spill the byte slices to
        // temp files (kept alive until build_iso returns).
        let mut tmp_user = NamedTempFile::new()?;
        tmp_user.write_all(user_data)?;
        let user_path = tmp_user.path().to_path_buf();
        let mut tmp_meta = NamedTempFile::new()?;
        tmp_meta.write_all(meta_data)?;
        let meta_path = tmp_meta.path().to_path_buf();
        let image = IsoImage {
            files: vec![
                IsoImageFile {
                    source: user_path,
                    destination: "user-data".to_string(),
                },
                IsoImageFile {
                    source: meta_path,
                    destination: "meta-data".to_string(),
                },
            ],
            // Seed ISOs are data-only; no BIOS/UEFI boot entries.
            boot_info: BootInfo {
                bios_boot: None,
                uefi_boot: None,
            },
        };
        build_iso(out_iso, &image, false).map_err(|e| VmError::CloudInitIsoFailed {
            detail: format!("isobemak: {e}"),
        })?;
        // Patch the PVD volume identifier to "CIDATA" (ISO 9660 Section 8.4.3).
        const SECTOR_SIZE: u64 = 2048;
        const PVD_LBA: u64 = 16;
        const VOLID_OFFSET: u64 = 40;
        const VOLID_LEN: usize = 32;
        let mut f = OpenOptions::new().read(true).write(true).open(out_iso)?;
        let offset = PVD_LBA * SECTOR_SIZE + VOLID_OFFSET;
        f.seek(SeekFrom::Start(offset))?;
        // Volume identifier field is space-padded to 32 bytes.
        let mut buf = [b' '; VOLID_LEN];
        let label = b"CIDATA";
        buf[..label.len()].copy_from_slice(label);
        f.write_all(&buf)?;
        return Ok(());
    }
    #[cfg(not(feature = "pure-iso"))]
    {
        use std::fs::File;
        use std::process::{Command, Stdio};
        use tempfile::tempdir;
        // Stage user-data/meta-data in a temp dir; the ISO tool adds them at
        // the image root under their basenames.
        let dir = tempdir()?;
        let seed_path = dir.path();
        let user_data_path = seed_path.join("user-data");
        let meta_data_path = seed_path.join("meta-data");
        {
            let mut f = File::create(&user_data_path)?;
            f.write_all(user_data)?;
        }
        {
            let mut f = File::create(&meta_data_path)?;
            f.write_all(meta_data)?;
        }
        // Try genisoimage first, then mkisofs.
        let status = Command::new("genisoimage")
            .arg("-quiet")
            .arg("-output")
            .arg(out_iso)
            .arg("-volid")
            .arg("cidata")
            .arg("-joliet")
            .arg("-rock")
            .arg(&user_data_path)
            .arg(&meta_data_path)
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status();
        // A spawn error (binary missing) falls through to mkisofs; a non-zero
        // exit from genisoimage is reported below, not retried.
        let status = match status {
            Ok(s) => s,
            Err(_) => Command::new("mkisofs")
                .arg("-quiet")
                .arg("-output")
                .arg(out_iso)
                .arg("-volid")
                .arg("cidata")
                .arg("-joliet")
                .arg("-rock")
                .arg(&user_data_path)
                .arg(&meta_data_path)
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .status()?,
        };
        if !status.success() {
            return Err(VmError::CloudInitIsoFailed {
                detail: "genisoimage/mkisofs exited with non-zero status".into(),
            });
        }
        Ok(())
    }
}
/// Convenience: build cloud-config YAML from user/SSH key params, then create the ISO.
pub fn create_nocloud_iso(
    user: &str,
    ssh_pubkey: &str,
    instance_id: &str,
    hostname: &str,
    out_iso: &Path,
) -> Result<()> {
    // Render the two NoCloud documents, then delegate to the raw ISO builder.
    let (user_doc, meta_doc) = build_cloud_config(user, ssh_pubkey, instance_id, hostname);
    create_nocloud_iso_raw(&user_doc, &meta_doc, out_iso)
}
/// Build a minimal cloud-config user-data and meta-data from parameters.
///
/// Returns `(user_data_bytes, meta_data_bytes)`.
pub fn build_cloud_config(
    user: &str,
    ssh_pubkey: &str,
    instance_id: &str,
    hostname: &str,
) -> (Vec<u8>, Vec<u8>) {
    // meta-data: instance-id plus local-hostname, one per line.
    let mut meta_data = String::with_capacity(64);
    meta_data.push_str("instance-id: ");
    meta_data.push_str(instance_id);
    meta_data.push_str("\nlocal-hostname: ");
    meta_data.push_str(hostname);
    meta_data.push('\n');
    // user-data: a single sudo-capable user with key-only SSH authentication.
    let user_data = format!(
        r#"#cloud-config
users:
  - name: {user}
    groups: [sudo]
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    ssh_authorized_keys:
      - {ssh_pubkey}
ssh_pwauth: false
disable_root: true
chpasswd:
  expire: false
"#
    );
    (user_data.into_bytes(), meta_data.into_bytes())
}

View file

@ -0,0 +1,125 @@
// The `unused_assignments` warnings are false positives from thiserror 2's derive macro
// on Rust edition 2024 — it generates destructuring assignments that the compiler considers
// "never read" even though they are used in the Display implementation.
#![allow(unused_assignments)]
use miette::Diagnostic;
use std::path::PathBuf;
use thiserror::Error;
/// All errors produced by the vm-manager crate, with miette diagnostic codes
/// and remediation hints attached per variant.
#[derive(Debug, Error, Diagnostic)]
pub enum VmError {
    /// The QEMU binary could not be launched (or exited with failure at spawn).
    #[error("failed to spawn QEMU process: {source}")]
    #[diagnostic(
        code(vm_manager::qemu::spawn_failed),
        help(
            "ensure qemu-system-x86_64 is installed and in PATH, and that KVM is available (/dev/kvm)"
        )
    )]
    QemuSpawnFailed { source: std::io::Error },
    /// Connecting to the QMP Unix socket failed (including retry timeout).
    #[error("failed to connect to QMP socket at {}: {source}", path.display())]
    #[diagnostic(
        code(vm_manager::qemu::qmp_connect_failed),
        help(
            "the QEMU process may have crashed before the QMP socket was ready — check the work directory for logs"
        )
    )]
    QmpConnectionFailed {
        path: PathBuf,
        source: std::io::Error,
    },
    /// A QMP command was rejected or the protocol exchange failed.
    #[error("QMP command failed: {message}")]
    #[diagnostic(code(vm_manager::qemu::qmp_command_failed))]
    QmpCommandFailed { message: String },
    /// `qemu-img create` failed while building the per-VM overlay disk.
    #[error("failed to create QCOW2 overlay from base image {}: {detail}", base.display())]
    #[diagnostic(
        code(vm_manager::image::overlay_creation_failed),
        help("ensure qemu-img is installed and the base image exists and is readable")
    )]
    OverlayCreationFailed { base: PathBuf, detail: String },
    /// Guest IP discovery (neighbor table / DHCP leases) produced nothing in time.
    #[error("timed out waiting for guest IP address for VM {name}")]
    #[diagnostic(
        code(vm_manager::network::ip_discovery_timeout),
        help(
            "the guest may not have obtained a DHCP lease — check bridge/network configuration and that the guest cloud-init is configured correctly"
        )
    )]
    IpDiscoveryTimeout { name: String },
    /// The propolis-server REST endpoint could not be reached.
    #[error("propolis server at {addr} is unreachable: {source}")]
    #[diagnostic(
        code(vm_manager::propolis::unreachable),
        help(
            "ensure the propolis-server process is running inside the zone and listening on the expected address"
        )
    )]
    PropolisUnreachable {
        addr: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Building the cloud-init NoCloud seed ISO failed.
    #[error("failed to create cloud-init seed ISO: {detail}")]
    #[diagnostic(
        code(vm_manager::cloudinit::iso_failed),
        help(
            "ensure genisoimage or mkisofs is installed, or enable the `pure-iso` feature for a Rust-only fallback"
        )
    )]
    CloudInitIsoFailed { detail: String },
    /// Any SSH failure: TCP connect, handshake, auth, exec, or SFTP.
    #[error("SSH operation failed: {detail}")]
    #[diagnostic(
        code(vm_manager::ssh::failed),
        help("check that the SSH key is correct, the guest is reachable, and sshd is running")
    )]
    SshFailed { detail: String },
    /// HTTP download of a disk image failed.
    #[error("failed to download image from {url}: {detail}")]
    #[diagnostic(
        code(vm_manager::image::download_failed),
        help("check network connectivity and that the URL is correct")
    )]
    ImageDownloadFailed { url: String, detail: String },
    /// `qemu-img info` failed or produced unparsable output.
    #[error("image format detection failed for {}: {detail}", path.display())]
    #[diagnostic(
        code(vm_manager::image::format_detection_failed),
        help("ensure qemu-img is installed and the file is a valid disk image")
    )]
    ImageFormatDetectionFailed { path: PathBuf, detail: String },
    /// `qemu-img convert` failed.
    #[error("image conversion failed: {detail}")]
    #[diagnostic(
        code(vm_manager::image::conversion_failed),
        help("ensure qemu-img is installed and there is enough disk space")
    )]
    ImageConversionFailed { detail: String },
    /// Lookup of a VM by name found nothing.
    #[error("VM {name} not found")]
    #[diagnostic(
        code(vm_manager::vm::not_found),
        help("run `vmctl list` to see available VMs")
    )]
    VmNotFound { name: String },
    /// The requested operation is not valid for the VM's current state.
    #[error("VM {name} is in state {state} which does not allow this operation")]
    #[diagnostic(code(vm_manager::vm::invalid_state))]
    InvalidState { name: String, state: String },
    /// The selected hypervisor backend is not usable on this platform.
    #[error("backend not available: {backend}")]
    #[diagnostic(
        code(vm_manager::backend::not_available),
        help("this backend is not supported on the current platform")
    )]
    BackendNotAvailable { backend: String },
    /// Catch-all for plain I/O errors (auto-converted via `?`).
    #[error(transparent)]
    #[diagnostic(code(vm_manager::io))]
    Io(#[from] std::io::Error),
}
/// Crate-wide result alias pinned to [`VmError`].
pub type Result<T> = std::result::Result<T, VmError>;

View file

@ -0,0 +1,322 @@
use std::cmp::min;
use std::path::{Path, PathBuf};
use futures_util::StreamExt;
use tracing::info;
use crate::error::{Result, VmError};
/// Returns the default image cache directory: `{XDG_DATA_HOME}/vmctl/images/`.
///
/// Falls back to `/tmp` when the platform data directory cannot be determined.
pub fn cache_dir() -> PathBuf {
    let base = dirs::data_dir().unwrap_or_else(|| PathBuf::from("/tmp"));
    base.join("vmctl").join("images")
}
/// Streaming image downloader with progress logging and zstd decompression support.
pub struct ImageManager {
    // Shared HTTP client; reused across downloads for connection pooling.
    client: reqwest::Client,
    // Directory where pulled images are stored.
    cache: PathBuf,
}
impl Default for ImageManager {
    /// An `ImageManager` with a fresh HTTP client and the default cache directory.
    fn default() -> Self {
        let client = reqwest::Client::new();
        let cache = cache_dir();
        Self { client, cache }
    }
}
impl ImageManager {
    /// Manager using the default cache directory (see [`cache_dir`]).
    pub fn new() -> Self {
        Self::default()
    }
    /// Manager with an explicit cache directory (useful for tests).
    pub fn with_cache_dir(cache: PathBuf) -> Self {
        Self {
            client: reqwest::Client::new(),
            cache,
        }
    }
    /// Download an image from `url` to `destination`.
    ///
    /// If the file already exists at `destination`, the download is skipped.
    /// URLs ending in `.zst` or `.zstd` are automatically decompressed.
    pub async fn download(&self, url: &str, destination: &Path) -> Result<()> {
        if destination.exists() {
            info!(url = %url, dest = %destination.display(), "image already present; skipping download");
            return Ok(());
        }
        if let Some(parent) = destination.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }
        // Compression is inferred from the URL suffix only — no content sniffing.
        let is_zstd = url.ends_with(".zst") || url.ends_with(".zstd");
        if is_zstd {
            self.download_zstd(url, destination).await
        } else {
            self.download_raw(url, destination).await
        }
    }
    /// Pull an image from a URL into the cache directory, returning the cached path.
    ///
    /// The cache file name is `name` when given, otherwise the URL's last path
    /// segment with any `.zst`/`.zstd` suffix stripped.
    pub async fn pull(&self, url: &str, name: Option<&str>) -> Result<PathBuf> {
        let file_name = name.map(|n| n.to_string()).unwrap_or_else(|| {
            url.rsplit('/')
                .next()
                .unwrap_or("image")
                .trim_end_matches(".zst")
                .trim_end_matches(".zstd")
                .to_string()
        });
        let dest = self.cache.join(&file_name);
        self.download(url, &dest).await?;
        Ok(dest)
    }
    /// List all cached images.
    ///
    /// A missing cache directory yields an empty list; results are sorted by name.
    pub async fn list(&self) -> Result<Vec<CachedImage>> {
        let mut entries = Vec::new();
        let cache = &self.cache;
        if !cache.exists() {
            return Ok(entries);
        }
        let mut dir = tokio::fs::read_dir(cache).await?;
        while let Some(entry) = dir.next_entry().await? {
            let path = entry.path();
            // Subdirectories are ignored; only regular files count as images.
            if path.is_file() {
                let metadata = entry.metadata().await?;
                entries.push(CachedImage {
                    name: entry.file_name().to_string_lossy().to_string(),
                    path,
                    size_bytes: metadata.len(),
                });
            }
        }
        entries.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(entries)
    }
    // Download a zstd-compressed image: stream to a `.zst.tmp` sibling file,
    // then decompress into `destination` and delete the temp file.
    // NOTE(review): file writes below use blocking std::fs inside an async fn;
    // acceptable for a CLI, but consider spawn_blocking if run on a busy runtime.
    async fn download_zstd(&self, url: &str, destination: &Path) -> Result<()> {
        let res = self
            .client
            .get(url)
            .send()
            .await
            .map_err(|e| VmError::ImageDownloadFailed {
                url: url.into(),
                detail: e.to_string(),
            })?;
        // 0 means the server sent no Content-Length; progress logging is skipped.
        let total_size = res.content_length().unwrap_or(0);
        let tmp_name = format!(
            "{}.zst.tmp",
            destination
                .file_name()
                .map(|s| s.to_string_lossy())
                .unwrap_or_default()
        );
        let tmp_path = destination
            .parent()
            .map(|p| p.join(&tmp_name))
            .unwrap_or_else(|| PathBuf::from(&tmp_name));
        info!(url = %url, dest = %destination.display(), size_bytes = total_size, "downloading image (zstd)");
        // Stream to temp compressed file
        {
            let mut tmp_file = std::fs::File::create(&tmp_path)?;
            let mut downloaded: u64 = 0;
            let mut stream = res.bytes_stream();
            let mut last_logged_pct: u64 = 0;
            while let Some(item) = stream.next().await {
                let chunk = item.map_err(|e| VmError::ImageDownloadFailed {
                    url: url.into(),
                    detail: e.to_string(),
                })?;
                std::io::Write::write_all(&mut tmp_file, &chunk)?;
                if total_size > 0 {
                    // Clamp so a server sending extra bytes never shows >100%.
                    downloaded = min(downloaded + (chunk.len() as u64), total_size);
                    let pct = downloaded.saturating_mul(100) / total_size.max(1);
                    // Log at most every 5 percentage points plus completion.
                    if pct >= last_logged_pct + 5 || pct == 100 {
                        info!(
                            percent = pct,
                            downloaded_mb = (downloaded as f64) / 1_000_000.0,
                            "downloading (zstd)..."
                        );
                        last_logged_pct = pct;
                    }
                }
            }
        }
        info!(tmp = %tmp_path.display(), "download complete; decompressing zstd");
        // Decompress
        let infile = std::fs::File::open(&tmp_path)?;
        let mut decoder =
            zstd::stream::Decoder::new(infile).map_err(|e| VmError::ImageDownloadFailed {
                url: url.into(),
                detail: format!("zstd decoder init: {e}"),
            })?;
        let mut outfile = std::fs::File::create(destination)?;
        std::io::copy(&mut decoder, &mut outfile)?;
        let _ = decoder.finish();
        // Temp-file removal is best-effort; the image itself is already written.
        let _ = std::fs::remove_file(&tmp_path);
        info!(dest = %destination.display(), "decompression completed");
        Ok(())
    }
    // Download an uncompressed image straight into `destination`, with the
    // same throttled progress logging as the zstd path.
    async fn download_raw(&self, url: &str, destination: &Path) -> Result<()> {
        let res = self
            .client
            .get(url)
            .send()
            .await
            .map_err(|e| VmError::ImageDownloadFailed {
                url: url.into(),
                detail: e.to_string(),
            })?;
        let total_size = res.content_length().unwrap_or(0);
        info!(url = %url, dest = %destination.display(), size_bytes = total_size, "downloading image");
        let mut file = std::fs::File::create(destination)?;
        let mut downloaded: u64 = 0;
        let mut stream = res.bytes_stream();
        let mut last_logged_pct: u64 = 0;
        while let Some(item) = stream.next().await {
            let chunk = item.map_err(|e| VmError::ImageDownloadFailed {
                url: url.into(),
                detail: e.to_string(),
            })?;
            std::io::Write::write_all(&mut file, &chunk)?;
            if total_size > 0 {
                downloaded = min(downloaded + (chunk.len() as u64), total_size);
                let pct = downloaded.saturating_mul(100) / total_size.max(1);
                if pct >= last_logged_pct + 5 || pct == 100 {
                    info!(
                        percent = pct,
                        downloaded_mb = (downloaded as f64) / 1_000_000.0,
                        "downloading..."
                    );
                    last_logged_pct = pct;
                }
            }
        }
        info!(dest = %destination.display(), "download completed");
        Ok(())
    }
}
/// Information about a cached image.
#[derive(Debug, Clone)]
pub struct CachedImage {
    // File name within the cache directory.
    pub name: String,
    // Absolute path to the cached image file.
    pub path: PathBuf,
    // On-disk size in bytes.
    pub size_bytes: u64,
}
/// Detect the format of a disk image using `qemu-img info`.
///
/// Parses the JSON output of `qemu-img info --output=json` and returns its
/// "format" field; "raw" is reported when the field is absent.
pub async fn detect_format(path: &Path) -> Result<String> {
    let cmd_result = tokio::process::Command::new("qemu-img")
        .args(["info", "--output=json"])
        .arg(path)
        .output()
        .await;
    let output = cmd_result.map_err(|e| VmError::ImageFormatDetectionFailed {
        path: path.into(),
        detail: format!("qemu-img not found: {e}"),
    })?;
    if !output.status.success() {
        return Err(VmError::ImageFormatDetectionFailed {
            path: path.into(),
            detail: String::from_utf8_lossy(&output.stderr).into_owned(),
        });
    }
    let info: serde_json::Value =
        serde_json::from_slice(&output.stdout).map_err(|e| VmError::ImageFormatDetectionFailed {
            path: path.into(),
            detail: format!("failed to parse qemu-img JSON: {e}"),
        })?;
    let format = match info.get("format").and_then(serde_json::Value::as_str) {
        Some(f) => f.to_string(),
        None => "raw".to_string(),
    };
    Ok(format)
}
/// Convert an image from one format to another using `qemu-img convert`.
///
/// `output_format` is passed to `-O` (e.g. "qcow2", "raw"); the source format
/// is auto-detected by qemu-img.
pub async fn convert(src: &Path, dst: &Path, output_format: &str) -> Result<()> {
    let out = tokio::process::Command::new("qemu-img")
        .args(["convert", "-O", output_format])
        .arg(src)
        .arg(dst)
        .output()
        .await
        .map_err(|e| VmError::ImageConversionFailed {
            detail: format!("qemu-img convert failed to start: {e}"),
        })?;
    match out.status.success() {
        true => Ok(()),
        false => Err(VmError::ImageConversionFailed {
            detail: String::from_utf8_lossy(&out.stderr).into_owned(),
        }),
    }
}
/// Create a QCOW2 overlay backed by a base image.
///
/// Automatically detects the base image format. If `size_gb` is provided, the overlay is resized.
///
/// The backing path is made absolute before being recorded in the overlay:
/// qemu resolves a relative backing path relative to the *overlay's*
/// directory, which silently produces a broken overlay whenever base and
/// overlay live in different directories.
pub async fn create_overlay(base: &Path, overlay: &Path, size_gb: Option<u32>) -> Result<()> {
    let base_fmt = detect_format(base).await?;
    // Absolutize the backing path; fall back to the caller's path if
    // canonicalization fails (e.g. permission issues) to preserve old behavior.
    let backing = base.canonicalize().unwrap_or_else(|_| base.to_path_buf());
    let mut args = vec![
        "create".to_string(),
        "-f".into(),
        "qcow2".into(),
        "-F".into(),
        base_fmt,
        "-b".into(),
        backing.to_string_lossy().into_owned(),
        overlay.to_string_lossy().into_owned(),
    ];
    // Optional virtual size; `qemu-img create` expects it after the filename.
    if let Some(gb) = size_gb {
        args.push(format!("{gb}G"));
    }
    let output = tokio::process::Command::new("qemu-img")
        .args(&args)
        .output()
        .await
        .map_err(|e| VmError::OverlayCreationFailed {
            base: base.into(),
            detail: format!("qemu-img not found: {e}"),
        })?;
    if !output.status.success() {
        return Err(VmError::OverlayCreationFailed {
            base: base.into(),
            detail: String::from_utf8_lossy(&output.stderr).into_owned(),
        });
    }
    Ok(())
}

View file

@ -0,0 +1,13 @@
// Hypervisor backend implementations behind the `Hypervisor` trait.
pub mod backends;
// cloud-init NoCloud seed ISO generation.
pub mod cloudinit;
// Diagnostic error type (`VmError`) and crate-wide `Result` alias.
pub mod error;
// Image download/cache plus qemu-img helpers (format detection, overlays).
pub mod image;
// SSH helpers (blocking ssh2 calls with an async retry wrapper).
pub mod ssh;
// The async `Hypervisor` trait and `ConsoleEndpoint`.
pub mod traits;
// Shared data types (specs, handles, states) used across backends.
pub mod types;
// Re-export key types at crate root for convenience.
pub use backends::RouterHypervisor;
pub use error::{Result, VmError};
pub use traits::{ConsoleEndpoint, Hypervisor};
pub use types::*;

View file

@ -0,0 +1,169 @@
use std::io::Read;
use std::net::TcpStream;
use std::path::Path;
use std::time::Duration;
use ssh2::Session;
use tracing::warn;
use crate::error::{Result, VmError};
use crate::types::SshConfig;
/// Establish an SSH session to the given IP using the provided config.
///
/// Tries in-memory key first, then key file path.
pub fn connect(ip: &str, config: &SshConfig) -> Result<Session> {
    let addr = format!("{ip}:22");
    let tcp = TcpStream::connect(&addr).map_err(|e| VmError::SshFailed {
        detail: format!("TCP connect to {addr}: {e}"),
    })?;
    let mut sess = Session::new().map_err(|e| VmError::SshFailed {
        detail: format!("session init: {e}"),
    })?;
    sess.set_tcp_stream(tcp);
    sess.handshake().map_err(|e| VmError::SshFailed {
        detail: format!("handshake with {addr}: {e}"),
    })?;
    // Key source priority: in-memory PEM wins over an on-disk key file.
    match (&config.private_key_pem, &config.private_key_path) {
        (Some(pem), _) => {
            sess.userauth_pubkey_memory(&config.user, None, pem, None)
                .map_err(|e| VmError::SshFailed {
                    detail: format!("pubkey auth (memory) as {}: {e}", config.user),
                })?;
        }
        (None, Some(key_path)) => {
            sess.userauth_pubkey_file(&config.user, None, key_path, None)
                .map_err(|e| VmError::SshFailed {
                    detail: format!(
                        "pubkey auth (file {}) as {}: {e}",
                        key_path.display(),
                        config.user
                    ),
                })?;
        }
        (None, None) => {
            return Err(VmError::SshFailed {
                detail: "no SSH private key configured (neither in-memory PEM nor file path)"
                    .into(),
            });
        }
    }
    if !sess.authenticated() {
        return Err(VmError::SshFailed {
            detail: "session not authenticated after auth attempt".into(),
        });
    }
    Ok(sess)
}
/// Execute a command over an existing SSH session.
///
/// Returns `(stdout, stderr, exit_code)`.
///
/// Both streams are read to EOF before the channel is closed. The exit code
/// defaults to 1 when the server reports none.
pub fn exec(sess: &Session, cmd: &str) -> Result<(String, String, i32)> {
    let mut channel = sess.channel_session().map_err(|e| VmError::SshFailed {
        detail: format!("channel session: {e}"),
    })?;
    channel.exec(cmd).map_err(|e| VmError::SshFailed {
        detail: format!("exec '{cmd}': {e}"),
    })?;
    // stdout is drained first, then the stderr (extended-data) stream.
    // NOTE(review): a command emitting very large stderr before closing stdout
    // could block here — confirm against expected workloads.
    let mut stdout = String::new();
    channel
        .read_to_string(&mut stdout)
        .map_err(|e| VmError::SshFailed {
            detail: format!("read stdout: {e}"),
        })?;
    let mut stderr = String::new();
    channel
        .stderr()
        .read_to_string(&mut stderr)
        .map_err(|e| VmError::SshFailed {
            detail: format!("read stderr: {e}"),
        })?;
    channel.wait_close().map_err(|e| VmError::SshFailed {
        detail: format!("wait close: {e}"),
    })?;
    // Missing exit status is treated as failure (1).
    let exit_code = channel.exit_status().unwrap_or(1);
    Ok((stdout, stderr, exit_code))
}
/// Upload a local file to a remote path via SFTP.
///
/// Streams the file through `std::io::copy` in fixed-size chunks instead of
/// buffering the whole file in memory, so large files (disk images) do not
/// balloon the process RSS.
pub fn upload(sess: &Session, local: &Path, remote: &Path) -> Result<()> {
    let sftp = sess.sftp().map_err(|e| VmError::SshFailed {
        detail: format!("SFTP init: {e}"),
    })?;
    let mut local_file = std::fs::File::open(local).map_err(|e| VmError::SshFailed {
        detail: format!("open local file {}: {e}", local.display()),
    })?;
    let mut remote_file = sftp.create(remote).map_err(|e| VmError::SshFailed {
        detail: format!("SFTP create {}: {e}", remote.display()),
    })?;
    // Chunked copy; errors may originate from either the local read or the
    // remote write, so the message names both endpoints.
    std::io::copy(&mut local_file, &mut remote_file).map_err(|e| VmError::SshFailed {
        detail: format!("SFTP copy {} -> {}: {e}", local.display(), remote.display()),
    })?;
    Ok(())
}
/// Connect with exponential backoff retry.
///
/// Retries the connection until `timeout` elapses, with exponential backoff capped at 5 seconds.
///
/// The blocking `connect` runs on tokio's blocking pool; both connect errors
/// and join failures are logged before each retry so retries are never silent.
pub async fn connect_with_retry(
    ip: &str,
    config: &SshConfig,
    timeout: Duration,
) -> Result<Session> {
    let deadline = tokio::time::Instant::now() + timeout;
    let mut backoff = Duration::from_secs(1);
    let mut attempt: u32 = 0;
    loop {
        attempt += 1;
        let ip_owned = ip.to_string();
        let config_clone = config.clone();
        // Run the blocking SSH connect on a blocking thread
        let result = tokio::task::spawn_blocking(move || connect(&ip_owned, &config_clone)).await;
        match result {
            Ok(Ok(sess)) => return Ok(sess),
            Ok(Err(e)) => {
                if tokio::time::Instant::now() >= deadline {
                    return Err(e);
                }
                warn!(
                    attempt,
                    ip = %ip,
                    error = %e,
                    "SSH connect failed; retrying"
                );
            }
            Err(join_err) => {
                if tokio::time::Instant::now() >= deadline {
                    return Err(VmError::SshFailed {
                        detail: format!("spawn_blocking join error: {join_err}"),
                    });
                }
                // Log join failures too, for parity with the branch above —
                // previously these retried silently.
                warn!(
                    attempt,
                    ip = %ip,
                    error = %join_err,
                    "SSH connect task failed to join; retrying"
                );
            }
        }
        // Never sleep past the deadline.
        let remaining = deadline.duration_since(tokio::time::Instant::now());
        let sleep_dur = backoff.min(remaining);
        tokio::time::sleep(sleep_dur).await;
        backoff = backoff.saturating_mul(2).min(Duration::from_secs(5));
    }
}

View file

@ -0,0 +1,47 @@
use std::time::Duration;
use crate::error::Result;
use crate::types::{VmHandle, VmSpec, VmState};
/// Async hypervisor trait implemented by each backend (QEMU, Propolis, Noop).
///
/// The lifecycle is: `prepare` -> `start` -> (optionally `suspend`/`resume`) -> `stop` -> `destroy`.
///
/// Methods return `impl Future ... + Send` (return-position impl trait)
/// instead of `async fn` so the futures carry an explicit `Send` bound,
/// keeping the trait usable from multi-threaded executors.
pub trait Hypervisor: Send + Sync {
    /// Allocate resources (overlay disk, cloud-init ISO, zone config, etc.) and return a handle.
    fn prepare(&self, spec: &VmSpec) -> impl Future<Output = Result<VmHandle>> + Send;
    /// Boot the VM.
    fn start(&self, vm: &VmHandle) -> impl Future<Output = Result<()>> + Send;
    /// Gracefully stop the VM. Falls back to forceful termination after `timeout`.
    fn stop(&self, vm: &VmHandle, timeout: Duration) -> impl Future<Output = Result<()>> + Send;
    /// Pause VM execution (freeze vCPUs).
    fn suspend(&self, vm: &VmHandle) -> impl Future<Output = Result<()>> + Send;
    /// Resume a suspended VM.
    fn resume(&self, vm: &VmHandle) -> impl Future<Output = Result<()>> + Send;
    /// Stop the VM (if running) and clean up all resources.
    ///
    /// Consumes the handle by value: it is invalid once destroyed.
    fn destroy(&self, vm: VmHandle) -> impl Future<Output = Result<()>> + Send;
    /// Query the current state of the VM.
    fn state(&self, vm: &VmHandle) -> impl Future<Output = Result<VmState>> + Send;
    /// Attempt to discover the guest's IP address.
    fn guest_ip(&self, vm: &VmHandle) -> impl Future<Output = Result<String>> + Send;
    /// Return a path or address for attaching to the VM's serial console.
    ///
    /// Synchronous — the only non-future method on the trait.
    fn console_endpoint(&self, vm: &VmHandle) -> Result<ConsoleEndpoint>;
}
/// Describes how to connect to a VM's serial console.
///
/// Returned by [`Hypervisor::console_endpoint`]; the variant depends on
/// which backend manages the VM.
#[derive(Debug, Clone)]
pub enum ConsoleEndpoint {
    /// Unix domain socket path (QEMU).
    UnixSocket(std::path::PathBuf),
    /// WebSocket URL (Propolis).
    WebSocket(String),
    /// Not available (Noop).
    None,
}

View file

@ -0,0 +1,128 @@
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Identifies which backend manages a VM.
///
/// Serialized in lowercase (e.g. inside persisted `VmHandle`s).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum BackendTag {
    /// Stub backend that manages no real hypervisor.
    Noop,
    /// QEMU-KVM process-based backend (Linux).
    Qemu,
    /// Propolis/bhyve zone-based backend (illumos).
    Propolis,
}
impl std::fmt::Display for BackendTag {
    /// Formats the tag as its lowercase name, matching the
    /// `#[serde(rename_all = "lowercase")]` wire representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Noop => "noop",
            Self::Qemu => "qemu",
            Self::Propolis => "propolis",
        };
        f.write_str(label)
    }
}
/// Full specification for creating a VM.
#[derive(Debug, Clone)]
pub struct VmSpec {
    /// Human-readable VM name.
    pub name: String,
    /// Path to the base disk image on the host.
    pub image_path: PathBuf,
    /// Number of virtual CPUs.
    pub vcpus: u16,
    /// Guest memory size in megabytes.
    pub memory_mb: u64,
    /// Desired disk size in GB (overlay resize); `None` keeps the image's size.
    pub disk_gb: Option<u32>,
    /// Guest networking mode.
    pub network: NetworkConfig,
    /// Optional cloud-init NoCloud seed configuration.
    pub cloud_init: Option<CloudInitConfig>,
    /// Optional SSH connection settings for reaching the guest.
    pub ssh: Option<SshConfig>,
}
/// Network configuration for a VM.
///
/// Defaults to [`NetworkConfig::User`] (SLIRP), which needs no host
/// privileges.
#[derive(Debug, Clone, Default)]
pub enum NetworkConfig {
    /// TAP device bridged to a host bridge (default on Linux).
    Tap { bridge: String }, // `bridge`: name of the host bridge to attach the TAP to
    /// SLIRP user-mode networking (no root required).
    #[default]
    User,
    /// illumos VNIC for exclusive-IP zones.
    Vnic { name: String }, // `name`: the VNIC to assign to the zone
    /// No networking.
    None,
}
/// Cloud-init NoCloud configuration.
///
/// Used to build the NoCloud seed ISO attached to the guest at boot.
#[derive(Debug, Clone)]
pub struct CloudInitConfig {
    /// Raw user-data content (typically a cloud-config YAML).
    pub user_data: Vec<u8>,
    /// Instance ID for cloud-init metadata.
    // NOTE(review): presumably the backend synthesizes one when `None` — confirm.
    pub instance_id: Option<String>,
    /// Hostname for the guest.
    pub hostname: Option<String>,
}
/// SSH connection configuration.
// NOTE(review): precedence when both `private_key_path` and
// `private_key_pem` are set is backend-defined — confirm in ssh::connect.
#[derive(Debug, Clone)]
pub struct SshConfig {
    /// Username to connect as.
    pub user: String,
    /// OpenSSH public key (for cloud-init authorized_keys injection).
    pub public_key: Option<String>,
    /// Path to a private key file on the host.
    pub private_key_path: Option<PathBuf>,
    /// In-memory PEM-encoded private key.
    pub private_key_pem: Option<String>,
}
/// Runtime handle for a managed VM.
///
/// Serializable so callers (e.g. vmctl's on-disk state store) can persist
/// handles and reload them across process restarts. Optional fields are
/// populated only by the backends that use them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmHandle {
    /// Unique identifier for this VM instance.
    pub id: String,
    /// Human-readable name.
    pub name: String,
    /// Which backend manages this VM.
    pub backend: BackendTag,
    /// Working directory for this VM's files.
    pub work_dir: PathBuf,
    /// Path to the QCOW2 overlay (QEMU) or raw disk.
    pub overlay_path: Option<PathBuf>,
    /// Path to the cloud-init seed ISO.
    pub seed_iso_path: Option<PathBuf>,
    /// QEMU process PID (Linux).
    pub pid: Option<u32>,
    /// Path to the QMP Unix socket (QEMU).
    pub qmp_socket: Option<PathBuf>,
    /// Path to the serial console Unix socket (QEMU).
    pub console_socket: Option<PathBuf>,
    /// VNC listen address (e.g. "127.0.0.1:5900").
    pub vnc_addr: Option<String>,
}
/// Observed VM lifecycle state.
///
/// Serialized in lowercase; `Display` (below) prints the same names.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum VmState {
    /// Backend is setting up resources.
    Preparing,
    /// Resources allocated, ready to start.
    Prepared,
    /// VM is running.
    Running,
    /// VM has been stopped (gracefully or forcibly).
    Stopped,
    /// VM encountered an error.
    Failed,
    /// VM and resources have been cleaned up.
    Destroyed,
}
impl std::fmt::Display for VmState {
    /// Prints the lowercase state name, mirroring the serde representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Preparing => "preparing",
            Self::Prepared => "prepared",
            Self::Running => "running",
            Self::Stopped => "stopped",
            Self::Failed => "failed",
            Self::Destroyed => "destroyed",
        };
        f.write_str(label)
    }
}

22
crates/vmctl/Cargo.toml Normal file
View file

@ -0,0 +1,22 @@
[package]
name = "vmctl"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[[bin]]
name = "vmctl"
path = "src/main.rs"

[dependencies]
vm-manager = { path = "../vm-manager" }
tokio.workspace = true
miette.workspace = true
clap.workspace = true
serde.workspace = true
serde_json.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
uuid.workspace = true
# `dirs` is not declared in the root [workspace.dependencies] table, so
# `dirs.workspace = true` would fail to resolve; pin the version here.
dirs = "6"

View file

@ -0,0 +1,85 @@
use clap::Args;
use miette::{IntoDiagnostic, Result};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use vm_manager::{ConsoleEndpoint, Hypervisor, RouterHypervisor};
use super::state;
// CLI arguments for `vmctl console`. The `///` comments on fields are
// emitted as clap help text, so they are left as written.
#[derive(Args)]
pub struct ConsoleArgs {
    /// VM name
    name: String,
}
/// Attach the current terminal to the VM's serial console.
///
/// For QEMU-backed VMs this bridges stdin/stdout to the console Unix
/// socket until the socket closes or the `Ctrl+]` (0x1d) detach byte
/// is seen on stdin. Other backends only print connection info.
pub async fn run(args: ConsoleArgs) -> Result<()> {
    let store = state::load_store().await?;
    let handle = store
        .get(&args.name)
        .ok_or_else(|| miette::miette!("VM '{}' not found", args.name))?;
    let hv = RouterHypervisor::new(None, None);
    let endpoint = hv.console_endpoint(handle).into_diagnostic()?;
    match endpoint {
        ConsoleEndpoint::UnixSocket(path) => {
            println!(
                "Connecting to console at {} (Ctrl+] to detach)...",
                path.display()
            );
            let mut sock = tokio::net::UnixStream::connect(&path)
                .await
                .into_diagnostic()?;
            let mut stdin = tokio::io::stdin();
            let mut stdout = tokio::io::stdout();
            let (mut read_half, mut write_half) = sock.split();
            // stdin -> socket: forward keystrokes until EOF or detach.
            let to_sock = async {
                let mut buf = [0u8; 1024];
                loop {
                    let n = stdin.read(&mut buf).await?;
                    if n == 0 {
                        break;
                    }
                    // Ctrl+] (0x1d) detaches. Forward any bytes typed before
                    // it in the same read so they are not silently dropped.
                    if let Some(pos) = buf[..n].iter().position(|&b| b == 0x1d) {
                        write_half.write_all(&buf[..pos]).await?;
                        break;
                    }
                    write_half.write_all(&buf[..n]).await?;
                }
                Ok::<_, std::io::Error>(())
            };
            // socket -> stdout: mirror console output as it arrives.
            let from_sock = async {
                let mut buf = [0u8; 1024];
                loop {
                    let n = read_half.read(&mut buf).await?;
                    if n == 0 {
                        break;
                    }
                    stdout.write_all(&buf[..n]).await?;
                    stdout.flush().await?;
                }
                Ok::<_, std::io::Error>(())
            };
            // Whichever direction finishes first ends the session.
            tokio::select! {
                r = to_sock => { let _ = r; }
                r = from_sock => { let _ = r; }
            }
            println!("\nDetached from console.");
        }
        ConsoleEndpoint::WebSocket(url) => {
            println!("Console available at WebSocket: {url}");
            println!("Use a WebSocket client to connect.");
        }
        ConsoleEndpoint::None => {
            println!("No console available for this backend.");
        }
    }
    Ok(())
}

View file

@ -0,0 +1,136 @@
use std::path::PathBuf;
use clap::Args;
use miette::{IntoDiagnostic, Result};
use tracing::info;
use vm_manager::{CloudInitConfig, Hypervisor, NetworkConfig, RouterHypervisor, SshConfig, VmSpec};
use super::state;
// CLI arguments for `vmctl create`. The `///` comments are emitted as
// clap help text, so they are left as written.
#[derive(Args)]
pub struct CreateArgs {
    /// VM name
    #[arg(long)]
    name: String,
    /// Path to a local disk image
    // Exactly one of `image` / `image_url` must be given (checked in run()).
    #[arg(long)]
    image: Option<PathBuf>,
    /// URL to download an image from
    #[arg(long)]
    image_url: Option<String>,
    /// Number of vCPUs
    #[arg(long, default_value = "1")]
    vcpus: u16,
    /// Memory in MB
    #[arg(long, default_value = "1024")]
    memory: u64,
    /// Disk size in GB (overlay resize)
    #[arg(long)]
    disk: Option<u32>,
    /// Bridge name for TAP networking
    #[arg(long)]
    bridge: Option<String>,
    /// Path to cloud-init user-data file
    #[arg(long)]
    cloud_init: Option<PathBuf>,
    /// Path to SSH public key file (injected via cloud-init)
    #[arg(long)]
    ssh_key: Option<PathBuf>,
    /// Also start the VM after creation
    #[arg(long)]
    start: bool,
}
/// Create a VM: resolve the image, build the spec, prepare resources via
/// the hypervisor, persist the handle, and optionally start it.
pub async fn run(args: CreateArgs) -> Result<()> {
    // Resolve image: a local path wins; otherwise download into the cache.
    let image_path = if let Some(ref path) = args.image {
        path.clone()
    } else if let Some(ref url) = args.image_url {
        let mgr = vm_manager::image::ImageManager::new();
        mgr.pull(url, Some(&args.name)).await.into_diagnostic()?
    } else {
        miette::bail!("either --image or --image-url must be specified");
    };
    // Build cloud-init config if user-data or ssh key provided.
    // NOTE(review): when both --cloud-init and --ssh-key are given, the
    // user-data file wins and the key is NOT injected into
    // authorized_keys — confirm this precedence is intended.
    let cloud_init = if args.cloud_init.is_some() || args.ssh_key.is_some() {
        let user_data = if let Some(ref path) = args.cloud_init {
            tokio::fs::read(path).await.into_diagnostic()?
        } else if let Some(ref key_path) = args.ssh_key {
            let pubkey = tokio::fs::read_to_string(key_path)
                .await
                .into_diagnostic()?;
            // Generate a minimal cloud-config for user "vm" with the key.
            let (ud, _) = vm_manager::cloudinit::build_cloud_config(
                "vm",
                pubkey.trim(),
                &args.name,
                &args.name,
            );
            ud
        } else {
            Vec::new()
        };
        Some(CloudInitConfig {
            user_data,
            instance_id: Some(args.name.clone()),
            hostname: Some(args.name.clone()),
        })
    } else {
        None
    };
    // Build SSH config if key provided. The CLI takes the *public* key;
    // "vm" matches the user created by build_cloud_config above.
    let ssh = args.ssh_key.as_ref().map(|key_path| SshConfig {
        user: "vm".into(),
        public_key: None,
        private_key_path: Some(key_path.clone()),
        private_key_pem: None,
    });
    // Network config: TAP when a bridge is named, SLIRP otherwise.
    let network = if let Some(bridge) = args.bridge {
        NetworkConfig::Tap { bridge }
    } else {
        NetworkConfig::User
    };
    let spec = VmSpec {
        name: args.name.clone(),
        image_path,
        vcpus: args.vcpus,
        memory_mb: args.memory,
        disk_gb: args.disk,
        network,
        cloud_init,
        ssh,
    };
    let hv = RouterHypervisor::new(None, None);
    let handle = hv.prepare(&spec).await.into_diagnostic()?;
    info!(name = %args.name, id = %handle.id, "VM created");
    // Persist handle so later vmctl invocations can find this VM.
    let mut store = state::load_store().await?;
    store.insert(args.name.clone(), handle.clone());
    state::save_store(&store).await?;
    println!("VM '{}' created (id: {})", args.name, handle.id);
    if args.start {
        hv.start(&handle).await.into_diagnostic()?;
        println!("VM '{}' started", args.name);
    }
    Ok(())
}

View file

@ -0,0 +1,25 @@
use clap::Args;
use miette::{IntoDiagnostic, Result};
use vm_manager::{Hypervisor, RouterHypervisor};
use super::state;
// CLI arguments for `vmctl destroy`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct DestroyArgs {
    /// VM name
    name: String,
}
/// Destroy a VM: tear down its backend resources and drop it from the store.
pub async fn run(args: DestroyArgs) -> Result<()> {
    let mut store = state::load_store().await?;
    let Some(handle) = store.remove(&args.name) else {
        return Err(miette::miette!("VM '{}' not found", args.name));
    };
    let hv = RouterHypervisor::new(None, None);
    hv.destroy(handle).await.into_diagnostic()?;
    // Persist the removal only after backend cleanup succeeded; on failure
    // the in-memory removal is discarded and the entry survives.
    state::save_store(&store).await?;
    println!("VM '{}' destroyed", args.name);
    Ok(())
}

View file

@ -0,0 +1,88 @@
use std::path::PathBuf;
use clap::{Args, Subcommand};
use miette::{IntoDiagnostic, Result};
// `vmctl image` wrapper: all real work lives in the subcommand actions.
#[derive(Args)]
pub struct ImageCommand {
    #[command(subcommand)]
    action: ImageAction,
}
// Subcommands of `vmctl image`. The `///` comments are clap help text.
#[derive(Subcommand)]
enum ImageAction {
    /// Download an image to the local cache
    Pull(PullArgs),
    /// List cached images
    List,
    /// Show image format and details
    Inspect(InspectArgs),
}
// Arguments for `vmctl image pull`.
#[derive(Args)]
struct PullArgs {
    /// URL to download
    url: String,
    /// Name to save as in the cache
    #[arg(long)]
    name: Option<String>,
}
// Arguments for `vmctl image inspect`.
#[derive(Args)]
struct InspectArgs {
    /// Path to the image file
    path: PathBuf,
}
/// Render a byte count as a human-readable size: GB at or above 1 GiB,
/// MB below (one decimal place, matching the table/inspect output).
fn format_size(bytes: u64) -> String {
    const GIB: u64 = 1_073_741_824;
    if bytes >= GIB {
        format!("{:.1} GB", bytes as f64 / GIB as f64)
    } else {
        format!("{:.1} MB", bytes as f64 / 1_048_576.0)
    }
}

/// Dispatch `vmctl image` subcommands: pull, list, inspect.
pub async fn run(args: ImageCommand) -> Result<()> {
    match args.action {
        ImageAction::Pull(pull) => {
            let mgr = vm_manager::image::ImageManager::new();
            let path = mgr
                .pull(&pull.url, pull.name.as_deref())
                .await
                .into_diagnostic()?;
            println!("Image cached at: {}", path.display());
        }
        ImageAction::List => {
            let mgr = vm_manager::image::ImageManager::new();
            let images = mgr.list().await.into_diagnostic()?;
            if images.is_empty() {
                println!("No cached images.");
                return Ok(());
            }
            println!("{:<40} {:<12} PATH", "NAME", "SIZE");
            println!("{}", "-".repeat(80));
            for img in images {
                // Size formatting shared with `inspect` via format_size.
                println!(
                    "{:<40} {:<12} {}",
                    img.name,
                    format_size(img.size_bytes),
                    img.path.display()
                );
            }
        }
        ImageAction::Inspect(inspect) => {
            let fmt = vm_manager::image::detect_format(&inspect.path)
                .await
                .into_diagnostic()?;
            println!("Format: {}", fmt);
            println!("Path: {}", inspect.path.display());
            // Size is best-effort: skip the line if metadata is unavailable.
            if let Ok(meta) = tokio::fs::metadata(&inspect.path).await {
                println!("Size: {}", format_size(meta.len()));
            }
        }
    }
    Ok(())
}

View file

@ -0,0 +1,34 @@
use clap::Args;
use miette::Result;
use super::state;
// `vmctl list` takes no arguments; the empty struct keeps the clap
// subcommand signature uniform with the other commands.
#[derive(Args)]
pub struct ListArgs;
/// Print a table of all known VMs, sorted by name.
pub async fn run(_args: ListArgs) -> Result<()> {
    let store = state::load_store().await?;
    if store.is_empty() {
        println!("No VMs found.");
        return Ok(());
    }
    println!("{:<20} {:<12} {:<40} WORK DIR", "NAME", "BACKEND", "ID");
    println!("{}", "-".repeat(90));
    let mut entries: Vec<_> = store.iter().collect();
    // Compare the borrowed keys directly rather than cloning a String per
    // key computation as `sort_by_key(|..| name.clone())` would.
    entries.sort_unstable_by(|(a, _), (b, _)| a.cmp(b));
    for (name, handle) in entries {
        println!(
            "{:<20} {:<12} {:<40} {}",
            name,
            handle.backend,
            handle.id,
            handle.work_dir.display()
        );
    }
    Ok(())
}

View file

@ -0,0 +1,64 @@
pub mod console;
pub mod create;
pub mod destroy;
pub mod image;
pub mod list;
pub mod ssh;
pub mod start;
pub mod state;
pub mod status;
pub mod stop;
use clap::{Parser, Subcommand};
use miette::Result;
// Root CLI parser. `name`/`about`/`version` come from the attribute, so
// no `///` doc comment (which clap would also surface) is used here.
#[derive(Parser)]
#[command(name = "vmctl", about = "Manage virtual machines", version)]
pub struct Cli {
    #[command(subcommand)]
    command: Command,
}
// Top-level vmctl subcommands. The `///` comments double as `--help`
// text via clap's derive, so keep them user-facing.
#[derive(Subcommand)]
enum Command {
    /// Create a new VM (and optionally start it)
    Create(create::CreateArgs),
    /// Start an existing VM
    Start(start::StartArgs),
    /// Stop a running VM
    Stop(stop::StopArgs),
    /// Destroy a VM and clean up all resources
    Destroy(destroy::DestroyArgs),
    /// List all VMs
    List(list::ListArgs),
    /// Show VM status
    Status(status::StatusArgs),
    /// Attach to a VM's serial console
    Console(console::ConsoleArgs),
    /// SSH into a VM
    Ssh(ssh::SshArgs),
    /// Suspend a running VM (pause vCPUs)
    Suspend(start::SuspendArgs),
    /// Resume a suspended VM
    Resume(start::ResumeArgs),
    /// Manage VM images
    Image(image::ImageCommand),
}
impl Cli {
    /// Dispatch the parsed subcommand to its handler module.
    // Suspend/Resume arg types live in the `start` module alongside
    // their handlers, hence the start:: paths below.
    pub async fn run(self) -> Result<()> {
        match self.command {
            Command::Create(args) => create::run(args).await,
            Command::Start(args) => start::run_start(args).await,
            Command::Stop(args) => stop::run(args).await,
            Command::Destroy(args) => destroy::run(args).await,
            Command::List(args) => list::run(args).await,
            Command::Status(args) => status::run(args).await,
            Command::Console(args) => console::run(args).await,
            Command::Ssh(args) => ssh::run(args).await,
            Command::Suspend(args) => start::run_suspend(args).await,
            Command::Resume(args) => start::run_resume(args).await,
            Command::Image(args) => image::run(args).await,
        }
    }
}

View file

@ -0,0 +1,78 @@
use std::path::PathBuf;
use std::time::Duration;
use clap::Args;
use miette::{IntoDiagnostic, Result};
use vm_manager::{Hypervisor, RouterHypervisor, SshConfig};
use super::state;
// CLI arguments for `vmctl ssh`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct SshArgs {
    /// VM name
    name: String,
    /// SSH user
    // Default "vm" matches the user that `vmctl create` provisions via cloud-init.
    #[arg(long, default_value = "vm")]
    user: String,
    /// Path to SSH private key
    #[arg(long)]
    key: Option<PathBuf>,
}
/// Open an interactive SSH session to a VM.
///
/// Verifies reachability with an in-process libssh2 connection (with
/// retry), then execs the system `ssh` binary for the interactive TTY.
pub async fn run(args: SshArgs) -> Result<()> {
    let store = state::load_store().await?;
    let handle = store
        .get(&args.name)
        .ok_or_else(|| miette::miette!("VM '{}' not found", args.name))?;
    let hv = RouterHypervisor::new(None, None);
    let ip = hv.guest_ip(handle).await.into_diagnostic()?;
    // Default key: ~/.ssh/id_ed25519 (with /root as a home-dir fallback).
    // NOTE(review): no check that this file exists before handing it to ssh.
    let key_path = args.key.unwrap_or_else(|| {
        dirs::home_dir()
            .unwrap_or_else(|| PathBuf::from("/root"))
            .join(".ssh")
            .join("id_ed25519")
    });
    let config = SshConfig {
        user: args.user.clone(),
        public_key: None,
        private_key_path: Some(key_path),
        private_key_pem: None,
    };
    println!("Connecting to {}@{}...", args.user, ip);
    // Wait up to 30s for sshd in the guest to come up.
    let sess = vm_manager::ssh::connect_with_retry(&ip, &config, Duration::from_secs(30))
        .await
        .into_diagnostic()?;
    // Drop the libssh2 session (just used to verify connectivity) and exec system ssh.
    // We use the system ssh binary for interactive terminal support.
    drop(sess);
    // Host-key checking is disabled: VM addresses are ephemeral and would
    // otherwise pollute (or conflict with) the user's known_hosts.
    let status = tokio::process::Command::new("ssh")
        .arg("-o")
        .arg("StrictHostKeyChecking=no")
        .arg("-o")
        .arg("UserKnownHostsFile=/dev/null")
        .args(
            config
                .private_key_path
                .iter()
                .flat_map(|p| ["-i".to_string(), p.display().to_string()]),
        )
        .arg(format!("{}@{}", args.user, ip))
        .status()
        .await
        .into_diagnostic()?;
    // NOTE(review): a non-zero exit from the remote shell also lands here,
    // not only connection failures — confirm that is acceptable.
    if !status.success() {
        miette::bail!("SSH exited with status {}", status);
    }
    Ok(())
}

View file

@ -0,0 +1,62 @@
use clap::Args;
use miette::{IntoDiagnostic, Result};
use vm_manager::{Hypervisor, RouterHypervisor};
use super::state;
// CLI arguments for `vmctl start`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct StartArgs {
    /// VM name
    name: String,
}
/// Boot a previously created VM by name.
pub async fn run_start(args: StartArgs) -> Result<()> {
    let store = state::load_store().await?;
    let Some(handle) = store.get(&args.name) else {
        return Err(miette::miette!(
            "VM '{}' not found — run `vmctl list` to see available VMs",
            args.name
        ));
    };
    RouterHypervisor::new(None, None)
        .start(handle)
        .await
        .into_diagnostic()?;
    println!("VM '{}' started", args.name);
    Ok(())
}
// CLI arguments for `vmctl suspend`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct SuspendArgs {
    /// VM name
    name: String,
}
/// Pause a running VM's vCPUs.
pub async fn run_suspend(args: SuspendArgs) -> Result<()> {
    let store = state::load_store().await?;
    let Some(handle) = store.get(&args.name) else {
        return Err(miette::miette!("VM '{}' not found", args.name));
    };
    RouterHypervisor::new(None, None)
        .suspend(handle)
        .await
        .into_diagnostic()?;
    println!("VM '{}' suspended", args.name);
    Ok(())
}
// CLI arguments for `vmctl resume`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct ResumeArgs {
    /// VM name
    name: String,
}
/// Resume a previously suspended VM.
pub async fn run_resume(args: ResumeArgs) -> Result<()> {
    let store = state::load_store().await?;
    let Some(handle) = store.get(&args.name) else {
        return Err(miette::miette!("VM '{}' not found", args.name));
    };
    RouterHypervisor::new(None, None)
        .resume(handle)
        .await
        .into_diagnostic()?;
    println!("VM '{}' resumed", args.name);
    Ok(())
}

View file

@ -0,0 +1,39 @@
//! Persistent state for vmctl: maps VM name → VmHandle in a JSON file.
use std::collections::HashMap;
use std::path::PathBuf;
use miette::{IntoDiagnostic, Result};
use vm_manager::VmHandle;
/// State file location: `{XDG_DATA_HOME}/vmctl/vms.json`
fn state_path() -> PathBuf {
    // Fall back to /tmp when the platform data directory is unknown.
    let base = dirs::data_dir().unwrap_or_else(|| PathBuf::from("/tmp"));
    base.join("vmctl").join("vms.json")
}
pub type Store = HashMap<String, VmHandle>;
/// Load the VM store from disk. Returns an empty map if the file doesn't exist.
pub async fn load_store() -> Result<Store> {
    let path = state_path();
    // Read and handle NotFound directly instead of a separate
    // `path.exists()` check: that call blocks inside an async fn and is
    // racy (the file could vanish between the check and the read).
    let data = match tokio::fs::read_to_string(&path).await {
        Ok(data) => data,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(HashMap::new()),
        Err(e) => Err(e).into_diagnostic()?,
    };
    let store: Store = serde_json::from_str(&data).into_diagnostic()?;
    Ok(store)
}
/// Save the VM store to disk.
pub async fn save_store(store: &Store) -> Result<()> {
    let path = state_path();
    // Make sure the parent directory exists before writing the file.
    if let Some(dir) = path.parent() {
        tokio::fs::create_dir_all(dir).await.into_diagnostic()?;
    }
    let json = serde_json::to_string_pretty(store).into_diagnostic()?;
    tokio::fs::write(&path, json).await.into_diagnostic()?;
    Ok(())
}

View file

@ -0,0 +1,42 @@
use clap::Args;
use miette::{IntoDiagnostic, Result};
use vm_manager::{Hypervisor, RouterHypervisor};
use super::state;
// CLI arguments for `vmctl status`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct StatusArgs {
    /// VM name
    name: String,
}
/// Print stored metadata plus the live backend state for one VM.
pub async fn run(args: StatusArgs) -> Result<()> {
    let store = state::load_store().await?;
    let Some(handle) = store.get(&args.name) else {
        return Err(miette::miette!("VM '{}' not found", args.name));
    };
    let hv = RouterHypervisor::new(None, None);
    let vm_state = hv.state(handle).await.into_diagnostic()?;
    println!("Name: {}", handle.name);
    println!("ID: {}", handle.id);
    println!("Backend: {}", handle.backend);
    println!("State: {}", vm_state);
    println!("WorkDir: {}", handle.work_dir.display());
    // Optional fields are printed only when populated.
    if let Some(overlay) = handle.overlay_path.as_deref() {
        println!("Overlay: {}", overlay.display());
    }
    if let Some(seed) = handle.seed_iso_path.as_deref() {
        println!("Seed: {}", seed.display());
    }
    if let Some(pid) = handle.pid {
        println!("PID: {}", pid);
    }
    if let Some(vnc) = handle.vnc_addr.as_deref() {
        println!("VNC: {}", vnc);
    }
    Ok(())
}

View file

@ -0,0 +1,31 @@
use std::time::Duration;
use clap::Args;
use miette::{IntoDiagnostic, Result};
use vm_manager::{Hypervisor, RouterHypervisor};
use super::state;
// CLI arguments for `vmctl stop`. Field `///` comments are clap help text.
#[derive(Args)]
pub struct StopArgs {
    /// VM name
    name: String,
    /// Graceful shutdown timeout in seconds
    #[arg(long, default_value = "30")]
    timeout: u64,
}
/// Gracefully stop a VM, forcing termination once the timeout elapses.
pub async fn run(args: StopArgs) -> Result<()> {
    let store = state::load_store().await?;
    let Some(handle) = store.get(&args.name) else {
        return Err(miette::miette!("VM '{}' not found", args.name));
    };
    let grace = Duration::from_secs(args.timeout);
    RouterHypervisor::new(None, None)
        .stop(handle, grace)
        .await
        .into_diagnostic()?;
    println!("VM '{}' stopped", args.name);
    Ok(())
}

19
crates/vmctl/src/main.rs Normal file
View file

@ -0,0 +1,19 @@
use clap::Parser;
use miette::Result;
use tracing_subscriber::EnvFilter;
mod commands;
use commands::Cli;
#[tokio::main]
async fn main() -> Result<()> {
    // Honor RUST_LOG when set; otherwise default to the `info` level.
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt().with_env_filter(filter).init();
    Cli::parse().run().await
}