Add vm-manager adapter layer to orchestrator

- Add vm-manager as dependency of orchestrator
- Create vm_adapter.rs that bridges orchestrator's Hypervisor trait
  to vm-manager's RouterHypervisor (QEMU/Propolis/Noop backends)
- Add Qemu and Propolis variants to BackendTag
- Add console_socket, ssh_host_port, mac_addr fields to VmHandle
- Adapter uses user-mode networking by default for containerization
- Maps orchestrator VmSpec + JobContext → vm-manager VmSpec with
  CloudInitConfig and SshConfig
This commit is contained in:
Till Wegmueller 2026-04-07 15:46:20 +02:00
parent ceaac25a7e
commit a60053f030
5 changed files with 244 additions and 1 deletions

View file

@ -10,6 +10,7 @@ libvirt = []
[dependencies]
common = { path = "../common" }
vm-manager = { path = "../../../vm-manager/crates/vm-manager" }
clap = { version = "4", features = ["derive", "env"] }
miette = { version = "7", features = ["fancy"] }
thiserror = "1"

View file

@ -8,6 +8,8 @@ use tracing::info;
#[derive(Debug, Clone, Copy)]
pub enum BackendTag {
Noop,
Qemu,
Propolis,
#[cfg(all(target_os = "linux", feature = "libvirt"))]
Libvirt,
#[cfg(target_os = "illumos")]
@ -50,6 +52,12 @@ pub struct VmHandle {
pub work_dir: PathBuf,
pub overlay_path: Option<PathBuf>,
pub seed_iso_path: Option<PathBuf>,
/// Console socket path (QEMU Unix socket for serial console).
pub console_socket: Option<PathBuf>,
/// Forwarded SSH port on localhost (user-mode networking).
pub ssh_host_port: Option<u16>,
/// MAC address of the VM's network interface.
pub mac_addr: Option<String>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@ -255,6 +263,9 @@ impl Hypervisor for NoopHypervisor {
work_dir,
overlay_path: None,
seed_iso_path: None,
console_socket: None,
ssh_host_port: None,
mac_addr: None,
})
}
async fn start(&self, vm: &VmHandle) -> Result<()> {
@ -494,6 +505,9 @@ ssh_authorized_keys:
work_dir,
overlay_path: Some(overlay),
seed_iso_path: seed_iso,
console_socket: None,
ssh_host_port: None,
mac_addr: None,
})
}
@ -694,6 +708,9 @@ impl Hypervisor for ZonesHypervisor {
work_dir,
overlay_path: Some(raw_path),
seed_iso_path: None,
console_socket: None,
ssh_host_port: None,
mac_addr: None,
})
}
async fn start(&self, _vm: &VmHandle) -> Result<()> {

View file

@ -4,6 +4,7 @@ mod http;
mod hypervisor;
mod persist;
mod scheduler;
mod vm_adapter;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;

View file

@ -177,7 +177,7 @@ impl<H: Hypervisor + 'static> Scheduler<H> {
// persist prepared VM state
let overlay = h.overlay_path.as_ref().and_then(|p| p.to_str());
let seed = h.seed_iso_path.as_ref().and_then(|p| p.to_str());
let backend = match h.backend { BackendTag::Noop => Some("noop"), #[cfg(all(target_os = "linux", feature = "libvirt"))] BackendTag::Libvirt => Some("libvirt"), #[cfg(target_os = "illumos")] BackendTag::Zones => Some("zones") };
let backend = match h.backend { BackendTag::Noop => Some("noop"), BackendTag::Qemu => Some("qemu"), BackendTag::Propolis => Some("propolis"), #[cfg(all(target_os = "linux", feature = "libvirt"))] BackendTag::Libvirt => Some("libvirt"), #[cfg(target_os = "illumos")] BackendTag::Zones => Some("zones") };
if let Err(e) = persist.record_vm_event(item.ctx.request_id, &h.id, overlay, seed, backend, VmPersistState::Prepared).await {
warn!(error = %e, request_id = %item.ctx.request_id, domain = %h.id, "persist prepare failed");
}
@ -912,6 +912,9 @@ mod tests {
work_dir: PathBuf::from("/tmp"),
overlay_path: None,
seed_iso_path: None,
console_socket: None,
ssh_host_port: None,
mac_addr: None,
})
}
async fn start(&self, _vm: &VmHandle) -> miette::Result<()> {

View file

@ -0,0 +1,221 @@
//! Adapter bridging the orchestrator's [`Hypervisor`] trait to the
//! [`vm_manager`] library's QEMU/Propolis/Noop backends.
//!
//! The orchestrator keeps its own `VmSpec`, `JobContext`, and `VmHandle` types.
//! This module translates between those and vm-manager's equivalents.
use std::path::PathBuf;
use std::time::Duration;
use async_trait::async_trait;
use miette::{IntoDiagnostic as _, Result};
use tracing::info;
use crate::hypervisor::{BackendTag, Hypervisor, JobContext, VmHandle, VmSpec, VmState};
/// Adapter wrapping [`vm_manager::RouterHypervisor`] to implement the
/// orchestrator's [`Hypervisor`] trait.
pub struct VmManagerAdapter {
inner: vm_manager::RouterHypervisor,
}
impl VmManagerAdapter {
/// Build a new adapter. Uses user-mode networking (SLIRP) for
/// containerization-friendly operation — no libvirt or bridge needed.
/// Build a new adapter.
///
/// `bridge` — optional bridge name for TAP networking (e.g. `"virbr0"`).
/// If `None`, uses user-mode (SLIRP) networking with SSH port forwarding.
pub fn build(bridge: Option<String>) -> Self {
Self {
inner: vm_manager::RouterHypervisor::new(bridge, None),
}
}
}
#[async_trait]
impl Hypervisor for VmManagerAdapter {
async fn prepare(&self, spec: &VmSpec, ctx: &JobContext) -> Result<VmHandle> {
let vm_name = format!("job-{}", ctx.request_id);
// Build cloud-init config from orchestrator's user_data
let cloud_init = if spec.nocloud {
spec.user_data
.as_ref()
.map(|ud| vm_manager::CloudInitConfig {
user_data: ud.clone(),
instance_id: Some(vm_name.clone()),
hostname: Some(vm_name.clone()),
})
} else {
None
};
// Build SSH config from JobContext
let ssh = Some(vm_manager::SshConfig {
user: ctx.ssh_user.clone().unwrap_or_else(|| "sol".to_string()),
public_key: ctx.ssh_public_key.clone(),
private_key_path: ctx.ssh_private_key_path.as_ref().map(PathBuf::from),
private_key_pem: ctx.ssh_private_key_pem.clone(),
});
let vm_spec = vm_manager::VmSpec {
name: vm_name.clone(),
image_path: spec.image_path.clone(),
vcpus: spec.cpu,
memory_mb: spec.ram_mb as u64,
disk_gb: Some(spec.disk_gb),
network: vm_manager::NetworkConfig::User,
cloud_init,
ssh,
};
let handle = vm_manager::Hypervisor::prepare(&self.inner, &vm_spec)
.await
.into_diagnostic()?;
info!(
name = %vm_name,
overlay = ?handle.overlay_path,
seed = ?handle.seed_iso_path,
"vm-manager prepared"
);
Ok(to_orch_handle(&handle))
}
async fn start(&self, vm: &VmHandle) -> Result<()> {
let inner_handle = to_vm_handle(vm);
let updated = vm_manager::Hypervisor::start(&self.inner, &inner_handle)
.await
.into_diagnostic()?;
info!(
name = %updated.name,
pid = ?updated.pid,
ssh_port = ?updated.ssh_host_port,
"vm-manager started"
);
Ok(())
}
async fn stop(&self, vm: &VmHandle, timeout: Duration) -> Result<()> {
let inner_handle = to_vm_handle(vm);
let _ = vm_manager::Hypervisor::stop(&self.inner, &inner_handle, timeout)
.await
.into_diagnostic()?;
info!(name = %vm.id, "vm-manager stopped");
Ok(())
}
async fn suspend(&self, vm: &VmHandle) -> Result<()> {
let inner_handle = to_vm_handle(vm);
let _ = vm_manager::Hypervisor::suspend(&self.inner, &inner_handle)
.await
.into_diagnostic()?;
Ok(())
}
async fn destroy(&self, vm: VmHandle) -> Result<()> {
let inner_handle = to_vm_handle(&vm);
vm_manager::Hypervisor::destroy(&self.inner, inner_handle)
.await
.into_diagnostic()?;
info!(name = %vm.id, "vm-manager destroyed");
Ok(())
}
async fn state(&self, vm: &VmHandle) -> Result<VmState> {
let inner_handle = to_vm_handle(vm);
let state = vm_manager::Hypervisor::state(&self.inner, &inner_handle)
.await
.into_diagnostic()?;
Ok(match state {
vm_manager::VmState::Preparing | vm_manager::VmState::Prepared => VmState::Prepared,
vm_manager::VmState::Running => VmState::Running,
vm_manager::VmState::Stopped
| vm_manager::VmState::Failed
| vm_manager::VmState::Destroyed => VmState::Stopped,
})
}
}
impl VmManagerAdapter {
/// Discover the guest IP address. For user-mode networking this returns
/// `127.0.0.1`. The orchestrator should use `ssh_host_port()` to get the
/// forwarded port for SSH connections.
pub async fn guest_ip(&self, vm: &VmHandle) -> Result<String> {
let inner_handle = to_vm_handle(vm);
vm_manager::Hypervisor::guest_ip(&self.inner, &inner_handle)
.await
.into_diagnostic()
}
/// Get the forwarded SSH port for user-mode networking.
pub fn ssh_host_port(&self, vm: &VmHandle) -> u16 {
let inner_handle = to_vm_handle(vm);
inner_handle.ssh_host_port.unwrap_or(22)
}
/// Get the console endpoint for this VM (Unix socket path).
pub fn console_endpoint(&self, vm: &VmHandle) -> Result<vm_manager::ConsoleEndpoint> {
let inner_handle = to_vm_handle(vm);
vm_manager::Hypervisor::console_endpoint(&self.inner, &inner_handle).into_diagnostic()
}
}
// ---------------------------------------------------------------------------
// Type conversion helpers
// ---------------------------------------------------------------------------
/// Convert vm-manager's VmHandle to orchestrator's VmHandle.
fn to_orch_handle(h: &vm_manager::VmHandle) -> VmHandle {
let backend = match h.backend {
vm_manager::BackendTag::Qemu => BackendTag::Qemu,
vm_manager::BackendTag::Propolis => BackendTag::Propolis,
vm_manager::BackendTag::Noop => BackendTag::Noop,
};
VmHandle {
id: h.name.clone(),
backend,
work_dir: h.work_dir.clone(),
overlay_path: h.overlay_path.clone(),
seed_iso_path: h.seed_iso_path.clone(),
console_socket: h.console_socket.clone(),
ssh_host_port: h.ssh_host_port,
mac_addr: h.mac_addr.clone(),
}
}
/// Convert orchestrator's VmHandle to vm-manager's VmHandle for method calls.
fn to_vm_handle(h: &VmHandle) -> vm_manager::VmHandle {
let backend = match h.backend {
BackendTag::Qemu => vm_manager::BackendTag::Qemu,
BackendTag::Propolis => vm_manager::BackendTag::Propolis,
BackendTag::Noop => vm_manager::BackendTag::Noop,
};
vm_manager::VmHandle {
id: h.id.clone(),
name: h.id.clone(),
backend,
work_dir: h.work_dir.clone(),
overlay_path: h.overlay_path.clone(),
seed_iso_path: h.seed_iso_path.clone(),
pid: None,
qmp_socket: Some(h.work_dir.join("qmp.sock")),
console_socket: h.console_socket.clone(),
vnc_addr: None,
vcpus: 0,
memory_mb: 0,
disk_gb: None,
network: vm_manager::NetworkConfig::User,
ssh_host_port: h.ssh_host_port,
mac_addr: h.mac_addr.clone(),
}
}