mirror of
https://github.com/CloudNebulaProject/reddwarf.git
synced 2026-04-10 05:10:40 +00:00
Implement first 3 phases of implementation plan
Signed-off-by: Till Wegmueller <toasterson@gmail.com>
This commit is contained in:
parent
654e59c7ff
commit
3a03400c1f
29 changed files with 5569 additions and 0 deletions
2111
Cargo.lock
generated
Normal file
2111
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
80
Cargo.toml
Normal file
80
Cargo.toml
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/reddwarf-core",
|
||||
"crates/reddwarf-storage",
|
||||
"crates/reddwarf-versioning",
|
||||
"crates/reddwarf-apiserver",
|
||||
"crates/reddwarf-scheduler",
|
||||
"crates/reddwarf",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Reddwarf Contributors"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/CloudNebulaProject/reddwarf"
|
||||
rust-version = "1.84"
|
||||
|
||||
[workspace.dependencies]
|
||||
# Core dependencies
|
||||
reddwarf-core = { path = "crates/reddwarf-core" }
|
||||
reddwarf-storage = { path = "crates/reddwarf-storage" }
|
||||
reddwarf-versioning = { path = "crates/reddwarf-versioning" }
|
||||
reddwarf-apiserver = { path = "crates/reddwarf-apiserver" }
|
||||
reddwarf-scheduler = { path = "crates/reddwarf-scheduler" }
|
||||
|
||||
# Kubernetes types
|
||||
k8s-openapi = { version = "0.23", features = ["v1_31"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_yaml = "0.9"
|
||||
|
||||
# Error handling
|
||||
thiserror = "2.0"
|
||||
miette = { version = "7.0", features = ["fancy"] }
|
||||
anyhow = "1.0"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.40", features = ["full"] }
|
||||
|
||||
# Web framework
|
||||
axum = { version = "0.8", features = ["ws", "macros"] }
|
||||
tower = { version = "0.5", features = ["full"] }
|
||||
tower-http = { version = "0.6", features = ["cors", "compression-gzip", "trace"] }
|
||||
hyper = { version = "1.0", features = ["full"] }
|
||||
|
||||
# Storage
|
||||
redb = "2.1"
|
||||
|
||||
# Versioning
|
||||
jj-lib = "0.23"
|
||||
|
||||
# Utilities
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
uuid = { version = "1.0", features = ["v4", "serde"] }
|
||||
bytes = "1.0"
|
||||
|
||||
# Logging and tracing
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
|
||||
|
||||
# CLI
|
||||
clap = { version = "4.5", features = ["derive", "env"] }
|
||||
|
||||
# TLS
|
||||
rcgen = "0.13"
|
||||
rustls = "0.23"
|
||||
rustls-pemfile = "2.0"
|
||||
|
||||
# Testing
|
||||
tempfile = "3.0"
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
strip = true
|
||||
253
DEVELOPMENT.md
Normal file
253
DEVELOPMENT.md
Normal file
|
|
@ -0,0 +1,253 @@
|
|||
# Reddwarf Development Guide
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Rust 1.84 or later
|
||||
- Docker (for multi-arch builds)
|
||||
- kubectl (for testing)
|
||||
|
||||
## Development Workflow
|
||||
|
||||
### Building
|
||||
|
||||
```bash
|
||||
# Build all crates
|
||||
cargo build --workspace
|
||||
|
||||
# Build specific crate
|
||||
cargo build -p reddwarf-core
|
||||
|
||||
# Build release
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
cargo test --workspace
|
||||
|
||||
# Run tests for specific crate
|
||||
cargo test -p reddwarf-storage
|
||||
|
||||
# Run with output
|
||||
cargo test --workspace -- --nocapture
|
||||
|
||||
# Run specific test
|
||||
cargo test test_resource_key
|
||||
```
|
||||
|
||||
### Code Quality
|
||||
|
||||
```bash
|
||||
# Run clippy (linting)
|
||||
cargo clippy --workspace -- -D warnings
|
||||
|
||||
# Format code
|
||||
cargo fmt --all
|
||||
|
||||
# Check formatting
|
||||
cargo fmt --all -- --check
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Core Crates
|
||||
|
||||
#### reddwarf-core
|
||||
Foundation types and traits:
|
||||
- `error.rs` - Error types with miette diagnostics
|
||||
- `types.rs` - ResourceKey, GroupVersionKind, ResourceVersion
|
||||
- `resources/mod.rs` - Resource trait and implementations
|
||||
- `lib.rs` - Public API and serialization helpers
|
||||
|
||||
#### reddwarf-storage
|
||||
Storage abstraction and redb backend:
|
||||
- `error.rs` - Storage error types
|
||||
- `kv.rs` - KVStore and Transaction traits
|
||||
- `encoding.rs` - Key encoding and indexing
|
||||
- `redb_backend.rs` - redb implementation
|
||||
- `lib.rs` - Public API
|
||||
|
||||
#### reddwarf-versioning
|
||||
DAG-based versioning:
|
||||
- `error.rs` - Versioning error types
|
||||
- `commit.rs` - Commit and Change types
|
||||
- `conflict.rs` - Conflict detection
|
||||
- `store.rs` - VersionStore implementation
|
||||
- `lib.rs` - Public API
|
||||
|
||||
## Design Principles
|
||||
|
||||
### Error Handling
|
||||
- Use `miette` for all user-facing errors
|
||||
- Include helpful diagnostic messages
|
||||
- Suggest fixes in error messages
|
||||
- Example:
|
||||
```rust
|
||||
#[error("Pod validation failed: {details}")]
|
||||
#[diagnostic(
|
||||
code(reddwarf::validation_failed),
|
||||
help("Ensure the pod spec has at least one container")
|
||||
)]
|
||||
ValidationFailed { details: String }
|
||||
```
|
||||
|
||||
### Type Safety
|
||||
- Leverage Rust's type system
|
||||
- Use `k8s-openapi` types where possible
|
||||
- Implement custom traits for extensions
|
||||
- Avoid stringly-typed APIs
|
||||
|
||||
### Testing
|
||||
- Unit tests in each module
|
||||
- Integration tests in `tests/` directory
|
||||
- Test edge cases and error conditions
|
||||
- Use `tempfile` for temporary test data
|
||||
|
||||
### Documentation
|
||||
- Document all public APIs
|
||||
- Include examples in doc comments
|
||||
- Keep README.md up to date
|
||||
- Document architectural decisions
|
||||
|
||||
## Coding Standards
|
||||
|
||||
### Naming Conventions
|
||||
- `snake_case` for functions and variables
|
||||
- `PascalCase` for types and traits
|
||||
- `SCREAMING_SNAKE_CASE` for constants
|
||||
- Descriptive names over abbreviations
|
||||
|
||||
### Module Organization
|
||||
- One major type per file
|
||||
- Group related functionality
|
||||
- Keep files under 500 lines
|
||||
- Use sub-modules for complex features
|
||||
|
||||
### Dependencies
|
||||
- Minimize dependencies
|
||||
- Prefer pure Rust crates
|
||||
- Pin versions for stability
|
||||
- Document dependency rationale
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### Unit Tests
|
||||
- Test each function independently
|
||||
- Mock external dependencies
|
||||
- Cover happy path and error cases
|
||||
- Keep tests fast (<100ms)
|
||||
|
||||
### Integration Tests
|
||||
- Test component interactions
|
||||
- Use real storage backend
|
||||
- Test end-to-end workflows
|
||||
- Can be slower but thorough
|
||||
|
||||
### Test Organization
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_basic_functionality() {
|
||||
// Arrange
|
||||
let input = setup_test_data();
|
||||
|
||||
// Act
|
||||
let result = function_under_test(input);
|
||||
|
||||
// Assert
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Debugging
|
||||
|
||||
### Enable Logging
|
||||
```bash
|
||||
# Debug level
|
||||
RUST_LOG=debug cargo test
|
||||
|
||||
# Trace level
|
||||
RUST_LOG=trace cargo run
|
||||
|
||||
# Specific module
|
||||
RUST_LOG=reddwarf_storage=debug cargo test
|
||||
```
|
||||
|
||||
### Using redb Inspector
|
||||
```bash
|
||||
# Install redb-inspector
|
||||
cargo install redb-inspector
|
||||
|
||||
# Inspect database
|
||||
redb-inspector /path/to/database.redb
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
### Profiling
|
||||
```bash
|
||||
# CPU profiling
|
||||
cargo flamegraph
|
||||
|
||||
# Memory profiling
|
||||
cargo valgrind
|
||||
```
|
||||
|
||||
### Benchmarking
|
||||
```bash
|
||||
# Run benchmarks
|
||||
cargo bench
|
||||
|
||||
# Run only the storage benchmark suite
|
||||
cargo bench --bench storage_bench
|
||||
```
|
||||
|
||||
## Release Process
|
||||
|
||||
### Version Bumping
|
||||
1. Update version in `Cargo.toml`
|
||||
2. Update CHANGELOG.md
|
||||
3. Create git tag
|
||||
4. Build release artifacts
|
||||
|
||||
### Cross-Compilation
|
||||
```bash
|
||||
# Add target
|
||||
rustup target add aarch64-unknown-linux-gnu
|
||||
|
||||
# Build for target
|
||||
cargo build --release --target aarch64-unknown-linux-gnu
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### Compilation Errors
|
||||
- Ensure Rust toolchain is up to date: `rustup update`
|
||||
- Clear build cache: `cargo clean`
|
||||
- Check for incompatible dependencies: `cargo tree`
|
||||
|
||||
#### Test Failures
|
||||
- Run specific test: `cargo test test_name -- --exact`
|
||||
- Show test output: `cargo test -- --nocapture`
|
||||
- Run serially: `cargo test -- --test-threads=1`
|
||||
|
||||
#### Database Issues
|
||||
- Check file permissions
|
||||
- Ensure sufficient disk space
|
||||
- Use fresh database for tests
|
||||
|
||||
## Resources
|
||||
|
||||
- [Rust Book](https://doc.rust-lang.org/book/)
|
||||
- [k8s-openapi Docs](https://docs.rs/k8s-openapi/)
|
||||
- [redb Docs](https://docs.rs/redb/)
|
||||
- [miette Docs](https://docs.rs/miette/)
|
||||
- [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md)
|
||||
105
README.md
Normal file
105
README.md
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
# Reddwarf: Rust-Based Single-Binary Kubernetes Control Plane
|
||||
|
||||
A pure Rust implementation of a Kubernetes control plane with DAG-based resource versioning.
|
||||
|
||||
## Project Status
|
||||
|
||||
**Current Phase**: Phase 3 Complete (Versioning Layer) ✅
|
||||
|
||||
### Completed Phases
|
||||
|
||||
#### Phase 1: Foundation & Core Types ✅
|
||||
- ✅ Workspace structure created
|
||||
- ✅ Core Kubernetes types and traits (Pod, Node, Service, Namespace)
|
||||
- ✅ Error handling with miette diagnostics
|
||||
- ✅ ResourceKey and GroupVersionKind types
|
||||
- ✅ JSON/YAML serialization helpers
|
||||
- ✅ 9 tests passing
|
||||
|
||||
#### Phase 2: Storage Layer with redb ✅
|
||||
- ✅ KVStore trait abstraction
|
||||
- ✅ redb backend implementation (100% pure Rust)
|
||||
- ✅ Key encoding for resources
|
||||
- ✅ Transaction support
|
||||
- ✅ Prefix scanning and indexing
|
||||
- ✅ 9 tests passing
|
||||
|
||||
#### Phase 3: Versioning Layer ✅
|
||||
- ✅ VersionStore for DAG-based versioning
|
||||
- ✅ Commit operations (create, get, list)
|
||||
- ✅ Conflict detection between concurrent modifications
|
||||
- ✅ DAG traversal for history
|
||||
- ✅ Common ancestor finding
|
||||
- ✅ 7 tests passing
|
||||
|
||||
### Total: 25 tests passing ✅
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
reddwarf/
|
||||
├── crates/
|
||||
│ ├── reddwarf-core/ # ✅ Core K8s types & traits
|
||||
│ ├── reddwarf-storage/ # ✅ redb storage backend
|
||||
│ ├── reddwarf-versioning/ # ✅ DAG-based versioning
|
||||
│ ├── reddwarf-apiserver/ # 🔄 API server (pending)
|
||||
│ ├── reddwarf-scheduler/ # 🔄 Pod scheduler (pending)
|
||||
│ └── reddwarf/ # 🔄 Main binary (pending)
|
||||
└── tests/ # 🔄 Integration tests (pending)
|
||||
```
|
||||
|
||||
## Building
|
||||
|
||||
```bash
|
||||
# Build all crates
|
||||
cargo build --workspace
|
||||
|
||||
# Run all tests
|
||||
cargo test --workspace
|
||||
|
||||
# Run clippy
|
||||
cargo clippy --workspace -- -D warnings
|
||||
|
||||
# Build release binary
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
## Next Phases
|
||||
|
||||
### Phase 4: API Server (Week 4-5)
|
||||
- Implement Axum-based REST API
|
||||
- HTTP verb handlers (GET/POST/PUT/PATCH/DELETE)
|
||||
- LIST with filtering and pagination
|
||||
- WATCH mechanism for streaming updates
|
||||
- Resource validation
|
||||
|
||||
### Phase 5: Basic Scheduler (Week 6)
|
||||
- Pod scheduling to nodes
|
||||
- Resource-based filtering
|
||||
- Simple scoring algorithm
|
||||
|
||||
### Phase 6: Main Binary Integration (Week 7)
|
||||
- Single binary combining all components
|
||||
- Configuration and CLI
|
||||
- TLS support
|
||||
- Graceful shutdown
|
||||
- Observability (logging, metrics)
|
||||
|
||||
### Phase 7: Testing & Documentation (Week 8)
|
||||
- Integration tests
|
||||
- End-to-end tests with kubectl
|
||||
- User documentation
|
||||
- API documentation
|
||||
|
||||
## Key Features
|
||||
|
||||
- **Pure Rust**: 100% Rust implementation, no C++ dependencies
|
||||
- **Portable**: Supports x86_64, ARM64, illumos
|
||||
- **redb Storage**: Fast, ACID-compliant storage with MVCC
|
||||
- **DAG Versioning**: Advanced resource versioning with conflict detection
|
||||
- **Type-Safe**: Leverages Rust's type system for correctness
|
||||
- **Rich Errors**: miette diagnostics for user-friendly error messages
|
||||
|
||||
## License
|
||||
|
||||
MIT OR Apache-2.0
|
||||
27
crates/reddwarf-apiserver/Cargo.toml
Normal file
27
crates/reddwarf-apiserver/Cargo.toml
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
[package]
|
||||
name = "reddwarf-apiserver"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
reddwarf-core = { workspace = true }
|
||||
reddwarf-storage = { workspace = true }
|
||||
reddwarf-versioning = { workspace = true }
|
||||
axum = { workspace = true }
|
||||
tower = { workspace = true }
|
||||
tower-http = { workspace = true }
|
||||
hyper = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
miette = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
1
crates/reddwarf-apiserver/src/lib.rs
Normal file
1
crates/reddwarf-apiserver/src/lib.rs
Normal file
|
|
@ -0,0 +1 @@
|
|||
// Placeholder for reddwarf-apiserver
|
||||
22
crates/reddwarf-core/Cargo.toml
Normal file
22
crates/reddwarf-core/Cargo.toml
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
[package]
|
||||
name = "reddwarf-core"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
k8s-openapi = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
miette = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
271
crates/reddwarf-core/src/error.rs
Normal file
271
crates/reddwarf-core/src/error.rs
Normal file
|
|
@ -0,0 +1,271 @@
|
|||
// Allow unused assignments for diagnostic fields - they're used by the macros
|
||||
#![allow(unused_assignments)]
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Core error type for Reddwarf operations.
///
/// Every variant carries a machine-readable diagnostic `code` and a
/// human-oriented `help` message via miette, so errors render with
/// actionable guidance. Fields are marked `#[allow(unused)]` because they
/// are consumed only through the `thiserror`/`miette` derive macros.
#[derive(Error, Debug, Diagnostic)]
pub enum ReddwarfError {
    /// Resource not found (lookup by key returned nothing).
    #[error("Resource not found: {resource_key}")]
    #[diagnostic(
        code(reddwarf::resource_not_found),
        help("Verify the resource name, namespace, and API version are correct")
    )]
    ResourceNotFound {
        // Fully-qualified key of the missing resource.
        #[allow(unused)]
        resource_key: String,
    },

    /// Resource already exists (create attempted over an existing key).
    #[error("Resource already exists: {resource_key}")]
    #[diagnostic(
        code(reddwarf::resource_already_exists),
        help("Use PUT to update existing resources, or DELETE the resource first")
    )]
    ResourceAlreadyExists {
        // Fully-qualified key of the conflicting resource.
        #[allow(unused)]
        resource_key: String,
    },

    /// Invalid resource: the object is malformed in a way described by
    /// `reason`; `suggestion` is surfaced to the user as the help text.
    #[error("Invalid resource: {reason}")]
    #[diagnostic(
        code(reddwarf::invalid_resource),
        help("{suggestion}")
    )]
    InvalidResource {
        #[allow(unused)]
        reason: String,
        // Interpolated into the diagnostic help message above.
        #[allow(unused)]
        suggestion: String,
    },

    /// Validation failed for a specific resource type; `help_text` becomes
    /// the diagnostic help message.
    #[error("Validation failed for {resource_type}: {details}")]
    #[diagnostic(
        code(reddwarf::validation_failed),
        help("{help_text}")
    )]
    ValidationFailed {
        #[allow(unused)]
        resource_type: String,
        #[allow(unused)]
        details: String,
        #[allow(unused)]
        help_text: String,
    },

    /// Conflict detected (concurrent modification of the same resource).
    #[error("Conflict detected for resource {resource_key}")]
    #[diagnostic(
        code(reddwarf::conflict),
        help("This resource was modified concurrently. Resolve the conflict or retry with the latest resourceVersion")
    )]
    Conflict {
        #[allow(unused)]
        resource_key: String,
        // Version the caller based its write on.
        #[allow(unused)]
        our_version: String,
        // Version currently stored.
        #[allow(unused)]
        their_version: String,
        // Descriptions of the individual conflicting changes.
        #[allow(unused)]
        conflicts: Vec<String>,
    },

    /// Storage error from the backing store; `source` preserves the
    /// underlying error for the `Error::source()` chain.
    #[error("Storage error: {message}")]
    #[diagnostic(
        code(reddwarf::storage_error),
        help("Check storage backend logs and ensure the data directory is accessible")
    )]
    StorageError {
        #[allow(unused)]
        message: String,
        #[source]
        #[allow(unused)]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Serialization error (JSON/YAML encode or decode failed); `source`
    /// preserves the underlying serde error.
    #[error("Serialization error: {message}")]
    #[diagnostic(
        code(reddwarf::serialization_error),
        help("Ensure the resource format is valid JSON or YAML")
    )]
    SerializationError {
        #[allow(unused)]
        message: String,
        #[source]
        #[allow(unused)]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Internal error: an invariant was broken; treated as a bug.
    #[error("Internal error: {message}")]
    #[diagnostic(
        code(reddwarf::internal_error),
        help("This is likely a bug. Please report it with the full error details")
    )]
    InternalError {
        #[allow(unused)]
        message: String,
    },

    /// Namespace not found; the help text tells the user how to create it.
    #[error("Namespace not found: {namespace}")]
    #[diagnostic(
        code(reddwarf::namespace_not_found),
        help("Create the namespace first: kubectl create namespace {namespace}")
    )]
    NamespaceNotFound {
        #[allow(unused)]
        namespace: String,
    },

    /// Invalid API version string (e.g. not of the form 'v1' or 'group/v1').
    #[error("Invalid API version: {api_version}")]
    #[diagnostic(
        code(reddwarf::invalid_api_version),
        help("Use a valid Kubernetes API version like 'v1' or 'apps/v1'")
    )]
    InvalidApiVersion {
        #[allow(unused)]
        api_version: String,
    },

    /// Invalid kind: the resource kind is not one this server supports.
    #[error("Unknown resource kind: {kind}")]
    #[diagnostic(
        code(reddwarf::invalid_kind),
        help("Supported kinds: Pod, Node, Service, Namespace, ReplicaSet, Deployment")
    )]
    InvalidKind {
        #[allow(unused)]
        kind: String,
    },
}
|
||||
|
||||
/// Result type alias for Reddwarf operations
|
||||
pub type Result<T> = std::result::Result<T, ReddwarfError>;
|
||||
|
||||
impl ReddwarfError {
|
||||
/// Create a ResourceNotFound error
|
||||
pub fn resource_not_found(resource_key: impl Into<String>) -> Self {
|
||||
Self::ResourceNotFound {
|
||||
resource_key: resource_key.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a ResourceAlreadyExists error
|
||||
pub fn resource_already_exists(resource_key: impl Into<String>) -> Self {
|
||||
Self::ResourceAlreadyExists {
|
||||
resource_key: resource_key.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InvalidResource error
|
||||
pub fn invalid_resource(reason: impl Into<String>, suggestion: impl Into<String>) -> Self {
|
||||
Self::InvalidResource {
|
||||
reason: reason.into(),
|
||||
suggestion: suggestion.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a ValidationFailed error
|
||||
pub fn validation_failed(
|
||||
resource_type: impl Into<String>,
|
||||
details: impl Into<String>,
|
||||
help_text: impl Into<String>,
|
||||
) -> Self {
|
||||
Self::ValidationFailed {
|
||||
resource_type: resource_type.into(),
|
||||
details: details.into(),
|
||||
help_text: help_text.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a Conflict error
|
||||
pub fn conflict(
|
||||
resource_key: impl Into<String>,
|
||||
our_version: impl Into<String>,
|
||||
their_version: impl Into<String>,
|
||||
conflicts: Vec<String>,
|
||||
) -> Self {
|
||||
Self::Conflict {
|
||||
resource_key: resource_key.into(),
|
||||
our_version: our_version.into(),
|
||||
their_version: their_version.into(),
|
||||
conflicts,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a StorageError
|
||||
pub fn storage_error(
|
||||
message: impl Into<String>,
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
) -> Self {
|
||||
Self::StorageError {
|
||||
message: message.into(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a SerializationError
|
||||
pub fn serialization_error(
|
||||
message: impl Into<String>,
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
) -> Self {
|
||||
Self::SerializationError {
|
||||
message: message.into(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InternalError
|
||||
pub fn internal_error(message: impl Into<String>) -> Self {
|
||||
Self::InternalError {
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a NamespaceNotFound error
|
||||
pub fn namespace_not_found(namespace: impl Into<String>) -> Self {
|
||||
Self::NamespaceNotFound {
|
||||
namespace: namespace.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InvalidApiVersion error
|
||||
pub fn invalid_api_version(api_version: impl Into<String>) -> Self {
|
||||
Self::InvalidApiVersion {
|
||||
api_version: api_version.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InvalidKind error
|
||||
pub fn invalid_kind(kind: impl Into<String>) -> Self {
|
||||
Self::InvalidKind {
|
||||
kind: kind.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// The constructor helpers must yield the matching enum variants.
    #[test]
    fn test_error_creation() {
        let not_found = ReddwarfError::resource_not_found("test/pod/default/nginx");
        assert!(matches!(not_found, ReddwarfError::ResourceNotFound { .. }));

        let failed = ReddwarfError::validation_failed(
            "Pod",
            "Missing container spec",
            "Add at least one container to the pod spec",
        );
        assert!(matches!(failed, ReddwarfError::ValidationFailed { .. }));
    }
}
|
||||
100
crates/reddwarf-core/src/lib.rs
Normal file
100
crates/reddwarf-core/src/lib.rs
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
//! Reddwarf Core - Fundamental types and traits for the Reddwarf Kubernetes control plane
|
||||
//!
|
||||
//! This crate provides:
|
||||
//! - Core Kubernetes resource abstractions
|
||||
//! - Error types with miette diagnostics
|
||||
//! - Type-safe resource keys and identifiers
|
||||
//! - Serialization helpers
|
||||
|
||||
pub mod error;
|
||||
pub mod resources;
|
||||
pub mod types;
|
||||
|
||||
// Re-export commonly used types
|
||||
pub use error::{ReddwarfError, Result};
|
||||
pub use resources::{Resource, ResourceError, is_valid_name};
|
||||
pub use types::{GroupVersionKind, ResourceKey, ResourceVersion};
|
||||
|
||||
// Re-export k8s-openapi types for convenience
|
||||
pub use k8s_openapi;
|
||||
pub use k8s_openapi::api::core::v1::{Pod, Node, Service, Namespace};
|
||||
pub use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
|
||||
/// Serialize a resource to JSON
|
||||
pub fn to_json<T: serde::Serialize>(resource: &T) -> Result<String> {
|
||||
serde_json::to_string(resource).map_err(|e| {
|
||||
ReddwarfError::serialization_error(
|
||||
format!("Failed to serialize to JSON: {}", e),
|
||||
Some(Box::new(e)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Serialize a resource to pretty JSON
|
||||
pub fn to_json_pretty<T: serde::Serialize>(resource: &T) -> Result<String> {
|
||||
serde_json::to_string_pretty(resource).map_err(|e| {
|
||||
ReddwarfError::serialization_error(
|
||||
format!("Failed to serialize to JSON: {}", e),
|
||||
Some(Box::new(e)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Deserialize a resource from JSON
|
||||
pub fn from_json<T: for<'de> serde::Deserialize<'de>>(data: &str) -> Result<T> {
|
||||
serde_json::from_str(data).map_err(|e| {
|
||||
ReddwarfError::serialization_error(
|
||||
format!("Failed to deserialize from JSON: {}", e),
|
||||
Some(Box::new(e)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Serialize a resource to YAML
|
||||
pub fn to_yaml<T: serde::Serialize>(resource: &T) -> Result<String> {
|
||||
serde_yaml::to_string(resource).map_err(|e| {
|
||||
ReddwarfError::serialization_error(
|
||||
format!("Failed to serialize to YAML: {}", e),
|
||||
Some(Box::new(e)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Deserialize a resource from YAML
|
||||
pub fn from_yaml<T: for<'de> serde::Deserialize<'de>>(data: &str) -> Result<T> {
|
||||
serde_yaml::from_str(data).map_err(|e| {
|
||||
ReddwarfError::serialization_error(
|
||||
format!("Failed to deserialize from YAML: {}", e),
|
||||
Some(Box::new(e)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip a Pod through JSON and check the name survives.
    #[test]
    fn test_json_serialization() {
        let mut pod = Pod::default();
        pod.metadata.name = Some("nginx".to_string());

        let encoded = to_json(&pod).unwrap();
        assert!(encoded.contains("nginx"));

        let decoded: Pod = from_json(&encoded).unwrap();
        assert_eq!(decoded.metadata.name, Some("nginx".to_string()));
    }

    /// Round-trip a Pod through YAML and check the name survives.
    #[test]
    fn test_yaml_serialization() {
        let mut pod = Pod::default();
        pod.metadata.name = Some("nginx".to_string());

        let encoded = to_yaml(&pod).unwrap();
        assert!(encoded.contains("nginx"));

        let decoded: Pod = from_yaml(&encoded).unwrap();
        assert_eq!(decoded.metadata.name, Some("nginx".to_string()));
    }
}
|
||||
248
crates/reddwarf-core/src/resources/mod.rs
Normal file
248
crates/reddwarf-core/src/resources/mod.rs
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
use crate::{GroupVersionKind, ResourceKey, ResourceVersion};
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Base validation for all resources
|
||||
pub fn validate_base(metadata: &ObjectMeta) -> Result<(), ResourceError> {
|
||||
if metadata.name.is_none() {
|
||||
return Err(ResourceError::MissingField("metadata.name".to_string()));
|
||||
}
|
||||
|
||||
if let Some(name) = &metadata.name {
|
||||
if !is_valid_name(name) {
|
||||
return Err(ResourceError::InvalidName(name.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Trait for Kubernetes resources.
///
/// Implemented by any type the control plane stores and serves. Provides
/// typed access to apiVersion/kind/metadata plus derived helpers (GVK,
/// storage key, resource version, UID). Default method implementations
/// are built entirely from the four required accessors.
pub trait Resource: Serialize + for<'de> Deserialize<'de> + Send + Sync {
    /// Get the API version of this resource (e.g. "v1" or "apps/v1").
    fn api_version(&self) -> String;

    /// Get the kind of this resource (e.g. "Pod").
    fn kind(&self) -> String;

    /// Get the metadata of this resource.
    fn metadata(&self) -> &ObjectMeta;

    /// Get mutable metadata.
    fn metadata_mut(&mut self) -> &mut ObjectMeta;

    /// Get the GroupVersionKind, derived from `api_version()` and `kind()`.
    fn gvk(&self) -> GroupVersionKind {
        GroupVersionKind::from_api_version_kind(&self.api_version(), &self.kind())
    }

    /// Get the ResourceKey (GVK + namespace + name) identifying this
    /// resource in storage.
    ///
    /// Returns `ResourceError::MissingField` when `metadata.name` is
    /// unset; a missing namespace falls back to the empty string.
    fn resource_key(&self) -> Result<ResourceKey, ResourceError> {
        let metadata = self.metadata();
        let name = metadata.name.as_ref()
            .ok_or_else(|| ResourceError::MissingField("metadata.name".to_string()))?;
        let namespace = metadata.namespace.clone().unwrap_or_default();

        Ok(ResourceKey::new(self.gvk(), namespace, name))
    }

    /// Get the resource version, if `metadata.resourceVersion` is set.
    fn resource_version(&self) -> Option<ResourceVersion> {
        self.metadata()
            .resource_version
            .as_ref()
            .map(ResourceVersion::new)
    }

    /// Set the resource version (writes `metadata.resourceVersion`).
    fn set_resource_version(&mut self, version: ResourceVersion) {
        // ResourceVersion is a newtype; field 0 is its inner String.
        self.metadata_mut().resource_version = Some(version.0)
    }

    /// Get the UID from metadata, if set.
    fn uid(&self) -> Option<String> {
        self.metadata().uid.clone()
    }

    /// Set the UID in metadata.
    fn set_uid(&mut self, uid: String) {
        self.metadata_mut().uid = Some(uid);
    }

    /// Check if this is a namespaced resource.
    ///
    /// Heuristic default: true when `metadata.namespace` is set.
    /// Cluster-scoped types should override this to return `false`.
    fn is_namespaced(&self) -> bool {
        self.metadata().namespace.is_some()
    }

    /// Validate the resource. The default runs only the shared base
    /// validation; implementations may add type-specific checks.
    fn validate(&self) -> Result<(), ResourceError> {
        validate_base(self.metadata())
    }
}
|
||||
|
||||
/// Resource-related errors raised during validation and key derivation.
#[derive(Debug, thiserror::Error)]
pub enum ResourceError {
    /// A required field (e.g. "metadata.name") was absent.
    #[error("Missing required field: {0}")]
    MissingField(String),

    /// The resource name is not a valid DNS-1123 name.
    #[error("Invalid resource name: {0}")]
    InvalidName(String),

    /// The namespace value is not acceptable.
    #[error("Invalid namespace: {0}")]
    InvalidNamespace(String),

    /// A type-specific validation rule failed; the payload describes it.
    #[error("Validation failed: {0}")]
    ValidationFailed(String),
}
|
||||
|
||||
/// Validate a Kubernetes resource name (DNS-1123 subdomain).
///
/// A valid name is at most 253 characters and consists of one or more
/// labels separated by '.', where each label is non-empty, at most 63
/// characters, contains only lowercase ASCII alphanumerics or '-', and
/// starts and ends with an alphanumeric.
///
/// The previous implementation only checked the first and last character
/// of the whole name, so it wrongly accepted names like "a..b" and
/// "a.-b"; validating each dot-separated label fixes that.
pub fn is_valid_name(name: &str) -> bool {
    if name.is_empty() || name.len() > 253 {
        return false;
    }

    // Every label between dots must independently satisfy DNS-1123.
    name.split('.').all(is_valid_label)
}

/// Validate a single DNS-1123 label (no dots).
fn is_valid_label(label: &str) -> bool {
    let bytes = label.as_bytes();
    if bytes.is_empty() || bytes.len() > 63 {
        return false;
    }

    let alnum = |b: u8| b.is_ascii_lowercase() || b.is_ascii_digit();

    alnum(bytes[0])
        && alnum(bytes[bytes.len() - 1])
        && bytes.iter().all(|&b| alnum(b) || b == b'-')
}
|
||||
|
||||
// Implement Resource trait for common k8s-openapi types
|
||||
use k8s_openapi::api::core::v1::{Pod, Node, Service, Namespace};
|
||||
|
||||
impl Resource for Pod {
    /// Pods live in the core API group, version "v1".
    fn api_version(&self) -> String {
        "v1".to_string()
    }

    fn kind(&self) -> String {
        "Pod".to_string()
    }

    fn metadata(&self) -> &ObjectMeta {
        &self.metadata
    }

    fn metadata_mut(&mut self) -> &mut ObjectMeta {
        &mut self.metadata
    }

    /// Pod-specific validation on top of the shared base checks:
    /// the spec must be present and declare at least one container.
    fn validate(&self) -> Result<(), ResourceError> {
        // Call base validation (name present and DNS-1123 valid).
        validate_base(&self.metadata)?;

        // Pod-specific validation
        if let Some(spec) = &self.spec {
            if spec.containers.is_empty() {
                return Err(ResourceError::ValidationFailed(
                    "Pod must have at least one container".to_string()
                ));
            }
        } else {
            return Err(ResourceError::MissingField("spec".to_string()));
        }

        Ok(())
    }
}
|
||||
|
||||
impl Resource for Node {
|
||||
fn api_version(&self) -> String {
|
||||
"v1".to_string()
|
||||
}
|
||||
|
||||
fn kind(&self) -> String {
|
||||
"Node".to_string()
|
||||
}
|
||||
|
||||
fn metadata(&self) -> &ObjectMeta {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut ObjectMeta {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
impl Resource for Service {
|
||||
fn api_version(&self) -> String {
|
||||
"v1".to_string()
|
||||
}
|
||||
|
||||
fn kind(&self) -> String {
|
||||
"Service".to_string()
|
||||
}
|
||||
|
||||
fn metadata(&self) -> &ObjectMeta {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut ObjectMeta {
|
||||
&mut self.metadata
|
||||
}
|
||||
}
|
||||
|
||||
impl Resource for Namespace {
|
||||
fn api_version(&self) -> String {
|
||||
"v1".to_string()
|
||||
}
|
||||
|
||||
fn kind(&self) -> String {
|
||||
"Namespace".to_string()
|
||||
}
|
||||
|
||||
fn metadata(&self) -> &ObjectMeta {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
fn metadata_mut(&mut self) -> &mut ObjectMeta {
|
||||
&mut self.metadata
|
||||
}
|
||||
|
||||
fn is_namespaced(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_is_valid_name() {
        // Lowercase alphanumerics, '-', and '.' are accepted.
        for good in ["nginx", "my-app", "my-app-123", "my.app"] {
            assert!(is_valid_name(good));
        }

        // Empty, uppercase, leading/trailing dash, and underscore are rejected.
        for bad in ["", "MyApp", "-myapp", "myapp-", "my_app"] {
            assert!(!is_valid_name(bad));
        }
    }

    #[test]
    fn test_pod_resource_key() {
        let mut pod = Pod::default();
        pod.metadata.name = Some(String::from("nginx"));
        pod.metadata.namespace = Some(String::from("default"));

        let key = pod.resource_key().unwrap();
        assert_eq!(key.name, "nginx");
        assert_eq!(key.namespace, "default");
        assert_eq!(key.gvk.kind, "Pod");
    }
}
|
||||
242
crates/reddwarf-core/src/types.rs
Normal file
242
crates/reddwarf-core/src/types.rs
Normal file
|
|
@ -0,0 +1,242 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
|
||||
/// GroupVersionKind uniquely identifies a Kubernetes resource type
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct GroupVersionKind {
|
||||
/// API group (e.g., "", "apps", "batch")
|
||||
pub group: String,
|
||||
/// API version (e.g., "v1", "v1beta1")
|
||||
pub version: String,
|
||||
/// Resource kind (e.g., "Pod", "Deployment")
|
||||
pub kind: String,
|
||||
}
|
||||
|
||||
impl GroupVersionKind {
|
||||
/// Create a new GVK
|
||||
pub fn new(group: impl Into<String>, version: impl Into<String>, kind: impl Into<String>) -> Self {
|
||||
Self {
|
||||
group: group.into(),
|
||||
version: version.into(),
|
||||
kind: kind.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a GVK from apiVersion and kind
|
||||
/// apiVersion format: "v1" or "group/version"
|
||||
pub fn from_api_version_kind(api_version: &str, kind: &str) -> Self {
|
||||
let (group, version) = if let Some(idx) = api_version.find('/') {
|
||||
let (g, v) = api_version.split_at(idx);
|
||||
(g.to_string(), v[1..].to_string())
|
||||
} else {
|
||||
(String::new(), api_version.to_string())
|
||||
};
|
||||
|
||||
Self {
|
||||
group,
|
||||
version,
|
||||
kind: kind.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the apiVersion string (group/version or just version)
|
||||
pub fn api_version(&self) -> String {
|
||||
if self.group.is_empty() {
|
||||
self.version.clone()
|
||||
} else {
|
||||
format!("{}/{}", self.group, self.version)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the full API path segment
|
||||
pub fn api_path(&self) -> String {
|
||||
if self.group.is_empty() {
|
||||
format!("api/{}", self.version)
|
||||
} else {
|
||||
format!("apis/{}/{}", self.group, self.version)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the resource name (lowercase, plural)
|
||||
pub fn resource_name(&self) -> String {
|
||||
// Simple pluralization - should be enhanced for production
|
||||
let lower = self.kind.to_lowercase();
|
||||
if lower.ends_with('s') {
|
||||
format!("{}es", lower)
|
||||
} else if lower.ends_with('y') {
|
||||
format!("{}ies", &lower[..lower.len() - 1])
|
||||
} else {
|
||||
format!("{}s", lower)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for GroupVersionKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}/{}", self.api_version(), self.kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// ResourceKey uniquely identifies a specific resource instance
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct ResourceKey {
|
||||
/// GroupVersionKind of the resource
|
||||
pub gvk: GroupVersionKind,
|
||||
/// Namespace (empty for cluster-scoped resources)
|
||||
pub namespace: String,
|
||||
/// Resource name
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
impl ResourceKey {
|
||||
/// Create a new ResourceKey
|
||||
pub fn new(gvk: GroupVersionKind, namespace: impl Into<String>, name: impl Into<String>) -> Self {
|
||||
Self {
|
||||
gvk,
|
||||
namespace: namespace.into(),
|
||||
name: name.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a cluster-scoped ResourceKey
|
||||
pub fn cluster_scoped(gvk: GroupVersionKind, name: impl Into<String>) -> Self {
|
||||
Self {
|
||||
gvk,
|
||||
namespace: String::new(),
|
||||
name: name.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this is a namespaced resource
|
||||
pub fn is_namespaced(&self) -> bool {
|
||||
!self.namespace.is_empty()
|
||||
}
|
||||
|
||||
/// Get the storage key encoding: {api_version}/{kind}/{namespace}/{name}
|
||||
/// For cluster-scoped: {api_version}/{kind}/{name}
|
||||
pub fn storage_key(&self) -> String {
|
||||
let api_version = self.gvk.api_version();
|
||||
if self.is_namespaced() {
|
||||
format!("{}/{}/{}/{}", api_version, self.gvk.kind, self.namespace, self.name)
|
||||
} else {
|
||||
format!("{}/{}/{}", api_version, self.gvk.kind, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the API path for this resource
|
||||
pub fn api_path(&self) -> String {
|
||||
let base = self.gvk.api_path();
|
||||
let resource = self.gvk.resource_name();
|
||||
|
||||
if self.is_namespaced() {
|
||||
format!("/{}/namespaces/{}/{}/{}", base, self.namespace, resource, self.name)
|
||||
} else {
|
||||
format!("/{}/{}/{}", base, resource, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the API path for the collection (without name)
|
||||
pub fn collection_path(&self) -> String {
|
||||
let base = self.gvk.api_path();
|
||||
let resource = self.gvk.resource_name();
|
||||
|
||||
if self.is_namespaced() {
|
||||
format!("/{}/namespaces/{}/{}", base, self.namespace, resource)
|
||||
} else {
|
||||
format!("/{}/{}", base, resource)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ResourceKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.is_namespaced() {
|
||||
write!(f, "{}/{}/{}", self.gvk, self.namespace, self.name)
|
||||
} else {
|
||||
write!(f, "{}/{}", self.gvk, self.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource version - maps to jj commit ID
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ResourceVersion(pub String);
|
||||
|
||||
impl ResourceVersion {
|
||||
pub fn new(version: impl Into<String>) -> Self {
|
||||
Self(version.into())
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ResourceVersion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for ResourceVersion {
|
||||
fn from(s: String) -> Self {
|
||||
Self(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for ResourceVersion {
|
||||
fn from(s: &str) -> Self {
|
||||
Self(s.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gvk_from_api_version() {
        // Core group: no '/' in apiVersion, group is empty.
        let core = GroupVersionKind::from_api_version_kind("v1", "Pod");
        assert_eq!(core.group, "");
        assert_eq!(core.version, "v1");
        assert_eq!(core.kind, "Pod");
        assert_eq!(core.api_version(), "v1");

        // Named group: apiVersion carries "group/version".
        let grouped = GroupVersionKind::from_api_version_kind("apps/v1", "Deployment");
        assert_eq!(grouped.group, "apps");
        assert_eq!(grouped.version, "v1");
        assert_eq!(grouped.kind, "Deployment");
        assert_eq!(grouped.api_version(), "apps/v1");
    }

    #[test]
    fn test_gvk_resource_name() {
        let pod = GroupVersionKind::from_api_version_kind("v1", "Pod");
        let deploy = GroupVersionKind::from_api_version_kind("apps/v1", "Deployment");
        assert_eq!(pod.resource_name(), "pods");
        assert_eq!(deploy.resource_name(), "deployments");
    }

    #[test]
    fn test_resource_key_storage_key() {
        let namespaced = ResourceKey::new(
            GroupVersionKind::from_api_version_kind("v1", "Pod"),
            "default",
            "nginx",
        );
        assert_eq!(namespaced.storage_key(), "v1/Pod/default/nginx");

        let cluster = ResourceKey::cluster_scoped(
            GroupVersionKind::from_api_version_kind("v1", "Node"),
            "node-1",
        );
        assert_eq!(cluster.storage_key(), "v1/Node/node-1");
    }

    #[test]
    fn test_resource_key_api_path() {
        let namespaced = ResourceKey::new(
            GroupVersionKind::from_api_version_kind("v1", "Pod"),
            "default",
            "nginx",
        );
        assert_eq!(namespaced.api_path(), "/api/v1/namespaces/default/pods/nginx");

        let cluster = ResourceKey::cluster_scoped(
            GroupVersionKind::from_api_version_kind("v1", "Node"),
            "node-1",
        );
        assert_eq!(cluster.api_path(), "/api/v1/nodes/node-1");
    }
}
|
||||
17
crates/reddwarf-scheduler/Cargo.toml
Normal file
17
crates/reddwarf-scheduler/Cargo.toml
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
# Crate manifest for reddwarf-scheduler (scheduling component of Reddwarf).
[package]
name = "reddwarf-scheduler"
# Shared package metadata is inherited from [workspace.package] in the root Cargo.toml.
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
# Versions/features are pinned centrally in [workspace.dependencies].
reddwarf-core = { workspace = true }
tokio = { workspace = true }
miette = { workspace = true }
tracing = { workspace = true }

[dev-dependencies]
tempfile = { workspace = true }
|
||||
1
crates/reddwarf-scheduler/src/lib.rs
Normal file
1
crates/reddwarf-scheduler/src/lib.rs
Normal file
|
|
@ -0,0 +1 @@
|
|||
// Placeholder for reddwarf-scheduler
|
||||
22
crates/reddwarf-storage/Cargo.toml
Normal file
22
crates/reddwarf-storage/Cargo.toml
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
# Crate manifest for reddwarf-storage (storage abstraction + redb backend).
[package]
name = "reddwarf-storage"
# Shared package metadata is inherited from [workspace.package] in the root Cargo.toml.
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
# Versions/features are pinned centrally in [workspace.dependencies].
reddwarf-core = { workspace = true }
redb = { workspace = true }
miette = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[dev-dependencies]
tempfile = { workspace = true }
tokio = { workspace = true }
|
||||
223
crates/reddwarf-storage/src/encoding.rs
Normal file
223
crates/reddwarf-storage/src/encoding.rs
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
use reddwarf_core::ResourceKey;
|
||||
use std::fmt;
|
||||
|
||||
/// Key encoder for storage keys
|
||||
pub struct KeyEncoder;
|
||||
|
||||
impl KeyEncoder {
|
||||
/// Encode a resource key: {api_version}/{kind}/{namespace}/{name}
|
||||
/// For cluster-scoped: {api_version}/{kind}/{name}
|
||||
pub fn encode_resource_key(key: &ResourceKey) -> String {
|
||||
key.storage_key()
|
||||
}
|
||||
|
||||
/// Encode a prefix for scanning resources of a kind in a namespace
|
||||
pub fn encode_prefix(api_version: &str, kind: &str, namespace: Option<&str>) -> String {
|
||||
if let Some(ns) = namespace {
|
||||
format!("{}/{}/{}/", api_version, kind, ns)
|
||||
} else {
|
||||
format!("{}/{}/", api_version, kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode a namespace prefix for scanning all resources in a namespace
|
||||
pub fn encode_namespace_prefix(namespace: &str) -> String {
|
||||
// This will match any resource with this namespace
|
||||
// We'll need to filter by namespace during scan
|
||||
namespace.to_string()
|
||||
}
|
||||
|
||||
/// Parse a storage key back to components
|
||||
pub fn parse_key(key: &str) -> Option<(String, String, Option<String>, String)> {
|
||||
let parts: Vec<&str> = key.split('/').collect();
|
||||
|
||||
match parts.len() {
|
||||
3 => {
|
||||
// Cluster-scoped: api_version/kind/name
|
||||
Some((
|
||||
parts[0].to_string(),
|
||||
parts[1].to_string(),
|
||||
None,
|
||||
parts[2].to_string(),
|
||||
))
|
||||
}
|
||||
4 => {
|
||||
// Namespaced: api_version/kind/namespace/name
|
||||
Some((
|
||||
parts[0].to_string(),
|
||||
parts[1].to_string(),
|
||||
Some(parts[2].to_string()),
|
||||
parts[3].to_string(),
|
||||
))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Secondary-index key variants, each encoded to a '/'-joined string.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IndexKey {
    /// By namespace: `namespace/{namespace}/{api_version}/{kind}/{name}`.
    Namespace {
        namespace: String,
        api_version: String,
        kind: String,
        name: String,
    },
    /// By label: `label/{key}/{value}/{api_version}/{kind}/{namespace}/{name}`.
    Label {
        key: String,
        value: String,
        api_version: String,
        kind: String,
        namespace: Option<String>,
        name: String,
    },
    /// By field: `field/{field_path}/{value}/{api_version}/{kind}/{namespace}/{name}`.
    Field {
        field_path: String,
        value: String,
        api_version: String,
        kind: String,
        namespace: Option<String>,
        name: String,
    },
}
|
||||
|
||||
impl IndexKey {
|
||||
/// Encode the index key to a string
|
||||
pub fn encode(&self) -> String {
|
||||
match self {
|
||||
IndexKey::Namespace {
|
||||
namespace,
|
||||
api_version,
|
||||
kind,
|
||||
name,
|
||||
} => format!("namespace/{}/{}/{}/{}", namespace, api_version, kind, name),
|
||||
IndexKey::Label {
|
||||
key,
|
||||
value,
|
||||
api_version,
|
||||
kind,
|
||||
namespace,
|
||||
name,
|
||||
} => {
|
||||
if let Some(ns) = namespace {
|
||||
format!("label/{}/{}/{}/{}/{}/{}", key, value, api_version, kind, ns, name)
|
||||
} else {
|
||||
format!("label/{}/{}/{}/{}/{}", key, value, api_version, kind, name)
|
||||
}
|
||||
}
|
||||
IndexKey::Field {
|
||||
field_path,
|
||||
value,
|
||||
api_version,
|
||||
kind,
|
||||
namespace,
|
||||
name,
|
||||
} => {
|
||||
if let Some(ns) = namespace {
|
||||
format!("field/{}/{}/{}/{}/{}/{}", field_path, value, api_version, kind, ns, name)
|
||||
} else {
|
||||
format!("field/{}/{}/{}/{}/{}", field_path, value, api_version, kind, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode a prefix for scanning
|
||||
pub fn encode_prefix_for_namespace(namespace: &str) -> String {
|
||||
format!("namespace/{}/", namespace)
|
||||
}
|
||||
|
||||
/// Encode a prefix for scanning by label
|
||||
pub fn encode_prefix_for_label(key: &str, value: Option<&str>) -> String {
|
||||
if let Some(v) = value {
|
||||
format!("label/{}/{}/", key, v)
|
||||
} else {
|
||||
format!("label/{}/", key)
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode a prefix for scanning by field
|
||||
pub fn encode_prefix_for_field(field_path: &str, value: Option<&str>) -> String {
|
||||
if let Some(v) = value {
|
||||
format!("field/{}/{}/", field_path, v)
|
||||
} else {
|
||||
format!("field/{}/", field_path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for IndexKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.encode())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use reddwarf_core::GroupVersionKind;

    #[test]
    fn test_encode_resource_key() {
        let namespaced = ResourceKey::new(
            GroupVersionKind::from_api_version_kind("v1", "Pod"),
            "default",
            "nginx",
        );
        assert_eq!(KeyEncoder::encode_resource_key(&namespaced), "v1/Pod/default/nginx");

        let cluster = ResourceKey::cluster_scoped(
            GroupVersionKind::from_api_version_kind("v1", "Node"),
            "node-1",
        );
        assert_eq!(KeyEncoder::encode_resource_key(&cluster), "v1/Node/node-1");
    }

    #[test]
    fn test_encode_prefix() {
        assert_eq!(
            KeyEncoder::encode_prefix("v1", "Pod", Some("default")),
            "v1/Pod/default/"
        );
        assert_eq!(KeyEncoder::encode_prefix("v1", "Node", None), "v1/Node/");
    }

    #[test]
    fn test_parse_key() {
        // Namespaced key round-trips into four components.
        let parsed = KeyEncoder::parse_key("v1/Pod/default/nginx").unwrap();
        assert_eq!(parsed.0, "v1");
        assert_eq!(parsed.1, "Pod");
        assert_eq!(parsed.2, Some("default".to_string()));
        assert_eq!(parsed.3, "nginx");

        // Cluster-scoped key has no namespace component.
        let parsed = KeyEncoder::parse_key("v1/Node/node-1").unwrap();
        assert_eq!(parsed.0, "v1");
        assert_eq!(parsed.1, "Node");
        assert_eq!(parsed.2, None);
        assert_eq!(parsed.3, "node-1");
    }

    #[test]
    fn test_index_key_namespace() {
        let idx = IndexKey::Namespace {
            namespace: "default".to_string(),
            api_version: "v1".to_string(),
            kind: "Pod".to_string(),
            name: "nginx".to_string(),
        };
        assert_eq!(idx.encode(), "namespace/default/v1/Pod/nginx");
    }

    #[test]
    fn test_index_key_label() {
        let idx = IndexKey::Label {
            key: "app".to_string(),
            value: "nginx".to_string(),
            api_version: "v1".to_string(),
            kind: "Pod".to_string(),
            namespace: Some("default".to_string()),
            name: "nginx-pod".to_string(),
        };
        assert_eq!(idx.encode(), "label/app/nginx/v1/Pod/default/nginx-pod");
    }
}
|
||||
162
crates/reddwarf-storage/src/error.rs
Normal file
162
crates/reddwarf-storage/src/error.rs
Normal file
|
|
@ -0,0 +1,162 @@
|
|||
// Allow unused assignments for diagnostic fields - they're used by the macros
|
||||
#![allow(unused_assignments)]
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Storage error type for all operations in this crate.
///
/// Each variant carries a miette diagnostic code and help text so errors
/// render with actionable guidance.
#[derive(Error, Debug, Diagnostic)]
pub enum StorageError {
    /// Key not found
    #[error("Key not found: {key}")]
    #[diagnostic(
        code(storage::key_not_found),
        help("Verify the key exists in the database")
    )]
    KeyNotFound {
        /// The key that was requested.
        key: String,
    },

    /// Database error
    #[error("Database error: {message}")]
    #[diagnostic(
        code(storage::database_error),
        help("Check database logs and ensure the data directory is accessible and not corrupted")
    )]
    DatabaseError {
        /// Human-readable description of what failed.
        message: String,
        /// Underlying backend error, when one is available.
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Transaction error
    #[error("Transaction error: {message}")]
    #[diagnostic(
        code(storage::transaction_error),
        help("Ensure the transaction is not already committed or aborted")
    )]
    TransactionError {
        /// Human-readable description of what failed.
        message: String,
    },

    /// Serialization error
    #[error("Serialization error: {message}")]
    #[diagnostic(
        code(storage::serialization_error),
        help("Ensure the data is valid and can be serialized")
    )]
    SerializationError {
        /// Human-readable description of what failed.
        message: String,
        /// Underlying (de)serialization error, when one is available.
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// I/O error
    #[error("I/O error: {message}")]
    #[diagnostic(
        code(storage::io_error),
        help("Check filesystem permissions and available disk space")
    )]
    IoError {
        /// Human-readable description of what failed.
        message: String,
        /// Underlying I/O error, when one is available.
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },
}
|
||||
|
||||
/// Result type for storage operations, defaulting the error to [`StorageError`].
pub type Result<T> = std::result::Result<T, StorageError>;
|
||||
|
||||
impl StorageError {
|
||||
/// Create a KeyNotFound error
|
||||
pub fn key_not_found(key: impl Into<String>) -> Self {
|
||||
Self::KeyNotFound { key: key.into() }
|
||||
}
|
||||
|
||||
/// Create a DatabaseError
|
||||
pub fn database_error(
|
||||
message: impl Into<String>,
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
) -> Self {
|
||||
Self::DatabaseError {
|
||||
message: message.into(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a TransactionError
|
||||
pub fn transaction_error(message: impl Into<String>) -> Self {
|
||||
Self::TransactionError {
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a SerializationError
|
||||
pub fn serialization_error(
|
||||
message: impl Into<String>,
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
) -> Self {
|
||||
Self::SerializationError {
|
||||
message: message.into(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an IoError
|
||||
pub fn io_error(
|
||||
message: impl Into<String>,
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
) -> Self {
|
||||
Self::IoError {
|
||||
message: message.into(),
|
||||
source,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Conversions from backend error types, so `?` works throughout this crate.
// redb errors map onto DatabaseError/TransactionError; serde_json and std::io
// map onto SerializationError/IoError respectively.
impl From<redb::Error> for StorageError {
    fn from(err: redb::Error) -> Self {
        match err {
            // Give missing-table errors a more specific message than the
            // generic fallback below.
            redb::Error::TableDoesNotExist(_) => {
                StorageError::database_error("Table does not exist", Some(Box::new(err)))
            }
            _ => StorageError::database_error(format!("redb error: {}", err), Some(Box::new(err))),
        }
    }
}

impl From<redb::TransactionError> for StorageError {
    fn from(err: redb::TransactionError) -> Self {
        // redb::TransactionError is not boxed as a source here, only formatted.
        StorageError::transaction_error(format!("Transaction error: {}", err))
    }
}

impl From<redb::StorageError> for StorageError {
    fn from(err: redb::StorageError) -> Self {
        StorageError::database_error(format!("Storage error: {}", err), Some(Box::new(err)))
    }
}

impl From<redb::TableError> for StorageError {
    fn from(err: redb::TableError) -> Self {
        StorageError::database_error(format!("Table error: {}", err), Some(Box::new(err)))
    }
}

impl From<redb::CommitError> for StorageError {
    fn from(err: redb::CommitError) -> Self {
        StorageError::transaction_error(format!("Commit error: {}", err))
    }
}

impl From<serde_json::Error> for StorageError {
    fn from(err: serde_json::Error) -> Self {
        StorageError::serialization_error(format!("JSON error: {}", err), Some(Box::new(err)))
    }
}

impl From<std::io::Error> for StorageError {
    fn from(err: std::io::Error) -> Self {
        StorageError::io_error(format!("I/O error: {}", err), Some(Box::new(err)))
    }
}
|
||||
50
crates/reddwarf-storage/src/kv.rs
Normal file
50
crates/reddwarf-storage/src/kv.rs
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
use crate::Result;
|
||||
use bytes::Bytes;
|
||||
|
||||
/// Key-value store abstraction over the storage backend.
///
/// Implementations must be usable from multiple threads (`Send + Sync`).
/// Keys and values are raw byte slices; higher-level encoding lives in
/// `encoding.rs`.
pub trait KVStore: Send + Sync {
    /// Get a value by key; returns `Ok(None)` when the key is absent.
    fn get(&self, key: &[u8]) -> Result<Option<Bytes>>;

    /// Put a key-value pair, overwriting any existing value.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<()>;

    /// Delete a key.
    fn delete(&self, key: &[u8]) -> Result<()>;

    /// Scan all (key, value) pairs whose key starts with `prefix`.
    fn scan(&self, prefix: &[u8]) -> Result<Vec<(Bytes, Bytes)>>;

    /// Scan pairs whose key starts with `prefix`, returning at most `limit`.
    fn scan_with_limit(&self, prefix: &[u8], limit: usize) -> Result<Vec<(Bytes, Bytes)>>;

    /// Check if a key exists without copying its value out.
    fn exists(&self, key: &[u8]) -> Result<bool>;

    /// Begin a write transaction; see [`Transaction`] for its lifecycle.
    fn transaction(&self) -> Result<Box<dyn Transaction>>;

    /// Get all keys (use with caution on large databases).
    fn keys(&self) -> Result<Vec<Bytes>>;

    /// Get all keys with a given prefix.
    fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<Bytes>>;
}
|
||||
|
||||
/// Write transaction for atomic multi-key operations.
///
/// A transaction must end in exactly one `commit` or `rollback` call; both
/// consume the boxed transaction so it cannot be reused afterwards.
pub trait Transaction: Send {
    /// Get a value by key as seen from within this transaction.
    fn get(&self, key: &[u8]) -> Result<Option<Bytes>>;

    /// Stage a key-value write; visible to other readers only after commit.
    fn put(&mut self, key: &[u8], value: &[u8]) -> Result<()>;

    /// Stage a key deletion; visible to other readers only after commit.
    fn delete(&mut self, key: &[u8]) -> Result<()>;

    /// Commit all staged changes atomically, consuming the transaction.
    fn commit(self: Box<Self>) -> Result<()>;

    /// Discard all staged changes, consuming the transaction.
    fn rollback(self: Box<Self>) -> Result<()>;
}
|
||||
18
crates/reddwarf-storage/src/lib.rs
Normal file
18
crates/reddwarf-storage/src/lib.rs
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
//! Reddwarf Storage - Storage abstraction and redb backend
|
||||
//!
|
||||
//! This crate provides:
|
||||
//! - KVStore trait for storage abstraction
|
||||
//! - redb-based implementation
|
||||
//! - Key encoding and indexing
|
||||
//! - Transaction support
|
||||
|
||||
pub mod error;
|
||||
pub mod kv;
|
||||
pub mod redb_backend;
|
||||
pub mod encoding;
|
||||
|
||||
// Re-export commonly used types
|
||||
pub use error::{StorageError, Result};
|
||||
pub use kv::{KVStore, Transaction};
|
||||
pub use redb_backend::RedbBackend;
|
||||
pub use encoding::{KeyEncoder, IndexKey};
|
||||
365
crates/reddwarf-storage/src/redb_backend.rs
Normal file
365
crates/reddwarf-storage/src/redb_backend.rs
Normal file
|
|
@ -0,0 +1,365 @@
|
|||
use crate::{KVStore, Result, StorageError, Transaction as KVTransaction};
|
||||
use bytes::Bytes;
|
||||
use redb::{Database, ReadableTable, TableDefinition};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, info};
|
||||
|
||||
// Table definitions
// resources: primary store mapping encoded resource keys -> serialized resources.
const RESOURCES_TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new("resources");
// jj_metadata: presumably bookkeeping for the jj versioning layer — unused in
// this file so far; confirm against the versioning crate.
const JJ_METADATA_TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new("jj_metadata");
// indices: secondary-index entries (see `IndexKey` in encoding.rs) — also not
// yet read or written here.
const INDICES_TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new("indices");
|
||||
|
||||
/// redb-based storage backend implementing [`KVStore`].
pub struct RedbBackend {
    // Shared handle to the open database; `db()` hands out extra Arc clones.
    db: Arc<Database>,
}
|
||||
|
||||
impl RedbBackend {
|
||||
/// Create a new RedbBackend
|
||||
pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
info!("Opening redb database at: {}", path.as_ref().display());
|
||||
|
||||
let db = Database::create(path.as_ref()).map_err(|e| {
|
||||
StorageError::database_error(format!("Failed to create database: {}", e), Some(Box::new(e)))
|
||||
})?;
|
||||
|
||||
// Create tables if they don't exist
|
||||
let write_txn = db.begin_write()?;
|
||||
{
|
||||
let _ = write_txn.open_table(RESOURCES_TABLE)?;
|
||||
let _ = write_txn.open_table(JJ_METADATA_TABLE)?;
|
||||
let _ = write_txn.open_table(INDICES_TABLE)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
|
||||
info!("redb database initialized successfully");
|
||||
|
||||
Ok(Self { db: Arc::new(db) })
|
||||
}
|
||||
|
||||
/// Get the underlying database (for advanced operations)
|
||||
pub fn db(&self) -> Arc<Database> {
|
||||
Arc::clone(&self.db)
|
||||
}
|
||||
}
|
||||
|
||||
impl KVStore for RedbBackend {
|
||||
fn get(&self, key: &[u8]) -> Result<Option<Bytes>> {
|
||||
debug!("Getting key: {:?}", String::from_utf8_lossy(key));
|
||||
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
match table.get(key)? {
|
||||
Some(value) => {
|
||||
let bytes = value.value().to_vec();
|
||||
Ok(Some(Bytes::from(bytes)))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn put(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||
debug!("Putting key: {:?}", String::from_utf8_lossy(key));
|
||||
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let mut table = write_txn.open_table(RESOURCES_TABLE)?;
|
||||
table.insert(key, value)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn delete(&self, key: &[u8]) -> Result<()> {
|
||||
debug!("Deleting key: {:?}", String::from_utf8_lossy(key));
|
||||
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let mut table = write_txn.open_table(RESOURCES_TABLE)?;
|
||||
table.remove(key)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn scan(&self, prefix: &[u8]) -> Result<Vec<(Bytes, Bytes)>> {
|
||||
debug!("Scanning with prefix: {:?}", String::from_utf8_lossy(prefix));
|
||||
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
let mut results = Vec::new();
|
||||
|
||||
// Scan all entries
|
||||
for entry in table.iter()? {
|
||||
let (key, value) = entry?;
|
||||
let key_bytes = key.value();
|
||||
|
||||
// Check if key starts with prefix
|
||||
if key_bytes.starts_with(prefix) {
|
||||
results.push((
|
||||
Bytes::from(key_bytes.to_vec()),
|
||||
Bytes::from(value.value().to_vec()),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Scan found {} results", results.len());
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
fn scan_with_limit(&self, prefix: &[u8], limit: usize) -> Result<Vec<(Bytes, Bytes)>> {
|
||||
debug!(
|
||||
"Scanning with prefix: {:?}, limit: {}",
|
||||
String::from_utf8_lossy(prefix),
|
||||
limit
|
||||
);
|
||||
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
let mut results = Vec::new();
|
||||
|
||||
// Scan entries with limit
|
||||
for entry in table.iter()? {
|
||||
if results.len() >= limit {
|
||||
break;
|
||||
}
|
||||
|
||||
let (key, value) = entry?;
|
||||
let key_bytes = key.value();
|
||||
|
||||
// Check if key starts with prefix
|
||||
if key_bytes.starts_with(prefix) {
|
||||
results.push((
|
||||
Bytes::from(key_bytes.to_vec()),
|
||||
Bytes::from(value.value().to_vec()),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Scan found {} results", results.len());
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
fn exists(&self, key: &[u8]) -> Result<bool> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
Ok(table.get(key)?.is_some())
|
||||
}
|
||||
|
||||
fn transaction(&self) -> Result<Box<dyn KVTransaction>> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
Ok(Box::new(RedbTransaction {
|
||||
txn: Some(write_txn),
|
||||
committed: false,
|
||||
}))
|
||||
}
|
||||
|
||||
fn keys(&self) -> Result<Vec<Bytes>> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
let mut keys = Vec::new();
|
||||
for entry in table.iter()? {
|
||||
let (key, _) = entry?;
|
||||
keys.push(Bytes::from(key.value().to_vec()));
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<Bytes>> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
let mut keys = Vec::new();
|
||||
for entry in table.iter()? {
|
||||
let (key, _) = entry?;
|
||||
let key_bytes = key.value();
|
||||
|
||||
if key_bytes.starts_with(prefix) {
|
||||
keys.push(Bytes::from(key_bytes.to_vec()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
||||
|
||||
/// redb transaction implementation.
///
/// Wraps a redb `WriteTransaction`. `txn` is `Some` while the transaction
/// is active; `commit`/`rollback` take it out (leaving `None`), which is how
/// `Drop` knows whether an auto-abort is still needed.
struct RedbTransaction {
    /// The active write transaction; `None` once committed or rolled back.
    txn: Option<redb::WriteTransaction>,
    /// Set to true only after a successful `commit()`.
    committed: bool,
}
|
||||
|
||||
impl KVTransaction for RedbTransaction {
|
||||
fn get(&self, key: &[u8]) -> Result<Option<Bytes>> {
|
||||
let txn = self.txn.as_ref().ok_or_else(|| {
|
||||
StorageError::transaction_error("Transaction already committed or rolled back")
|
||||
})?;
|
||||
|
||||
let table = txn.open_table(RESOURCES_TABLE)?;
|
||||
|
||||
let result = match table.get(key)? {
|
||||
Some(value) => {
|
||||
let bytes = value.value().to_vec();
|
||||
Some(Bytes::from(bytes))
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn put(&mut self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||
let txn = self.txn.as_ref().ok_or_else(|| {
|
||||
StorageError::transaction_error("Transaction already committed or rolled back")
|
||||
})?;
|
||||
|
||||
let mut table = txn.open_table(RESOURCES_TABLE)?;
|
||||
table.insert(key, value)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn delete(&mut self, key: &[u8]) -> Result<()> {
|
||||
let txn = self.txn.as_ref().ok_or_else(|| {
|
||||
StorageError::transaction_error("Transaction already committed or rolled back")
|
||||
})?;
|
||||
|
||||
let mut table = txn.open_table(RESOURCES_TABLE)?;
|
||||
table.remove(key)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn commit(mut self: Box<Self>) -> Result<()> {
|
||||
let txn = self.txn.take().ok_or_else(|| {
|
||||
StorageError::transaction_error("Transaction already committed or rolled back")
|
||||
})?;
|
||||
|
||||
txn.commit()?;
|
||||
self.committed = true;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rollback(mut self: Box<Self>) -> Result<()> {
|
||||
let txn = self.txn.take().ok_or_else(|| {
|
||||
StorageError::transaction_error("Transaction already committed or rolled back")
|
||||
})?;
|
||||
|
||||
txn.abort()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RedbTransaction {
|
||||
fn drop(&mut self) {
|
||||
if !self.committed && self.txn.is_some() {
|
||||
// Auto-rollback if not committed
|
||||
if let Some(txn) = self.txn.take() {
|
||||
let _ = txn.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// Round-trips a single key through put/get/exists/delete.
    #[test]
    fn test_redb_backend_basic_operations() {
        let tmp = tempdir().unwrap();
        let path = tmp.path().join("test.redb");
        let backend = RedbBackend::new(&path).unwrap();

        // put followed by get returns the stored bytes.
        backend.put(b"key1", b"value1").unwrap();
        assert_eq!(backend.get(b"key1").unwrap(), Some(Bytes::from("value1")));

        // exists reflects presence and absence.
        assert!(backend.exists(b"key1").unwrap());
        assert!(!backend.exists(b"key2").unwrap());

        // delete removes the key.
        backend.delete(b"key1").unwrap();
        assert_eq!(backend.get(b"key1").unwrap(), None);
    }

    /// Prefix scans return only matching entries, honouring the limit variant.
    #[test]
    fn test_redb_backend_scan() {
        let tmp = tempdir().unwrap();
        let path = tmp.path().join("test.redb");
        let backend = RedbBackend::new(&path).unwrap();

        backend.put(b"prefix/key1", b"value1").unwrap();
        backend.put(b"prefix/key2", b"value2").unwrap();
        backend.put(b"other/key3", b"value3").unwrap();

        assert_eq!(backend.scan(b"prefix/").unwrap().len(), 2);
        assert_eq!(backend.scan_with_limit(b"prefix/", 1).unwrap().len(), 1);
    }

    /// Commit persists staged writes; rollback discards them.
    #[test]
    fn test_redb_backend_transaction() {
        let tmp = tempdir().unwrap();
        let path = tmp.path().join("test.redb");
        let backend = RedbBackend::new(&path).unwrap();

        {
            let mut txn = backend.transaction().unwrap();
            txn.put(b"key1", b"value1").unwrap();
            txn.commit().unwrap();
        }
        assert_eq!(backend.get(b"key1").unwrap(), Some(Bytes::from("value1")));

        {
            let mut txn = backend.transaction().unwrap();
            txn.put(b"key2", b"value2").unwrap();
            txn.rollback().unwrap();
        }
        assert_eq!(backend.get(b"key2").unwrap(), None);
    }

    /// keys() lists everything; keys_with_prefix() filters by prefix.
    #[test]
    fn test_redb_backend_keys() {
        let tmp = tempdir().unwrap();
        let path = tmp.path().join("test.redb");
        let backend = RedbBackend::new(&path).unwrap();

        backend.put(b"key1", b"value1").unwrap();
        backend.put(b"key2", b"value2").unwrap();
        backend.put(b"prefix/key3", b"value3").unwrap();

        assert_eq!(backend.keys().unwrap().len(), 3);
        assert_eq!(backend.keys_with_prefix(b"prefix/").unwrap().len(), 1);
    }
}
|
||||
24
crates/reddwarf-versioning/Cargo.toml
Normal file
24
crates/reddwarf-versioning/Cargo.toml
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
# Manifest for the reddwarf-versioning crate: DAG-based resource versioning.
# All shared metadata and dependency versions are inherited from the
# workspace root Cargo.toml.
[package]
name = "reddwarf-versioning"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
reddwarf-core = { workspace = true }
reddwarf-storage = { workspace = true }
# jj-lib = { workspace = true } # Reserved for future enhancement
miette = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
parking_lot = "0.12"

[dev-dependencies]
tempfile = { workspace = true }
|
||||
244
crates/reddwarf-versioning/src/commit.rs
Normal file
244
crates/reddwarf-versioning/src/commit.rs
Normal file
|
|
@ -0,0 +1,244 @@
|
|||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Type of change in a commit
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum ChangeType {
|
||||
/// Resource created
|
||||
Create,
|
||||
/// Resource updated
|
||||
Update,
|
||||
/// Resource deleted
|
||||
Delete,
|
||||
}
|
||||
|
||||
/// A change to a single resource, recorded inside a [`Commit`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Change {
    /// Type of change (create / update / delete)
    pub change_type: ChangeType,
    /// Resource key (e.g. "v1/Pod/default/nginx", per the tests below)
    pub resource_key: String,
    /// Resource content (JSON-encoded); empty string for deletes
    pub content: String,
    /// Previous content (for updates/deletes); `None` for creates
    pub previous_content: Option<String>,
}
|
||||
|
||||
impl Change {
|
||||
/// Create a new Change
|
||||
pub fn new(
|
||||
change_type: ChangeType,
|
||||
resource_key: String,
|
||||
content: String,
|
||||
previous_content: Option<String>,
|
||||
) -> Self {
|
||||
Self {
|
||||
change_type,
|
||||
resource_key,
|
||||
content,
|
||||
previous_content,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a Change for resource creation
|
||||
pub fn create(resource_key: String, content: String) -> Self {
|
||||
Self {
|
||||
change_type: ChangeType::Create,
|
||||
resource_key,
|
||||
content,
|
||||
previous_content: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a Change for resource update
|
||||
pub fn update(resource_key: String, content: String, previous_content: String) -> Self {
|
||||
Self {
|
||||
change_type: ChangeType::Update,
|
||||
resource_key,
|
||||
content,
|
||||
previous_content: Some(previous_content),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a Change for resource deletion
|
||||
pub fn delete(resource_key: String, previous_content: String) -> Self {
|
||||
Self {
|
||||
change_type: ChangeType::Delete,
|
||||
resource_key,
|
||||
content: String::new(),
|
||||
previous_content: Some(previous_content),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A commit in the version DAG.
///
/// Commits form a directed acyclic graph through their `parents` list:
/// zero parents means a root commit, two or more means a merge.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Commit {
    /// Unique commit ID (UUID v4, generated in `Commit::new`)
    pub id: String,
    /// Parent commit IDs (can have multiple for merges)
    pub parents: Vec<String>,
    /// Changes in this commit
    pub changes: Vec<Change>,
    /// Commit message
    pub message: String,
    /// Author
    pub author: String,
    /// Creation timestamp (UTC, set in `Commit::new`)
    pub timestamp: DateTime<Utc>,
}
|
||||
|
||||
impl Commit {
|
||||
/// Create a new commit
|
||||
pub fn new(
|
||||
parents: Vec<String>,
|
||||
changes: Vec<Change>,
|
||||
message: String,
|
||||
author: String,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4().to_string(),
|
||||
parents,
|
||||
changes,
|
||||
message,
|
||||
author,
|
||||
timestamp: Utc::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the commit ID
|
||||
pub fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
/// Check if this is a merge commit
|
||||
pub fn is_merge(&self) -> bool {
|
||||
self.parents.len() > 1
|
||||
}
|
||||
|
||||
/// Check if this is the root commit
|
||||
pub fn is_root(&self) -> bool {
|
||||
self.parents.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for creating commits.
///
/// Accumulates parents, changes, message and author, then produces a
/// [`Commit`] via `build()` (which assigns the ID and timestamp).
pub struct CommitBuilder {
    /// Parent commit IDs accumulated so far.
    parents: Vec<String>,
    /// Changes accumulated so far.
    changes: Vec<Change>,
    /// Commit message; empty until set.
    message: String,
    /// Author; defaults to "reddwarf" (see `new`).
    author: String,
}
|
||||
|
||||
impl CommitBuilder {
|
||||
/// Create a new CommitBuilder
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
parents: Vec::new(),
|
||||
changes: Vec::new(),
|
||||
message: String::new(),
|
||||
author: "reddwarf".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a parent commit
|
||||
pub fn parent(mut self, parent_id: String) -> Self {
|
||||
self.parents.push(parent_id);
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple parent commits
|
||||
pub fn parents(mut self, parents: Vec<String>) -> Self {
|
||||
self.parents.extend(parents);
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a change
|
||||
pub fn change(mut self, change: Change) -> Self {
|
||||
self.changes.push(change);
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple changes
|
||||
pub fn changes(mut self, changes: Vec<Change>) -> Self {
|
||||
self.changes.extend(changes);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the commit message
|
||||
pub fn message(mut self, message: String) -> Self {
|
||||
self.message = message;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the author
|
||||
pub fn author(mut self, author: String) -> Self {
|
||||
self.author = author;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build the commit
|
||||
pub fn build(self) -> Commit {
|
||||
Commit::new(self.parents, self.changes, self.message, self.author)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CommitBuilder {
    /// Equivalent to [`CommitBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A `create` change carries no previous content.
    #[test]
    fn test_change_create() {
        let c = Change::create("v1/Pod/default/nginx".to_string(), "{}".to_string());
        assert_eq!(c.change_type, ChangeType::Create);
        assert_eq!(c.resource_key, "v1/Pod/default/nginx");
        assert_eq!(c.previous_content, None);
    }

    /// An `update` change retains the previous content.
    #[test]
    fn test_change_update() {
        let c = Change::update(
            "v1/Pod/default/nginx".to_string(),
            "{\"new\":true}".to_string(),
            "{\"old\":true}".to_string(),
        );
        assert_eq!(c.change_type, ChangeType::Update);
        assert!(c.previous_content.is_some());
    }

    /// A builder with no parents produces a root, non-merge commit.
    #[test]
    fn test_commit_creation() {
        let commit = CommitBuilder::new()
            .change(Change::create(
                "v1/Pod/default/nginx".to_string(),
                "{}".to_string(),
            ))
            .message("Create nginx pod".to_string())
            .build();

        assert!(!commit.id.is_empty());
        assert_eq!(commit.changes.len(), 1);
        assert_eq!(commit.message, "Create nginx pod");
        assert!(commit.is_root());
        assert!(!commit.is_merge());
    }

    /// Two parents make a merge commit, which is therefore not a root.
    #[test]
    fn test_commit_with_parents() {
        let merge = CommitBuilder::new()
            .parent("parent1".to_string())
            .parent("parent2".to_string())
            .message("Merge commit".to_string())
            .build();

        assert_eq!(merge.parents.len(), 2);
        assert!(merge.is_merge());
        assert!(!merge.is_root());
    }
}
|
||||
77
crates/reddwarf-versioning/src/conflict.rs
Normal file
77
crates/reddwarf-versioning/src/conflict.rs
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Represents one side of a conflict: which commit produced which content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConflictSide {
    /// Commit ID for this side
    pub commit_id: String,
    /// Content from this side (JSON-encoded resource content)
    pub content: String,
}
|
||||
|
||||
/// Represents a conflict between concurrent modifications of one resource.
///
/// Produced when two commits both change the same resource key with
/// differing content (see `VersionStore::detect_conflicts`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Conflict {
    /// Resource key that has the conflict
    pub resource_key: String,
    /// Our side of the conflict
    pub our_side: ConflictSide,
    /// Their side of the conflict
    pub their_side: ConflictSide,
    /// Base commit (common ancestor); `None` when no ancestor was found
    pub base_commit_id: Option<String>,
}
|
||||
|
||||
impl Conflict {
|
||||
/// Create a new Conflict
|
||||
pub fn new(
|
||||
resource_key: String,
|
||||
our_side: ConflictSide,
|
||||
their_side: ConflictSide,
|
||||
base_commit_id: Option<String>,
|
||||
) -> Self {
|
||||
Self {
|
||||
resource_key,
|
||||
our_side,
|
||||
their_side,
|
||||
base_commit_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a description of the conflict
|
||||
pub fn description(&self) -> String {
|
||||
format!(
|
||||
"Conflict on resource {} between commits {} and {}",
|
||||
self.resource_key, self.our_side.commit_id, self.their_side.commit_id
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing a conflict preserves both sides and yields a readable
    /// description.
    #[test]
    fn test_conflict_creation() {
        let ours = ConflictSide {
            commit_id: "commit1".to_string(),
            content: "{\"version\":1}".to_string(),
        };
        let theirs = ConflictSide {
            commit_id: "commit2".to_string(),
            content: "{\"version\":2}".to_string(),
        };

        let conflict = Conflict::new(
            "v1/Pod/default/nginx".to_string(),
            ours,
            theirs,
            Some("base".to_string()),
        );

        assert_eq!(conflict.resource_key, "v1/Pod/default/nginx");
        assert_eq!(conflict.our_side.commit_id, "commit1");
        assert_eq!(conflict.their_side.commit_id, "commit2");
        assert!(conflict.description().contains("Conflict"));
    }
}
|
||||
102
crates/reddwarf-versioning/src/error.rs
Normal file
102
crates/reddwarf-versioning/src/error.rs
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
// Allow unused assignments for diagnostic fields - they're used by the macros
|
||||
#![allow(unused_assignments)]
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Versioning error type.
///
/// Each variant carries a miette `Diagnostic` code and help text so errors
/// render with actionable guidance. Storage and core errors are converted
/// automatically via `#[from]`.
#[derive(Error, Debug, Diagnostic)]
pub enum VersioningError {
    /// Commit not found
    #[error("Commit not found: {commit_id}")]
    #[diagnostic(
        code(versioning::commit_not_found),
        help("Verify the commit ID is correct and exists in the repository")
    )]
    CommitNotFound {
        /// ID of the commit that could not be located.
        commit_id: String,
    },

    /// Conflict detected
    #[error("Conflict detected: {message}")]
    #[diagnostic(
        code(versioning::conflict),
        help("Resolve the conflict by choosing one side or manually merging the changes")
    )]
    Conflict {
        /// Summary of the conflict.
        message: String,
        // NOTE(review): populated by `VersioningError::conflict`; appears to
        // hold per-conflict descriptions or resource keys — confirm at call sites.
        conflicts: Vec<String>,
    },

    /// Invalid operation
    #[error("Invalid operation: {message}")]
    #[diagnostic(
        code(versioning::invalid_operation),
        help("{suggestion}")
    )]
    InvalidOperation {
        /// What was invalid about the operation.
        message: String,
        /// Remediation hint, rendered as the diagnostic help text.
        suggestion: String,
    },

    /// Storage error
    #[error("Storage error: {0}")]
    #[diagnostic(
        code(versioning::storage_error),
        help("Check the underlying storage system")
    )]
    StorageError(#[from] reddwarf_storage::StorageError),

    /// Core error
    #[error("Core error: {0}")]
    #[diagnostic(
        code(versioning::core_error),
        help("This is an internal error")
    )]
    CoreError(#[from] reddwarf_core::ReddwarfError),

    /// Internal error
    #[error("Internal error: {message}")]
    #[diagnostic(
        code(versioning::internal_error),
        help("This is likely a bug. Please report it with full error details")
    )]
    InternalError {
        /// Description of the internal failure.
        message: String,
    },
}
|
||||
|
||||
/// Result type for versioning operations; the error is always [`VersioningError`].
pub type Result<T> = std::result::Result<T, VersioningError>;
|
||||
|
||||
impl VersioningError {
|
||||
/// Create a CommitNotFound error
|
||||
pub fn commit_not_found(commit_id: impl Into<String>) -> Self {
|
||||
Self::CommitNotFound {
|
||||
commit_id: commit_id.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a Conflict error
|
||||
pub fn conflict(message: impl Into<String>, conflicts: Vec<String>) -> Self {
|
||||
Self::Conflict {
|
||||
message: message.into(),
|
||||
conflicts,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InvalidOperation error
|
||||
pub fn invalid_operation(message: impl Into<String>, suggestion: impl Into<String>) -> Self {
|
||||
Self::InvalidOperation {
|
||||
message: message.into(),
|
||||
suggestion: suggestion.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an InternalError
|
||||
pub fn internal_error(message: impl Into<String>) -> Self {
|
||||
Self::InternalError {
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
18
crates/reddwarf-versioning/src/lib.rs
Normal file
18
crates/reddwarf-versioning/src/lib.rs
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
//! Reddwarf Versioning - DAG-based resource versioning
//!
//! This crate provides:
//! - VersionStore for commit storage over the redb KV backend
//!   (a jj-lib backed implementation is reserved for future enhancement;
//!   the dependency is currently commented out in Cargo.toml)
//! - Commit operations for resource changes
//! - Conflict detection and representation
//! - DAG traversal for WATCH operations

pub mod error;
pub mod store;
pub mod commit;
pub mod conflict;

// Re-export commonly used types
pub use error::{VersioningError, Result};
pub use store::VersionStore;
pub use commit::{Commit, CommitBuilder, Change, ChangeType};
pub use conflict::{Conflict, ConflictSide};
|
||||
304
crates/reddwarf-versioning/src/store.rs
Normal file
304
crates/reddwarf-versioning/src/store.rs
Normal file
|
|
@ -0,0 +1,304 @@
|
|||
use crate::{Change, Commit, CommitBuilder, Conflict, ConflictSide, Result, VersioningError};
|
||||
use reddwarf_storage::{KVStore, RedbBackend};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// Version store for managing DAG-based resource versions.
///
/// Commits are persisted as JSON under `version:commit:<id>` keys in the
/// redb backend; the current HEAD ID is persisted under `version:head` and
/// cached in memory.
pub struct VersionStore {
    /// Underlying key-value storage for commits and the HEAD pointer.
    storage: Arc<RedbBackend>,
    /// Current HEAD commit ID (latest commit); `None` until the first commit.
    head: parking_lot::RwLock<Option<String>>,
}
|
||||
|
||||
impl VersionStore {
|
||||
/// Create a new VersionStore
|
||||
pub fn new(storage: Arc<RedbBackend>) -> Result<Self> {
|
||||
info!("Initializing VersionStore");
|
||||
|
||||
let store = Self {
|
||||
storage,
|
||||
head: parking_lot::RwLock::new(None),
|
||||
};
|
||||
|
||||
// Load HEAD from storage
|
||||
if let Some(head_bytes) = store.storage.get(b"version:head")? {
|
||||
let head_id = String::from_utf8_lossy(&head_bytes).to_string();
|
||||
info!("Loaded HEAD: {}", head_id);
|
||||
*store.head.write() = Some(head_id);
|
||||
}
|
||||
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
/// Create a new commit
|
||||
pub fn create_commit(&self, builder: CommitBuilder) -> Result<Commit> {
|
||||
let commit = builder.build();
|
||||
debug!("Creating commit: {}", commit.id);
|
||||
|
||||
// Serialize and store the commit
|
||||
let commit_json = serde_json::to_string(&commit)
|
||||
.map_err(|e| VersioningError::internal_error(format!("Failed to serialize commit: {}", e)))?;
|
||||
|
||||
let commit_key = format!("version:commit:{}", commit.id);
|
||||
self.storage.put(commit_key.as_bytes(), commit_json.as_bytes())?;
|
||||
|
||||
// Update HEAD
|
||||
self.set_head(commit.id.clone())?;
|
||||
|
||||
info!("Created commit: {}", commit.id);
|
||||
Ok(commit)
|
||||
}
|
||||
|
||||
/// Get a commit by ID
|
||||
pub fn get_commit(&self, commit_id: &str) -> Result<Commit> {
|
||||
debug!("Getting commit: {}", commit_id);
|
||||
|
||||
let commit_key = format!("version:commit:{}", commit_id);
|
||||
let commit_bytes = self
|
||||
.storage
|
||||
.get(commit_key.as_bytes())?
|
||||
.ok_or_else(|| VersioningError::commit_not_found(commit_id))?;
|
||||
|
||||
let commit: Commit = serde_json::from_slice(&commit_bytes)
|
||||
.map_err(|e| VersioningError::internal_error(format!("Failed to deserialize commit: {}", e)))?;
|
||||
|
||||
Ok(commit)
|
||||
}
|
||||
|
||||
/// Get the current HEAD commit
|
||||
pub fn get_head(&self) -> Result<Option<Commit>> {
|
||||
let head_id = self.head.read().clone();
|
||||
|
||||
match head_id {
|
||||
Some(id) => Ok(Some(self.get_commit(&id)?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the HEAD commit
|
||||
fn set_head(&self, commit_id: String) -> Result<()> {
|
||||
self.storage.put(b"version:head", commit_id.as_bytes())?;
|
||||
*self.head.write() = Some(commit_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get all commits (for debugging)
|
||||
pub fn list_commits(&self) -> Result<Vec<Commit>> {
|
||||
let keys = self.storage.keys_with_prefix(b"version:commit:")?;
|
||||
let mut commits = Vec::new();
|
||||
|
||||
for key in keys {
|
||||
let commit_bytes = self.storage.get(&key)?.unwrap();
|
||||
let commit: Commit = serde_json::from_slice(&commit_bytes)
|
||||
.map_err(|e| VersioningError::internal_error(format!("Failed to deserialize commit: {}", e)))?;
|
||||
commits.push(commit);
|
||||
}
|
||||
|
||||
Ok(commits)
|
||||
}
|
||||
|
||||
/// Detect conflicts between two commits
|
||||
pub fn detect_conflicts(&self, commit_id1: &str, commit_id2: &str) -> Result<Vec<Conflict>> {
|
||||
debug!("Detecting conflicts between {} and {}", commit_id1, commit_id2);
|
||||
|
||||
let commit1 = self.get_commit(commit_id1)?;
|
||||
let commit2 = self.get_commit(commit_id2)?;
|
||||
|
||||
let mut conflicts = Vec::new();
|
||||
|
||||
// Build maps of resource keys to changes
|
||||
let mut changes1: HashMap<String, &Change> = HashMap::new();
|
||||
for change in &commit1.changes {
|
||||
changes1.insert(change.resource_key.clone(), change);
|
||||
}
|
||||
|
||||
let mut changes2: HashMap<String, &Change> = HashMap::new();
|
||||
for change in &commit2.changes {
|
||||
changes2.insert(change.resource_key.clone(), change);
|
||||
}
|
||||
|
||||
// Find common resources that were modified in both commits
|
||||
for (resource_key, change1) in &changes1 {
|
||||
if let Some(change2) = changes2.get(resource_key) {
|
||||
// Both commits modified the same resource - potential conflict
|
||||
if change1.content != change2.content {
|
||||
let conflict = Conflict::new(
|
||||
resource_key.clone(),
|
||||
ConflictSide {
|
||||
commit_id: commit_id1.to_string(),
|
||||
content: change1.content.clone(),
|
||||
},
|
||||
ConflictSide {
|
||||
commit_id: commit_id2.to_string(),
|
||||
content: change2.content.clone(),
|
||||
},
|
||||
self.find_common_ancestor(commit_id1, commit_id2)?,
|
||||
);
|
||||
conflicts.push(conflict);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !conflicts.is_empty() {
|
||||
debug!("Found {} conflicts", conflicts.len());
|
||||
}
|
||||
|
||||
Ok(conflicts)
|
||||
}
|
||||
|
||||
/// Find the common ancestor of two commits (simplified BFS)
|
||||
pub fn find_common_ancestor(&self, commit_id1: &str, commit_id2: &str) -> Result<Option<String>> {
|
||||
let _commit1 = self.get_commit(commit_id1)?;
|
||||
let _commit2 = self.get_commit(commit_id2)?;
|
||||
|
||||
// Get all ancestors of commit1
|
||||
let mut ancestors1 = HashSet::new();
|
||||
let mut to_visit = vec![commit_id1.to_string()];
|
||||
|
||||
while let Some(commit_id) = to_visit.pop() {
|
||||
if ancestors1.contains(&commit_id) {
|
||||
continue;
|
||||
}
|
||||
ancestors1.insert(commit_id.clone());
|
||||
|
||||
if let Ok(commit) = self.get_commit(&commit_id) {
|
||||
to_visit.extend(commit.parents);
|
||||
}
|
||||
}
|
||||
|
||||
// Find first common ancestor in commit2's history
|
||||
let mut to_visit = vec![commit_id2.to_string()];
|
||||
let mut visited = HashSet::new();
|
||||
|
||||
while let Some(commit_id) = to_visit.pop() {
|
||||
if visited.contains(&commit_id) {
|
||||
continue;
|
||||
}
|
||||
visited.insert(commit_id.clone());
|
||||
|
||||
if ancestors1.contains(&commit_id) {
|
||||
return Ok(Some(commit_id));
|
||||
}
|
||||
|
||||
if let Ok(commit) = self.get_commit(&commit_id) {
|
||||
to_visit.extend(commit.parents);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Traverse the DAG from one commit to another
|
||||
pub fn traverse(&self, from_commit_id: &str, to_commit_id: &str) -> Result<Vec<Commit>> {
|
||||
debug!("Traversing from {} to {}", from_commit_id, to_commit_id);
|
||||
|
||||
let mut commits = Vec::new();
|
||||
let mut visited = HashSet::new();
|
||||
let mut to_visit = vec![to_commit_id.to_string()];
|
||||
|
||||
// BFS from to_commit back to from_commit
|
||||
while let Some(commit_id) = to_visit.pop() {
|
||||
if commit_id == from_commit_id {
|
||||
break;
|
||||
}
|
||||
|
||||
if visited.contains(&commit_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
visited.insert(commit_id.clone());
|
||||
|
||||
let commit = self.get_commit(&commit_id)?;
|
||||
commits.push(commit.clone());
|
||||
|
||||
// Add parents to visit
|
||||
to_visit.extend(commit.parents);
|
||||
}
|
||||
|
||||
// Reverse to get chronological order
|
||||
commits.reverse();
|
||||
|
||||
Ok(commits)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use reddwarf_storage::RedbBackend;
    use tempfile::tempdir;

    // Creates a commit, reads it back by ID, and verifies HEAD was advanced
    // to it. Depends on create_commit unconditionally moving HEAD.
    #[test]
    fn test_version_store_basic() {
        let dir = tempdir().unwrap();
        let db_path = dir.path().join("test.redb");
        let backend = Arc::new(RedbBackend::new(&db_path).unwrap());
        let store = VersionStore::new(backend).unwrap();

        // Create a commit
        let change = Change::create("v1/Pod/default/nginx".to_string(), "{}".to_string());
        let commit = store
            .create_commit(CommitBuilder::new().change(change).message("Initial commit".to_string()))
            .unwrap();

        assert!(!commit.id.is_empty());

        // Get the commit back
        let retrieved = store.get_commit(&commit.id).unwrap();
        assert_eq!(retrieved.id, commit.id);
        assert_eq!(retrieved.message, "Initial commit");

        // Check HEAD
        let head = store.get_head().unwrap().unwrap();
        assert_eq!(head.id, commit.id);
    }

    // Two commits diverging from a shared base, both updating the same
    // resource with different content, must be reported as one conflict.
    #[test]
    fn test_conflict_detection() {
        let dir = tempdir().unwrap();
        let db_path = dir.path().join("test.redb");
        let backend = Arc::new(RedbBackend::new(&db_path).unwrap());
        let store = VersionStore::new(backend).unwrap();

        // Create base commit
        let change1 = Change::create("v1/Pod/default/nginx".to_string(), "{\"version\":0}".to_string());
        let commit1 = store
            .create_commit(CommitBuilder::new().change(change1).message("Base".to_string()))
            .unwrap();

        // Create two diverging commits from the base
        let change2 = Change::update(
            "v1/Pod/default/nginx".to_string(),
            "{\"version\":1}".to_string(),
            "{\"version\":0}".to_string(),
        );
        let commit2 = store
            .create_commit(
                CommitBuilder::new()
                    .parent(commit1.id.clone())
                    .change(change2)
                    .message("Update A".to_string()),
            )
            .unwrap();

        let change3 = Change::update(
            "v1/Pod/default/nginx".to_string(),
            "{\"version\":2}".to_string(),
            "{\"version\":0}".to_string(),
        );
        let commit3 = store
            .create_commit(
                CommitBuilder::new()
                    .parent(commit1.id.clone())
                    .change(change3)
                    .message("Update B".to_string()),
            )
            .unwrap();

        // Detect conflicts: same key, different content on each side.
        let conflicts = store.detect_conflicts(&commit2.id, &commit3.id).unwrap();
        assert_eq!(conflicts.len(), 1);
        assert_eq!(conflicts[0].resource_key, "v1/Pod/default/nginx");
    }
}
|
||||
25
crates/reddwarf/Cargo.toml
Normal file
25
crates/reddwarf/Cargo.toml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# Manifest for the top-level `reddwarf` binary crate, which ties together
# all control-plane crates. Shared metadata comes from the workspace root.
[package]
name = "reddwarf"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "reddwarf"
path = "src/main.rs"

[dependencies]
reddwarf-core = { workspace = true }
reddwarf-storage = { workspace = true }
reddwarf-versioning = { workspace = true }
reddwarf-apiserver = { workspace = true }
reddwarf-scheduler = { workspace = true }
tokio = { workspace = true }
clap = { workspace = true }
miette = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
rcgen = { workspace = true }
3
crates/reddwarf/src/main.rs
Normal file
3
crates/reddwarf/src/main.rs
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
/// Entry point for the consolidated control-plane binary.
/// Currently a placeholder: prints a banner and exits.
fn main() {
    println!("Reddwarf Kubernetes Control Plane");
}
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
# **High-Performance Engineering of a Rust-Based, Single-Binary Kubernetes Control Plane: An Architectural Framework for Distributed Resource Management**
|
||||
|
||||
The modern landscape of container orchestration is increasingly defined by the divergence between hyperscale cloud environments and the unique constraints of edge, IoT, and localized self-hosted infrastructure. Traditional Kubernetes architectures, while robust, carry significant operational and resource overhead due to their fragmented design, where individual binaries for the API server, scheduler, and controller manager communicate over network boundaries and rely on external storage systems such as etcd.1 This research evaluates the technical feasibility of a unified, Rust-based Kubernetes control plane optimized for portability and ease of deployment. By consolidating the control plane into a single binary, utilizing pure-Rust storage with redb, and integrating jj-lib for versioned resource management, it is possible to achieve an order-of-magnitude reduction in complexity while maintaining strict API compliance.3
|
||||
|
||||
## **The Paradigm of Process Consolidation in Control Plane Design**
|
||||
|
||||
The primary driver for moving toward a single-binary architecture is the elimination of the overhead associated with inter-process communication (IPC) and separate memory management heaps. In a standard Kubernetes control plane, the kube-apiserver, kube-scheduler, and kube-controller-manager are distinct processes interacting via HTTPS or gRPC.2 A Rust-based implementation allows these components to run as internal tasks within a single execution context, facilitating zero-copy state sharing through shared memory structures protected by Arc\<RwLock\<T\>\> or message-passing via asynchronous channels.6
|
||||
|
||||
| Architectural Feature | Standard Upstream Kubernetes | Consolidated Rust Control Plane |
|
||||
| :---- | :---- | :---- |
|
||||
| **Binary Structure** | Fragmented (Multiple binaries) | Monolithic (Single binary) |
|
||||
| **Memory Management** | Multi-heap (GC-dependent) | Unified heap (RAII/Ownership) |
|
||||
| **Data Consistency** | External etcd cluster | Embedded Raft with redb |
|
||||
| **Versioning Model** | Monotonic Integer | DAG-based (Jujutsu) |
|
||||
| **Installation** | Complex (multi-component cluster setup) | Simple (Single binary) |
|
||||
|
||||
Source:
|
||||
|
||||
## **Distributed Consensus and the Raft-Jujutsu Relationship**
|
||||
|
||||
A critical architectural question is whether Raft is necessary if Jujutsu is used for versioning. While Jujutsu handles **versioned state and history**, Raft provides **real-time coordination and agreement** among cluster members.
|
||||
|
||||
### **1\. Consensus vs. Versioning**
|
||||
|
||||
* **Consensus (Raft):** Provides **linearizability**—the guarantee that the cluster behaves as a single unit and every operation happens in a strictly agreed-upon order. It handles **leader election**, determining which node is authorized to process writes.
|
||||
* **Versioning (Jujutsu):** Manages the **Directed Acyclic Graph (DAG)** of resource states. It accepts concurrent changes and represents them as "first-class conflicts" in history, which is excellent for auditing but problematic for a live API that must provide a single authoritative "now" to workers.11
|
||||
|
||||
### **2\. The Integrated Workflow: Raft-Backed Jujutsu**
|
||||
|
||||
The control plane uses **Raft to replicate the Jujutsu operation log**:
|
||||
|
||||
1. **Operation Ordering:** When an API request arrives, the Raft leader proposes a new "Jujutsu Transaction" as a log entry.
|
||||
2. **Quorum Agreement:** Once the Raft quorum $Q = \lfloor \frac{N}{2} \rfloor + 1$ is reached, the transaction is committed.
|
||||
3. **Local Application:** Every node applies that transaction to its local jj-lib repository, ensuring every node has the exact same version of the DAG at any given Raft index.
|
||||
|
||||
## **Redb: Pure-Rust Portable Storage**
|
||||
|
||||
To ensure the control plane can be compiled easily on every platform without heavy C++ dependencies, the architecture utilizes redb as the storage backend. redb is an embedded key-value store written in pure Rust, inspired by LMDB, and utilizing copy-on-write B-trees.
|
||||
|
||||
### **Redb-Specific Architectural Optimizations**
|
||||
|
||||
Unlike RocksDB, which is optimized for high-concurrency background compaction (LSM-trees), redb focuses on a simple, memory-safe, single-writer model.
|
||||
|
||||
* **Single-Writer Optimization:** Since redb serializes write transactions, the control plane must batch Raft log applications into single WriteTransaction blocks to maximize throughput.
|
||||
* **Domain Partitioning:** To mitigate the single-writer bottleneck, the system can utilize independent "Column Families" (via the Manifold fork) or separate redb tables for different data domains (e.g., one table for Raft logs, another for jj metadata) to allow parallel writes to independent sections of the storage file.
|
||||
* **Zero-Copy Reads:** The API server leverages redb’s zero-copy API to serve GET and LIST requests directly from memory-mapped B-tree pages, minimizing allocations during heavy read traffic.
|
||||
|
||||
| Storage Metric | RocksDB (C++-based) | Redb (Pure Rust) |
|
||||
| :---- | :---- | :---- |
|
||||
| **Portability** | Requires C++ Toolchain/LLVM | Pure Cargo build (any platform) |
|
||||
| **Concurrent Writes** | High (Multi-threaded) | Serialized (Single-writer) |
|
||||
| **Safety** | Managed FFI | 100% Memory Safe |
|
||||
| **Disk Model** | LSM-Tree (Append-heavy) | B-Tree (Read-heavy) |
|
||||
|
||||
Source:
|
||||
|
||||
## **Advanced Resource Versioning via jj-lib**
|
||||
|
||||
Integrating jj-lib allows the API server to treat every resource update as a commit in a high-performance version graph. When the API server receives an update, it initiates a transaction in jj-lib. 12
|
||||
|
||||
### **Conflict Representation and Resolution**
|
||||
|
||||
Jujutsu treats conflicts as first-class objects. If divergent updates occur simultaneously in a way Raft doesn't immediately linearize (e.g., during a partition), the API server records them as a divergent branch.
|
||||
|
||||
* **Deferred Resolution:** The system can continue operating with a conflicted state, representing the ambiguity to the user until a reconciler resolves the "diff" between the versions.
|
||||
* **Implicit History:** Administrators can use the op log to trace back exactly when a conflict occurred, providing far superior observability compared to standard Kubernetes audit logs.
|
||||
|
||||
## **Engineering the API Server and CRD Schema Engine**
|
||||
|
||||
The API server leverages Axum and k8s-openapi for its exhaustive collection of Kubernetes type definitions.9 Full support for Custom Resource Definitions (CRDs) is implemented through a dynamic schema engine that:
|
||||
|
||||
1. **Registers Dynamic Routes:** Updates the Axum router at runtime to expose new RESTful paths for CRDs.
|
||||
2. **Enforces Validation:** Uses the OpenAPI v3 schema provided in the CRD to validate custom objects before they are proposed to the Raft consensus module. 11
|
||||
3. **Handles Strategic Patching:** Employs optionable types—where every field is an Option\<T\>—to correctly implement strategic merge patches and "Server-Side Apply" (SSA).15
|
||||
|
||||
## **Optimized Communication: Protobuf and Streaming Watches**
|
||||
|
||||
Network efficiency is maximized using Protobuf serialization and streaming list encoders to minimize memory spikes in self-hosted clusters.16
|
||||
|
||||
### **Protobuf and Length-Prefixed Streams**
|
||||
|
||||
The control plane implements the Kubernetes Protobuf envelope format:
|
||||
|
||||
* **Envelope:** Responses start with a 4-byte magic number 0x6b 0x38 0x73 0x00, followed by a runtime.Unknown message containing type metadata and raw bytes.
|
||||
* **Watch Streams:** For WATCH operations, each "frame" is prefixed with a 4-byte integer length, allowing the server to stream individual watch.Event objects incrementally without loading the entire state into memory.
|
||||
* **Streaming Lists:** Implements the v1.33 streaming encoder, transmitting the Items field of a PodList individually to reduce memory usage by up to 20x during large operations.
|
||||
|
||||
## **Node Connectivity and WebSocket Tunneling**
|
||||
|
||||
To support worker nodes behind NATs or firewalls, the architecture implements bidirectional WebSocket tunnels.18
|
||||
|
||||
1. **Agent Initiation:** The worker node (agent) initiates an outbound connection to the control plane, which is upgraded to a WebSocket.
|
||||
2. **Multiplexed Proxying:** All control-plane-to-node traffic (e.g., kubectl exec) is encapsulated within this tunnel, eliminating the need to expose worker ports to the network.
|
||||
3. **Lease Heartbeats:** Kubelets send lightweight Lease updates every 10 seconds to the API server to maintain their health status. 20
|
||||
|
||||
## **System Organization and Build Strategy**
|
||||
|
||||
The project is organized as a Cargo workspace to improve maintainability and leverage cross-compilation for x86\_64 and aarch64 targets.22
|
||||
|
||||
| Component | Choice | Reason |
|
||||
| :---- | :---- | :---- |
|
||||
| **Consensus Engine** | OpenRaft | Event-driven Rust implementation of Raft. |
|
||||
| **Versioning Library** | jj-lib | DAG-based history and conflict representation. |
|
||||
| **Storage Engine** | redb | Pure-Rust, ACID, portable K/V store. |
|
||||
| **API Framework** | Axum | Modular, asynchronous request handling. |
|
||||
| **Serialization** | Prost (Protobuf) | Compact payloads and high-speed serialization. |
|
||||
|
||||
Source:
|
||||
|
||||
## **Conclusions**
|
||||
|
||||
The transition to a pure-Rust, redb-backed Kubernetes control plane represents a significant step toward making Kubernetes truly "portable." By using Raft to linearize the Jujutsu operation log, the system gains the auditability and conflict-handling power of a modern version control system without sacrificing the linear state requirements of a container orchestrator. The combination of WebSocket tunnels and Protobuf-based streaming ensures that this architecture remains responsive even in the resource-constrained environments typical of self-hosted edge deployments.
|
||||
|
||||
#### **Works cited**
|
||||
|
||||
1. K3s \- Lightweight Kubernetes | K3s, accessed on January 28, 2026, [https://docs.k3s.io/](https://docs.k3s.io/)
|
||||
2. K3s vs K8s: Differences, Use Cases & Alternatives | by Spacelift \- Medium, accessed on January 28, 2026, [https://medium.com/spacelift/k3s-vs-k8s-differences-use-cases-alternatives-ffcc134300dc](https://medium.com/spacelift/k3s-vs-k8s-differences-use-cases-alternatives-ffcc134300dc)
|
||||
3. K3s Explained: What is it and How Is It Different From Stock Kubernetes (K8s)?, accessed on January 28, 2026, [https://traefik.io/glossary/k3s-explained](https://traefik.io/glossary/k3s-explained)
|
||||
4. K0s vs K3s vs K8s: Comparing Kubernetes Distributions \- Shipyard.build, accessed on January 28, 2026, [https://shipyard.build/blog/k0s-k3s-k8s/](https://shipyard.build/blog/k0s-k3s-k8s/)
|
||||
5. Understanding k0s: a lightweight Kubernetes distribution for the community | CNCF, accessed on January 28, 2026, [https://www.cncf.io/blog/2024/12/06/understanding-k0s-a-lightweight-kubernetes-distribution-for-the-community/](https://www.cncf.io/blog/2024/12/06/understanding-k0s-a-lightweight-kubernetes-distribution-for-the-community/)
|
||||
6. Rust in Distributed Systems, 2025 Edition | by Disant Upadhyay \- Medium, accessed on January 28, 2026, [https://disant.medium.com/rust-in-distributed-systems-2025-edition-175d95f825d6](https://disant.medium.com/rust-in-distributed-systems-2025-edition-175d95f825d6)
|
||||
7. How I build a Rust backend service \- World Without Eng, accessed on January 28, 2026, [https://worldwithouteng.com/articles/how-i-build-a-rust-backend-service](https://worldwithouteng.com/articles/how-i-build-a-rust-backend-service)
|
||||
8. Coding a simple microservices with Rust | by Gene Kuo \- Medium, accessed on January 28, 2026, [https://genekuo.medium.com/coding-a-simple-microservices-with-rust-3fbde8e32adc](https://genekuo.medium.com/coding-a-simple-microservices-with-rust-3fbde8e32adc)
|
||||
9. k8s\_openapi \- Rust, accessed on January 28, 2026, [https://arnavion.github.io/k8s-openapi/v0.18.x/k8s\_openapi/](https://arnavion.github.io/k8s-openapi/v0.18.x/k8s_openapi/)
|
||||
10. Creating a REST API in Rust \- Arsh Sharma, accessed on January 28, 2026, [https://arshsharma.com/posts/rust-api/](https://arshsharma.com/posts/rust-api/)
|
||||
11. jj-cli — Rust utility // Lib.rs, accessed on January 28, 2026, [https://lib.rs/crates/jj-cli](https://lib.rs/crates/jj-cli)
|
||||
12. jj\_lib \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/jj-lib/latest/jj\_lib/index.html](https://docs.rs/jj-lib/latest/jj_lib/index.html)
|
||||
13. Jujutsu: A Next Generation Replacement for Git \- Vincent Schmalbach, accessed on January 28, 2026, [https://www.vincentschmalbach.com/jujutsu-a-next-generation-replacement-for-git/](https://www.vincentschmalbach.com/jujutsu-a-next-generation-replacement-for-git/)
|
||||
14. Protocol Buffer vs Json \- when to choose one over the other? \- Stack Overflow, accessed on January 28, 2026, [https://stackoverflow.com/questions/52409579/protocol-buffer-vs-json-when-to-choose-one-over-the-other](https://stackoverflow.com/questions/52409579/protocol-buffer-vs-json-when-to-choose-one-over-the-other)
|
||||
15. optionable \- crates.io: Rust Package Registry, accessed on January 28, 2026, [https://crates.io/crates/optionable/0.4.0](https://crates.io/crates/optionable/0.4.0)
|
||||
16. Why it seems there are more distributed systems written in golang rather in rust? \- Reddit, accessed on January 28, 2026, [https://www.reddit.com/r/rust/comments/1l0rzin/why\_it\_seems\_there\_are\_more\_distributed\_systems/](https://www.reddit.com/r/rust/comments/1l0rzin/why_it_seems_there_are_more_distributed_systems/)
|
||||
17. Kubernetes v1.33: Streaming List responses, accessed on January 28, 2026, [https://kubernetes.io/blog/2025/05/09/kubernetes-v1-33-streaming-list-responses/](https://kubernetes.io/blog/2025/05/09/kubernetes-v1-33-streaming-list-responses/)
|
||||
18. Is Meilisearch a viable upgrade alternative to OpenSearch? \- Open edX discussions, accessed on January 28, 2026, [https://discuss.openedx.org/t/is-meilisearch-a-viable-upgrade-alternative-to-opensearch/12400](https://discuss.openedx.org/t/is-meilisearch-a-viable-upgrade-alternative-to-opensearch/12400)
|
||||
19. Basic Network Options \- K3s \- Lightweight Kubernetes, accessed on January 28, 2026, [https://docs.k3s.io/networking/basic-network-options](https://docs.k3s.io/networking/basic-network-options)
|
||||
20. Nodes \- Kubernetes, accessed on January 28, 2026, [https://k8s-docs.netlify.app/en/docs/concepts/architecture/nodes/](https://k8s-docs.netlify.app/en/docs/concepts/architecture/nodes/)
|
||||
21. A Brief Overview of the Kubernetes Node Lifecycle | by Rifewang \- Medium, accessed on January 28, 2026, [https://medium.com/@rifewang/a-brief-overview-of-the-kubernetes-node-lifecycle-bde9ce547852](https://medium.com/@rifewang/a-brief-overview-of-the-kubernetes-node-lifecycle-bde9ce547852)
|
||||
22. Mastering Large Project Organization in Rust | by Leapcell, accessed on January 28, 2026, [https://leapcell.medium.com/mastering-large-project-organization-in-rust-a21d62fb1e8e](https://leapcell.medium.com/mastering-large-project-organization-in-rust-a21d62fb1e8e)
|
||||
23. How To Make Rust Multi-Arch Release Easy \- Qovery, accessed on January 28, 2026, [https://www.qovery.com/blog/how-to-make-rust-multi-arch-release-easy](https://www.qovery.com/blog/how-to-make-rust-multi-arch-release-easy)
|
||||
316
docs/ai/research/Rust Kubernetes Control Plane Research.md
Normal file
316
docs/ai/research/Rust Kubernetes Control Plane Research.md
Normal file
|
|
@ -0,0 +1,316 @@
|
|||
# **High-Performance Engineering of a Rust-Based, Single-Binary Kubernetes Control Plane: An Architectural Framework for Distributed Resource Management**
|
||||
|
||||
The modern landscape of container orchestration is increasingly defined by the divergence between hyperscale cloud environments and the unique constraints of edge, IoT, and localized self-hosted infrastructure. Traditional Kubernetes architectures, while robust, carry significant operational and resource overhead due to their fragmented design, where individual binaries for the API server, scheduler, and controller manager communicate over network boundaries and rely on external storage systems such as etcd.1 This fragmentation introduces inherent latencies, memory pressures, and deployment complexities that often render the control plane unsuitable for environments with limited hardware resources or unpredictable connectivity. This research evaluates the technical feasibility and architectural design of a unified, Rust-based Kubernetes control plane. By consolidating the control plane into a single statically linked binary and integrating distributed consensus, local storage, and advanced version control primitives, it is possible to achieve an order-of-magnitude reduction in resource consumption while maintaining strict compliance with the Kubernetes API specification.3
|
||||
|
||||
## **The Paradigm of Process Consolidation in Control Plane Design**
|
||||
|
||||
The primary driver for moving toward a single-binary architecture is the elimination of the overhead associated with inter-process communication (IPC) and separate memory management heaps. In a standard Kubernetes control plane, the kube-apiserver, kube-scheduler, and kube-controller-manager are distinct processes that typically interact via HTTPS or gRPC.2 Even when these processes coexist on a single node, they suffer from context-switching overhead and the duplication of essential libraries and data structures in memory.6
|
||||
A Rust-based implementation allows developers to leverage the ownership model and the Tokio asynchronous runtime to manage these components as internal tasks within a single execution context.8 This approach facilitates zero-copy state sharing between the API server and the scheduler, as they can access shared memory structures protected by fine-grained synchronization primitives like Arc\<RwLock\<T\>\> or message-passing via high-speed asynchronous channels.8 The memory footprint reduction is substantial; whereas a standard control plane might require several gigabytes of RAM to function reliably, consolidated distributions like K3s have demonstrated that a full control plane can operate in under 512 MB of RAM.3 A Rust implementation, free from the non-deterministic pauses of a garbage collector, can further optimize this footprint and provide more predictable tail latencies for API requests.12
|
||||
|
||||
| Architectural Feature | Standard Upstream Kubernetes | Consolidated Rust Control Plane |
|
||||
| :---- | :---- | :---- |
|
||||
| **Binary Structure** | Fragmented (Multiple binaries) | Monolithic (Single binary) |
|
||||
| **Runtime Environment** | High OS dependency (iptables, etc.) | Minimal (Statically linked binary) |
|
||||
| **IPC Mechanism** | Networked gRPC/REST | In-memory async channels |
|
||||
| **Memory Management** | Multi-heap (GC-dependent) | Unified heap (RAII/Ownership) |
|
||||
| **Data Consistency** | External etcd cluster | Embedded Raft with RocksDB |
|
||||
| **Installation** | Complex (PhD in 'clusterology') | Simple (Single command/binary) |
|
||||
|
||||
Source: 1
|
||||
The transition to a single-binary model also simplifies lifecycle management. By embedding the container runtime (e.g., via containerd integration) and network plugins within the same process envelope, the control plane acts as a comprehensive supervisor.1 This "batteries-included" approach ensures that the versions of the scheduler, API server, and storage backend are always in sync, reducing the risk of version mismatch errors that plague distributed installations.5
|
||||
|
||||
## **Integrating Distributed Consensus with OpenRaft and Local Storage**
|
||||
|
||||
A Kubernetes control plane is fundamentally a distributed state machine. To ensure high availability and data integrity in self-hosted environments, the state must be replicated across multiple nodes using a consensus protocol.8 While etcd provides this functionality for standard clusters, its integration as an external service adds significant complexity to single-binary designs.11 The proposed architecture utilizes OpenRaft, a high-performance, asynchronous Raft implementation in Rust, to provide linearizable replication directly within the binary.16
|
||||
|
||||
### **The Mechanics of Embedded Raft**
|
||||
|
||||
Consensus in the proposed system is achieved by replicating an append-only log of state changes. Every API request that modifies a resource (e.g., POST /api/v1/pods) is proposed as a log entry to the Raft leader.18 The consensus module then replicates this entry to a majority of nodes. The quorum $Q$ for a cluster of $N$ nodes is calculated as:
|
||||
|
||||
$$Q = \left\lfloor \frac{N}{2} \right\rfloor + 1$$
|
||||
This mathematical guarantee ensures that the cluster can tolerate the failure of up to $N - Q$ nodes without losing data or compromising consistency.8 Unlike older, tick-based Raft implementations, OpenRaft is event-driven, meaning it only consumes CPU cycles when there are actual state changes or necessary heartbeats, making it ideal for the bursty traffic patterns of a Kubernetes API server.16
|
||||
The technical integration requires implementing several traits provided by the OpenRaft library. The RaftLogStorage trait defines how log entries are persisted to the local disk, while the RaftStateMachine trait defines how those logs are applied to the cluster state.18 By backing these traits with a high-performance local store like RocksDB, the control plane achieves exceptionally high write throughput—benchmarked at over 70,000 writes per second for single writers and millions of writes per second when batching is applied.16
|
||||
|
||||
### **RocksDB as a Replicated Storage Backend**
|
||||
|
||||
RocksDB's log-structured merge-tree (LSM) architecture is a perfect fit for Raft-based replication. Because Raft logs are append-only and frequently truncated after snapshotting, the LSM model's efficiency in sequential writes and background compaction minimizes disk I/O bottlenecks.18
|
||||
|
||||
| Storage Metric | SQLite (K3s Default) | RocksDB (Proposed Rust) |
|
||||
| :---- | :---- | :---- |
|
||||
| **Write Model** | B-Tree / Page-based | LSM-Tree / Append-only |
|
||||
| **Raft Compatibility** | Requires translation layer (Kine) | Native log-structured mapping |
|
||||
| **Throughput** | Moderate | High (Optimized for SSDs) |
|
||||
| **Snapshotting** | File-level copy | Checkpoint / Hard links |
|
||||
| **Concurrency** | Limited (Database-level locks) | High (Iterators and snapshots) |
|
||||
|
||||
Source: 14
|
||||
Snapshotting is an essential feature for preventing the Raft log from growing indefinitely. In this architecture, the control plane periodically captures a point-in-time snapshot of the cluster state and purges the preceding log entries.16 These snapshots can be transferred to new or recovering nodes using a separate "shipping lane" over QUIC or HTTP/2, ensuring that bulk data transfer does not block the low-latency consensus heartbeats required to maintain cluster leadership.22
|
||||
|
||||
## **Advanced Resource Versioning via Jujutsu-Based DAGs**
|
||||
|
||||
A cornerstone of the Kubernetes API is the resourceVersion field, which enables optimistic concurrency control and efficient state-watching for clients like the kubelet and various controllers.24 In conventional implementations, this version is typically a monotonic integer. However, as cluster complexity and the number of concurrent actors increase, a linear versioning model becomes a bottleneck, failing to adequately represent the complex relationships and potential conflicts in a distributed environment.26
|
||||
This research proposes a groundbreaking approach: integrating the Directed Acyclic Graph (DAG) model of the Jujutsu (jj) version control system to manage resource versions.26 By using jj-lib, the control plane can treat every state update not just as a change to a value, but as a commit in a high-performance version graph.27
|
||||
|
||||
### **The Technical Implementation of jj-lib in Kubernetes**
|
||||
|
||||
Integrating jj-lib programmatically involves mapping Kubernetes resource operations to Jujutsu transactions. When the API server receives a request to update a resource, it initiates a transaction in the jj-lib operation log.30 This transaction creates a new commit that points to its parent(s), effectively building a history of the cluster state that is both auditable and reversible.27
|
||||
The "Working-copy-as-a-commit" philosophy of Jujutsu aligns perfectly with the Kubernetes declarative model. In Kubernetes, the "desired state" is submitted to the API, and the system works to converge the "actual state" to match it.28 Using Jujutsu, the desired state can be represented as the current head of a branch, while the reconcile operations performed by the controller manager are recorded as subsequent commits that resolve the "diff" between the desired and actual states.27
|
||||
|
||||
### **Conflict Representation and Resolution**
|
||||
|
||||
One of the most significant advantages of a DAG-based versioning system is its handling of concurrent modifications. In a standard Kubernetes cluster, if two controllers attempt to update the same resource version, the second update fails with a 409 Conflict error, forcing the controller to relist and retry.24
|
||||
Jujutsu, conversely, treats conflicts as first-class objects.27 If two updates occur simultaneously, the API server can record them as a divergent branch in the resource's history. This allows for:
|
||||
|
||||
1. **Deferred Resolution:** The system can continue to operate with a conflicted state, representing the ambiguity to the user or an automated resolver.27
|
||||
2. **Rich Merging:** Instead of a "last writer wins" or a simple rejection, the API server can attempt to merge the two updates using tree-merge algorithms provided by jj-lib.29
|
||||
3. **Implicit History:** Administrators can use the op log to trace back exactly when a conflict occurred and who initiated the competing changes, providing a level of observability far beyond standard audit logs.27
|
||||
|
||||
The resourceVersion returned to the client in this system is the commit ID of the latest node in the DAG. When a client performs a WATCH operation, the API server performs a graph traversal between the client's provided version and the current head, identifying all intervening changes with cryptographic precision.27
|
||||
|
||||
## **Engineering the API Server: Rust Primitives and Full CRD Support**
|
||||
|
||||
The API server is the primary gateway for all cluster interactions. Building a compatible API server in Rust requires a sophisticated assembly of networking, serialization, and validation libraries.24 The architecture leverages Axum for its modular request-handling pipeline and k8s-openapi for its exhaustive collection of Kubernetes type definitions.8
|
||||
|
||||
### **RESTful Interface and Routing**
|
||||
|
||||
Standard Kubernetes API paths (e.g., /api/v1/namespaces/{namespace}/pods/{name}) are mapped to Rust handler functions using Axum's routing macros.39 Because Kubernetes requires strict adherence to its HTTP verb semantics, handlers must be carefully implemented to distinguish between PUT (full replacement), PATCH (strategic merge or JSON merge patch), and POST (creation).24
|
||||
For strategic merge patches, the server-side logic must understand the structure of the resource. The k8s-openapi crate facilitates this by providing the underlying Go-compatible field names and types.38 To support "Server-Side Apply" (SSA), the server utilizes derived "optionable" types—structures where every field is an Option\<T\>, allowing the server to identify exactly which fields were specified in a partial update.41
|
||||
|
||||
### **Strategic Management of Custom Resource Definitions (CRDs)**
|
||||
|
||||
Full support for CRDs is a non-negotiable requirement for modern Kubernetes environments, as they allow for the extension of the API with domain-specific resources.43 In this Rust-based control plane, CRD support is implemented through a dynamic schema engine.
|
||||
When a user submits a CustomResourceDefinition object, the API server:
|
||||
|
||||
1. **Validates the OpenAPI v3 Schema:** The schema is parsed and stored in the replicated state machine.43
|
||||
2. **Registers Dynamic Routes:** The Axum router is updated at runtime to expose new RESTful paths corresponding to the CRD's group, version, and kind.43
|
||||
3. **Enforces Schema Validation:** Subsequent requests to manage custom objects are validated against the stored schema. Rust's serde\_json is used to handle the untyped data, while schemars facilitates the bridge between Rust types and OpenAPI specifications.46
|
||||
|
||||
| Feature | Built-in Resources (Pods, etc.) | Custom Resource Definitions (CRDs) |
|
||||
| :---- | :---- | :---- |
|
||||
| **Type Safety** | Static (Compile-time) | Dynamic (Runtime-validated) |
|
||||
| **Implementation** | k8s-openapi generated structs | Strategic JSON merge over serde\_json |
|
||||
| **Persistence** | Strongly typed Raft entries | Dynamic RawExtension log entries |
|
||||
| **Versioning** | jujutsu-lib DAG commits | jujutsu-lib DAG commits |
|
||||
| **Validation** | Rust's type system \+ field pruning | OpenAPI v3 structural validation |
|
||||
|
||||
Source: 38
|
||||
The API server also performs "field pruning" for custom resources, automatically removing fields not defined in the CRD schema before the data is persisted to the Raft log, ensuring compatibility with standard Kubernetes behavior.43
|
||||
|
||||
## **Developing a Concurrency-Optimized Scheduler in Rust**
|
||||
|
||||
The Kubernetes scheduler is a high-concurrency engine that matches unscheduled Pods to Nodes based on resource availability, constraints, and affinity rules.51 In a Rust-based binary, the scheduler runs as a separate asynchronous task that watches the API server (via internal channels) for Pods with an empty nodeName.51
|
||||
|
||||
### **The Scheduling Framework: Filtering and Scoring**
|
||||
|
||||
The scheduler logic is organized according to the Kubernetes Scheduling Framework, which divides the process into a series of pluggable extension points.51
|
||||
**The Filtering Phase:** The scheduler iterates through all available nodes and applies "predicates" to eliminate those that are unsuitable.51 In this Rust implementation, filtering is highly parallelized using the Rayon or Tokio task-stealing pool, allowing multiple nodes to be evaluated simultaneously.8
|
||||
|
||||
* **PodFitsResources:** Checks if the node's allocatable resources (CPU, memory, storage) minus the currently scheduled Pods' requests are greater than or equal to the new Pod's requests.51
|
||||
* **NodeSelector/Affinity:** Matches the labels of the Pod against the labels of the Node.53
|
||||
* **Taints and Tolerations:** Ensures the Pod can "tolerate" any taints present on the node.53
|
||||
|
||||
**The Scoring Phase:** For the nodes that pass filtering, the scheduler applies "priorities" to rank them.51 The scoring function $S(n, p)$ for node $n$ and pod $p$ is modeled as:
|
||||
|
||||
$$S(n, p) = \sum_{i=1}^{k} \omega_i \cdot \text{Score}_i(n, p)$$
|
||||
where $\omega_i$ is the weight assigned to the $i$-th scoring plugin.51 Common scoring strategies include MostAllocated (to maximize bin-packing) or BalancedResourceAllocation (to prevent overloading any single resource type like CPU while memory is idle).53
|
||||
|
||||
### **Asynchronous Binding and Pipeline Parallelism**
|
||||
|
||||
To maintain high throughput, the scheduler decouples the placement decision from the actual binding update.56 Once a node is selected, the scheduler records the decision in an internal cache and sends an asynchronous request to the API server to perform the Binding operation.51 This allows the scheduler to proceed to the next Pod in the queue without waiting for the API server's storage round-trip, a technique known as pipeline parallelism.56
|
||||
A Rust-specific optimization involves the use of "snapshots" for the cluster state. The scheduler maintains a local, read-optimized cache of nodes and pods that is updated via an internal watch stream from the API server.47 This prevents the scheduler from needing to lock the entire cluster state for every decision, significantly improving performance in large-scale scheduling bursts.56
|
||||
|
||||
## **Optimized Communication: Protobuf Serialization and Streaming Watches**
|
||||
|
||||
Network efficiency is paramount in self-hosted and edge clusters where bandwidth may be limited and the number of active WATCH connections can be high.24 The proposed control plane utilizes Protobuf serialization for all internal and Kubelet-facing traffic and implements streaming list responses to minimize memory spikes.24
|
||||
|
||||
### **The Protobuf Advantage in Rust**
|
||||
|
||||
While standard Kubernetes defaults to JSON, it supports a high-performance binary encoding based on Protocol Buffers (Protobuf).24 For a Rust control plane, this is implemented using the Prost crate, which generates highly efficient serialization logic.29
|
||||
Protobuf offers a 5-10x speed improvement over JSON and reduces payload sizes by up to 80%.66 This is achieved by:
|
||||
|
||||
1. **Varint Encoding:** Small integers are stored in fewer bytes using variable-length encoding, which is essential for resourceVersion and count fields.69
|
||||
2. **Tag-Value Mapping:** Field names are replaced with numeric tags, eliminating the redundant transmission of keys in every message.63
|
||||
3. **Zero-Copy Deserialization:** Rust's ability to borrow data from the input buffer (&\[u8\]) during deserialization allows for a nearly zero-copy path for complex objects like PodSpecs.66
|
||||
|
||||
| Serialization Format | Payload Size (1MB Struct) | Serialization Speed | Human Readability |
|
||||
| :---- | :---- | :---- | :---- |
|
||||
| **JSON** | \~1.0 MB | 100% (Baseline) | High |
|
||||
| **YAML** | \~1.2 MB | 150% (Slower) | Very High |
|
||||
| **Protobuf** | \~200-400 KB | 10-20% (Much Faster) | Low |
|
||||
|
||||
Source: 63
|
||||
The Kubernetes Protobuf implementation uses a specific envelope format. Every response starts with the 4-byte magic number 0x6b 0x38 0x73 0x00 ("k8s\x00"), followed by an Unknown message that contains the type metadata and the raw binary data.24 This wrapper allows the API server to serve multiple content types simultaneously while informing the client about the encoding method.24
|
||||
|
||||
### **Streaming List Responses and Watch Optimization**
|
||||
|
||||
A major challenge for Kubernetes API servers is the handling of large LIST requests, which can lead to Out-of-Memory (OOM) failures if the entire collection is serialized into a single buffer before transmission.65
|
||||
The proposed architecture implements the streaming list encoder introduced in Kubernetes v1.33.65 Instead of encoding the entire Items array of a PodList into one contiguous memory block, the server encodes and transmits each Pod individually.65 This allows the underlying HTTP/2 or WebSocket connection to transmit data as soon as it is available, and the memory for individual items can be freed progressively as they are sent over the wire.65 This streaming approach reduces memory usage by up to 20x during large list operations.65
|
||||
For WATCH operations, the API server maintains a per-client buffer of events. By using the Jujutsu DAG, the server can efficiently compute the minimal set of "patches" required to bring a client from an old resourceVersion to the current state, significantly reducing the bandwidth required for watch resyncs.27
|
||||
|
||||
## **Node Connectivity and WebSocket Tunneling**
|
||||
|
||||
Edge clusters often involve nodes located in diverse network environments, where firewalls or NATs prevent the control plane from establishing direct connections to the Kubelet API.7 To address this, the architecture implements a WebSocket-based tunneling mechanism, similar to the one used in K3s.6
|
||||
|
||||
### **Bidirectional Tunneling via Agent Initiation**
|
||||
|
||||
Upon startup, the worker node (agent) initiates an outbound connection to the control plane binary on port 6443.7 This connection is upgraded from standard HTTPS to a WebSocket tunnel.6 Once established, the connection serves as a bidirectional conduit for all control-plane-to-node traffic.7
|
||||
When an administrator runs kubectl exec, the request flow is as follows:
|
||||
|
||||
1. **API Server:** Receives the request and identifies the target node.78
|
||||
2. **Egress Selector:** Routes the request through the active WebSocket tunnel for that node.75
|
||||
3. **Kubelet:** Receives the multiplexed stream through its local tunnel proxy and interacts with the container runtime (CRI).80
|
||||
4. **Data Stream:** The output from the container is streamed back through the same tunnel to the API server and finally to the client.17
|
||||
|
||||
This approach ensures that the control plane can maintain complete oversight and operational control of worker nodes without requiring complex VPNs or open inbound ports on the edge.2
|
||||
|
||||
## **Kubelet Integration and Node Lifecycle Management**
|
||||
|
||||
The control plane binary must implement the server-side counterparts to the Kubelet's registration and heartbeat mechanisms to maintain an accurate view of the cluster's physical topology.25
|
||||
|
||||
### **Secure Registration and Leases**
|
||||
|
||||
When a Kubelet first starts, it identifies its host environment—using local hostname, overridden flags, or cloud metadata—and sends a registration request to the API server.34 To ensure security in self-hosted environments, registration is governed by a shared "node cluster secret" and a randomly generated, node-specific password.7 The API server stores these passwords as Kubernetes secrets in the kube-system namespace to protect the integrity of node IDs during subsequent connections.7
|
||||
Node health is tracked via the Lease API in the kube-node-lease namespace.25 Kubelets send lightweight lease updates every 10 seconds (the default update interval).25 The control plane's "node controller" monitors these leases. If a node fails to renew its lease within the node-monitor-grace-period (defaulting to 40-50 seconds), the controller updates the node's Ready condition to Unknown and applies taints to prevent the scheduler from assigning new workloads to the failing node.25
|
||||
|
||||
### **Resource Governance and QoS**
|
||||
|
||||
To prevent noisy-neighbor problems and ensure application stability, the control plane enforces resource governance using standard Kubernetes primitives.84
|
||||
|
||||
* **Resource Requests:** Provide a minimum guaranteed reservation of CPU and memory for a Pod.58
|
||||
* **Resource Limits:** Establish a hard cap enforced by the container runtime via cgroups.58
|
||||
* **QoS Classes:** Pods are automatically categorized into Guaranteed, Burstable, or BestEffort tiers based on their request/limit ratio, which determines their eviction priority during node pressure events.58
|
||||
|
||||
The control plane includes a built-in monitoring loop that correlates actual usage data (provided by the Kubelet via cAdvisor metrics) with the configured requests and limits.58 This enables automated "right-sizing" recommendations, allowing administrators to optimize their hardware utilization for self-hosted workloads.85
|
||||
|
||||
## **System Organization and Implementation Strategy**
|
||||
|
||||
The successful development of this unified control plane requires a highly structured project organization that leverages the best of the Rust ecosystem.
|
||||
|
||||
### **Cargo Workspace and Modular Design**
|
||||
|
||||
The project is architected as a Cargo workspace, dividing the code into several specialized crates to improve maintainability and compilation speed.9
|
||||
|
||||
* **core:** Contains the fundamental types and traits for the Kubernetes API and resource management.45
|
||||
* **apiserver:** Implements the REST handlers and the routing pipeline using Axum and Tower.8
|
||||
* **consensus:** Wraps OpenRaft and provides the RaftLogStorage implementation for RocksDB.16
|
||||
* **scheduler:** Implements the Filtering and Scoring framework with parallelized node evaluation.51
|
||||
* **versioning:** Integrates jj-lib to provide DAG-based resource versioning and conflict representation.26
|
||||
|
||||
### **Multi-Architecture Build and Deployment**
|
||||
|
||||
To support self-hosted environments ranging from high-performance x86\_64 servers to aarch64 (ARM) edge devices, the build system is centered around cross-compilation.4 Using tools like cross or Goreleaser with Rust hooks, the project produces statically linked, multi-arch binaries that require zero external dependencies on the target host.5
|
||||
|
||||
| Component | Choice | Reason |
|
||||
| :---- | :---- | :---- |
|
||||
| **Language** | Rust (Edition 2021/2024) | Memory safety, zero-cost abstractions, async performance |
|
||||
| **Consensus Engine** | OpenRaft | Event-driven, optimized for modern async Rust |
|
||||
| **Storage Engine** | RocksDB | LSM-Tree performance, efficient Raft log mapping |
|
||||
| **API Backend** | Axum | Modular middleware, compatible with Tower ecosystem |
|
||||
| **Versioning Library** | jj-lib | Advanced DAG-based state management |
|
||||
| **Serialization** | Prost (Protobuf) | Minimal payload size, high serialization speed |
|
||||
|
||||
Source: 8
|
||||
|
||||
### **Bootstrapping and Automated Management**
|
||||
|
||||
Bootstrapping a new cluster involves running the binary with a \--cluster-init flag, which triggers the generation of self-signed Certificate Authority (CA) certificates valid for 10 years.7 The system also includes an automated manifest manager: administrators can drop standard Kubernetes YAML files into a designated local directory (e.g., /var/lib/k8s/manifests), and the control plane will automatically detect, parse, and apply these resources to the cluster state, facilitating an out-of-the-box GitOps-lite experience.6
|
||||
|
||||
## **Conclusions and Future System Outlook**
|
||||
|
||||
The design and implementation of a Rust-based, single-binary Kubernetes control plane represent a significant evolution in the development of lightweight container orchestrators. By combining the safety and performance of the Rust language with innovative versioning and consensus technologies, this architecture addresses the fundamental trade-offs between API conformance and resource efficiency.
|
||||
The integration of OpenRaft and RocksDB provides a robust foundation for distributed state, achieving throughput and latency characteristics that surpass traditional etcd-backed systems in high-concurrency scenarios. More importantly, the adoption of a DAG-based resource versioning model through jj-lib introduces a paradigm shift in how cluster state is managed, allowing for native conflict representation and sophisticated operational history.
|
||||
The use of bidirectional WebSocket tunnels and Protobuf serialization effectively optimizes the control plane for the challenging network topologies and bandwidth constraints characteristic of edge computing. Furthermore, the ability to serve large result sets through streaming responses ensures that the control plane remains stable even under heavy data pressure, avoiding the OOM failures that plague unoptimized API servers.
|
||||
As the industry moves toward more decentralized and heterogeneous computing environments, the need for a "Marie Kondo" approach to orchestration—eliminating operational bloat while preserving essential functionality—becomes paramount. This unified control plane framework provides the blueprint for a new generation of Kubernetes distributions that are as joyful to operate as they are resilient to failure. Future research should explore the expansion of the scheduler's plugin framework to support increasingly complex inter-workload anti-affinity and hardware-specific locality rules, further bridging the gap between lightweight edge distributions and the sophisticated demands of modern AI-driven workloads.
|
||||
|
||||
#### **Works cited**
|
||||
|
||||
1. K3s \- Lightweight Kubernetes | K3s, accessed on January 28, 2026, [https://docs.k3s.io/](https://docs.k3s.io/)
|
||||
2. K3s vs K8s: Differences, Use Cases & Alternatives | by Spacelift \- Medium, accessed on January 28, 2026, [https://medium.com/spacelift/k3s-vs-k8s-differences-use-cases-alternatives-ffcc134300dc](https://medium.com/spacelift/k3s-vs-k8s-differences-use-cases-alternatives-ffcc134300dc)
|
||||
3. K3s Explained: What is it and How Is It Different From Stock Kubernetes (K8s)?, accessed on January 28, 2026, [https://traefik.io/glossary/k3s-explained](https://traefik.io/glossary/k3s-explained)
|
||||
4. K0s vs K3s vs K8s: Comparing Kubernetes Distributions \- Shipyard.build, accessed on January 28, 2026, [https://shipyard.build/blog/k0s-k3s-k8s/](https://shipyard.build/blog/k0s-k3s-k8s/)
|
||||
5. Understanding k0s: a lightweight Kubernetes distribution for the community | CNCF, accessed on January 28, 2026, [https://www.cncf.io/blog/2024/12/06/understanding-k0s-a-lightweight-kubernetes-distribution-for-the-community/](https://www.cncf.io/blog/2024/12/06/understanding-k0s-a-lightweight-kubernetes-distribution-for-the-community/)
|
||||
6. k3s-io/k3s: Lightweight Kubernetes \- GitHub, accessed on January 28, 2026, [https://github.com/k3s-io/k3s](https://github.com/k3s-io/k3s)
|
||||
7. Architecture \- K3s \- Lightweight Kubernetes, accessed on January 28, 2026, [https://docs.k3s.io/architecture](https://docs.k3s.io/architecture)
|
||||
8. Rust in Distributed Systems, 2025 Edition | by Disant Upadhyay \- Medium, accessed on January 28, 2026, [https://disant.medium.com/rust-in-distributed-systems-2025-edition-175d95f825d6](https://disant.medium.com/rust-in-distributed-systems-2025-edition-175d95f825d6)
|
||||
9. How I build a Rust backend service \- World Without Eng, accessed on January 28, 2026, [https://worldwithouteng.com/articles/how-i-build-a-rust-backend-service](https://worldwithouteng.com/articles/how-i-build-a-rust-backend-service)
|
||||
10. Coding a simple microservices with Rust | by Gene Kuo \- Medium, accessed on January 28, 2026, [https://genekuo.medium.com/coding-a-simple-microservices-with-rust-3fbde8e32adc](https://genekuo.medium.com/coding-a-simple-microservices-with-rust-3fbde8e32adc)
|
||||
11. Part 1 \- K3s Zero To Hero: K3s Kickoff \- Your Lightweight Kubernetes Adventure Begins, accessed on January 28, 2026, [https://blog.alphabravo.io/part1-k3s-kickoff-your-lightweight-kubernetes-adventure-begins/](https://blog.alphabravo.io/part1-k3s-kickoff-your-lightweight-kubernetes-adventure-begins/)
|
||||
12. Why it seems there are more distributed systems written in golang rather in rust? \- Reddit, accessed on January 28, 2026, [https://www.reddit.com/r/rust/comments/1l0rzin/why\_it\_seems\_there\_are\_more\_distributed\_systems/](https://www.reddit.com/r/rust/comments/1l0rzin/why_it_seems_there_are_more_distributed_systems/)
|
||||
13. Is Meilisearch a viable upgrade alternative to OpenSearch? \- Open edX discussions, accessed on January 28, 2026, [https://discuss.openedx.org/t/is-meilisearch-a-viable-upgrade-alternative-to-opensearch/12400](https://discuss.openedx.org/t/is-meilisearch-a-viable-upgrade-alternative-to-opensearch/12400)
|
||||
14. What is K3s? Lightweight Kubernetes for Edge \- Devtron, accessed on January 28, 2026, [https://devtron.ai/what-is-k3s](https://devtron.ai/what-is-k3s)
|
||||
15. Architecture \- Documentation \- K0s docs, accessed on January 28, 2026, [https://docs.k0sproject.io/v0.9.0/architecture/](https://docs.k0sproject.io/v0.9.0/architecture/)
|
||||
16. databendlabs/openraft: rust raft with improvements \- GitHub, accessed on January 28, 2026, [https://github.com/databendlabs/openraft](https://github.com/databendlabs/openraft)
|
||||
17. Everything You Need to Know about K3s: Lightweight Kubernetes for IoT, Edge Computing, Embedded Systems & More \- Mattermost, accessed on January 28, 2026, [https://mattermost.com/blog/intro-to-k3s-lightweight-kubernetes/](https://mattermost.com/blog/intro-to-k3s-lightweight-kubernetes/)
|
||||
18. openraft::docs::getting\_started \- Rust, accessed on January 28, 2026, [https://docs.rs/openraft/latest/openraft/docs/getting\_started/index.html](https://docs.rs/openraft/latest/openraft/docs/getting_started/index.html)
|
||||
19. hiqlite \- crates.io: Rust Package Registry, accessed on January 28, 2026, [https://crates.io/crates/hiqlite](https://crates.io/crates/hiqlite)
|
||||
20. Raftoral — Rust utility // Lib.rs, accessed on January 28, 2026, [https://lib.rs/crates/raftoral](https://lib.rs/crates/raftoral)
|
||||
21. openraft\_rocksstore \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/openraft-rocksstore](https://docs.rs/openraft-rocksstore)
|
||||
22. Octopii \- Turn any Rust struct into a replicated, fault tolerant cluster \- Reddit, accessed on January 28, 2026, [https://www.reddit.com/r/rust/comments/1q5i0tv/octopii\_turn\_any\_rust\_struct\_into\_a\_replicated/](https://www.reddit.com/r/rust/comments/1q5i0tv/octopii_turn_any_rust_struct_into_a_replicated/)
|
||||
23. octopii-rs/octopii: Distributed Systems Kernel written in rust \- GitHub, accessed on January 28, 2026, [https://github.com/octopii-rs/octopii](https://github.com/octopii-rs/octopii)
|
||||
24. Kubernetes API Concepts, accessed on January 28, 2026, [https://kubernetes.io/docs/reference/using-api/api-concepts/](https://kubernetes.io/docs/reference/using-api/api-concepts/)
|
||||
25. Nodes \- Kubernetes, accessed on January 28, 2026, [https://k8s-docs.netlify.app/en/docs/concepts/architecture/nodes/](https://k8s-docs.netlify.app/en/docs/concepts/architecture/nodes/)
|
||||
26. Architecture \- Jujutsu docs, accessed on January 28, 2026, [https://docs.jj-vcs.dev/latest/technical/architecture/](https://docs.jj-vcs.dev/latest/technical/architecture/)
|
||||
27. jj-cli — Rust utility // Lib.rs, accessed on January 28, 2026, [https://lib.rs/crates/jj-cli](https://lib.rs/crates/jj-cli)
|
||||
28. Tech Notes: The Jujutsu version control system \- neugierig.org, accessed on January 28, 2026, [https://neugierig.org/software/blog/2024/12/jujutsu.html](https://neugierig.org/software/blog/2024/12/jujutsu.html)
|
||||
29. jj\_lib \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/jj-lib/latest/jj\_lib/](https://docs.rs/jj-lib/latest/jj_lib/)
|
||||
30. jj\_lib \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/jj-lib/latest/jj\_lib/index.html](https://docs.rs/jj-lib/latest/jj_lib/index.html)
|
||||
31. Jujutsu: A Next Generation Replacement for Git \- Vincent Schmalbach, accessed on January 28, 2026, [https://www.vincentschmalbach.com/jujutsu-a-next-generation-replacement-for-git/](https://www.vincentschmalbach.com/jujutsu-a-next-generation-replacement-for-git/)
|
||||
32. jujutsu-lib \- crates.io: Rust Package Registry, accessed on January 28, 2026, [https://crates.io/crates/jujutsu-lib](https://crates.io/crates/jujutsu-lib)
|
||||
33. jj/CHANGELOG.md at main · jj-vcs/jj \- GitHub, accessed on January 28, 2026, [https://github.com/jj-vcs/jj/blob/main/CHANGELOG.md](https://github.com/jj-vcs/jj/blob/main/CHANGELOG.md)
|
||||
34. What is Kubelet? The K8s Node Agent Explained \- Plural.sh, accessed on January 28, 2026, [https://www.plural.sh/blog/what-is-kubelet-explained/](https://www.plural.sh/blog/what-is-kubelet-explained/)
|
||||
35. Build a Simple Kubernetes Operator in Rust (Rust \+ K8s \= ) \- YouTube, accessed on January 28, 2026, [https://www.youtube.com/watch?v=4wYK8REe9Ro](https://www.youtube.com/watch?v=4wYK8REe9Ro)
|
||||
36. Arnavion/k8s-openapi: Rust definitions of the resource types in the Kubernetes client API, accessed on January 28, 2026, [https://github.com/Arnavion/k8s-openapi](https://github.com/Arnavion/k8s-openapi)
|
||||
37. Using Kubernetes with Rust \- Shuttle.dev, accessed on January 28, 2026, [https://www.shuttle.dev/blog/2024/10/22/using-kubernetes-with-rust](https://www.shuttle.dev/blog/2024/10/22/using-kubernetes-with-rust)
|
||||
38. k8s\_openapi \- Rust, accessed on January 28, 2026, [https://arnavion.github.io/k8s-openapi/v0.18.x/k8s\_openapi/](https://arnavion.github.io/k8s-openapi/v0.18.x/k8s_openapi/)
|
||||
39. Working with OpenAPI using Rust \- Shuttle.dev, accessed on January 28, 2026, [https://www.shuttle.dev/blog/2024/04/04/using-openapi-rust](https://www.shuttle.dev/blog/2024/04/04/using-openapi-rust)
|
||||
40. Creating a REST API in Rust \- Arsh Sharma, accessed on January 28, 2026, [https://arshsharma.com/posts/rust-api/](https://arshsharma.com/posts/rust-api/)
|
||||
41. optionable \- crates.io: Rust Package Registry, accessed on January 28, 2026, [https://crates.io/crates/optionable/0.4.0](https://crates.io/crates/optionable/0.4.0)
|
||||
42. optionable: recursive partial structs/enums \+ kubernetes server-side apply : r/rust \- Reddit, accessed on January 28, 2026, [https://www.reddit.com/r/rust/comments/1pea67v/optionable\_recursive\_partial\_structsenums/](https://www.reddit.com/r/rust/comments/1pea67v/optionable_recursive_partial_structsenums/)
|
||||
43. Extend the Kubernetes API with CustomResourceDefinitions, accessed on January 28, 2026, [https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
|
||||
44. Extend your Kubernetes APIs with CRDs \- DEV Community, accessed on January 28, 2026, [https://dev.to/litmus-chaos/extend-your-kubernetes-apis-with-crds-4iml](https://dev.to/litmus-chaos/extend-your-kubernetes-apis-with-crds-4iml)
|
||||
45. kube \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/kube/latest/kube/](https://docs.rs/kube/latest/kube/)
|
||||
46. Kubernetes Management with Rust \- A Dive into Generic Client-Go, Controller Abstractions, and CRD Macros with Kube.rs \- Kubesimplify, accessed on January 28, 2026, [https://blog.kubesimplify.com/kubernetes-management-with-rust-a-dive-into-generic-client-go-controller-abstractions-and-crd-macros-with-kubers](https://blog.kubesimplify.com/kubernetes-management-with-rust-a-dive-into-generic-client-go-controller-abstractions-and-crd-macros-with-kubers)
|
||||
47. Architecture \- Kube.rs, accessed on January 28, 2026, [https://kube.rs/architecture/](https://kube.rs/architecture/)
|
||||
48. Writing a Kubernetes Operator \- MetalBear, accessed on January 28, 2026, [https://metalbear.com/blog/writing-a-kubernetes-operator/](https://metalbear.com/blog/writing-a-kubernetes-operator/)
|
||||
49. kube-rs/kube: Rust Kubernetes client and controller runtime \- GitHub, accessed on January 28, 2026, [https://github.com/kube-rs/kube](https://github.com/kube-rs/kube)
|
||||
50. Write Your Next Kubernetes Controller in Rust \- kty, accessed on January 28, 2026, [https://kty.dev/blog/2024-09-30-use-kube-rs](https://kty.dev/blog/2024-09-30-use-kube-rs)
|
||||
51. Kubernetes Scheduler, accessed on January 28, 2026, [https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/)
|
||||
52. Kubernetes Pod Scheduling: Tutorial and Best Practices \- CloudBolt Software, accessed on January 28, 2026, [https://www.cloudbolt.io/kubernetes-pod-scheduling/](https://www.cloudbolt.io/kubernetes-pod-scheduling/)
|
||||
53. Custom Kube-Scheduler: Why And How to Set it Up in Kubernetes \- Cast AI, accessed on January 28, 2026, [https://cast.ai/blog/custom-kube-scheduler-why-and-how-to-set-it-up-in-kubernetes/](https://cast.ai/blog/custom-kube-scheduler-why-and-how-to-set-it-up-in-kubernetes/)
|
||||
54. A Rust controller for Kubernetes \- A Java geek, accessed on January 28, 2026, [https://blog.frankel.ch/start-rust/6/](https://blog.frankel.ch/start-rust/6/)
|
||||
55. acrlabs/kube-scheduler-rs-reference: A reference implementation of a Kubernetes scheduler written in Rust \- GitHub, accessed on January 28, 2026, [https://github.com/acrlabs/kube-scheduler-rs-reference](https://github.com/acrlabs/kube-scheduler-rs-reference)
|
||||
56. Inside kube-scheduler: The Plugin Framework That Powers Kubernetes Scheduling, accessed on January 28, 2026, [https://substack.com/home/post/p-180294019](https://substack.com/home/post/p-180294019)
|
||||
57. Mastering Large Project Organization in Rust | by Leapcell, accessed on January 28, 2026, [https://leapcell.medium.com/mastering-large-project-organization-in-rust-a21d62fb1e8e](https://leapcell.medium.com/mastering-large-project-organization-in-rust-a21d62fb1e8e)
|
||||
58. Kubernetes Optimization: Tutorial and Best Practices \- CloudBolt Software, accessed on January 28, 2026, [https://www.cloudbolt.io/kubernetes-cost-optimization/kubernetes-optimization/](https://www.cloudbolt.io/kubernetes-cost-optimization/kubernetes-optimization/)
|
||||
59. Building a Custom Kubernetes Scheduler Plugin: Scheduling Based on Pod-Specific Node Affinity | by Manjula Piyumal | Stackademic, accessed on January 28, 2026, [https://blog.stackademic.com/building-a-custom-kubernetes-scheduler-plugin-scheduling-based-on-pod-specific-node-affinity-7f66b6c607f9](https://blog.stackademic.com/building-a-custom-kubernetes-scheduler-plugin-scheduling-based-on-pod-specific-node-affinity-7f66b6c607f9)
|
||||
60. Nodes \- Kubernetes, accessed on January 28, 2026, [https://kubernetes.io/docs/concepts/architecture/nodes/](https://kubernetes.io/docs/concepts/architecture/nodes/)
|
||||
61. Optimizing Kubernetes Clusters for Cost & Performance: Part 1 \- Resource Requests, accessed on January 28, 2026, [https://kodekloud.com/blog/optimizing-clusters-for-cost-performance-part-1-resource-requests/](https://kodekloud.com/blog/optimizing-clusters-for-cost-performance-part-1-resource-requests/)
|
||||
62. How we replaced the default K8s scheduler to optimize our Continuous Integration builds, accessed on January 28, 2026, [https://codefresh.io/blog/custom-k8s-scheduler-continuous-integration/](https://codefresh.io/blog/custom-k8s-scheduler-continuous-integration/)
|
||||
63. Protobuf vs JSON: Why More Engineers Are Switching to Protobuf | by Divyam Sharma | Medium, accessed on January 28, 2026, [https://medium.com/@divyamsharma822/protobuf-vs-json-why-more-engineers-are-switching-to-protobuf-e140d4640d8d](https://medium.com/@divyamsharma822/protobuf-vs-json-why-more-engineers-are-switching-to-protobuf-e140d4640d8d)
|
||||
64. Protobuf vs JSON: Performance, Efficiency & API Speed \- Gravitee, accessed on January 28, 2026, [https://www.gravitee.io/blog/protobuf-vs-json](https://www.gravitee.io/blog/protobuf-vs-json)
|
||||
65. Kubernetes v1.33: Streaming List responses, accessed on January 28, 2026, [https://kubernetes.io/blog/2025/05/09/kubernetes-v1-33-streaming-list-responses/](https://kubernetes.io/blog/2025/05/09/kubernetes-v1-33-streaming-list-responses/)
|
||||
66. Fivefold slower compared to Go? Optimizing Rust's protobuf decoding performance | CNCF, accessed on January 28, 2026, [https://www.cncf.io/blog/2024/05/09/fivefold-slower-compared-to-go-optimizing-rusts-protobuf-decoding-performance/](https://www.cncf.io/blog/2024/05/09/fivefold-slower-compared-to-go-optimizing-rusts-protobuf-decoding-performance/)
|
||||
67. kube-rs/k8s-pb: Kubernetes structs from protos and openapi schemas \- GitHub, accessed on January 28, 2026, [https://github.com/kube-rs/k8s-pb](https://github.com/kube-rs/k8s-pb)
|
||||
68. JSON vs. Protocol Buffers in Go: Which Should You Use for Network Communication?, accessed on January 28, 2026, [https://dev.to/jones\_charles\_ad50858dbc0/json-vs-protocol-buffers-in-go-which-should-you-use-for-network-communication-4gio](https://dev.to/jones_charles_ad50858dbc0/json-vs-protocol-buffers-in-go-which-should-you-use-for-network-communication-4gio)
|
||||
69. Encoding | Protocol Buffers Documentation, accessed on January 28, 2026, [https://protobuf.dev/programming-guides/encoding/](https://protobuf.dev/programming-guides/encoding/)
|
||||
70. How Protobuf Works—The Art of Data Encoding \- VictoriaMetrics, accessed on January 28, 2026, [https://victoriametrics.com/blog/go-protobuf/](https://victoriametrics.com/blog/go-protobuf/)
|
||||
71. Protobuf vs. JSON: Choosing the Right Data Format for API Development, accessed on January 28, 2026, [https://www.abstractapi.com/guides/api-glossary/protobuf-vs-json](https://www.abstractapi.com/guides/api-glossary/protobuf-vs-json)
|
||||
72. Beating JSON performance with Protobuf \- Auth0, accessed on January 28, 2026, [https://auth0.com/blog/beating-json-performance-with-protobuf/](https://auth0.com/blog/beating-json-performance-with-protobuf/)
|
||||
73. Protobuf streaming (lazy serialization) API \- Stack Overflow, accessed on January 28, 2026, [https://stackoverflow.com/questions/13242349/protobuf-streaming-lazy-serialization-api](https://stackoverflow.com/questions/13242349/protobuf-streaming-lazy-serialization-api)
|
||||
74. Advanced Options / Configuration \- K3s \- Lightweight Kubernetes, accessed on January 28, 2026, [https://docs.k3s.io/advanced](https://docs.k3s.io/advanced)
|
||||
75. Basic Network Options \- K3s \- Lightweight Kubernetes, accessed on January 28, 2026, [https://docs.k3s.io/networking/basic-network-options](https://docs.k3s.io/networking/basic-network-options)
|
||||
76. A Comprehensive Guide to K3s Architecture and Agent Node Registration \- Medium, accessed on January 28, 2026, [https://medium.com/@thakuravnish2313/a-comprehensive-guide-to-k3s-architecture-and-agent-node-registration-76b3b684b5b2](https://medium.com/@thakuravnish2313/a-comprehensive-guide-to-k3s-architecture-and-agent-node-registration-76b3b684b5b2)
|
||||
77. K3s server \- K3s \- Lightweight Kubernetes, accessed on January 28, 2026, [https://docs.k3s.io/cli/server](https://docs.k3s.io/cli/server)
|
||||
78. Container Runtime Interface streaming explained \- Kubernetes, accessed on January 28, 2026, [https://kubernetes.io/blog/2024/05/01/cri-streaming-explained/](https://kubernetes.io/blog/2024/05/01/cri-streaming-explained/)
|
||||
79. How to Deploy Rust Applications to Kubernetes \- Devtron, accessed on January 28, 2026, [https://devtron.ai/blog/how-to-deploy-rust-applications-to-kubernetes/](https://devtron.ai/blog/how-to-deploy-rust-applications-to-kubernetes/)
|
||||
80. Interaction Process Between Kubelet, CRI, and CNI in Kubernetes | by Rifewang \- Medium, accessed on January 28, 2026, [https://medium.com/@rifewang/interaction-process-between-kubelet-cri-and-cni-in-kubernetes-034c64c32149](https://medium.com/@rifewang/interaction-process-between-kubelet-cri-and-cni-in-kubernetes-034c64c32149)
|
||||
81. What is K3s? A Quick Installation Guide for K3s \- Devtron, accessed on January 28, 2026, [https://devtron.ai/blog/what-is-k3s-a-quick-installation-guide-for-k3s/](https://devtron.ai/blog/what-is-k3s-a-quick-installation-guide-for-k3s/)
|
||||
82. kubelet | Kubernetes, accessed on January 28, 2026, [https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
|
||||
83. A Brief Overview of the Kubernetes Node Lifecycle | by Rifewang \- Medium, accessed on January 28, 2026, [https://medium.com/@rifewang/a-brief-overview-of-the-kubernetes-node-lifecycle-bde9ce547852](https://medium.com/@rifewang/a-brief-overview-of-the-kubernetes-node-lifecycle-bde9ce547852)
|
||||
84. How to Optimize Container Resources in Kubernetes? \- Zesty, accessed on January 28, 2026, [https://zesty.co/finops-academy/kubernetes/how-to-optimize-container-resources/](https://zesty.co/finops-academy/kubernetes/how-to-optimize-container-resources/)
|
||||
85. Kubernetes Resource Optimization: 5 Proven Strategies for 2025 \- ScaleOps, accessed on January 28, 2026, [https://scaleops.com/blog/5-kubernetes-resource-optimization-strategies-that-work-in-production/](https://scaleops.com/blog/5-kubernetes-resource-optimization-strategies-that-work-in-production/)
|
||||
86. robusta-dev/krr: Prometheus-based Kubernetes Resource Recommendations \- GitHub, accessed on January 28, 2026, [https://github.com/robusta-dev/krr](https://github.com/robusta-dev/krr)
|
||||
87. Self-generation of \`.rules\`/\`AGENT.md\` · zed-industries zed · Discussion \#35534 · GitHub, accessed on January 28, 2026, [https://github.com/zed-industries/zed/discussions/35534](https://github.com/zed-industries/zed/discussions/35534)
|
||||
88. Documentation for the rust-axum Generator, accessed on January 28, 2026, [https://openapi-generator.tech/docs/generators/rust-axum/](https://openapi-generator.tech/docs/generators/rust-axum/)
|
||||
89. How To Make Rust Multi-Arch Release Easy \- Qovery, accessed on January 28, 2026, [https://www.qovery.com/blog/how-to-make-rust-multi-arch-release-easy](https://www.qovery.com/blog/how-to-make-rust-multi-arch-release-easy)
|
||||
90. k8s\_openapi \- Rust \- Docs.rs, accessed on January 28, 2026, [https://docs.rs/k8s-openapi](https://docs.rs/k8s-openapi)