feat: migrate from raw SQL to SeaORM migrations

Replace raw SQL CREATE TABLE statements with proper SeaORM migration
system. This eliminates verbose SQL logs on startup and provides
proper migration tracking and rollback support.

Changes:
- Add sea-orm-migration dependency and migration crate
- Create initial migration (m20250101_000001) with all 8 tables
- Update storage::init() to only connect to database
- Run migrations automatically in main.rs on startup
- Remove unused detect_backend() function and imports

The migration system properly handles both SQLite and PostgreSQL
backends with appropriate type handling (e.g., BIGSERIAL vs INTEGER
for auto-increment columns).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Till Wegmueller 2025-12-02 21:42:58 +01:00
parent 2a865b2ba4
commit b6bf4ceee0
No known key found for this signature in database
7 changed files with 521 additions and 178 deletions

91
Cargo.lock generated
View file

@ -464,12 +464,14 @@ dependencies = [
"config", "config",
"josekit", "josekit",
"miette", "miette",
"migration",
"oauth2", "oauth2",
"openidconnect", "openidconnect",
"rand 0.8.5", "rand 0.8.5",
"regex", "regex",
"reqwest", "reqwest",
"sea-orm", "sea-orm",
"sea-orm-migration",
"seaography", "seaography",
"serde", "serde",
"serde_json", "serde_json",
@ -1492,6 +1494,12 @@ version = "0.32.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
[[package]]
name = "glob"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
[[package]] [[package]]
name = "governor" name = "governor"
version = "0.6.3" version = "0.6.3"
@ -2197,6 +2205,14 @@ dependencies = [
"syn 2.0.111", "syn 2.0.111",
] ]
[[package]]
name = "migration"
version = "0.1.0"
dependencies = [
"sea-orm",
"sea-orm-migration",
]
[[package]] [[package]]
name = "mime" name = "mime"
version = "0.3.17" version = "0.3.17"
@ -3420,6 +3436,25 @@ dependencies = [
"uuid", "uuid",
] ]
[[package]]
name = "sea-orm-cli"
version = "1.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c94492e2ab6c045b4cc38013809ce255d14c3d352c9f0d11e6b920e2adc948ad"
dependencies = [
"chrono",
"clap",
"dotenvy",
"glob",
"regex",
"sea-schema",
"sqlx",
"tokio",
"tracing",
"tracing-subscriber",
"url",
]
[[package]] [[package]]
name = "sea-orm-macros" name = "sea-orm-macros"
version = "1.1.19" version = "1.1.19"
@ -3435,6 +3470,22 @@ dependencies = [
"unicode-ident", "unicode-ident",
] ]
[[package]]
name = "sea-orm-migration"
version = "1.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7315c0cadb7e60fb17ee2bb282aa27d01911fc2a7e5836ec1d4ac37d19250bb4"
dependencies = [
"async-trait",
"clap",
"dotenvy",
"sea-orm",
"sea-orm-cli",
"sea-schema",
"tracing",
"tracing-subscriber",
]
[[package]] [[package]]
name = "sea-query" name = "sea-query"
version = "0.32.7" version = "0.32.7"
@ -3445,6 +3496,7 @@ dependencies = [
"inherent", "inherent",
"ordered-float 4.6.0", "ordered-float 4.6.0",
"rust_decimal", "rust_decimal",
"sea-query-derive",
"serde_json", "serde_json",
"uuid", "uuid",
] ]
@ -3463,6 +3515,45 @@ dependencies = [
"uuid", "uuid",
] ]
[[package]]
name = "sea-query-derive"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bae0cbad6ab996955664982739354128c58d16e126114fe88c2a493642502aab"
dependencies = [
"darling 0.20.11",
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.111",
"thiserror 2.0.17",
]
[[package]]
name = "sea-schema"
version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2239ff574c04858ca77485f112afea1a15e53135d3097d0c86509cef1def1338"
dependencies = [
"futures",
"sea-query",
"sea-query-binder",
"sea-schema-derive",
"sqlx",
]
[[package]]
name = "sea-schema-derive"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "debdc8729c37fdbf88472f97fd470393089f997a909e535ff67c544d18cfccf0"
dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
"syn 2.0.111",
]
[[package]] [[package]]
name = "seahash" name = "seahash"
version = "4.1.0" version = "4.1.0"

View file

@ -25,6 +25,8 @@ serde_with = "3"
# SeaORM for SQLite and PostgreSQL # SeaORM for SQLite and PostgreSQL
sea-orm = { version = "1", default-features = false, features = ["sqlx-sqlite", "sqlx-postgres", "runtime-tokio-rustls", "macros"] } sea-orm = { version = "1", default-features = false, features = ["sqlx-sqlite", "sqlx-postgres", "runtime-tokio-rustls", "macros"] }
sea-orm-migration = { version = "1", features = ["sqlx-sqlite", "sqlx-postgres", "runtime-tokio-rustls"] }
migration = { path = "migration" }
# JOSE / JWKS & JWT # JOSE / JWKS & JWT
josekit = "0.10" josekit = "0.10"

17
migration/Cargo.toml Normal file
View file

@ -0,0 +1,17 @@
[package]
name = "migration"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
name = "migration"
path = "src/lib.rs"
[dependencies]
sea-orm-migration = { version = "1", features = ["sqlx-sqlite", "sqlx-postgres", "runtime-tokio-rustls"] }
[dependencies.sea-orm]
version = "1"
features = ["sqlx-sqlite", "sqlx-postgres", "runtime-tokio-rustls", "macros"]
default-features = false

12
migration/src/lib.rs Normal file
View file

@ -0,0 +1,12 @@
pub use sea_orm_migration::prelude::*;
mod m20250101_000001_initial_schema;
/// Registry of all migrations for this crate.
///
/// `MigratorTrait` supplies the runner methods; the application invokes
/// `Migrator::up(&db, None)` at startup (see `main.rs`) to apply any
/// pending migrations in the order returned by `migrations()`.
pub struct Migrator;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    /// Returns every migration, oldest first. New migrations must be
    /// appended here (after adding their `mod` declaration above) or they
    /// will never run.
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20250101_000001_initial_schema::Migration)]
    }
}

View file

@ -0,0 +1,393 @@
use sea_orm_migration::{prelude::*, schema::*};
/// Initial schema migration (replaces the former raw-SQL bootstrap).
///
/// `DeriveMigrationName` derives the migration's recorded name from this
/// file's module name (`m20250101_000001_initial_schema`), so renaming the
/// file would change the identity tracked in the migrations table.
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Creates the full initial schema: eight tables plus three indexes.
    ///
    /// Conventions carried over from the raw-SQL schema this replaces:
    /// timestamps are Unix-epoch values in BIGINT columns, and boolean-like
    /// flags (`consumed`, `revoked`, `email_verified`, `enabled`, `success`)
    /// are BIGINT 0/1 rather than a native boolean type.
    ///
    /// Works on both SQLite and PostgreSQL; the only backend-specific part
    /// is the `job_executions` auto-increment primary key (see below).
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Enable foreign keys for SQLite.
        // NOTE(review): PRAGMA foreign_keys is per-connection in SQLite, so
        // this only affects the connection running the migration — runtime
        // connections presumably need it set as well. Confirm with callers.
        if manager.get_database_backend() == sea_orm::DatabaseBackend::Sqlite {
            manager
                .get_connection()
                .execute_unprepared("PRAGMA foreign_keys = ON")
                .await?;
        }

        // clients: registered OAuth clients, keyed by client_id.
        manager
            .create_table(
                Table::create()
                    .table(Clients::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Clients::ClientId)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(string(Clients::ClientSecret))
                    .col(string_null(Clients::ClientName))
                    // Stored as a string; serialization format (e.g. JSON vs
                    // delimited) is decided by the storage layer, not here.
                    .col(string(Clients::RedirectUris))
                    .col(big_integer(Clients::CreatedAt))
                    .to_owned(),
            )
            .await?;

        // properties: generic key/value store with a composite (owner, key)
        // primary key.
        manager
            .create_table(
                Table::create()
                    .table(Properties::Table)
                    .if_not_exists()
                    .col(string(Properties::Owner))
                    .col(string(Properties::Key))
                    .col(string(Properties::Value))
                    .col(big_integer(Properties::UpdatedAt))
                    .primary_key(Index::create().col(Properties::Owner).col(Properties::Key))
                    .to_owned(),
            )
            .await?;

        // auth_codes: OAuth authorization codes (PKCE fields included),
        // keyed by the code value itself.
        manager
            .create_table(
                Table::create()
                    .table(AuthCodes::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(AuthCodes::Code)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(string(AuthCodes::ClientId))
                    .col(string(AuthCodes::RedirectUri))
                    .col(string(AuthCodes::Scope))
                    .col(string(AuthCodes::Subject))
                    .col(string_null(AuthCodes::Nonce))
                    .col(string(AuthCodes::CodeChallenge))
                    .col(string(AuthCodes::CodeChallengeMethod))
                    .col(big_integer(AuthCodes::CreatedAt))
                    .col(big_integer(AuthCodes::ExpiresAt))
                    // 0/1 flag: set once the code has been exchanged.
                    .col(
                        ColumnDef::new(AuthCodes::Consumed)
                            .big_integer()
                            .not_null()
                            .default(0),
                    )
                    .col(big_integer_null(AuthCodes::AuthTime))
                    .to_owned(),
            )
            .await?;

        // access_tokens: issued access tokens, keyed by token value.
        manager
            .create_table(
                Table::create()
                    .table(AccessTokens::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(AccessTokens::Token)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(string(AccessTokens::ClientId))
                    .col(string(AccessTokens::Subject))
                    .col(string(AccessTokens::Scope))
                    .col(big_integer(AccessTokens::CreatedAt))
                    .col(big_integer(AccessTokens::ExpiresAt))
                    // 0/1 flag: set when the token is revoked.
                    .col(
                        ColumnDef::new(AccessTokens::Revoked)
                            .big_integer()
                            .not_null()
                            .default(0),
                    )
                    .to_owned(),
            )
            .await?;

        // users: local user accounts, keyed by subject; usernames unique.
        manager
            .create_table(
                Table::create()
                    .table(Users::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Users::Subject)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Users::Username)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(string(Users::PasswordHash))
                    .col(string_null(Users::Email))
                    // 0/1 flag, defaults to unverified.
                    .col(
                        ColumnDef::new(Users::EmailVerified)
                            .big_integer()
                            .not_null()
                            .default(0),
                    )
                    .col(big_integer(Users::CreatedAt))
                    // 0/1 flag, defaults to enabled (1) — note the asymmetry
                    // with the other flags, which default to 0.
                    .col(
                        ColumnDef::new(Users::Enabled)
                            .big_integer()
                            .not_null()
                            .default(1),
                    )
                    .to_owned(),
            )
            .await?;

        // sessions: browser sessions, keyed by session_id.
        manager
            .create_table(
                Table::create()
                    .table(Sessions::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Sessions::SessionId)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(string(Sessions::Subject))
                    .col(big_integer(Sessions::AuthTime))
                    .col(big_integer(Sessions::CreatedAt))
                    .col(big_integer(Sessions::ExpiresAt))
                    .col(string_null(Sessions::UserAgent))
                    .col(string_null(Sessions::IpAddress))
                    .to_owned(),
            )
            .await?;

        // Index to make expired-session cleanup scans cheap.
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_sessions_expires")
                    .table(Sessions::Table)
                    .col(Sessions::ExpiresAt)
                    .to_owned(),
            )
            .await?;

        // refresh_tokens: keyed by token value; parent_token supports
        // rotation tracking (see the RefreshToken struct in storage).
        manager
            .create_table(
                Table::create()
                    .table(RefreshTokens::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(RefreshTokens::Token)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(string(RefreshTokens::ClientId))
                    .col(string(RefreshTokens::Subject))
                    .col(string(RefreshTokens::Scope))
                    .col(big_integer(RefreshTokens::CreatedAt))
                    .col(big_integer(RefreshTokens::ExpiresAt))
                    // 0/1 flag: set when the token is revoked.
                    .col(
                        ColumnDef::new(RefreshTokens::Revoked)
                            .big_integer()
                            .not_null()
                            .default(0),
                    )
                    .col(string_null(RefreshTokens::ParentToken))
                    .to_owned(),
            )
            .await?;

        // Index to make expired-refresh-token cleanup scans cheap.
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_refresh_tokens_expires")
                    .table(RefreshTokens::Table)
                    .col(RefreshTokens::ExpiresAt)
                    .to_owned(),
            )
            .await?;

        // job_executions: background-job run log. The auto-increment PK is
        // backend-specific: BIGINT (BIGSERIAL) on Postgres, INTEGER on
        // SQLite — SQLite's AUTOINCREMENT requires INTEGER PRIMARY KEY.
        let id_col = match manager.get_database_backend() {
            sea_orm::DatabaseBackend::Postgres => ColumnDef::new(JobExecutions::Id)
                .big_integer()
                .not_null()
                .auto_increment()
                .primary_key()
                .to_owned(),
            _ => ColumnDef::new(JobExecutions::Id)
                .integer()
                .not_null()
                .auto_increment()
                .primary_key()
                .to_owned(),
        };

        manager
            .create_table(
                Table::create()
                    .table(JobExecutions::Table)
                    .if_not_exists()
                    .col(id_col)
                    .col(string(JobExecutions::JobName))
                    .col(big_integer(JobExecutions::StartedAt))
                    // Nullable: populated only when the job finishes.
                    .col(big_integer_null(JobExecutions::CompletedAt))
                    // Nullable 0/1 flag: NULL while the job is in flight.
                    .col(big_integer_null(JobExecutions::Success))
                    .col(string_null(JobExecutions::ErrorMessage))
                    .col(big_integer_null(JobExecutions::RecordsProcessed))
                    .to_owned(),
            )
            .await?;

        // Index for querying recent job runs by start time.
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_job_executions_started")
                    .table(JobExecutions::Table)
                    .col(JobExecutions::StartedAt)
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    /// Drops every table created by `up`, in reverse creation order.
    /// Indexes are dropped implicitly along with their tables.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(JobExecutions::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(RefreshTokens::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Sessions::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Users::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(AccessTokens::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(AuthCodes::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Properties::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Clients::Table).to_owned())
            .await?;
        Ok(())
    }
}
/// Identifiers for the `clients` table. `DeriveIden` snake_cases the
/// variant names, so these spell the actual table/column names — renaming
/// a variant changes the schema.
#[derive(DeriveIden)]
enum Clients {
    Table,
    ClientId,
    ClientSecret,
    ClientName,
    RedirectUris,
    CreatedAt,
}
/// Identifiers for the `properties` table (composite PK: owner + key).
/// Variant names map to snake_cased column names via `DeriveIden`.
#[derive(DeriveIden)]
enum Properties {
    Table,
    Owner,
    Key,
    Value,
    UpdatedAt,
}
/// Identifiers for the `auth_codes` table, including the PKCE columns
/// (`code_challenge`, `code_challenge_method`). Variant names map to
/// snake_cased column names via `DeriveIden`.
#[derive(DeriveIden)]
enum AuthCodes {
    Table,
    Code,
    ClientId,
    RedirectUri,
    Scope,
    Subject,
    Nonce,
    CodeChallenge,
    CodeChallengeMethod,
    CreatedAt,
    ExpiresAt,
    Consumed,
    AuthTime,
}
/// Identifiers for the `access_tokens` table. Variant names map to
/// snake_cased column names via `DeriveIden`.
#[derive(DeriveIden)]
enum AccessTokens {
    Table,
    Token,
    ClientId,
    Subject,
    Scope,
    CreatedAt,
    ExpiresAt,
    Revoked,
}
/// Identifiers for the `users` table. Variant names map to snake_cased
/// column names via `DeriveIden`.
#[derive(DeriveIden)]
enum Users {
    Table,
    Subject,
    Username,
    PasswordHash,
    Email,
    EmailVerified,
    CreatedAt,
    Enabled,
}
/// Identifiers for the `sessions` table. Variant names map to snake_cased
/// column names via `DeriveIden`.
#[derive(DeriveIden)]
enum Sessions {
    Table,
    SessionId,
    Subject,
    AuthTime,
    CreatedAt,
    ExpiresAt,
    UserAgent,
    IpAddress,
}
/// Identifiers for the `refresh_tokens` table (`parent_token` supports
/// rotation tracking). Variant names map to snake_cased column names via
/// `DeriveIden`.
#[derive(DeriveIden)]
enum RefreshTokens {
    Table,
    Token,
    ClientId,
    Subject,
    Scope,
    CreatedAt,
    ExpiresAt,
    Revoked,
    ParentToken,
}
/// Identifiers for the `job_executions` table (background-job run log).
/// Variant names map to snake_cased column names via `DeriveIden`.
#[derive(DeriveIden)]
enum JobExecutions {
    Table,
    Id,
    JobName,
    StartedAt,
    CompletedAt,
    Success,
    ErrorMessage,
    RecordsProcessed,
}

View file

@ -12,6 +12,7 @@ mod web;
use clap::Parser; use clap::Parser;
use miette::{IntoDiagnostic, Result}; use miette::{IntoDiagnostic, Result};
use sea_orm_migration::MigratorTrait;
use tracing_subscriber::{fmt, EnvFilter}; use tracing_subscriber::{fmt, EnvFilter};
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
@ -54,6 +55,10 @@ async fn main() -> Result<()> {
// init storage (database) // init storage (database)
let db = storage::init(&settings.database).await?; let db = storage::init(&settings.database).await?;
// run migrations
migration::Migrator::up(&db, None).await.into_diagnostic()?;
tracing::info!("Database migrations applied successfully");
// Handle subcommands // Handle subcommands
match cli.command { match cli.command {
Some(Command::SyncUsers { file }) => { Some(Command::SyncUsers { file }) => {

View file

@ -5,8 +5,7 @@ use base64ct::Encoding;
use chrono::Utc; use chrono::Utc;
use rand::RngCore; use rand::RngCore;
use sea_orm::{ use sea_orm::{
ActiveModelTrait, ActiveValue::NotSet, ColumnTrait, ConnectionTrait, Database, ActiveModelTrait, ColumnTrait, Database, DatabaseConnection, EntityTrait, QueryFilter, Set,
DatabaseConnection, DbBackend, EntityTrait, QueryFilter, Set, Statement,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::Value; use serde_json::Value;
@ -87,184 +86,8 @@ pub struct RefreshToken {
pub parent_token: Option<String>, // For token rotation tracking pub parent_token: Option<String>, // For token rotation tracking
} }
fn detect_backend(url: &str) -> DbBackend {
if url.starts_with("postgres://") || url.starts_with("postgresql://") {
DbBackend::Postgres
} else {
DbBackend::Sqlite
}
}
pub async fn init(cfg: &DbCfg) -> Result<DatabaseConnection, CrabError> { pub async fn init(cfg: &DbCfg) -> Result<DatabaseConnection, CrabError> {
let db = Database::connect(&cfg.url).await?; let db = Database::connect(&cfg.url).await?;
let backend = detect_backend(&cfg.url);
// bootstrap schema
// Enable foreign keys for SQLite only
if backend == DbBackend::Sqlite {
db.execute(Statement::from_string(
DbBackend::Sqlite,
"PRAGMA foreign_keys = ON",
))
.await?;
}
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS clients (
client_id TEXT PRIMARY KEY,
client_secret TEXT NOT NULL,
client_name TEXT,
redirect_uris TEXT NOT NULL,
created_at BIGINT NOT NULL
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS properties (
owner TEXT NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL,
updated_at BIGINT NOT NULL,
PRIMARY KEY(owner, key)
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS auth_codes (
code TEXT PRIMARY KEY,
client_id TEXT NOT NULL,
redirect_uri TEXT NOT NULL,
scope TEXT NOT NULL,
subject TEXT NOT NULL,
nonce TEXT,
code_challenge TEXT NOT NULL,
code_challenge_method TEXT NOT NULL,
created_at BIGINT NOT NULL,
expires_at BIGINT NOT NULL,
consumed BIGINT NOT NULL DEFAULT 0,
auth_time BIGINT
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS access_tokens (
token TEXT PRIMARY KEY,
client_id TEXT NOT NULL,
subject TEXT NOT NULL,
scope TEXT NOT NULL,
created_at BIGINT NOT NULL,
expires_at BIGINT NOT NULL,
revoked BIGINT NOT NULL DEFAULT 0
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS users (
subject TEXT PRIMARY KEY,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
email TEXT,
email_verified BIGINT NOT NULL DEFAULT 0,
created_at BIGINT NOT NULL,
enabled BIGINT NOT NULL DEFAULT 1
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS sessions (
session_id TEXT PRIMARY KEY,
subject TEXT NOT NULL,
auth_time BIGINT NOT NULL,
created_at BIGINT NOT NULL,
expires_at BIGINT NOT NULL,
user_agent TEXT,
ip_address TEXT
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
"CREATE INDEX IF NOT EXISTS idx_sessions_expires ON sessions(expires_at)",
))
.await?;
db.execute(Statement::from_string(
backend,
r#"
CREATE TABLE IF NOT EXISTS refresh_tokens (
token TEXT PRIMARY KEY,
client_id TEXT NOT NULL,
subject TEXT NOT NULL,
scope TEXT NOT NULL,
created_at BIGINT NOT NULL,
expires_at BIGINT NOT NULL,
revoked BIGINT NOT NULL DEFAULT 0,
parent_token TEXT
)
"#,
))
.await?;
db.execute(Statement::from_string(
backend,
"CREATE INDEX IF NOT EXISTS idx_refresh_tokens_expires ON refresh_tokens(expires_at)",
))
.await?;
// Job executions table for tracking background job runs
let id_type = match backend {
DbBackend::Postgres => "BIGSERIAL PRIMARY KEY",
_ => "INTEGER PRIMARY KEY AUTOINCREMENT",
};
db.execute(Statement::from_string(
backend,
format!(
r#"
CREATE TABLE IF NOT EXISTS job_executions (
id {},
job_name TEXT NOT NULL,
started_at BIGINT NOT NULL,
completed_at BIGINT,
success BIGINT,
error_message TEXT,
records_processed BIGINT
)
"#,
id_type
),
))
.await?;
db.execute(Statement::from_string(
backend,
"CREATE INDEX IF NOT EXISTS idx_job_executions_started ON job_executions(started_at)",
))
.await?;
Ok(db) Ok(db)
} }