New yaak-database crate for shared core logic

This commit is contained in:
Gregory Schier
2026-03-08 08:10:37 -07:00
parent 3586c8fe24
commit cf28229f5f
12 changed files with 219 additions and 44 deletions

View File

@@ -0,0 +1,16 @@
[package]
name = "yaak-database"
version = "0.1.0"
edition = "2024"
publish = false
[dependencies]
include_dir = "0.7"
log = { workspace = true }
nanoid = "0.4.0"
r2d2 = "0.8.10"
r2d2_sqlite = { version = "0.25.0" }
rusqlite = { version = "0.32.1", features = ["bundled", "chrono"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }

View File

@@ -0,0 +1,25 @@
use r2d2::PooledConnection;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{Connection, Statement, ToSql, Transaction};
pub enum ConnectionOrTx<'a> {
Connection(PooledConnection<SqliteConnectionManager>),
Transaction(&'a Transaction<'a>),
}
impl<'a> ConnectionOrTx<'a> {
pub fn resolve(&self) -> &Connection {
match self {
ConnectionOrTx::Connection(c) => c,
ConnectionOrTx::Transaction(c) => c,
}
}
pub fn prepare(&self, sql: &str) -> rusqlite::Result<Statement<'_>> {
self.resolve().prepare(sql)
}
pub fn execute(&self, sql: &str, params: &[&dyn ToSql]) -> rusqlite::Result<usize> {
self.resolve().execute(sql, params)
}
}

View File

@@ -0,0 +1,37 @@
use serde::{Serialize, Serializer};
use thiserror::Error;
#[derive(Error, Debug)]
pub enum Error {
#[error("SQL error: {0}")]
SqlError(#[from] rusqlite::Error),
#[error("SQL Pool error: {0}")]
SqlPoolError(#[from] r2d2::Error),
#[error("Database error: {0}")]
Database(String),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("JSON error: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Model not found: {0}")]
ModelNotFound(String),
#[error("DB Migration Failed: {0}")]
MigrationError(String),
}
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_ref())
}
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@@ -0,0 +1,15 @@
pub mod connection_or_tx;
pub mod error;
pub mod migrate;
pub mod util;
// Re-export key types for convenience
pub use connection_or_tx::ConnectionOrTx;
pub use error::{Error, Result};
pub use migrate::run_migrations;
pub use util::{generate_id, generate_id_of_length, generate_prefixed_id};
// Re-export pool types that consumers will need
pub use r2d2;
pub use r2d2_sqlite;
pub use rusqlite;

View File

@@ -0,0 +1,81 @@
use crate::error::{Error, Result};
use include_dir::Dir;
use log::{debug, info};
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{OptionalExtension, params};
const TRACKING_TABLE: &str = "_sqlx_migrations";
/// Run SQL migrations from an embedded directory.
///
/// Migrations are sorted by filename (use timestamp prefixes like `00000001_init.sql`).
/// Applied migrations are tracked in `_sqlx_migrations`.
pub fn run_migrations(pool: &Pool<SqliteConnectionManager>, dir: &Dir<'_>) -> Result<()> {
info!("Running migrations");
// Create tracking table
pool.get()?.execute(
&format!(
"CREATE TABLE IF NOT EXISTS {TRACKING_TABLE} (
version TEXT PRIMARY KEY,
description TEXT NOT NULL,
applied_at DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL
)"
),
[],
)?;
// Read and sort all .sql files
let mut entries: Vec<_> = dir
.entries()
.iter()
.filter(|e| e.path().extension().map(|ext| ext == "sql").unwrap_or(false))
.collect();
entries.sort_by_key(|e| e.path());
let mut ran_migrations = 0;
for entry in &entries {
let filename = entry.path().file_name().unwrap().to_str().unwrap();
let version = filename.split('_').next().unwrap();
// Check if already applied
let already_applied: Option<i64> = pool
.get()?
.query_row(
&format!("SELECT 1 FROM {TRACKING_TABLE} WHERE version = ?"),
[version],
|r| r.get(0),
)
.optional()?;
if already_applied.is_some() {
debug!("Skipping already applied migration: {}", filename);
continue;
}
let sql =
entry.as_file().unwrap().contents_utf8().expect("Failed to read migration file");
info!("Applying migration: {}", filename);
let conn = pool.get()?;
conn.execute_batch(sql)?;
// Record migration
conn.execute(
&format!("INSERT INTO {TRACKING_TABLE} (version, description) VALUES (?, ?)"),
params![version, filename],
)?;
ran_migrations += 1;
}
if ran_migrations == 0 {
info!("No migrations to run");
} else {
info!("Ran {} migration(s)", ran_migrations);
}
Ok(())
}

View File

@@ -0,0 +1,20 @@
use nanoid::nanoid;
pub fn generate_prefixed_id(prefix: &str) -> String {
format!("{prefix}_{}", generate_id())
}
pub fn generate_id() -> String {
generate_id_of_length(10)
}
pub fn generate_id_of_length(n: usize) -> String {
let alphabet: [char; 57] = [
'2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A',
'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z',
];
nanoid!(n, &alphabet)
}