Database access refactor (#190)

This commit is contained in:
Gregory Schier
2025-03-25 08:35:10 -07:00
committed by GitHub
parent 445c30f3a9
commit 1d37d46130
72 changed files with 4895 additions and 4702 deletions

View File

@@ -0,0 +1,24 @@
use crate::error::Result;
use crate::manager::QueryManagerExt;
use crate::models::AnyModel;
use crate::queries_legacy::UpdateSource;
use tauri::{Runtime, WebviewWindow};
#[tauri::command]
pub(crate) async fn upsert<R: Runtime>(
window: WebviewWindow<R>,
model: AnyModel,
) -> Result<String> {
let queries = window.queries().connect().await?;
let id = match model {
AnyModel::HttpRequest(r) => queries.upsert(&r, &UpdateSource::from_window(&window))?.id,
_ => todo!(),
};
Ok(id)
}
#[tauri::command]
pub(crate) fn delete() -> Result<()> {
Ok(())
}

View File

@@ -1,3 +1,4 @@
use serde::{Serialize, Serializer};
use thiserror::Error;
#[derive(Error, Debug)]
@@ -11,11 +12,29 @@ pub enum Error {
#[error("JSON error: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Model not found {0}")]
#[error("Model not found: {0}")]
ModelNotFound(String),
#[error("Model serialization error: {0}")]
ModelSerializationError(String),
#[error("Model error: {0}")]
GenericError(String),
#[error("Row not found")]
RowNotFound,
#[error("unknown error")]
Unknown,
}
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_ref())
}
}
pub type Result<T> = std::result::Result<T, Error>;
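Because `Error` serializes via its `Display` output, a failed command reaches the frontend as a plain message string rather than a structured variant. A minimal sketch of that behavior (the `wk_123` id is illustrative):

use crate::error::Error;

fn error_serialization_demo() {
    // The Display output becomes the JSON string payload.
    let err = Error::ModelNotFound("wk_123".to_string());
    let json = serde_json::to_string(&err).unwrap();
    assert_eq!(json, r#""Model not found: wk_123""#);
}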

View File

@@ -1,7 +1,98 @@
use crate::commands::{delete, upsert};
use crate::manager::QueryManager;
use crate::queries_legacy::ModelChangeEvent;
use log::info;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use sqlx::migrate::Migrator;
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::SqlitePool;
use std::fs::create_dir_all;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use tauri::async_runtime::Mutex;
use tauri::path::BaseDirectory;
use tauri::plugin::TauriPlugin;
use tauri::{generate_handler, AppHandle, Emitter, Manager, Runtime};
use tokio::sync::mpsc;
mod commands;
pub mod error;
pub mod manager;
pub mod models;
pub mod queries;
pub mod queries_legacy;
pub mod render;
pub struct SqliteConnection(pub Mutex<Pool<SqliteConnectionManager>>);
impl SqliteConnection {
pub(crate) fn new(pool: Pool<SqliteConnectionManager>) -> Self {
Self(Mutex::new(pool))
}
}
pub fn init<R: Runtime>() -> TauriPlugin<R> {
tauri::plugin::Builder::new("yaak_models")
.invoke_handler(generate_handler![upsert, delete])
.setup(|app_handle, _api| {
let app_path = app_handle.path().app_data_dir().unwrap();
create_dir_all(app_path.clone()).expect("Problem creating App directory!");
let db_file_path = app_path.join("db.sqlite");
{
let db_file_path = db_file_path.clone();
tauri::async_runtime::block_on(async move {
must_migrate_db(app_handle.app_handle(), &db_file_path).await;
});
};
let manager = SqliteConnectionManager::file(db_file_path);
let pool = Pool::builder()
.max_size(100) // Up from 10 (just in case)
.connection_timeout(Duration::from_secs(10)) // Down from 30
.build(manager)
.unwrap();
app_handle.manage(SqliteConnection::new(pool.clone()));
{
let (tx, mut rx) = mpsc::channel(128);
app_handle.manage(QueryManager::new(pool, tx));
let app_handle = app_handle.clone();
tauri::async_runtime::spawn(async move {
while let Some(p) = rx.recv().await {
let name = match p.change {
ModelChangeEvent::Upsert => "upserted_model",
ModelChangeEvent::Delete => "deleted_model",
};
app_handle.emit(name, p).unwrap();
}
});
}
Ok(())
})
.build()
}
async fn must_migrate_db<R: Runtime>(app_handle: &AppHandle<R>, sqlite_file_path: &PathBuf) {
info!("Connecting to database at {sqlite_file_path:?}");
let sqlite_file_path = sqlite_file_path.to_str().unwrap().to_string();
let opts = SqliteConnectOptions::from_str(&sqlite_file_path).unwrap().create_if_missing(true);
let pool = SqlitePool::connect_with(opts).await.expect("Failed to connect to database");
let p = app_handle
.path()
.resolve("migrations", BaseDirectory::Resource)
.expect("failed to resolve resource");
info!("Running database migrations from: {}", p.to_string_lossy());
let mut m = Migrator::new(p).await.expect("Failed to load migrations");
m.set_ignore_missing(true); // So we can roll back versions and not crash
m.run(&pool).await.expect("Failed to run migrations");
info!("Database migrations complete");
}
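A sketch of consuming the plugin from an app's builder; the `yaak_models` crate name is assumed from the plugin identifier above:

fn main() {
    tauri::Builder::default()
        // Registers the plugin, which opens the pool, runs migrations,
        // and starts the model-change event forwarder shown above.
        .plugin(yaak_models::init())
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}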

View File

@@ -1,40 +1,108 @@
use crate::error::Result;
use crate::queries_legacy::ModelPayload;
use r2d2::{Pool, PooledConnection};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{Connection, Statement, ToSql, Transaction, TransactionBehavior};
use std::sync::Arc;
use tauri::{Manager, Runtime};
use tokio::sync::{mpsc, Mutex};

pub trait QueryManagerExt<'a, R> {
    fn queries(&'a self) -> &'a QueryManager;
}

impl<'a, R: Runtime, T: Manager<R>> QueryManagerExt<'a, R> for T {
    fn queries(&'a self) -> &'a QueryManager {
        let qm = self.state::<QueryManager>();
        qm.inner()
    }
}
#[derive(Clone)]
pub struct QueryManager {
pool: Arc<Mutex<Pool<SqliteConnectionManager>>>,
events_tx: mpsc::Sender<ModelPayload>,
}
impl QueryManager {
pub(crate) fn new(
pool: Pool<SqliteConnectionManager>,
events_tx: mpsc::Sender<ModelPayload>,
) -> Self {
QueryManager {
pool: Arc::new(Mutex::new(pool)),
events_tx,
}
}
pub async fn connect(&self) -> Result<DbContext> {
let conn = self.pool.lock().await.get()?;
Ok(DbContext {
tx: self.events_tx.clone(),
conn: ConnectionOrTx::Connection(conn),
})
}
pub async fn with_conn<F, T>(&self, func: F) -> Result<T>
where
F: FnOnce(&DbContext) -> Result<T>,
{
let conn = self.pool.lock().await.get()?;
let db_context = DbContext {
tx: self.events_tx.clone(),
conn: ConnectionOrTx::Connection(conn),
};
func(&db_context)
}
pub async fn with_tx<F, T>(&self, func: F) -> Result<T>
where
F: FnOnce(&DbContext) -> Result<T>,
{
let mut conn = self.pool.lock().await.get()?;
let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
let db_context = DbContext {
tx: self.events_tx.clone(),
conn: ConnectionOrTx::Transaction(&tx),
};
match func(&db_context) {
Ok(val) => {
tx.commit()?;
Ok(val)
}
Err(e) => {
tx.rollback()?;
Err(e)
}
}
}
}
pub enum ConnectionOrTx<'a> {
Connection(PooledConnection<SqliteConnectionManager>),
Transaction(&'a Transaction<'a>),
}
impl<'a> ConnectionOrTx<'a> {
pub(crate) fn resolve(&self) -> &Connection {
match self {
ConnectionOrTx::Connection(c) => c,
ConnectionOrTx::Transaction(c) => c,
}
}
pub(crate) fn prepare(&self, sql: &str) -> rusqlite::Result<Statement<'_>> {
self.resolve().prepare(sql)
}
pub(crate) fn execute(&self, sql: &str, params: &[&dyn ToSql]) -> rusqlite::Result<usize> {
self.resolve().execute(sql, params)
}
}
pub struct DbContext<'a> {
pub(crate) tx: mpsc::Sender<ModelPayload>,
pub(crate) conn: ConnectionOrTx<'a>,
}
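Callers reach the manager through Tauri state and hand it a closure; `with_tx` wraps the closure in an immediate transaction that commits on `Ok` and rolls back on `Err`. A hedged usage sketch (the workspace id and rename flow are illustrative, and `Workspace` is assumed to carry a `name` field):

use crate::manager::QueryManagerExt;
use crate::models::Workspace;
use crate::queries_legacy::UpdateSource;

async fn rename_workspace<R: tauri::Runtime>(
    app: &tauri::AppHandle<R>,
) -> crate::error::Result<Workspace> {
    app.queries()
        .with_tx(|db| {
            // Both statements run inside one immediate transaction.
            let workspace = db.get_workspace("wk_123")?;
            db.upsert_workspace(
                &Workspace { name: "Renamed".into(), ..workspace },
                &UpdateSource::Background,
            )
        })
        .await
}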

File diff suppressed because it is too large

View File

@@ -1,81 +0,0 @@
use log::info;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use serde::Deserialize;
use sqlx::migrate::Migrator;
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::SqlitePool;
use std::fs::create_dir_all;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use tauri::async_runtime::Mutex;
use tauri::path::BaseDirectory;
use tauri::plugin::TauriPlugin;
use tauri::{plugin, AppHandle, Manager, Runtime};
pub struct SqliteConnection(pub Mutex<Pool<SqliteConnectionManager>>);
#[derive(Default, Deserialize)]
pub struct PluginConfig {
// Nothing yet (will be configurable in tauri.conf.json)
}
/// Tauri SQL plugin builder.
#[derive(Default)]
pub struct Builder {
// Nothing Yet
}
impl Builder {
pub fn new() -> Self {
Self::default()
}
pub fn build<R: Runtime>(&self) -> TauriPlugin<R, Option<PluginConfig>> {
plugin::Builder::<R, Option<PluginConfig>>::new("yaak_models")
.setup(|app, _api| {
let app_path = app.path().app_data_dir().unwrap();
create_dir_all(app_path.clone()).expect("Problem creating App directory!");
let db_file_path = app_path.join("db.sqlite");
{
let db_file_path = db_file_path.clone();
tauri::async_runtime::block_on(async move {
must_migrate_db(app.app_handle(), &db_file_path).await;
});
};
let manager = SqliteConnectionManager::file(db_file_path);
let pool = Pool::builder()
.max_size(100) // Up from 10 (just in case)
.connection_timeout(Duration::from_secs(10)) // Down from 30
.build(manager)
.unwrap();
app.manage(SqliteConnection(Mutex::new(pool)));
Ok(())
})
.build()
}
}
async fn must_migrate_db<R: Runtime>(app_handle: &AppHandle<R>, sqlite_file_path: &PathBuf) {
info!("Connecting to database at {sqlite_file_path:?}");
let sqlite_file_path = sqlite_file_path.to_str().unwrap().to_string();
let opts = SqliteConnectOptions::from_str(&sqlite_file_path).unwrap().create_if_missing(true);
let pool = SqlitePool::connect_with(opts).await.expect("Failed to connect to database");
let p = app_handle
.path()
.resolve("migrations", BaseDirectory::Resource)
.expect("failed to resolve resource");
info!("Running database migrations from: {}", p.to_string_lossy());
let mut m = Migrator::new(p).await.expect("Failed to load migrations");
m.set_ignore_missing(true); // So we can roll back versions and not crash
m.run(&pool).await.expect("Failed to run migrations");
info!("Database migrations complete");
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,169 @@
use crate::error::Error::RowNotFound;
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{AnyModel, ModelType, UpsertModelInfo};
use crate::queries_legacy::{generate_model_id, ModelChangeEvent, ModelPayload, UpdateSource};
use rusqlite::OptionalExtension;
use sea_query::{
Asterisk, Expr, IntoColumnRef, IntoIden, IntoTableRef, OnConflict, Query, SimpleExpr,
SqliteQueryBuilder,
};
use sea_query_rusqlite::RusqliteBinder;
pub(crate) const MAX_HISTORY_ITEMS: usize = 20;
impl<'a> DbContext<'a> {
pub(crate) fn find_one<'s, M>(
&self,
col: impl IntoColumnRef,
value: impl Into<SimpleExpr>,
) -> Result<M>
where
M: Into<AnyModel> + Clone + UpsertModelInfo,
{
match self.find_optional::<M>(col, value) {
Ok(Some(v)) => Ok(v),
Ok(None) => Err(RowNotFound),
Err(e) => Err(e),
}
}
pub fn find_optional<'s, M>(
&self,
col: impl IntoColumnRef,
value: impl Into<SimpleExpr>,
) -> Result<Option<M>>
where
M: Into<AnyModel> + Clone + UpsertModelInfo,
{
let (sql, params) = Query::select()
.from(M::table_name())
.column(Asterisk)
.cond_where(Expr::col(col).eq(value))
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
Ok(stmt.query_row(&*params.as_params(), M::from_row).optional()?)
}
pub fn find_all<'s, M>(&self) -> Result<Vec<M>>
where
M: Into<AnyModel> + Clone + UpsertModelInfo,
{
let (sql, params) = Query::select()
.from(M::table_name())
.column(Asterisk)
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.resolve().prepare(sql.as_str())?;
let items = stmt.query_map(&*params.as_params(), M::from_row)?;
Ok(items.map(|v| v.unwrap()).collect())
}
pub fn find_many<'s, M>(
&self,
col: impl IntoColumnRef,
value: impl Into<SimpleExpr>,
limit: Option<u64>,
) -> Result<Vec<M>>
where
M: Into<AnyModel> + Clone + UpsertModelInfo,
{
// TODO: Figure out how to do this conditional builder better
let (sql, params) = if let Some(limit) = limit {
Query::select()
.from(M::table_name())
.column(Asterisk)
.cond_where(Expr::col(col).eq(value))
.limit(limit)
.build_rusqlite(SqliteQueryBuilder)
} else {
Query::select()
.from(M::table_name())
.column(Asterisk)
.cond_where(Expr::col(col).eq(value))
.build_rusqlite(SqliteQueryBuilder)
};
let mut stmt = self.conn.resolve().prepare(sql.as_str())?;
let items = stmt.query_map(&*params.as_params(), M::from_row)?;
Ok(items.map(|v| v.unwrap()).collect())
}
pub fn upsert<M>(&self, model: &M, source: &UpdateSource) -> Result<M>
where
M: Into<AnyModel> + From<AnyModel> + UpsertModelInfo + Clone,
{
self.upsert_one(
M::table_name(),
M::id_column(),
model.get_id().as_str(),
|| generate_model_id(ModelType::TypeEnvironment),
model.clone().insert_values(source)?,
M::update_columns(),
source,
)
}
fn upsert_one<M>(
&self,
table: impl IntoTableRef,
id_col: impl IntoIden + Eq + Clone,
id_val: &str,
gen_id: fn() -> String,
other_values: Vec<(impl IntoIden + Eq, impl Into<SimpleExpr>)>,
update_columns: Vec<impl IntoIden>,
source: &UpdateSource,
) -> Result<M>
where
M: Into<AnyModel> + From<AnyModel> + UpsertModelInfo + Clone,
{
let id_iden = id_col.into_iden();
let mut column_vec = vec![id_iden.clone()];
let mut value_vec = vec![if id_val.is_empty() { gen_id().into() } else { id_val.into() }];
for (col, val) in other_values {
value_vec.push(val.into());
column_vec.push(col.into_iden());
}
let on_conflict = OnConflict::column(id_iden).update_columns(update_columns).to_owned();
let (sql, params) = Query::insert()
.into_table(table)
.columns(column_vec)
.values_panic(value_vec)
.on_conflict(on_conflict)
.returning_all()
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.resolve().prepare(sql.as_str())?;
let m: M = stmt.query_row(&*params.as_params(), |row| M::from_row(row))?;
let payload = ModelPayload {
model: m.clone().into(),
update_source: source.clone(),
change: ModelChangeEvent::Upsert,
};
self.tx.try_send(payload).unwrap();
Ok(m)
}
pub(crate) fn delete<'s, M>(&self, m: &M, update_source: &UpdateSource) -> Result<M>
where
M: Into<AnyModel> + Clone + UpsertModelInfo,
{
let (sql, params) = Query::delete()
.from_table(M::table_name())
.cond_where(Expr::col(M::id_column().into_iden()).eq(m.get_id()))
.build_rusqlite(SqliteQueryBuilder);
self.conn.execute(sql.as_str(), &*params.as_params())?;
let payload = ModelPayload {
model: m.clone().into(),
update_source: update_source.clone(),
change: ModelChangeEvent::Delete,
};
self.tx.try_send(payload).unwrap();
Ok(m.clone())
}
}
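The pairing of `find_optional` and `find_one` lets call sites decide whether a missing row is a `None` or an error. A small sketch inside the crate (the id is hypothetical; `find_one` is `pub(crate)`, so this only compiles within the crate):

use crate::error::Error::RowNotFound;
use crate::manager::DbContext;
use crate::models::{Workspace, WorkspaceIden};

fn lookup_demo(db: &DbContext<'_>) -> crate::error::Result<()> {
    // Missing rows surface as Ok(None) here...
    let found: Option<Workspace> = db.find_optional(WorkspaceIden::Id, "wk_missing")?;
    assert!(found.is_none());
    // ...and as Err(RowNotFound) here.
    match db.find_one::<Workspace>(WorkspaceIden::Id, "wk_missing") {
        Err(RowNotFound) => {}
        _ => panic!("expected RowNotFound"),
    }
    Ok(())
}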

View File

@@ -0,0 +1,99 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{Environment, Folder, GrpcRequest, HttpRequest, WebsocketRequest, Workspace};
use crate::queries_legacy::{BatchUpsertResult, UpdateSource};
use log::info;
impl<'a> DbContext<'a> {
pub fn batch_upsert(
&self,
workspaces: Vec<Workspace>,
environments: Vec<Environment>,
folders: Vec<Folder>,
http_requests: Vec<HttpRequest>,
grpc_requests: Vec<GrpcRequest>,
websocket_requests: Vec<WebsocketRequest>,
source: &UpdateSource,
) -> Result<BatchUpsertResult> {
let mut imported_resources = BatchUpsertResult::default();
if workspaces.len() > 0 {
info!("Batch inserting {} workspaces", workspaces.len());
for v in workspaces {
let x = self.upsert_workspace(&v, source)?;
imported_resources.workspaces.push(x.clone());
}
}
if environments.len() > 0 {
while imported_resources.environments.len() < environments.len() {
for v in environments.clone() {
if let Some(id) = v.environment_id.clone() {
let has_parent_to_import = environments.iter().any(|m| m.id == id);
let imported_parent =
imported_resources.environments.iter().find(|m| m.id == id);
// If there's also a parent to upsert, wait for that one
if imported_parent.is_none() && has_parent_to_import {
continue;
}
}
if imported_resources.environments.iter().any(|f| f.id == v.id) {
continue;
}
let x = self.upsert_environment(&v, source)?;
imported_resources.environments.push(x.clone());
}
}
info!("Imported {} environments", imported_resources.environments.len());
}
if folders.len() > 0 {
while imported_resources.folders.len() < folders.len() {
for v in folders.clone() {
if let Some(id) = v.folder_id.clone() {
let has_parent_to_import = folders.iter().any(|m| m.id == id);
let imported_parent =
imported_resources.folders.iter().find(|m| m.id == id);
// If there's also a parent to upsert, wait for that one
if imported_parent.is_none() && has_parent_to_import {
continue;
}
}
if imported_resources.folders.iter().any(|f| f.id == v.id) {
continue;
}
let x = self.upsert_folder(&v, source)?;
imported_resources.folders.push(x.clone());
}
}
info!("Imported {} folders", imported_resources.folders.len());
}
if http_requests.len() > 0 {
for v in http_requests {
let x = self.upsert(&v, source)?;
imported_resources.http_requests.push(x.clone());
}
info!("Imported {} http_requests", imported_resources.http_requests.len());
}
if grpc_requests.len() > 0 {
for v in grpc_requests {
let x = self.upsert_grpc_request(&v, source)?;
imported_resources.grpc_requests.push(x.clone());
}
info!("Imported {} grpc_requests", imported_resources.grpc_requests.len());
}
if websocket_requests.len() > 0 {
for v in websocket_requests {
let x = self.upsert_websocket_request(&v, source)?;
imported_resources.websocket_requests.push(x.clone());
}
info!("Imported {} websocket_requests", imported_resources.websocket_requests.len());
}
Ok(imported_resources)
}
}
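The environment and folder loops repeat until everything lands, deferring any child whose parent is also waiting in the same batch; input order therefore does not matter. A toy illustration (ids hypothetical, and assuming `Folder` derives `Default` like the other models):

use crate::models::Folder;

fn batch_order_demo() -> Vec<Folder> {
    let parent = Folder { id: "fl_parent".into(), ..Default::default() };
    let child = Folder {
        id: "fl_child".into(),
        folder_id: Some("fl_parent".into()),
        ..Default::default()
    };
    // Pass 1 of the while-loop above skips `child` (its parent is in the
    // batch but not yet imported) and upserts `parent`; pass 2 upserts `child`.
    vec![child, parent]
}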

View File

@@ -0,0 +1,35 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{CookieJar, CookieJarIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_cookie_jar(&self, id: &str) -> Result<CookieJar> {
self.find_one(CookieJarIden::Id, id)
}
pub fn list_cookie_jars(&self, workspace_id: &str) -> Result<Vec<CookieJar>> {
self.find_many(CookieJarIden::WorkspaceId, workspace_id, None)
}
pub fn delete_cookie_jar(
&self,
cookie_jar: &CookieJar,
source: &UpdateSource,
) -> Result<CookieJar> {
self.delete(cookie_jar, source)
}
pub fn delete_cookie_jar_by_id(&self, id: &str, source: &UpdateSource) -> Result<CookieJar> {
let cookie_jar = self.get_cookie_jar(id)?;
self.delete_cookie_jar(&cookie_jar, source)
}
pub fn upsert_cookie_jar(
&self,
cookie_jar: &CookieJar,
source: &UpdateSource,
) -> Result<CookieJar> {
self.upsert(cookie_jar, source)
}
}

View File

@@ -0,0 +1,79 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{Environment, EnvironmentIden, UpsertModelInfo};
use crate::queries_legacy::UpdateSource;
use log::info;
use sea_query::ColumnRef::Asterisk;
use sea_query::{Cond, Expr, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
impl<'a> DbContext<'a> {
pub fn get_environment(&self, id: &str) -> Result<Environment> {
self.find_one(EnvironmentIden::Id, id)
}
pub fn get_base_environment(&self, workspace_id: &str) -> Result<Environment> {
let (sql, params) = Query::select()
.from(EnvironmentIden::Table)
.column(Asterisk)
.cond_where(
Cond::all()
.add(Expr::col(EnvironmentIden::WorkspaceId).eq(workspace_id))
.add(Expr::col(EnvironmentIden::EnvironmentId).is_null()),
)
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
Ok(stmt.query_row(&*params.as_params(), Environment::from_row)?)
}
pub fn ensure_base_environment(&self, workspace_id: &str) -> Result<()> {
let environments = self.list_environments(workspace_id)?;
let base_environment_exists =
environments.iter().any(|e| e.environment_id.is_none() && e.workspace_id == workspace_id);
if !base_environment_exists {
info!("Creating base environment for {workspace_id}");
self.upsert_environment(
&Environment {
workspace_id: workspace_id.to_string(),
name: "Global Variables".to_string(),
..Default::default()
},
&UpdateSource::Background,
)?;
}
Ok(())
}
pub fn list_environments(&self, workspace_id: &str) -> Result<Vec<Environment>> {
self.find_many(EnvironmentIden::WorkspaceId, workspace_id, None)
}
pub fn delete_environment(
&self,
environment: &Environment,
source: &UpdateSource,
) -> Result<Environment> {
for environment in
self.find_many::<Environment>(EnvironmentIden::EnvironmentId, &environment.id, None)?
{
self.delete_environment(&environment, source)?;
}
self.delete(environment, source)
}
pub fn delete_environment_by_id(&self, id: &str, source: &UpdateSource) -> Result<Environment> {
let environment = self.get_environment(id)?;
self.delete_environment(&environment, source)
}
pub fn upsert_environment(
&self,
environment: &Environment,
source: &UpdateSource,
) -> Result<Environment> {
self.upsert(environment, source)
}
}

View File

@@ -0,0 +1,106 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{
Folder, FolderIden, GrpcRequest, GrpcRequestIden, HttpRequest, HttpRequestIden,
WebsocketRequest, WebsocketRequestIden,
};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_folder(&self, id: &str) -> Result<Folder> {
self.find_one(FolderIden::Id, id)
}
pub fn list_folders(&self, workspace_id: &str) -> Result<Vec<Folder>> {
self.find_many(FolderIden::WorkspaceId, workspace_id, None)
}
pub fn delete_folder(&self, folder: &Folder, source: &UpdateSource) -> Result<Folder> {
for folder in self.find_many::<Folder>(FolderIden::FolderId, &folder.id, None)? {
self.delete_folder(&folder, source)?;
}
for request in self.find_many::<HttpRequest>(HttpRequestIden::FolderId, &folder.id, None)? {
self.delete_http_request(&request, source)?;
}
for request in self.find_many::<GrpcRequest>(GrpcRequestIden::FolderId, &folder.id, None)? {
self.delete_grpc_request(&request, source)?;
}
for request in
self.find_many::<WebsocketRequest>(WebsocketRequestIden::FolderId, &folder.id, None)?
{
self.delete_websocket_request(&request, source)?;
}
self.delete(folder, source)
}
pub fn delete_folder_by_id(&self, id: &str, source: &UpdateSource) -> Result<Folder> {
let folder = self.get_folder(id)?;
self.delete_folder(&folder, source)
}
pub fn upsert_folder(&self, folder: &Folder, source: &UpdateSource) -> Result<Folder> {
self.upsert(folder, source)
}
pub fn duplicate_folder(&self, src_folder: &Folder, source: &UpdateSource) -> Result<Folder> {
let workspace_id = src_folder.workspace_id.as_str();
let http_requests = self
.find_many::<HttpRequest>(HttpRequestIden::WorkspaceId, workspace_id, None)?
.into_iter()
.filter(|m| m.folder_id.as_ref() == Some(&src_folder.id));
let grpc_requests = self
.find_many::<GrpcRequest>(GrpcRequestIden::WorkspaceId, workspace_id, None)?
.into_iter()
.filter(|m| m.folder_id.as_ref() == Some(&src_folder.id));
let folders = self
.find_many::<Folder>(FolderIden::WorkspaceId, workspace_id, None)?
.into_iter()
.filter(|m| m.folder_id.as_ref() == Some(&src_folder.id));
let new_folder = self.upsert_folder(
&Folder {
id: "".into(),
sort_priority: src_folder.sort_priority + 0.001,
..src_folder.clone()
},
source,
)?;
for m in http_requests {
self.upsert_http_request(
&HttpRequest {
id: "".into(),
folder_id: Some(new_folder.id.clone()),
sort_priority: m.sort_priority + 0.001,
..m
},
source,
)?;
}
for m in grpc_requests {
self.upsert_grpc_request(
&GrpcRequest {
id: "".into(),
folder_id: Some(new_folder.id.clone()),
sort_priority: m.sort_priority + 0.001,
..m
},
source,
)?;
}
for m in folders {
// Recurse down
self.duplicate_folder(
&Folder {
folder_id: Some(new_folder.id.clone()),
..m
},
source,
)?;
}
Ok(new_folder)
}
}

View File

@@ -0,0 +1,98 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{GrpcConnection, GrpcConnectionIden, GrpcConnectionState};
use crate::queries::base::MAX_HISTORY_ITEMS;
use crate::queries_legacy::UpdateSource;
use log::debug;
use sea_query::{Expr, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
impl<'a> DbContext<'a> {
pub fn get_grpc_connection(&self, id: &str) -> Result<GrpcConnection> {
self.find_one(GrpcConnectionIden::Id, id)
}
pub fn delete_all_grpc_connections_for_request(
&self,
request_id: &str,
source: &UpdateSource,
) -> Result<()> {
let responses = self.list_grpc_connections_for_request(request_id, None)?;
for m in responses {
self.delete(&m, source)?;
}
Ok(())
}
pub fn delete_all_grpc_connections_for_workspace(
&self,
workspace_id: &str,
source: &UpdateSource,
) -> Result<()> {
for m in self.list_grpc_connections_for_workspace(workspace_id, None)? {
self.delete(&m, source)?;
}
Ok(())
}
pub fn delete_grpc_connection(
&self,
m: &GrpcConnection,
source: &UpdateSource,
) -> Result<GrpcConnection> {
self.delete(m, source)
}
pub fn delete_grpc_connection_by_id(
&self,
id: &str,
source: &UpdateSource,
) -> Result<GrpcConnection> {
let grpc_connection = self.get_grpc_connection(id)?;
self.delete_grpc_connection(&grpc_connection, source)
}
pub fn list_grpc_connections_for_request(
&self,
request_id: &str,
limit: Option<u64>,
) -> Result<Vec<GrpcConnection>> {
self.find_many(GrpcConnectionIden::RequestId, request_id, limit)
}
pub fn list_grpc_connections_for_workspace(
&self,
workspace_id: &str,
limit: Option<u64>,
) -> Result<Vec<GrpcConnection>> {
self.find_many(GrpcConnectionIden::WorkspaceId, workspace_id, limit)
}
pub fn cancel_pending_grpc_connections(&self) -> Result<()> {
let closed = serde_json::to_value(&GrpcConnectionState::Closed)?;
let (sql, params) = Query::update()
.table(GrpcConnectionIden::Table)
.values([(GrpcConnectionIden::State, closed.as_str().into())])
.cond_where(Expr::col(GrpcConnectionIden::State).ne(closed.as_str()))
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
stmt.execute(&*params.as_params())?;
Ok(())
}
pub fn upsert_grpc_connection(
&self,
grpc_connection: &GrpcConnection,
source: &UpdateSource,
) -> Result<GrpcConnection> {
let connections =
self.list_grpc_connections_for_request(grpc_connection.request_id.as_str(), None)?;
for m in connections.iter().skip(MAX_HISTORY_ITEMS - 1) {
debug!("Deleting old gRPC connection {}", grpc_connection.id);
self.delete_grpc_connection(&m, source)?;
}
self.upsert(grpc_connection, source)
}
}

View File

@@ -0,0 +1,22 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{GrpcEvent, GrpcEventIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_grpc_events(&self, id: &str) -> Result<GrpcEvent> {
self.find_one(GrpcEventIden::Id, id)
}
pub fn list_grpc_events(&self, connection_id: &str) -> Result<Vec<GrpcEvent>> {
self.find_many(GrpcEventIden::ConnectionId, connection_id, None)
}
pub fn upsert_grpc_event(
&self,
grpc_event: &GrpcEvent,
source: &UpdateSource,
) -> Result<GrpcEvent> {
self.upsert(grpc_event, source)
}
}

View File

@@ -0,0 +1,51 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{GrpcRequest, GrpcRequestIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_grpc_request(&self, id: &str) -> Result<Option<GrpcRequest>> {
self.find_optional(GrpcRequestIden::Id, id)
}
pub fn list_grpc_requests(&self, workspace_id: &str) -> Result<Vec<GrpcRequest>> {
self.find_many(GrpcRequestIden::WorkspaceId, workspace_id, None)
}
pub fn delete_grpc_request(
&self,
m: &GrpcRequest,
source: &UpdateSource,
) -> Result<GrpcRequest> {
self.delete_all_grpc_connections_for_request(m.id.as_str(), source)?;
self.delete(m, source)
}
pub fn delete_grpc_request_by_id(
&self,
id: &str,
source: &UpdateSource,
) -> Result<GrpcRequest> {
let request = self.get_grpc_request(id)?.unwrap();
self.delete_grpc_request(&request, source)
}
pub fn duplicate_grpc_request(
&self,
grpc_request: &GrpcRequest,
source: &UpdateSource,
) -> Result<GrpcRequest> {
let mut request = grpc_request.clone();
request.id = "".to_string();
request.sort_priority += 0.001;
self.upsert(&request, source)
}
pub fn upsert_grpc_request(
&self,
grpc_request: &GrpcRequest,
source: &UpdateSource,
) -> Result<GrpcRequest> {
self.upsert(grpc_request, source)
}
}

View File

@@ -0,0 +1,51 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{HttpRequest, HttpRequestIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_http_request(&self, id: &str) -> Result<Option<HttpRequest>> {
self.find_optional(HttpRequestIden::Id, id)
}
pub fn list_http_requests(&self, workspace_id: &str) -> Result<Vec<HttpRequest>> {
self.find_many(HttpRequestIden::WorkspaceId, workspace_id, None)
}
pub fn delete_http_request(
&self,
m: &HttpRequest,
source: &UpdateSource,
) -> Result<HttpRequest> {
self.delete_all_http_responses_for_request(m.id.as_str(), source)?;
self.delete(m, source)
}
pub fn delete_http_request_by_id(
&self,
id: &str,
source: &UpdateSource,
) -> Result<HttpRequest> {
let http_request = self.get_http_request(id)?.unwrap();
self.delete_http_request(&http_request, source)
}
pub fn duplicate_http_request(
&self,
http_request: &HttpRequest,
source: &UpdateSource,
) -> Result<HttpRequest> {
let mut http_request = http_request.clone();
http_request.id = "".to_string();
http_request.sort_priority += 0.001;
self.upsert(&http_request, source)
}
pub fn upsert_http_request(
&self,
http_request: &HttpRequest,
source: &UpdateSource,
) -> Result<HttpRequest> {
self.upsert(http_request, source)
}
}

View File

@@ -0,0 +1,110 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{HttpResponse, HttpResponseIden, HttpResponseState};
use crate::queries::base::MAX_HISTORY_ITEMS;
use crate::queries_legacy::UpdateSource;
use log::{debug, error};
use sea_query::{Expr, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
use std::fs;
impl<'a> DbContext<'a> {
pub fn get_http_response(&self, id: &str) -> Result<HttpResponse> {
self.find_one(HttpResponseIden::Id, id)
}
pub fn list_http_responses_for_request(
&self,
request_id: &str,
limit: Option<u64>,
) -> Result<Vec<HttpResponse>> {
self.find_many(HttpResponseIden::RequestId, request_id, limit)
}
pub fn list_http_responses_for_workspace(
&self,
workspace_id: &str,
limit: Option<u64>,
) -> Result<Vec<HttpResponse>> {
self.find_many(HttpResponseIden::WorkspaceId, workspace_id, limit)
}
pub fn delete_all_http_responses_for_request(
&self,
request_id: &str,
source: &UpdateSource,
) -> Result<()> {
let responses = self.list_http_responses_for_request(request_id, None)?;
for m in responses {
self.delete(&m, source)?;
}
Ok(())
}
pub fn delete_all_http_responses_for_workspace(
&self,
workspace_id: &str,
source: &UpdateSource,
) -> Result<()> {
let responses =
self.find_many::<HttpResponse>(HttpResponseIden::WorkspaceId, workspace_id, None)?;
for m in responses {
self.delete(&m, source)?;
}
Ok(())
}
pub fn delete_http_response(
&self,
http_response: &HttpResponse,
source: &UpdateSource,
) -> Result<HttpResponse> {
// Delete the body file if it exists
if let Some(p) = http_response.body_path.clone() {
if let Err(e) = fs::remove_file(p) {
error!("Failed to delete body file: {}", e);
};
}
self.delete(http_response, source)
}
pub fn upsert_http_response(
&self,
http_response: &HttpResponse,
source: &UpdateSource,
) -> Result<HttpResponse> {
let responses = self.list_http_responses_for_request(&http_response.request_id, None)?;
for m in responses.iter().skip(MAX_HISTORY_ITEMS - 1) {
debug!("Deleting old HTTP response {}", http_response.id);
self.delete_http_response(&m, source)?;
}
self.upsert(http_response, source)
}
pub fn cancel_pending_http_responses(&self) -> Result<()> {
let closed = serde_json::to_value(&HttpResponseState::Closed)?;
let (sql, params) = Query::update()
.table(HttpResponseIden::Table)
.values([(HttpResponseIden::State, closed.as_str().into())])
.cond_where(Expr::col(HttpResponseIden::State).ne(closed.as_str()))
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
stmt.execute(&*params.as_params())?;
Ok(())
}
pub fn update_http_response_if_id(
&self,
response: &HttpResponse,
source: &UpdateSource,
) -> Result<HttpResponse> {
if response.id.is_empty() {
Ok(response.clone())
} else {
self.upsert(response, source)
}
}
}
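The pruning arithmetic in `upsert_http_response`: `skip(MAX_HISTORY_ITEMS - 1)` deletes everything past the first 19 listed responses, so after the new row is inserted at most 20 remain. This assumes the listing returns newest-first; `find_many` as written applies no ORDER BY. A worked sketch:

const MAX_HISTORY_ITEMS: usize = 20; // mirrors queries::base

fn deleted_count(existing: usize) -> usize {
    // Everything past index MAX_HISTORY_ITEMS - 2 is pruned.
    existing.saturating_sub(MAX_HISTORY_ITEMS - 1)
}

fn prune_demo() {
    // 25 existing responses: 6 deleted, 19 kept, plus 1 new = 20.
    assert_eq!(deleted_count(25), 6);
    assert_eq!(25 - deleted_count(25) + 1, MAX_HISTORY_ITEMS);
}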

View File

@@ -0,0 +1,164 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{KeyValue, KeyValueIden};
use crate::queries_legacy::{ModelChangeEvent, ModelPayload, UpdateSource};
use log::error;
use sea_query::Keyword::CurrentTimestamp;
use sea_query::{Asterisk, Cond, Expr, OnConflict, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
impl<'a> DbContext<'a> {
pub fn list_key_values_raw(&self) -> Result<Vec<KeyValue>> {
let (sql, params) = Query::select()
.from(KeyValueIden::Table)
.column(Asterisk)
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
let items = stmt.query_map(&*params.as_params(), |row| row.try_into())?;
Ok(items.map(|v| v.unwrap()).collect())
}
pub fn get_key_value_string(&self, namespace: &str, key: &str, default: &str) -> String {
match self.get_key_value_raw(namespace, key) {
None => default.to_string(),
Some(v) => {
let result = serde_json::from_str(&v.value);
match result {
Ok(v) => v,
Err(e) => {
error!("Failed to parse string key value: {}", e);
default.to_string()
}
}
}
}
}
pub fn get_key_value_int(&self, namespace: &str, key: &str, default: i32) -> i32 {
match self.get_key_value_raw(namespace, key) {
None => default,
Some(v) => {
let result = serde_json::from_str(&v.value);
match result {
Ok(v) => v,
Err(e) => {
error!("Failed to parse int key value: {}", e);
default
}
}
}
}
}
pub fn get_key_value_raw(&self, namespace: &str, key: &str) -> Option<KeyValue> {
let (sql, params) = Query::select()
.from(KeyValueIden::Table)
.column(Asterisk)
.cond_where(
Cond::all()
.add(Expr::col(KeyValueIden::Namespace).eq(namespace))
.add(Expr::col(KeyValueIden::Key).eq(key)),
)
.build_rusqlite(SqliteQueryBuilder);
self.conn.resolve().query_row(sql.as_str(), &*params.as_params(), |row| row.try_into()).ok()
}
pub fn set_key_value_string(
&self,
namespace: &str,
key: &str,
value: &str,
source: &UpdateSource,
) -> (KeyValue, bool) {
let encoded = serde_json::to_string(&value).unwrap();
self.set_key_value_raw(namespace, key, &encoded, source)
}
pub fn set_key_value_int(
&self,
namespace: &str,
key: &str,
value: i32,
source: &UpdateSource,
) -> (KeyValue, bool) {
let encoded = serde_json::to_string(&value).unwrap();
self.set_key_value_raw(namespace, key, &encoded, source)
}
pub fn set_key_value_raw(
&self,
namespace: &str,
key: &str,
value: &str,
source: &UpdateSource,
) -> (KeyValue, bool) {
let existing = self.get_key_value_raw(namespace, key);
let (sql, params) = Query::insert()
.into_table(KeyValueIden::Table)
.columns([
KeyValueIden::CreatedAt,
KeyValueIden::UpdatedAt,
KeyValueIden::Namespace,
KeyValueIden::Key,
KeyValueIden::Value,
])
.values_panic([
CurrentTimestamp.into(),
CurrentTimestamp.into(),
namespace.into(),
key.into(),
value.into(),
])
.on_conflict(
OnConflict::new()
.update_columns([KeyValueIden::UpdatedAt, KeyValueIden::Value])
.to_owned(),
)
.returning_all()
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str()).expect("Failed to prepare KeyValue upsert");
let m: KeyValue = stmt
.query_row(&*params.as_params(), |row| row.try_into())
.expect("Failed to upsert KeyValue");
let payload = ModelPayload {
model: m.clone().into(),
update_source: source.clone(),
change: ModelChangeEvent::Upsert,
};
self.tx.try_send(payload).unwrap();
(m, existing.is_none())
}
pub fn delete_key_value(
&self,
namespace: &str,
key: &str,
source: &UpdateSource,
) -> Result<()> {
let kv = match self.get_key_value_raw(namespace, key) {
None => return Ok(()),
Some(m) => m,
};
let (sql, params) = Query::delete()
.from_table(KeyValueIden::Table)
.cond_where(
Cond::all()
.add(Expr::col(KeyValueIden::Namespace).eq(namespace))
.add(Expr::col(KeyValueIden::Key).eq(key)),
)
.build_rusqlite(SqliteQueryBuilder);
self.conn.execute(sql.as_str(), &*params.as_params())?;
let payload = ModelPayload {
model: kv.clone().into(),
update_source: source.clone(),
change: ModelChangeEvent::Delete,
};
self.tx.try_send(payload).unwrap();
Ok(())
}
}
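Values are JSON-encoded on write and decoded on read, so the stored `value` column holds the quoted form. A round-trip sketch (the namespace and key are illustrative):

use crate::manager::DbContext;
use crate::queries_legacy::UpdateSource;

fn kv_demo(db: &DbContext<'_>) {
    let (kv, created) =
        db.set_key_value_string("app", "theme", "dark", &UpdateSource::Background);
    let _ = created; // true only if the key did not exist before
    assert_eq!(kv.value, r#""dark""#); // raw column value is JSON-encoded
    assert_eq!(db.get_key_value_string("app", "theme", "light"), "dark");
}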

View File

@@ -0,0 +1,20 @@
mod base;
mod batch;
mod cookie_jars;
mod environments;
mod folders;
mod grpc_connections;
mod grpc_events;
mod grpc_requests;
mod http_requests;
mod http_responses;
mod key_values;
mod plugin_key_values;
mod plugins;
mod settings;
mod sync_states;
mod websocket_connections;
mod websocket_events;
mod websocket_requests;
mod workspace_metas;
mod workspaces;

View File

@@ -0,0 +1,79 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{PluginKeyValue, PluginKeyValueIden};
use sea_query::Keyword::CurrentTimestamp;
use sea_query::{Asterisk, Cond, Expr, OnConflict, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
impl<'a> DbContext<'a> {
pub fn get_plugin_key_value(&self, plugin_name: &str, key: &str) -> Option<PluginKeyValue> {
let (sql, params) = Query::select()
.from(PluginKeyValueIden::Table)
.column(Asterisk)
.cond_where(
Cond::all()
.add(Expr::col(PluginKeyValueIden::PluginName).eq(plugin_name))
.add(Expr::col(PluginKeyValueIden::Key).eq(key)),
)
.build_rusqlite(SqliteQueryBuilder);
self.conn.resolve().query_row(sql.as_str(), &*params.as_params(), |row| row.try_into()).ok()
}
pub fn set_plugin_key_value(
&self,
plugin_name: &str,
key: &str,
value: &str,
) -> (PluginKeyValue, bool) {
let existing = self.get_plugin_key_value(plugin_name, key);
let (sql, params) = Query::insert()
.into_table(PluginKeyValueIden::Table)
.columns([
PluginKeyValueIden::CreatedAt,
PluginKeyValueIden::UpdatedAt,
PluginKeyValueIden::PluginName,
PluginKeyValueIden::Key,
PluginKeyValueIden::Value,
])
.values_panic([
CurrentTimestamp.into(),
CurrentTimestamp.into(),
plugin_name.into(),
key.into(),
value.into(),
])
.on_conflict(
OnConflict::new()
.update_columns([PluginKeyValueIden::UpdatedAt, PluginKeyValueIden::Value])
.to_owned(),
)
.returning_all()
.build_rusqlite(SqliteQueryBuilder);
let mut stmt =
self.conn.prepare(sql.as_str()).expect("Failed to prepare PluginKeyValue upsert");
let m: PluginKeyValue = stmt
.query_row(&*params.as_params(), |row| row.try_into())
.expect("Failed to upsert KeyValue");
(m, existing.is_none())
}
pub fn delete_plugin_key_value(&self, namespace: &str, key: &str) -> Result<bool> {
if self.get_plugin_key_value(namespace, key).is_none() {
return Ok(false);
};
let (sql, params) = Query::delete()
.from_table(PluginKeyValueIden::Table)
.cond_where(
Cond::all()
.add(Expr::col(PluginKeyValueIden::PluginName).eq(namespace))
.add(Expr::col(PluginKeyValueIden::Key).eq(key)),
)
.build_rusqlite(SqliteQueryBuilder);
self.conn.execute(sql.as_str(), &*params.as_params())?;
Ok(true)
}
}

View File

@@ -0,0 +1,27 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{Plugin, PluginIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_plugin(&self, id: &str) -> Result<Plugin> {
self.find_one(PluginIden::Id, id)
}
pub fn list_plugins(&self) -> Result<Vec<Plugin>> {
self.find_all()
}
pub fn delete_plugin(&self, plugin: &Plugin, source: &UpdateSource) -> Result<Plugin> {
self.delete(plugin, source)
}
pub fn delete_plugin_by_id(&self, id: &str, source: &UpdateSource) -> Result<Plugin> {
let plugin = self.get_plugin(id)?;
self.delete_plugin(&plugin, source)
}
pub fn upsert_plugin(&self, plugin: &Plugin, source: &UpdateSource) -> Result<Plugin> {
self.upsert(plugin, source)
}
}

View File

@@ -0,0 +1,25 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{Settings, SettingsIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_or_create_settings(&self, source: &UpdateSource) -> Result<Settings> {
let id = "default";
if let Some(s) = self.find_optional::<Settings>(SettingsIden::Id, id)? {
return Ok(s);
};
self.upsert(
&Settings {
id: id.to_string(),
..Default::default()
},
source,
)
}
pub fn upsert_settings(&self, settings: &Settings, source: &UpdateSource) -> Result<Settings> {
self.upsert(settings, source)
}
}

View File

@@ -0,0 +1,45 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{SyncState, SyncStateIden, UpsertModelInfo};
use crate::queries_legacy::UpdateSource;
use sea_query::{Asterisk, Cond, Expr, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
use std::path::Path;
impl<'a> DbContext<'a> {
pub fn get_sync_state(&self, id: &str) -> Result<SyncState> {
self.find_one(SyncStateIden::Id, id)
}
pub fn upsert_sync_state(&self, sync_state: &SyncState) -> Result<SyncState> {
self.upsert(sync_state, &UpdateSource::Sync)
}
pub fn list_sync_states_for_workspace(
&self,
workspace_id: &str,
sync_dir: &Path,
) -> Result<Vec<SyncState>> {
let (sql, params) = Query::select()
.from(SyncStateIden::Table)
.column(Asterisk)
.cond_where(
Cond::all()
.add(Expr::col(SyncStateIden::WorkspaceId).eq(workspace_id))
.add(Expr::col(SyncStateIden::SyncDir).eq(sync_dir.to_string_lossy())),
)
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
let items = stmt.query_map(&*params.as_params(), SyncState::from_row)?;
Ok(items.map(|v| v.unwrap()).collect())
}
pub fn delete_sync_state(&self, sync_state: &SyncState) -> Result<SyncState> {
self.delete(sync_state, &UpdateSource::Sync)
}
pub fn delete_sync_state_by_id(&self, id: &str) -> Result<SyncState> {
let sync_state = self.get_sync_state(id)?;
self.delete_sync_state(&sync_state)
}
}

View File

@@ -0,0 +1,97 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{WebsocketConnection, WebsocketConnectionIden, WebsocketConnectionState};
use crate::queries::base::MAX_HISTORY_ITEMS;
use crate::queries_legacy::UpdateSource;
use log::debug;
use sea_query::{Expr, Query, SqliteQueryBuilder};
use sea_query_rusqlite::RusqliteBinder;
impl<'a> DbContext<'a> {
pub fn get_websocket_connection(&self, id: &str) -> Result<WebsocketConnection> {
self.find_one(WebsocketConnectionIden::Id, id)
}
pub fn delete_all_websocket_connections_for_request(
&self,
request_id: &str,
source: &UpdateSource,
) -> Result<()> {
let responses = self.list_websocket_connections_for_request(request_id)?;
for m in responses {
self.delete(&m, source)?;
}
Ok(())
}
pub fn delete_all_websocket_connections_for_workspace(
&self,
workspace_id: &str,
source: &UpdateSource,
) -> Result<()> {
let responses = self.list_websocket_connections_for_workspace(workspace_id)?;
for m in responses {
self.delete(&m, source)?;
}
Ok(())
}
pub fn list_websocket_connections_for_workspace(
&self,
workspace_id: &str,
) -> Result<Vec<WebsocketConnection>> {
self.find_many(WebsocketConnectionIden::WorkspaceId, workspace_id, None)
}
pub fn list_websocket_connections_for_request(
&self,
request_id: &str,
) -> Result<Vec<WebsocketConnection>> {
self.find_many(WebsocketConnectionIden::RequestId, request_id, None)
}
pub fn delete_websocket_connection(
&self,
websocket_connection: &WebsocketConnection,
source: &UpdateSource,
) -> Result<WebsocketConnection> {
self.delete(websocket_connection, source)
}
pub fn delete_websocket_connection_by_id(
&self,
id: &str,
source: &UpdateSource,
) -> Result<WebsocketConnection> {
let websocket_connection = self.get_websocket_connection(id)?;
self.delete_websocket_connection(&websocket_connection, source)
}
pub fn upsert_websocket_connection(
&self,
websocket_connection: &WebsocketConnection,
source: &UpdateSource,
) -> Result<WebsocketConnection> {
let connections =
self.list_websocket_connections_for_request(&websocket_connection.request_id)?;
for m in connections.iter().skip(MAX_HISTORY_ITEMS - 1) {
debug!("Deleting old websocket connection {}", websocket_connection.id);
self.delete_websocket_connection(&m, source)?;
}
self.upsert(websocket_connection, source)
}
pub fn cancel_pending_websocket_connections(&self) -> Result<()> {
let closed = serde_json::to_value(&WebsocketConnectionState::Closed)?;
let (sql, params) = Query::update()
.table(WebsocketConnectionIden::Table)
.values([(WebsocketConnectionIden::State, closed.as_str().into())])
.cond_where(Expr::col(WebsocketConnectionIden::State).ne(closed.as_str()))
.build_rusqlite(SqliteQueryBuilder);
let mut stmt = self.conn.prepare(sql.as_str())?;
stmt.execute(&*params.as_params())?;
Ok(())
}
}

View File

@@ -0,0 +1,25 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{WebsocketEvent, WebsocketEventIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_websocket_event(&self, id: &str) -> Result<WebsocketEvent> {
self.find_one(WebsocketEventIden::Id, id)
}
pub fn list_websocket_events(&self, connection_id: &str) -> Result<Vec<WebsocketEvent>> {
self.find_many(WebsocketEventIden::ConnectionId, connection_id, None)
}
pub fn upsert_websocket_event(
&self,
websocket_event: &WebsocketEvent,
source: &UpdateSource,
) -> Result<WebsocketEvent> {
self.upsert(websocket_event, source)
}
}

View File

@@ -0,0 +1,51 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{WebsocketRequest, WebsocketRequestIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_websocket_request(&self, id: &str) -> Result<Option<WebsocketRequest>> {
self.find_optional(WebsocketRequestIden::Id, id)
}
pub fn list_websocket_requests(&self, workspace_id: &str) -> Result<Vec<WebsocketRequest>> {
self.find_many(WebsocketRequestIden::WorkspaceId, workspace_id, None)
}
pub fn delete_websocket_request(
&self,
websocket_request: &WebsocketRequest,
source: &UpdateSource,
) -> Result<WebsocketRequest> {
self.delete_all_websocket_connections_for_request(websocket_request.id.as_str(), source)?;
self.delete(websocket_request, source)
}
pub fn delete_websocket_request_by_id(
&self,
id: &str,
source: &UpdateSource,
) -> Result<WebsocketRequest> {
let request = self.get_websocket_request(id)?.unwrap();
self.delete_websocket_request(&request, source)
}
pub fn duplicate_websocket_request(
&self,
websocket_request: &WebsocketRequest,
source: &UpdateSource,
) -> Result<WebsocketRequest> {
let mut websocket_request = websocket_request.clone();
websocket_request.id = "".to_string();
websocket_request.sort_priority += 0.001;
self.upsert(&websocket_request, source)
}
pub fn upsert_websocket_request(
&self,
websocket_request: &WebsocketRequest,
source: &UpdateSource,
) -> Result<WebsocketRequest> {
self.upsert(websocket_request, source)
}
}

View File

@@ -0,0 +1,36 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{Workspace, WorkspaceMeta, WorkspaceMetaIden};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_workspace_meta(&self, workspace: &Workspace) -> Result<Option<WorkspaceMeta>> {
self.find_optional(WorkspaceMetaIden::WorkspaceId, &workspace.id)
}
pub fn get_or_create_workspace_meta(
&self,
workspace: &Workspace,
source: &UpdateSource,
) -> Result<WorkspaceMeta> {
let workspace_meta = self.get_workspace_meta(workspace)?;
if let Some(workspace_meta) = workspace_meta {
return Ok(workspace_meta);
}
let workspace_meta = WorkspaceMeta {
workspace_id: workspace.id.clone(),
..Default::default()
};
self.upsert_workspace_meta(&workspace_meta, source)
}
pub fn upsert_workspace_meta(
&self,
workspace_meta: &WorkspaceMeta,
source: &UpdateSource,
) -> Result<WorkspaceMeta> {
self.upsert(workspace_meta, source)
}
}

View File

@@ -0,0 +1,52 @@
use crate::error::Result;
use crate::manager::DbContext;
use crate::models::{
Folder, FolderIden, GrpcRequest, GrpcRequestIden, HttpRequest, HttpRequestIden,
WebsocketRequest, WebsocketRequestIden, Workspace, WorkspaceIden,
};
use crate::queries_legacy::UpdateSource;
impl<'a> DbContext<'a> {
pub fn get_workspace(&self, id: &str) -> Result<Workspace> {
self.find_one(WorkspaceIden::Id, id)
}
pub fn list_workspaces(&self) -> Result<Vec<Workspace>> {
self.find_all()
}
pub fn delete_workspace(
&self,
workspace: &Workspace,
source: &UpdateSource,
) -> Result<Workspace> {
for folder in self.find_many::<Folder>(FolderIden::WorkspaceId, &workspace.id, None)? {
self.delete_folder(&folder, source)?;
}
for request in
self.find_many::<HttpRequest>(HttpRequestIden::WorkspaceId, &workspace.id, None)?
{
self.delete_http_request(&request, source)?;
}
for request in
self.find_many::<GrpcRequest>(GrpcRequestIden::WorkspaceId, &workspace.id, None)?
{
self.delete_grpc_request(&request, source)?;
}
for request in
self.find_many::<WebsocketRequest>(WebsocketRequestIden::WorkspaceId, &workspace.id, None)?
{
self.delete_websocket_request(&request, source)?;
}
self.delete(workspace, source)
}
pub fn delete_workspace_by_id(&self, id: &str, source: &UpdateSource) -> Result<Workspace> {
let workspace = self.get_workspace(id)?;
self.delete_workspace(&workspace, source)
}
pub fn upsert_workspace(&self, w: &Workspace, source: &UpdateSource) -> Result<Workspace> {
self.upsert(w, source)
}
}

View File

@@ -0,0 +1,150 @@
use crate::error::Result;
use crate::manager::QueryManagerExt;
use crate::models::{
    AnyModel, Environment, Folder, GrpcRequest, HttpRequest, ModelType, WebsocketRequest,
    Workspace, WorkspaceIden,
};
use chrono::{NaiveDateTime, Utc};
use log::warn;
use nanoid::nanoid;
use serde::{Deserialize, Serialize};
use tauri::{AppHandle, Listener, Runtime, WebviewWindow};
use ts_rs::TS;
pub fn generate_model_id(model: ModelType) -> String {
let id = generate_id();
format!("{}_{}", model.id_prefix(), id)
}
pub fn generate_id() -> String {
let alphabet: [char; 57] = [
'2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z',
];
nanoid!(10, &alphabet)
}
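The 57-character alphabet omits visually ambiguous characters (`0`, `1`, `l`, `I`, `O`), so ids stay short but unambiguous. A sketch of the shape (the prefix shown in the comment is illustrative; concrete output is random):

use crate::models::ModelType;
use crate::queries_legacy::{generate_id, generate_model_id};

fn id_demo() {
    assert_eq!(generate_id().len(), 10);
    // e.g. "env_3kTq8rMn2p": ModelType::id_prefix() + '_' + 10 random chars.
    let id = generate_model_id(ModelType::TypeEnvironment);
    assert!(id.contains('_'));
}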
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export, export_to = "gen_models.ts")]
pub struct ModelPayload {
pub model: AnyModel,
pub update_source: UpdateSource,
pub change: ModelChangeEvent,
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_models.ts")]
pub enum ModelChangeEvent {
Upsert,
Delete,
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_models.ts")]
pub enum UpdateSource {
Sync,
Window { label: String },
Plugin,
Background,
Import,
}
impl UpdateSource {
pub fn from_window<R: Runtime>(window: &WebviewWindow<R>) -> Self {
Self::Window {
label: window.label().to_string(),
}
}
}
pub fn listen_to_model_delete<F, R>(app_handle: &AppHandle<R>, handler: F)
where
F: Fn(ModelPayload) + Send + 'static,
R: Runtime,
{
app_handle.listen_any("deleted_model", move |e| {
match serde_json::from_str(e.payload()) {
Ok(payload) => handler(payload),
Err(e) => {
warn!("Failed to deserialize deleted model {}", e);
return;
}
};
});
}
pub fn listen_to_model_upsert<F, R>(app_handle: &AppHandle<R>, handler: F)
where
F: Fn(ModelPayload) + Send + 'static,
R: Runtime,
{
app_handle.listen_any("upserted_model", move |e| {
match serde_json::from_str(e.payload()) {
Ok(payload) => handler(payload),
Err(e) => {
warn!("Failed to deserialize upserted model {}", e);
return;
}
};
});
}
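Backend code can subscribe to the same events the frontend receives. A minimal sketch:

use crate::queries_legacy::{listen_to_model_upsert, ModelPayload};

fn watch_models<R: tauri::Runtime>(app_handle: &tauri::AppHandle<R>) {
    listen_to_model_upsert(app_handle, |payload: ModelPayload| {
        // UpdateSource says which window/plugin/sync produced the change.
        log::info!("model upserted via {:?}", payload.update_source);
    });
}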
#[derive(Default, Debug, Deserialize, Serialize)]
#[serde(default, rename_all = "camelCase")]
pub struct WorkspaceExport {
pub yaak_version: String,
pub yaak_schema: i64,
pub timestamp: NaiveDateTime,
pub resources: BatchUpsertResult,
}
#[derive(Default, Debug, Deserialize, Serialize)]
#[serde(default, rename_all = "camelCase")]
pub struct BatchUpsertResult {
pub workspaces: Vec<Workspace>,
pub environments: Vec<Environment>,
pub folders: Vec<Folder>,
pub http_requests: Vec<HttpRequest>,
pub grpc_requests: Vec<GrpcRequest>,
pub websocket_requests: Vec<WebsocketRequest>,
}
pub async fn get_workspace_export_resources<R: Runtime>(
app_handle: &AppHandle<R>,
workspace_ids: Vec<&str>,
include_environments: bool,
) -> Result<WorkspaceExport> {
let mut data = WorkspaceExport {
yaak_version: app_handle.package_info().version.clone().to_string(),
yaak_schema: 3,
timestamp: Utc::now().naive_utc(),
resources: BatchUpsertResult {
workspaces: Vec::new(),
environments: Vec::new(),
folders: Vec::new(),
http_requests: Vec::new(),
grpc_requests: Vec::new(),
websocket_requests: Vec::new(),
},
};
let db = app_handle.queries().connect().await?;
for workspace_id in workspace_ids {
data.resources.workspaces.push(db.find_one(WorkspaceIden::Id, workspace_id)?);
data.resources.environments.append(&mut db.list_environments(workspace_id)?);
data.resources.folders.append(&mut db.list_folders(workspace_id)?);
data.resources.http_requests.append(&mut db.list_http_requests(workspace_id)?);
data.resources.grpc_requests.append(&mut db.list_grpc_requests(workspace_id)?);
data.resources.websocket_requests.append(&mut db.list_websocket_requests(workspace_id)?);
}
// Nuke environments if we don't want them
if !include_environments {
data.resources.environments.clear();
}
Ok(data)
}
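A sketch of driving an export (the workspace id is illustrative; `WorkspaceExport` derives `Serialize`, so it can be written straight to JSON):

use crate::queries_legacy::get_workspace_export_resources;

async fn export_demo<R: tauri::Runtime>(
    app_handle: &tauri::AppHandle<R>,
) -> crate::error::Result<String> {
    let export = get_workspace_export_resources(app_handle, vec!["wk_123"], true).await?;
    Ok(serde_json::to_string_pretty(&export)?)
}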