Decouple core Yaak logic from Tauri (#354)

Gregory Schier
2026-01-08 20:44:25 -08:00
committed by GitHub
parent 3bcc0b8356
commit ef80216ca1
465 changed files with 3052 additions and 6234 deletions


@@ -0,0 +1,44 @@
use serde::{Serialize, Serializer};
use std::io;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum Error {
#[error("Yaml error: {0}")]
YamlParseError(#[from] serde_yaml::Error),
#[error("Sync parse error: {0}")]
ParseError(String),
#[error(transparent)]
ModelError(#[from] yaak_models::error::Error),
#[error("Unknown model: {0}")]
UnknownModel(String),
#[error("I/o error: {0}")]
IoError(#[from] io::Error),
#[error("JSON error: {0}")]
JsonParseError(#[from] serde_json::Error),
#[error("Invalid sync file: {0}")]
InvalidSyncFile(String),
#[error("Invalid sync directory: {0}")]
InvalidSyncDirectory(String),
#[error("Watch error: {0}")]
NotifyError(#[from] notify::Error),
}
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_ref())
}
}
pub type Result<T> = std::result::Result<T, Error>;
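
A small illustration (hypothetical, not part of this commit) of what the custom Serialize impl buys: errors serialize as their plain Display strings, which keeps them readable when they cross an IPC or API boundary.

use crate::error::Error;

fn error_to_json(err: &Error) -> String {
    // The Serialize impl above emits the Display string, so this produces
    // e.g. "\"Invalid sync file: missing model field\""
    serde_json::to_string(err).expect("serializing a string cannot fail")
}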


@@ -0,0 +1,4 @@
pub mod error;
pub mod models;
pub mod sync;
pub mod watch;


@@ -0,0 +1,302 @@
use crate::error::Error::UnknownModel;
use crate::error::Result;
use chrono::NaiveDateTime;
use log::{debug, warn};
use serde::{Deserialize, Deserializer, Serialize};
use serde_yaml::{Mapping, Value};
use sha1::{Digest, Sha1};
use std::fs;
use std::path::Path;
use ts_rs::TS;
use yaak_models::models::{
AnyModel, Environment, Folder, GrpcRequest, HttpRequest, WebsocketRequest, Workspace,
};
#[derive(Debug, Clone, PartialEq, Serialize, TS)]
#[serde(rename_all = "snake_case", tag = "type")]
#[ts(export, export_to = "gen_models.ts")]
pub enum SyncModel {
Workspace(Workspace),
Environment(Environment),
Folder(Folder),
HttpRequest(HttpRequest),
GrpcRequest(GrpcRequest),
WebsocketRequest(WebsocketRequest),
}
impl<'de> Deserialize<'de> for SyncModel {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
use serde_path_to_error as spte;
let mut v = Value::deserialize(deserializer)?;
let model = match v.get("model") {
Some(Value::String(model)) => model.clone(),
_ => "".to_string(),
};
let model = model.as_str();
let obj = v
.as_mapping_mut()
.ok_or_else(|| serde::de::Error::custom("expected object for SyncModel"))?;
// Dispatch to CHILD types (no recursion)
match model {
"workspace" => {
let x: Workspace = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::Workspace(x))
}
"environment" => {
migrate_environment(obj);
let x: Environment = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::Environment(x))
}
"folder" => {
let x: Folder = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::Folder(x))
}
"http_request" => {
let x: HttpRequest = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::HttpRequest(x))
}
"grpc_request" => {
let x: GrpcRequest = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::GrpcRequest(x))
}
"websocket_request" => {
let x: WebsocketRequest = spte::deserialize(v).map_err(serde::de::Error::custom)?;
Ok(SyncModel::WebsocketRequest(x))
}
other => Err(serde::de::Error::unknown_variant(
other,
&[
"workspace",
"environment",
"folder",
"http_request",
"grpc_request",
"websocket_request",
],
)),
}
}
}
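/// Legacy environments stored `base: bool` instead of `parentModel`. Map
/// `base: true` to a workspace-parented environment and `base: false` to an
/// environment-parented one; files that already have `parentModel` are untouched.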
fn migrate_environment(obj: &mut Mapping) {
match (obj.get("base"), obj.get("parentModel")) {
(Some(Value::Bool(base)), None) => {
debug!("Migrating legacy environment {:?}", obj.get("id"));
if *base {
obj.insert("parentModel".into(), "workspace".into());
} else {
obj.insert("parentModel".into(), "environment".into());
}
}
_ => {}
}
}
impl SyncModel {
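    /// Parse raw file bytes into a `SyncModel`, returning it with a SHA-1 checksum
    /// of the original content. Returns `Ok(None)` for files that don't look like
    /// Yaak models, have an unrecognized extension, or fail to parse (logged).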
pub fn from_bytes(content: Vec<u8>, file_path: &Path) -> Result<Option<(SyncModel, String)>> {
let mut hasher = Sha1::new();
hasher.update(&content);
let checksum = hex::encode(hasher.finalize());
        let content_str = String::from_utf8(content).unwrap_or_default();
        // Cheap heuristic: every Yaak model file contains the strings "model" and
        // "id", so skip anything that lacks either one
if !content_str.contains("model") || !content_str.contains("id") {
return Ok(None);
}
let ext = file_path.extension().unwrap_or_default();
if ext == "yml" || ext == "yaml" {
Ok(match serde_yaml::from_str::<SyncModel>(&content_str) {
Ok(m) => Some((m, checksum)),
Err(e) => {
warn!("Error parsing {:?} {:?}", file_path.file_name(), e);
None
}
})
} else if ext == "json" {
Ok(match serde_json::from_str::<SyncModel>(&content_str) {
Ok(m) => Some((m, checksum)),
Err(e) => {
warn!("Error parsing {:?} {:?}", file_path.file_name(), e);
None
}
})
} else {
Ok(None)
}
}
pub fn from_file(file_path: &Path) -> Result<Option<(SyncModel, String)>> {
let content = match fs::read(file_path) {
Ok(c) => c,
Err(_) => return Ok(None),
};
Self::from_bytes(content, file_path)
}
pub fn to_file_contents(&self, rel_path: &Path) -> Result<(Vec<u8>, String)> {
let ext = rel_path.extension().unwrap_or_default();
let content = if ext == "yaml" || ext == "yml" {
serde_yaml::to_string(self)?
} else {
serde_json::to_string(self)?
};
let mut hasher = Sha1::new();
hasher.update(&content);
let checksum = hex::encode(hasher.finalize());
Ok((content.into_bytes(), checksum))
}
    pub fn id(&self) -> String {
        // Match on a reference to avoid cloning the entire model
        match self {
            SyncModel::Workspace(m) => m.id.clone(),
            SyncModel::Environment(m) => m.id.clone(),
            SyncModel::Folder(m) => m.id.clone(),
            SyncModel::HttpRequest(m) => m.id.clone(),
            SyncModel::GrpcRequest(m) => m.id.clone(),
            SyncModel::WebsocketRequest(m) => m.id.clone(),
        }
    }
    pub fn workspace_id(&self) -> String {
        match self {
            // A workspace belongs to itself
            SyncModel::Workspace(m) => m.id.clone(),
            SyncModel::Environment(m) => m.workspace_id.clone(),
            SyncModel::Folder(m) => m.workspace_id.clone(),
            SyncModel::HttpRequest(m) => m.workspace_id.clone(),
            SyncModel::GrpcRequest(m) => m.workspace_id.clone(),
            SyncModel::WebsocketRequest(m) => m.workspace_id.clone(),
        }
    }
    pub fn updated_at(&self) -> NaiveDateTime {
        match self {
            SyncModel::Workspace(m) => m.updated_at,
            SyncModel::Environment(m) => m.updated_at,
            SyncModel::Folder(m) => m.updated_at,
            SyncModel::HttpRequest(m) => m.updated_at,
            SyncModel::GrpcRequest(m) => m.updated_at,
            SyncModel::WebsocketRequest(m) => m.updated_at,
        }
    }
}
impl TryFrom<AnyModel> for SyncModel {
type Error = crate::error::Error;
fn try_from(value: AnyModel) -> Result<Self> {
let m = match value {
AnyModel::Environment(m) => SyncModel::Environment(m),
AnyModel::Folder(m) => SyncModel::Folder(m),
AnyModel::GrpcRequest(m) => SyncModel::GrpcRequest(m),
AnyModel::HttpRequest(m) => SyncModel::HttpRequest(m),
AnyModel::WebsocketRequest(m) => SyncModel::WebsocketRequest(m),
AnyModel::Workspace(m) => SyncModel::Workspace(m),
// Non-sync models
AnyModel::CookieJar(m) => return Err(UnknownModel(m.model)),
AnyModel::GraphQlIntrospection(m) => return Err(UnknownModel(m.model)),
AnyModel::GrpcConnection(m) => return Err(UnknownModel(m.model)),
AnyModel::GrpcEvent(m) => return Err(UnknownModel(m.model)),
AnyModel::HttpResponse(m) => return Err(UnknownModel(m.model)),
AnyModel::HttpResponseEvent(m) => return Err(UnknownModel(m.model)),
AnyModel::KeyValue(m) => return Err(UnknownModel(m.model)),
AnyModel::Plugin(m) => return Err(UnknownModel(m.model)),
AnyModel::Settings(m) => return Err(UnknownModel(m.model)),
AnyModel::WebsocketConnection(m) => return Err(UnknownModel(m.model)),
AnyModel::WebsocketEvent(m) => return Err(UnknownModel(m.model)),
AnyModel::WorkspaceMeta(m) => return Err(UnknownModel(m.model)),
AnyModel::SyncState(m) => return Err(UnknownModel(m.model)),
};
Ok(m)
}
}
#[cfg(test)]
mod migration_tests {
use crate::error::Result;
use crate::models::SyncModel;
#[test]
fn deserializes_environment_via_syncmodel_with_fixups() -> Result<()> {
let raw = r#"
type: environment
model: environment
id: ev_fAUS49FUN2
workspaceId: wk_kfSI3JDHd7
createdAt: 2025-01-11T17:02:58.012792
updatedAt: 2025-07-23T20:00:46.049649
name: Global Variables
public: true
base: true
variables: []
color: null
"#;
let m: SyncModel = serde_yaml::from_str(raw)?;
match m {
SyncModel::Environment(env) => {
assert_eq!(env.parent_model, "workspace".to_string());
assert_eq!(env.parent_id, None);
}
_ => panic!("expected base environment"),
}
let raw = r#"
type: environment
model: environment
id: ev_fAUS49FUN2
workspaceId: wk_kfSI3JDHd7
createdAt: 2025-01-11T17:02:58.012792
updatedAt: 2025-07-23T20:00:46.049649
name: Global Variables
public: true
base: false
variables: []
color: null
"#;
let m: SyncModel = serde_yaml::from_str(raw)?;
match m {
SyncModel::Environment(env) => {
assert_eq!(env.parent_model, "environment".to_string());
assert_eq!(env.parent_id, None);
}
_ => panic!("expected sub environment"),
}
let raw = r#"
type: environment
model: environment
id: ev_fAUS49FUN2
parentId: fld_123
parentModel: folder
workspaceId: wk_kfSI3JDHd7
createdAt: 2025-01-11T17:02:58.012792
updatedAt: 2025-07-23T20:00:46.049649
name: Folder Environment
public: true
base: false
variables: []
color: null
"#;
let m: SyncModel = serde_yaml::from_str(raw)?;
match m {
SyncModel::Environment(env) => {
assert_eq!(env.parent_model, "folder".to_string());
assert_eq!(env.parent_id, Some("fld_123".to_string()));
}
_ => panic!("expected folder environment"),
}
Ok(())
}
}
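
A minimal sketch (hypothetical caller, not part of this commit) of the read/write cycle these methods support, written as if it lived alongside this module:

use std::path::Path;

use crate::error::Result;
use crate::models::SyncModel;

fn rewrite_sync_file(path: &Path) -> Result<()> {
    // `from_file` returns Ok(None) for unreadable or non-Yaak files
    if let Some((model, checksum)) = SyncModel::from_file(path)? {
        println!("parsed {} (sha1 {})", model.id(), checksum);
        // The path's extension decides between YAML and JSON output
        let (bytes, _checksum) = model.to_file_contents(path)?;
        std::fs::write(path, bytes)?;
    }
    Ok(())
}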


@@ -0,0 +1,576 @@
use crate::error::Result;
use crate::models::SyncModel;
use chrono::Utc;
use log::{info, warn};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use ts_rs::TS;
use yaak_models::db_context::DbContext;
use yaak_models::models::{SyncState, WorkspaceMeta};
use yaak_models::util::{UpdateSource, get_workspace_export_resources};
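/// A single reconciliation step between the database and the sync directory.
/// `Fs*` ops write database state to the filesystem; `Db*` ops write filesystem
/// state back to the database.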
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase", tag = "type")]
#[ts(export, export_to = "gen_sync.ts")]
pub enum SyncOp {
FsCreate {
model: SyncModel,
},
FsUpdate {
model: SyncModel,
state: SyncState,
},
FsDelete {
state: SyncState,
fs: Option<FsCandidate>,
},
DbCreate {
fs: FsCandidate,
},
DbUpdate {
state: SyncState,
fs: FsCandidate,
},
DbDelete {
model: SyncModel,
state: SyncState,
},
IgnorePrivate {
model: SyncModel,
},
}
impl SyncOp {
fn workspace_id(&self) -> String {
match self {
SyncOp::DbCreate { fs } => fs.model.workspace_id(),
SyncOp::DbDelete { model, .. } => model.workspace_id(),
SyncOp::DbUpdate { state, .. } => state.workspace_id.clone(),
SyncOp::FsCreate { model } => model.workspace_id(),
SyncOp::FsDelete { state, .. } => state.workspace_id.clone(),
SyncOp::FsUpdate { state, .. } => state.workspace_id.clone(),
SyncOp::IgnorePrivate { model } => model.workspace_id(),
}
}
}
impl Display for SyncOp {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(
match self {
SyncOp::FsCreate { model } => format!("fs_create({})", model.id()),
SyncOp::FsUpdate { model, .. } => format!("fs_update({})", model.id()),
SyncOp::FsDelete { state, .. } => format!("fs_delete({})", state.model_id),
SyncOp::DbCreate { fs } => format!("db_create({})", fs.model.id()),
SyncOp::DbUpdate { fs, .. } => format!("db_update({})", fs.model.id()),
SyncOp::DbDelete { model, .. } => format!("db_delete({})", model.id()),
SyncOp::IgnorePrivate { model } => format!("ignore_private({})", model.id()),
}
.as_str(),
)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum DbCandidate {
Added(SyncModel),
Deleted(SyncState),
Modified(SyncModel, SyncState),
Unmodified(SyncModel, SyncState),
}
impl DbCandidate {
fn model_id(&self) -> String {
match &self {
DbCandidate::Added(m) => m.id(),
DbCandidate::Deleted(s) => s.model_id.clone(),
DbCandidate::Modified(m, _) => m.id(),
DbCandidate::Unmodified(m, _) => m.id(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase", tag = "type")]
#[ts(export, export_to = "gen_sync.ts")]
pub struct FsCandidate {
pub model: SyncModel,
pub rel_path: PathBuf,
pub checksum: String,
}
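/// Classify every syncable model in the workspace against its stored `SyncState`:
/// added (no state yet), modified/unmodified (comparing `updated_at` to the last
/// flush), or deleted (a state exists but the model is gone or now private).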
pub fn get_db_candidates(
db: &DbContext,
version: &str,
workspace_id: &str,
sync_dir: &Path,
) -> Result<Vec<DbCandidate>> {
let models: HashMap<_, _> =
workspace_models(db, version, workspace_id)?.into_iter().map(|m| (m.id(), m)).collect();
let sync_states: HashMap<_, _> = db
.list_sync_states_for_workspace(workspace_id, sync_dir)?
.into_iter()
.map(|s| (s.model_id.clone(), s))
.collect();
    // 1. Add candidates for current models (added/modified/unmodified)
let mut candidates: Vec<DbCandidate> = models
.values()
.filter_map(|model| {
match sync_states.get(&model.id()) {
Some(existing_sync_state) => {
// If a sync state exists but the model is now private, treat it as a deletion
match model {
SyncModel::Environment(e) if !e.public => {
return Some(DbCandidate::Deleted(existing_sync_state.to_owned()));
}
_ => {}
};
let updated_since_flush = model.updated_at() > existing_sync_state.flushed_at;
if updated_since_flush {
Some(DbCandidate::Modified(
model.to_owned(),
existing_sync_state.to_owned(),
))
} else {
Some(DbCandidate::Unmodified(
model.to_owned(),
existing_sync_state.to_owned(),
))
}
}
None => {
return match model {
SyncModel::Environment(e) if !e.public => {
// No sync state yet, so ignore the model
None
}
_ => {
// No sync state yet, so the model was just added
Some(DbCandidate::Added(model.to_owned()))
}
};
}
}
})
.collect();
// 2. Add SyncState-only candidates (deleted)
candidates.extend(sync_states.values().filter_map(|sync_state| {
if models.contains_key(&sync_state.model_id) {
None
} else {
Some(DbCandidate::Deleted(sync_state.to_owned()))
}
}));
Ok(candidates)
}
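/// Scan the top level of the sync directory (creating it if needed) and parse
/// every file that looks like a Yaak model into an `FsCandidate`.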
pub fn get_fs_candidates(dir: &Path) -> Result<Vec<FsCandidate>> {
// Ensure the root directory exists
fs::create_dir_all(dir)?;
let mut candidates = Vec::new();
let entries = fs::read_dir(dir)?;
for dir_entry in entries {
let dir_entry = match dir_entry {
Ok(v) => v,
Err(_) => continue,
};
if !dir_entry.file_type()?.is_file() {
continue;
};
let path = dir_entry.path();
let (model, checksum) = match SyncModel::from_file(&path) {
Ok(Some(m)) => m,
Ok(None) => continue,
Err(e) => {
warn!("Failed to parse sync file {e}");
return Err(e);
}
};
let rel_path = Path::new(&dir_entry.file_name()).to_path_buf();
candidates.push(FsCandidate { rel_path, model, checksum })
}
Ok(candidates)
}
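/// Full outer join of DB and FS candidates by model ID, resolving each pair to a
/// concrete `SyncOp`. Conflicts (modified on both sides with differing checksums)
/// resolve in favor of whichever copy was updated most recently.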
pub fn compute_sync_ops(
db_candidates: Vec<DbCandidate>,
fs_candidates: Vec<FsCandidate>,
) -> Vec<SyncOp> {
let mut db_map: HashMap<String, DbCandidate> = HashMap::new();
for c in db_candidates {
db_map.insert(c.model_id(), c);
}
let mut fs_map: HashMap<String, FsCandidate> = HashMap::new();
for c in fs_candidates {
fs_map.insert(c.model.id(), c);
}
// Collect all keys from both maps for the OUTER JOIN
let keys: std::collections::HashSet<_> = db_map.keys().chain(fs_map.keys()).collect();
keys.into_iter()
.filter_map(|k| {
let op = match (db_map.get(k), fs_map.get(k)) {
                (None, None) => return None, // Unreachable: every key came from one of the two maps
(None, Some(fs)) => SyncOp::DbCreate { fs: fs.to_owned() },
// DB unchanged <-> FS missing
(Some(DbCandidate::Unmodified(model, sync_state)), None) => {
SyncOp::DbDelete { model: model.to_owned(), state: sync_state.to_owned() }
}
// DB modified <-> FS missing
(Some(DbCandidate::Modified(model, sync_state)), None) => {
SyncOp::FsUpdate { model: model.to_owned(), state: sync_state.to_owned() }
}
// DB added <-> FS missing
(Some(DbCandidate::Added(model)), None) => {
SyncOp::FsCreate { model: model.to_owned() }
}
// DB deleted <-> FS missing
                // Already gone from the FS; emit the op anyway so the SyncState gets cleaned up
(Some(DbCandidate::Deleted(sync_state)), None) => {
SyncOp::FsDelete { state: sync_state.to_owned(), fs: None }
}
// DB unchanged <-> FS exists
(Some(DbCandidate::Unmodified(_, sync_state)), Some(fs_candidate)) => {
if sync_state.checksum == fs_candidate.checksum {
return None;
} else {
SyncOp::DbUpdate {
state: sync_state.to_owned(),
fs: fs_candidate.to_owned(),
}
}
}
// DB modified <-> FS exists
(Some(DbCandidate::Modified(model, sync_state)), Some(fs_candidate)) => {
if sync_state.checksum == fs_candidate.checksum {
SyncOp::FsUpdate { model: model.to_owned(), state: sync_state.to_owned() }
} else if model.updated_at() < fs_candidate.model.updated_at() {
// CONFLICT! Write to DB if the fs model is newer
SyncOp::DbUpdate {
state: sync_state.to_owned(),
fs: fs_candidate.to_owned(),
}
} else {
// CONFLICT! Write to FS if the db model is newer
SyncOp::FsUpdate { model: model.to_owned(), state: sync_state.to_owned() }
}
}
// DB added <-> FS anything
(Some(DbCandidate::Added(model)), Some(_)) => {
// This would be super rare (impossible?), so let's follow the user's intention
SyncOp::FsCreate { model: model.to_owned() }
}
// DB deleted <-> FS exists
(Some(DbCandidate::Deleted(sync_state)), Some(fs_candidate)) => SyncOp::FsDelete {
state: sync_state.to_owned(),
fs: Some(fs_candidate.to_owned()),
},
};
Some(op)
})
.collect()
}
fn workspace_models(
db: &DbContext,
version: &str,
workspace_id: &str,
) -> Result<Vec<SyncModel>> {
// We want to include private environments here so that we can take them into account during
// the sync process. Otherwise, they would be treated as deleted.
let include_private_environments = true;
let resources = get_workspace_export_resources(
db,
version,
vec![workspace_id],
include_private_environments,
)?
.resources;
let workspace = resources.workspaces.iter().find(|w| w.id == workspace_id);
let workspace = match workspace {
None => return Ok(Vec::new()),
Some(w) => w,
};
let mut sync_models = vec![SyncModel::Workspace(workspace.to_owned())];
for m in resources.environments {
sync_models.push(SyncModel::Environment(m));
}
for m in resources.folders {
sync_models.push(SyncModel::Folder(m));
}
for m in resources.http_requests {
sync_models.push(SyncModel::HttpRequest(m));
}
for m in resources.grpc_requests {
sync_models.push(SyncModel::GrpcRequest(m));
}
for m in resources.websocket_requests {
sync_models.push(SyncModel::WebsocketRequest(m));
}
Ok(sync_models)
}
/// Apply sync operations to the filesystem and database.
/// Returns a list of SyncStateOps that should be applied afterward.
pub fn apply_sync_ops(
db: &DbContext,
workspace_id: &str,
sync_dir: &Path,
sync_ops: Vec<SyncOp>,
) -> Result<Vec<SyncStateOp>> {
if sync_ops.is_empty() {
return Ok(Vec::new());
}
info!(
"Applying sync ops {}",
sync_ops.iter().map(|op| op.to_string()).collect::<Vec<String>>().join(", ")
);
let mut sync_state_ops = Vec::new();
let mut workspaces_to_upsert = Vec::new();
let mut environments_to_upsert = Vec::new();
let mut folders_to_upsert = Vec::new();
let mut http_requests_to_upsert = Vec::new();
let mut grpc_requests_to_upsert = Vec::new();
let mut websocket_requests_to_upsert = Vec::new();
for op in sync_ops {
// Only apply things if workspace ID matches
if op.workspace_id() != workspace_id {
continue;
}
sync_state_ops.push(match op {
SyncOp::FsCreate { model } => {
let rel_path = derive_model_filename(&model);
let abs_path = sync_dir.join(rel_path.clone());
let (content, checksum) = model.to_file_contents(&rel_path)?;
let mut f = File::create(&abs_path)?;
f.write_all(&content)?;
SyncStateOp::Create { model_id: model.id(), checksum, rel_path }
}
SyncOp::FsUpdate { model, state } => {
// Always write the existing path
let rel_path = Path::new(&state.rel_path);
let abs_path = Path::new(&state.sync_dir).join(&rel_path);
let (content, checksum) = model.to_file_contents(&rel_path)?;
let mut f = File::create(&abs_path)?;
f.write_all(&content)?;
SyncStateOp::Update {
state: state.to_owned(),
checksum,
rel_path: rel_path.to_owned(),
}
}
SyncOp::FsDelete { state, fs: fs_candidate } => match fs_candidate {
None => SyncStateOp::Delete { state: state.to_owned() },
Some(_) => {
// Always delete the existing path
let rel_path = Path::new(&state.rel_path);
let abs_path = Path::new(&state.sync_dir).join(&rel_path);
fs::remove_file(&abs_path)?;
SyncStateOp::Delete { state: state.to_owned() }
}
},
SyncOp::DbCreate { fs } => {
let model_id = fs.model.id();
// Push updates to arrays so we can do them all in a single
// batch upsert to make foreign keys happy
match fs.model {
SyncModel::Environment(m) => environments_to_upsert.push(m),
SyncModel::Folder(m) => folders_to_upsert.push(m),
SyncModel::GrpcRequest(m) => grpc_requests_to_upsert.push(m),
SyncModel::HttpRequest(m) => http_requests_to_upsert.push(m),
SyncModel::WebsocketRequest(m) => websocket_requests_to_upsert.push(m),
SyncModel::Workspace(m) => workspaces_to_upsert.push(m),
};
SyncStateOp::Create {
model_id,
checksum: fs.checksum.to_owned(),
rel_path: fs.rel_path.to_owned(),
}
}
SyncOp::DbUpdate { state, fs } => {
// Push updates to arrays so we can do them all in a single
// batch upsert to make foreign keys happy
match fs.model {
SyncModel::Environment(m) => environments_to_upsert.push(m),
SyncModel::Folder(m) => folders_to_upsert.push(m),
SyncModel::GrpcRequest(m) => grpc_requests_to_upsert.push(m),
SyncModel::HttpRequest(m) => http_requests_to_upsert.push(m),
SyncModel::WebsocketRequest(m) => websocket_requests_to_upsert.push(m),
SyncModel::Workspace(m) => workspaces_to_upsert.push(m),
}
SyncStateOp::Update {
state: state.to_owned(),
checksum: fs.checksum.to_owned(),
rel_path: fs.rel_path.to_owned(),
}
}
SyncOp::DbDelete { model, state } => {
delete_model(db, &model)?;
SyncStateOp::Delete { state: state.to_owned() }
}
SyncOp::IgnorePrivate { .. } => SyncStateOp::NoOp,
});
}
let upserted_models = db.batch_upsert(
workspaces_to_upsert,
environments_to_upsert,
folders_to_upsert,
http_requests_to_upsert,
grpc_requests_to_upsert,
websocket_requests_to_upsert,
&UpdateSource::Sync,
)?;
// Ensure we create WorkspaceMeta models for each new workspace, with the appropriate sync dir
let sync_dir_string = sync_dir.to_string_lossy().to_string();
for workspace in upserted_models.workspaces {
match db.get_workspace_meta(&workspace.id) {
Some(m) => {
if m.setting_sync_dir == Some(sync_dir_string.clone()) {
// We don't need to update if unchanged
continue;
}
db.upsert_workspace_meta(
&WorkspaceMeta {
setting_sync_dir: Some(sync_dir.to_string_lossy().to_string()),
..m
},
&UpdateSource::Sync,
)
}
None => db.upsert_workspace_meta(
&WorkspaceMeta {
workspace_id: workspace_id.to_string(),
setting_sync_dir: Some(sync_dir.to_string_lossy().to_string()),
..Default::default()
},
&UpdateSource::Sync,
),
}?;
}
Ok(sync_state_ops)
}
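/// Follow-up mutations to the `SyncState` table, produced by `apply_sync_ops` and
/// persisted by `apply_sync_state_ops` once the corresponding FS/DB writes succeed.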
#[derive(Debug)]
pub enum SyncStateOp {
Create {
model_id: String,
checksum: String,
rel_path: PathBuf,
},
Update {
state: SyncState,
checksum: String,
rel_path: PathBuf,
},
Delete {
state: SyncState,
},
NoOp,
}
pub fn apply_sync_state_ops(
db: &DbContext,
workspace_id: &str,
sync_dir: &Path,
ops: Vec<SyncStateOp>,
) -> Result<()> {
for op in ops {
match op {
SyncStateOp::Create { checksum, rel_path, model_id } => {
let sync_state = SyncState {
workspace_id: workspace_id.to_string(),
model_id,
checksum,
                    sync_dir: sync_dir.to_string_lossy().to_string(), // lossy: don't panic on non-UTF-8 paths
                    rel_path: rel_path.to_string_lossy().to_string(),
flushed_at: Utc::now().naive_utc(),
..Default::default()
};
db.upsert_sync_state(&sync_state)?;
}
SyncStateOp::Update { state: sync_state, checksum, rel_path } => {
let sync_state = SyncState {
checksum,
                    sync_dir: sync_dir.to_string_lossy().to_string(),
                    rel_path: rel_path.to_string_lossy().to_string(),
flushed_at: Utc::now().naive_utc(),
..sync_state
};
db.upsert_sync_state(&sync_state)?;
}
SyncStateOp::Delete { state } => {
db.delete_sync_state(&state)?;
}
SyncStateOp::NoOp => {
// Nothing
}
}
}
Ok(())
}
fn derive_model_filename(m: &SyncModel) -> PathBuf {
let rel = format!("yaak.{}.yaml", m.id());
Path::new(&rel).to_path_buf()
}
fn delete_model(db: &DbContext, model: &SyncModel) -> Result<()> {
match model {
SyncModel::Workspace(m) => {
db.delete_workspace(&m, &UpdateSource::Sync)?;
}
SyncModel::Environment(m) => {
db.delete_environment(&m, &UpdateSource::Sync)?;
}
SyncModel::Folder(m) => {
db.delete_folder(&m, &UpdateSource::Sync)?;
}
SyncModel::HttpRequest(m) => {
db.delete_http_request(&m, &UpdateSource::Sync)?;
}
SyncModel::GrpcRequest(m) => {
db.delete_grpc_request(&m, &UpdateSource::Sync)?;
}
SyncModel::WebsocketRequest(m) => {
db.delete_websocket_request(&m, &UpdateSource::Sync)?;
}
};
Ok(())
}
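
A sketch of one complete sync pass wiring these functions together. This driver is hypothetical (not part of this commit) and assumes a DbContext and the app version string are already in hand:

use std::path::Path;

use yaak_models::db_context::DbContext;

use crate::error::Result;
use crate::sync::{
    apply_sync_ops, apply_sync_state_ops, compute_sync_ops, get_db_candidates, get_fs_candidates,
};

pub fn sync_workspace(db: &DbContext, version: &str, workspace_id: &str, dir: &Path) -> Result<()> {
    // 1. Classify DB models against their stored SyncStates
    let db_candidates = get_db_candidates(db, version, workspace_id, dir)?;
    // 2. Parse every recognizable model file in the sync directory
    let fs_candidates = get_fs_candidates(dir)?;
    // 3. Full outer join of the two sides into concrete operations
    let ops = compute_sync_ops(db_candidates, fs_candidates);
    // 4. Apply the ops, then persist the resulting SyncState changes
    let state_ops = apply_sync_ops(db, workspace_id, dir, ops)?;
    apply_sync_state_ops(db, workspace_id, dir, state_ops)
}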


@@ -0,0 +1,84 @@
use crate::error::Result;
use log::{error, info};
use notify::Watcher;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::sync::mpsc;
use tokio::select;
use ts_rs::TS;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export, export_to = "gen_watch.ts")]
pub struct WatchEvent {
pub paths: Vec<PathBuf>,
pub kind: String,
}
/// Watch a directory for changes and send events through a callback.
///
/// The callback is invoked for each watch event. The function registers the
/// watcher, spawns a background task, and returns immediately; the task exits
/// when the cancel receiver fires (or its sender is dropped).
pub async fn watch_directory<F>(
dir: &Path,
callback: F,
mut cancel_rx: tokio::sync::watch::Receiver<()>,
) -> Result<()>
where
F: Fn(WatchEvent) + Send + 'static,
{
let dir = dir.to_owned();
let (tx, rx) = mpsc::channel::<notify::Result<notify::Event>>();
let mut watcher = notify::recommended_watcher(tx)?;
// Spawn a blocking thread to handle the blocking `std::sync::mpsc::Receiver`
let (async_tx, mut async_rx) = tokio::sync::mpsc::channel::<notify::Result<notify::Event>>(100);
std::thread::spawn(move || {
for res in rx {
if async_tx.blocking_send(res).is_err() {
break; // Exit the thread if the async receiver is closed
}
}
});
    // Register the watch before spawning so setup errors propagate through `?`
    // instead of panicking inside the background task
    watcher.watch(&dir, notify::RecursiveMode::Recursive)?;
    info!("Watching directory {:?}", dir);
    tokio::spawn(async move {
        // Move the watcher into the task so it stays alive until cancellation
        let _watcher = watcher;
loop {
select! {
// Listen for new watch events
Some(event_res) = async_rx.recv() => {
match event_res {
Ok(event) => {
                        // Make paths relative to the watched dir and drop ignored directories
                        let paths = event.paths.into_iter()
                            // `ok()` skips paths outside the watched dir instead of panicking
                            .filter_map(|p| p.strip_prefix(&dir).ok().map(|p| p.to_path_buf()))
                            .filter(|p| !p.starts_with(".git") && !p.starts_with("node_modules"))
.collect::<Vec<PathBuf>>();
if paths.is_empty() {
continue;
}
callback(WatchEvent {
paths,
kind: format!("{:?}", event.kind),
});
}
Err(e) => error!("Directory watch error: {:?}", e),
}
}
// Listen for cancellation
_ = cancel_rx.changed() => {
// To cancel, we break from the loop, which will exit the task and make the
// watcher go out of scope (cancelling it)
info!("Cancelling watch for {:?}", dir);
break;
}
}
}
});
Ok(())
}
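
A hypothetical caller (not part of this commit). watch_directory registers the watcher and returns immediately; the background task then runs until the cancel channel fires or its sender is dropped:

use std::path::Path;

use crate::error::Result;
use crate::watch::watch_directory;

pub async fn start_watching(dir: &Path) -> Result<tokio::sync::watch::Sender<()>> {
    let (cancel_tx, cancel_rx) = tokio::sync::watch::channel(());
    watch_directory(dir, |event| {
        // Invoked on the watcher task for every batch of relevant paths
        println!("fs change ({}): {:?}", event.kind, event.paths);
    }, cancel_rx)
    .await?;
    // Send on (or drop) the returned sender to stop the watch
    Ok(cancel_tx)
}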