Work in progress: split routers and main into separate crates

This commit is contained in:
Per Stark
2025-03-04 07:44:00 +01:00
parent 037bc52a64
commit 847571729b
80 changed files with 599 additions and 1577 deletions

47
crates/main/src/server.rs Normal file
View File

@@ -0,0 +1,47 @@
use api_router::{api_routes_v1, api_state::ApiState};
use axum::{extract::FromRef, Router};
use common::utils::config::get_config;
use html_router::{html_routes, html_state::HtmlState};
use tracing::info;
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Set up tracing: stdout formatter filtered via RUST_LOG.
    // try_init().ok() tolerates an already-installed global subscriber
    // instead of aborting startup.
    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(EnvFilter::from_default_env())
        .try_init()
        .ok();

    // Load configuration; a bad config is fatal at startup.
    let config = get_config()?;

    // Build the HTML state first; the API state reuses its DB client and
    // job queue handles (both are cheap clones of shared handles).
    let html_state = HtmlState::new(&config).await?;
    let api_state = ApiState {
        surreal_db_client: html_state.surreal_db_client.clone(),
        job_queue: html_state.job_queue.clone(),
    };

    // Compose the router. NOTE: axum 0.7+ panics on `.nest("/", ...)` —
    // root-level routes must be mounted with `.merge(...)` instead.
    let app = Router::new()
        .nest("/api/v1", api_routes_v1(&api_state))
        .merge(html_routes(&html_state))
        .with_state(AppState {
            api_state,
            html_state,
        });

    info!("Listening on 0.0.0.0:3000");
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await?;
    axum::serve(listener, app).await?;
    Ok(())
}
/// Combined application state for the merged router.
///
/// `FromRef` lets axum extract the sub-state each route group needs
/// (`ApiState` for `/api/v1`, `HtmlState` for the HTML routes) from this
/// single state passed to `with_state`.
#[derive(Clone, FromRef)]
struct AppState {
    // State for the JSON API routes.
    api_state: ApiState,
    // State for the server-rendered HTML routes.
    html_state: HtmlState,
}

150
crates/main/src/worker.rs Normal file
View File

@@ -0,0 +1,150 @@
use std::sync::Arc;
use common::{
ingress::{
content_processor::ContentProcessor,
jobqueue::{JobQueue, MAX_ATTEMPTS},
},
storage::{
db::{get_item, SurrealDbClient},
types::job::{Job, JobStatus},
},
utils::config::get_config,
};
use futures::StreamExt;
use surrealdb::Action;
use tracing::{error, info};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Set up tracing
tracing_subscriber::registry()
.with(fmt::layer())
.with(EnvFilter::from_default_env())
.try_init()
.ok();
let config = get_config()?;
let surreal_db_client = Arc::new(
SurrealDbClient::new(
&config.surrealdb_address,
&config.surrealdb_username,
&config.surrealdb_password,
&config.surrealdb_namespace,
&config.surrealdb_database,
)
.await?,
);
let openai_client = Arc::new(async_openai::Client::new());
let job_queue = JobQueue::new(surreal_db_client.clone());
let content_processor = ContentProcessor::new(surreal_db_client, openai_client.clone()).await?;
loop {
// First, check for any unfinished jobs
let unfinished_jobs = job_queue.get_unfinished_jobs().await?;
if !unfinished_jobs.is_empty() {
info!("Found {} unfinished jobs", unfinished_jobs.len());
for job in unfinished_jobs {
job_queue
.process_job(job, &content_processor, openai_client.clone())
.await?;
}
}
// If no unfinished jobs, start listening for new ones
info!("Listening for new jobs...");
let mut job_stream = job_queue.listen_for_jobs().await?;
while let Some(notification) = job_stream.next().await {
match notification {
Ok(notification) => {
info!("Received notification: {:?}", notification);
match notification.action {
Action::Create => {
if let Err(e) = job_queue
.process_job(
notification.data,
&content_processor,
openai_client.clone(),
)
.await
{
error!("Error processing job: {}", e);
}
}
Action::Update => {
match notification.data.status {
JobStatus::Completed
| JobStatus::Error(_)
| JobStatus::Cancelled => {
info!(
"Skipping already completed/error/cancelled job: {}",
notification.data.id
);
continue;
}
JobStatus::InProgress { attempts, .. } => {
// Only process if this is a retry after an error, not our own update
if let Ok(Some(current_job)) =
get_item::<Job>(&job_queue.db.client, &notification.data.id)
.await
{
match current_job.status {
JobStatus::Error(_) if attempts < MAX_ATTEMPTS => {
// This is a retry after an error
if let Err(e) = job_queue
.process_job(
current_job,
&content_processor,
openai_client.clone(),
)
.await
{
error!("Error processing job retry: {}", e);
}
}
_ => {
info!(
"Skipping in-progress update for job: {}",
notification.data.id
);
continue;
}
}
}
}
JobStatus::Created => {
// Shouldn't happen with Update action, but process if it does
if let Err(e) = job_queue
.process_job(
notification.data,
&content_processor,
openai_client.clone(),
)
.await
{
error!("Error processing job: {}", e);
}
}
}
}
_ => {} // Ignore other actions
}
}
Err(e) => error!("Error in job notification: {}", e),
}
}
// If we reach here, the stream has ended (connection lost?)
error!("Job stream ended unexpectedly, reconnecting...");
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
}
}