AT Protocol indexer with flexible filtering, XRPC queries, and a cursor-backed event stream, built on fjall
at-protocol atproto indexer rust fjall

[appwide] refactor ingestion, the API, and a bunch of other stuff; should be a lot more consistent and durable now

ptr.pet 0de7dd05 85291e9d

verified
+1179 -954
+1 -1
Cargo.toml
··· 1 1 [package] 2 2 name = "hydrant" 3 3 version = "0.1.0" 4 - edition = "2021" 4 + edition = "2024" 5 5 6 6 [dependencies] 7 7 tokio = { version = "1.0", features = ["full"] }
+3 -9
src/api/debug.rs
··· 1 - use crate::api::AppState; 2 1 use crate::db::keys; 2 + use crate::{api::AppState, db::types::TrimmedDid}; 3 3 use axum::{ 4 4 extract::{Query, State}, 5 5 http::StatusCode, ··· 39 39 40 40 // {did_prefix}\x00{collection}\x00 41 41 let mut prefix = Vec::new(); 42 - prefix.extend_from_slice(keys::did_prefix(&did).as_bytes()); 42 + prefix.extend_from_slice(TrimmedDid::from(&did).as_bytes()); 43 43 prefix.push(keys::SEP); 44 44 prefix.extend_from_slice(req.collection.as_bytes()); 45 45 prefix.push(keys::SEP); 46 46 47 47 let count = tokio::task::spawn_blocking(move || { 48 - let mut count = 0; 49 48 let start_key = prefix.clone(); 50 49 let mut end_key = prefix.clone(); 51 50 if let Some(msg) = end_key.last_mut() { 52 51 *msg += 1; 53 52 } 54 53 55 - for item in ks.range(start_key..end_key) { 56 - if item.into_inner().is_ok() { 57 - count += 1; 58 - } 59 - } 60 - count 54 + ks.range(start_key..end_key).count() 61 55 }) 62 56 .await 63 57 .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
+5 -1
src/api/mod.rs
··· 1 1 use crate::state::AppState; 2 2 use axum::{routing::get, Router}; 3 + use jacquard::xrpc::GenericXrpcError; 4 + use jacquard_axum::XrpcErrorResponse; 3 5 use std::{net::SocketAddr, sync::Arc}; 4 6 use tower_http::cors::CorsLayer; 5 7 use tower_http::trace::TraceLayer; ··· 9 11 pub mod stats; 10 12 mod stream; 11 13 pub mod xrpc; 14 + 15 + pub type XrpcResult<T> = Result<T, XrpcErrorResponse<GenericXrpcError>>; 12 16 13 17 pub async fn serve(state: Arc<AppState>, port: u16) -> miette::Result<()> { 14 18 let app = Router::new() ··· 47 51 .map_err(|e| miette::miette!("failed to bind debug server to port {port}: {e}"))?; 48 52 49 53 tracing::info!( 50 - "Debug server listening on {}", 54 + "debug server listening on {}", 51 55 listener.local_addr().unwrap() 52 56 ); 53 57
+34 -37
src/api/repo.rs
··· 1 1 use crate::api::AppState; 2 2 use crate::db::{keys, ser_repo_state, Db}; 3 - use crate::types::{RepoState, RepoStatus}; 3 + use crate::types::RepoState; 4 4 use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; 5 5 use jacquard::{types::did::Did, IntoStatic}; 6 6 use serde::Deserialize; ··· 13 13 } 14 14 15 15 #[derive(Deserialize)] 16 - pub struct RepoAddRemoveRequest { 16 + pub struct RepoAddRequest { 17 17 pub dids: Vec<String>, 18 18 } 19 19 20 20 pub async fn handle_repo_add( 21 21 State(state): State<Arc<AppState>>, 22 - Json(req): Json<RepoAddRemoveRequest>, 22 + Json(req): Json<RepoAddRequest>, 23 23 ) -> Result<StatusCode, (StatusCode, String)> { 24 24 let db = &state.db; 25 25 let mut batch = db.inner.batch(); 26 - let mut added_count = 0; 26 + let mut added = 0; 27 27 let mut to_backfill = Vec::new(); 28 28 29 29 for did_str in req.dids { 30 30 let did = Did::new_owned(did_str.as_str()) 31 31 .map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?; 32 32 let did_key = keys::repo_key(&did); 33 - if !Db::contains_key(db.repos.clone(), did_key) 33 + if !Db::contains_key(db.repos.clone(), &did_key) 34 34 .await 35 35 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
36 36 { 37 - let mut repo_state = RepoState::new(did.clone()); 38 - repo_state.status = RepoStatus::Backfilling; 37 + let repo_state = RepoState::backfilling(&did); 39 38 let bytes = ser_repo_state(&repo_state) 40 39 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; 41 40 42 - batch.insert(&db.repos, did_key, bytes); 43 - batch.insert(&db.pending, did_key, Vec::new()); 41 + batch.insert(&db.repos, &did_key, bytes); 42 + batch.insert(&db.pending, &did_key, Vec::new()); 44 43 45 - added_count += 1; 44 + added += 1; 46 45 47 46 let jacquard_did = Did::new_owned(did.as_str()) 48 47 .map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?; ··· 50 49 } 51 50 } 52 51 53 - if added_count > 0 { 52 + if added > 0 { 54 53 tokio::task::spawn_blocking(move || batch.commit().map_err(|e| e.to_string())) 55 54 .await 56 55 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 57 56 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?; 58 - 59 - // update counts 60 - tokio::task::spawn({ 61 - let state = state.clone(); 62 - async move { 63 - let _ = state 64 - .db 65 - .increment_count(keys::count_keyspace_key("repos"), added_count) 66 - .await; 67 - let _ = state 68 - .db 69 - .increment_count(keys::count_keyspace_key("pending"), added_count) 70 - .await; 71 - } 72 - }); 57 + state.db.update_count_async("repos", added).await; 58 + state.db.update_count_async("pending", added).await; 73 59 74 60 // trigger backfill 75 61 for did in to_backfill { ··· 79 65 Ok(StatusCode::OK) 80 66 } 81 67 68 + #[derive(Deserialize)] 69 + pub struct RepoRemoveRequest { 70 + pub dids: Vec<String>, 71 + } 72 + 82 73 pub async fn handle_repo_remove( 83 74 State(state): State<Arc<AppState>>, 84 - Json(req): Json<RepoAddRemoveRequest>, 75 + Json(req): Json<RepoRemoveRequest>, 85 76 ) -> Result<StatusCode, (StatusCode, String)> { 86 77 let db = &state.db; 78 + let mut batch = db.inner.batch(); 79 + let mut removed = 0; 80 + 87 81 for did_str in req.dids { 88 82 let did = 
Did::new_owned(did_str.as_str()) 89 83 .map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?; 90 84 let did_key = keys::repo_key(&did); 91 - if Db::contains_key(db.repos.clone(), did_key) 85 + if Db::contains_key(db.repos.clone(), &did_key) 92 86 .await 93 87 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 94 88 { 95 - let mut batch = db.inner.batch(); 96 - batch.remove(&db.repos, did_key); 97 - batch.remove(&db.pending, did_key); 98 - batch.remove(&db.resync, did_key); 99 - 100 - tokio::task::spawn_blocking(move || batch.commit().map_err(|e| e.to_string())) 101 - .await 102 - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 103 - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?; 89 + batch.remove(&db.repos, &did_key); 90 + batch.remove(&db.pending, &did_key); 91 + batch.remove(&db.resync, &did_key); 92 + removed -= 1; 104 93 } 105 94 } 95 + 96 + tokio::task::spawn_blocking(move || batch.commit().map_err(|e| e.to_string())) 97 + .await 98 + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 99 + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?; 100 + state.db.update_count_async("repos", removed).await; 101 + state.db.update_count_async("pending", removed).await; 102 + 106 103 Ok(StatusCode::OK) 107 104 }
+9 -25
src/api/stats.rs
··· 1 1 use crate::api::AppState; 2 - use crate::db::keys; 3 2 use axum::{extract::State, response::Result, Json}; 4 3 use serde::Serialize; 5 - use smol_str::SmolStr; 6 - use std::sync::Arc; 4 + use std::{collections::HashMap, sync::Arc}; 7 5 8 6 #[derive(Serialize)] 9 7 pub struct StatsResponse { 10 - pub keyspace_stats: Vec<KeyspaceStat>, 11 - } 12 - 13 - #[derive(Serialize)] 14 - pub struct KeyspaceStat { 15 - pub name: SmolStr, 16 - pub count: i64, 8 + pub counts: HashMap<&'static str, u64>, 17 9 } 18 10 19 11 pub async fn get_stats(State(state): State<Arc<AppState>>) -> Result<Json<StatsResponse>> { 20 12 let db = &state.db; 21 13 22 - let stats = futures::future::try_join_all( 23 - [ 24 - "repos", "records", "blocks", "events", "buffer", "pending", "resync", 25 - ] 26 - .into_iter() 27 - .map(|name| async move { 28 - Ok::<_, miette::Report>(KeyspaceStat { 29 - name: name.into(), 30 - count: db.get_count(keys::count_keyspace_key(name)).await?, 31 - }) 32 - }), 14 + let counts = futures::future::join_all( 15 + ["repos", "records", "blocks", "pending", "resync"] 16 + .into_iter() 17 + .map(|name| async move { (name, db.get_count(name).await) }), 33 18 ) 34 19 .await 35 - .map_err(|e| e.to_string())?; 20 + .into_iter() 21 + .collect(); 36 22 37 - Ok(Json(StatsResponse { 38 - keyspace_stats: stats, 39 - })) 23 + Ok(Json(StatsResponse { counts })) 40 24 }
+33 -52
src/api/stream.rs
··· 53 53 // 1. catch up from DB 54 54 loop { 55 55 let mut found = false; 56 - for item in ks.range(keys::event_key((current_id + 1) as i64)..) { 56 + for item in ks.range(keys::event_key(current_id + 1)..) { 57 57 if let Ok((k, v)) = item.into_inner() { 58 58 let mut buf = [0u8; 8]; 59 59 buf.copy_from_slice(&k); 60 60 let id = u64::from_be_bytes(buf); 61 61 current_id = id; 62 62 63 - let stored_evt: StoredEvent = match rmp_serde::from_slice(&v) { 63 + let StoredEvent { 64 + did, 65 + rev, 66 + collection, 67 + rkey, 68 + action, 69 + cid, 70 + } = match rmp_serde::from_slice(&v) { 64 71 Ok(e) => e, 65 72 Err(_) => continue, 66 73 }; 67 74 68 - let marshallable = match stored_evt { 69 - StoredEvent::Record { 70 - live, 71 - did, 72 - rev, 73 - collection, 74 - rkey, 75 - action, 76 - cid, 77 - } => { 78 - let mut record_val = None; 79 - if let Some(cid_str) = &cid { 80 - if let Ok(Some(block_bytes)) = 81 - db.blocks.get(keys::block_key(cid_str)) 75 + let marshallable = { 76 + let mut record_val = None; 77 + if let Some(cid_str) = &cid { 78 + if let Ok(Some(block_bytes)) = 79 + db.blocks.get(keys::block_key(cid_str)) 80 + { 81 + if let Ok(raw_data) = 82 + serde_ipld_dagcbor::from_slice::<RawData>(&block_bytes) 82 83 { 83 - if let Ok(raw_data) = 84 - serde_ipld_dagcbor::from_slice::<RawData>( 85 - &block_bytes, 86 - ) 87 - { 88 - record_val = serde_json::to_value(raw_data).ok(); 89 - } 84 + record_val = serde_json::to_value(raw_data).ok(); 90 85 } 91 86 } 87 + } 92 88 93 - MarshallableEvt { 94 - id, 95 - event_type: "record".into(), 96 - record: Some(RecordEvt { 97 - live, 98 - did, 99 - rev, 100 - collection, 101 - rkey, 102 - action, 103 - record: record_val, 104 - cid, 105 - }), 106 - identity: None, 107 - account: None, 108 - } 109 - } 110 - StoredEvent::Identity(identity) => MarshallableEvt { 89 + MarshallableEvt { 111 90 id, 112 - event_type: "identity".into(), 113 - record: None, 114 - identity: Some(identity), 115 - account: None, 116 - }, 117 - 
StoredEvent::Account(account) => MarshallableEvt { 118 - id, 119 - event_type: "account".into(), 120 - record: None, 91 + event_type: "record".into(), 92 + record: Some(RecordEvt { 93 + live: true, 94 + did: did.to_did(), 95 + rev, 96 + collection, 97 + rkey, 98 + action, 99 + record: record_val, 100 + cid, 101 + }), 121 102 identity: None, 122 - account: Some(account), 123 - }, 103 + account: None, 104 + } 124 105 }; 125 106 126 107 let json_str = match serde_json::to_string(&marshallable) {
+14 -10
src/api/xrpc.rs
··· 1 - use crate::api::AppState; 2 - use crate::db::{keys, Db}; 1 + use crate::api::{AppState, XrpcResult}; 2 + use crate::db::types::TrimmedDid; 3 + use crate::db::{self, keys, Db}; 3 4 use axum::{extract::State, http::StatusCode, Json, Router}; 5 + use futures::TryFutureExt; 4 6 use jacquard::types::ident::AtIdentifier; 5 7 use jacquard::{ 6 8 api::com_atproto::repo::{ ··· 22 24 use serde::{Deserialize, Serialize}; 23 25 use smol_str::ToSmolStr; 24 26 use std::{fmt::Display, sync::Arc}; 27 + use tokio::task::spawn_blocking; 25 28 26 29 pub fn router() -> Router<Arc<AppState>> { 27 30 Router::new() ··· 125 128 126 129 let prefix = format!( 127 130 "{}{}{}{}", 128 - keys::did_prefix(&did), 131 + TrimmedDid::from(&did).as_str(), 129 132 keys::SEP as char, 130 133 req.collection.as_str(), 131 134 keys::SEP as char ··· 254 257 255 258 #[derive(Serialize, Deserialize, jacquard_derive::IntoStatic)] 256 259 pub struct CountRecordsOutput { 257 - pub count: i64, 260 + pub count: u64, 258 261 } 259 262 260 263 pub struct CountRecordsResponse; ··· 290 293 pub async fn handle_count_records( 291 294 State(state): State<Arc<AppState>>, 292 295 ExtractXrpc(req): ExtractXrpc<CountRecords>, 293 - ) -> Result<Json<CountRecordsOutput>, XrpcErrorResponse<GenericXrpcError>> { 294 - let db = &state.db; 296 + ) -> XrpcResult<Json<CountRecordsOutput>> { 295 297 let did = state 296 298 .resolver 297 299 .resolve_did(&req.identifier) 298 300 .await 299 301 .map_err(|e| bad_request(CountRecordsRequest::NSID, e))?; 300 302 301 - let count = db 302 - .get_count(keys::count_collection_key(&did, &req.collection)) 303 - .await 304 - .map_err(|e| internal_error(CountRecordsRequest::NSID, e))?; 303 + let count = spawn_blocking(move || { 304 + db::get_record_count(&state.db, &did, &req.collection) 305 + .map_err(|e| internal_error(CountRecordsRequest::NSID, e)) 306 + }) 307 + .map_err(|e| internal_error(CountRecordsRequest::NSID, e)) 308 + .await??; 305 309 306 310 Ok(Json(CountRecordsOutput { 
count })) 307 311 }
+26 -20
src/backfill/manager.rs
··· 1 - use crate::db::keys::reconstruct_did; 2 - use crate::db::{deser_repo_state, ser_repo_state, Db}; 1 + use crate::db::types::TrimmedDid; 2 + use crate::db::{self, deser_repo_state, ser_repo_state}; 3 3 use crate::state::AppState; 4 4 use crate::types::{RepoStatus, ResyncState}; 5 5 use miette::{IntoDiagnostic, Result}; ··· 13 13 14 14 for guard in state.db.pending.iter() { 15 15 let key = guard.key().into_diagnostic()?; 16 - let did_str = String::from_utf8_lossy(&key); 17 - let Ok(did) = reconstruct_did(&did_str) else { 18 - error!("invalid did in db, skipping: did:{did_str}"); 19 - continue; 16 + let did = match TrimmedDid::try_from(key.as_ref()) { 17 + Ok(did) => did.to_did(), 18 + Err(e) => { 19 + error!("invalid did in db, skipping: {e}"); 20 + continue; 21 + } 20 22 }; 21 23 22 24 debug!("queuing did {did}"); 23 - if let Err(e) = state.backfill_tx.send(did) { 24 - error!("failed to queue pending backfill for did:{did_str}: {e}"); 25 + if let Err(e) = state.backfill_tx.send(did.clone()) { 26 + error!("failed to queue pending backfill for did:{did}: {e}"); 25 27 } else { 26 28 count += 1; 27 29 } ··· 37 39 38 40 for guard in state.db.resync.iter() { 39 41 let (key, val) = guard.into_inner().into_diagnostic()?; 40 - let did_str = String::from_utf8_lossy(&key); 41 - let Ok(did) = reconstruct_did(&did_str) else { 42 - error!("invalid did in db, skipping: did:{did_str}"); 43 - continue; 42 + let did = match TrimmedDid::try_from(key.as_ref()) { 43 + Ok(did) => did.to_did(), 44 + Err(e) => { 45 + error!("invalid did in db, skipping: {e}"); 46 + continue; 47 + } 44 48 }; 45 49 46 50 if let Ok(resync_state) = rmp_serde::from_slice::<ResyncState>(&val) { ··· 54 58 55 59 // update repo state back to backfilling 56 60 let repo_key = crate::db::keys::repo_key(&did); 57 - if let Some(state_bytes) = state.db.repos.get(repo_key).into_diagnostic()? { 61 + if let Some(state_bytes) = state.db.repos.get(&repo_key).into_diagnostic()? 
{ 58 62 let mut repo_state = deser_repo_state(&state_bytes)?; 59 63 repo_state.status = RepoStatus::Backfilling; 60 - batch.insert(&state.db.repos, repo_key, ser_repo_state(&repo_state)?); 64 + batch.insert(&state.db.repos, &repo_key, ser_repo_state(&repo_state)?); 61 65 } 62 66 63 67 batch.commit().into_diagnostic()?; ··· 90 94 Ok(t) => t, 91 95 Err(e) => { 92 96 error!("failed to get resync state: {e}"); 93 - Db::check_poisoned(&e); 97 + db::check_poisoned(&e); 94 98 continue; 95 99 } 96 100 }; 97 - let did_str = String::from_utf8_lossy(&key); 98 - let Ok(did) = reconstruct_did(&did_str) else { 99 - error!("invalid did in db, skipping: did:{did_str}"); 100 - continue; 101 + let did = match TrimmedDid::try_from(key.as_ref()) { 102 + Ok(did) => did.to_did(), 103 + Err(e) => { 104 + error!("invalid did in db, skipping: {e}"); 105 + continue; 106 + } 101 107 }; 102 108 103 109 if let Ok(ResyncState::Error { next_retry, .. }) = ··· 109 115 // move back to pending 110 116 if let Err(e) = db.pending.insert(key, Vec::new()) { 111 117 error!("failed to move {did} to pending: {e}"); 112 - Db::check_poisoned(&e); 118 + db::check_poisoned(&e); 113 119 continue; 114 120 } 115 121
+102 -117
src/backfill/mod.rs
··· 1 - use crate::db::{keys, Db}; 1 + use crate::db::types::TrimmedDid; 2 + use crate::db::{self, Db, keys, ser_repo_state}; 2 3 use crate::ops; 3 4 use crate::state::{AppState, BackfillRx}; 4 5 use crate::types::{AccountEvt, BroadcastEvent, RepoState, RepoStatus, ResyncState, StoredEvent}; 5 6 use futures::TryFutureExt; 6 7 use jacquard::api::com_atproto::sync::get_repo::{GetRepo, GetRepoError}; 8 + use jacquard::types::cid::Cid; 7 9 use jacquard::types::did::Did; 8 - use jacquard::{prelude::*, IntoStatic}; 10 + use jacquard::{CowStr, IntoStatic, prelude::*}; 9 11 use jacquard_common::xrpc::XrpcError; 10 - use jacquard_repo::mst::Mst; 11 12 use jacquard_repo::MemoryBlockStore; 13 + use jacquard_repo::mst::Mst; 12 14 use miette::{IntoDiagnostic, Result}; 13 - use smol_str::{SmolStr, ToSmolStr}; 15 + use smol_str::{SmolStr, ToSmolStr, format_smolstr}; 14 16 use std::collections::HashMap; 15 - use std::iter::once; 17 + use std::sync::Arc; 16 18 use std::sync::atomic::Ordering; 17 - use std::sync::Arc; 18 19 use std::time::{Duration, Instant}; 19 20 use tokio::sync::Semaphore; 20 21 use tracing::{debug, error, info, trace, warn}; 21 22 22 23 pub mod manager; 23 24 24 - pub struct Worker { 25 + pub struct BackfillWorker { 25 26 state: Arc<AppState>, 26 27 rx: BackfillRx, 27 28 http: reqwest::Client, 28 29 semaphore: Arc<Semaphore>, 29 30 } 30 31 31 - impl Worker { 32 + impl BackfillWorker { 32 33 pub fn new( 33 34 state: Arc<AppState>, 34 35 rx: BackfillRx, ··· 68 69 ) 69 70 .inspect_err(move |e| { 70 71 error!("backfill process failed for {did}: {e}"); 71 - Db::check_poisoned_report(e); 72 + db::check_poisoned_report(e); 72 73 }), 73 74 ); 74 75 } ··· 82 83 ) -> Result<()> { 83 84 let db = &state.db; 84 85 85 - // block buffer processing for this DID during backfill 86 - let _ = state.blocked_dids.insert_async(did.clone()).await; 87 - 88 86 match Self::process_did(&state, &http, &did).await { 89 87 Ok(previous_state) => { 90 88 let did_key = keys::repo_key(&did); 91 
89 92 - let is_pending = matches!( 93 - previous_state.status, 94 - RepoStatus::Backfilling | RepoStatus::New 95 - ); 90 + let was_pending = matches!(previous_state.status, RepoStatus::Backfilling); 96 91 let was_resync = matches!( 97 92 previous_state.status, 98 93 RepoStatus::Error(_) ··· 103 98 104 99 let mut batch = db.inner.batch(); 105 100 // remove from pending 106 - if is_pending { 107 - batch.remove(&db.pending, did_key); 101 + if was_pending { 102 + batch.remove(&db.pending, &did_key); 108 103 } 109 104 // remove from resync 110 105 if was_resync { 111 - batch.remove(&db.resync, did_key); 106 + batch.remove(&db.resync, &did_key); 112 107 } 113 108 tokio::task::spawn_blocking(move || batch.commit().into_diagnostic()) 114 109 .await 115 110 .into_diagnostic()??; 116 - 117 - tokio::spawn({ 118 - let pending_fut = is_pending.then(|| { 119 - state 120 - .db 121 - .increment_count(keys::count_keyspace_key("pending"), -1) 122 - }); 123 - let resync_fut = was_resync.then(|| { 124 - state 125 - .db 126 - .increment_count(keys::count_keyspace_key("resync"), -1) 127 - }); 128 - futures::future::join_all(pending_fut.into_iter().chain(resync_fut)) 129 - }); 111 + if was_pending { 112 + state.db.update_count_async("pending", -1).await; 113 + } 114 + if was_resync { 115 + state.db.update_count_async("resync", -1).await; 116 + } 130 117 131 118 let state_for_persist = state.clone(); 132 119 tokio::task::spawn_blocking(move || { ··· 145 132 146 133 // 1. get current retry count 147 134 let mut retry_count = 0; 148 - if let Ok(Some(bytes)) = Db::get(db.resync.clone(), did_key).await { 135 + if let Ok(Some(bytes)) = Db::get(db.resync.clone(), &did_key).await { 149 136 if let Ok(ResyncState::Error { 150 137 retry_count: old_count, 151 138 .. ··· 166 153 167 154 tokio::task::spawn_blocking({ 168 155 let state = state.clone(); 169 - let did_key = did_key.to_vec(); 156 + let did_key = did_key.into_static(); 170 157 move || { 171 158 // 3. 
save to resync 172 159 let serialized_resync_state = ··· 208 195 } 209 196 210 197 // returns previous repo state if successful 211 - async fn process_did( 198 + async fn process_did<'i>( 212 199 app_state: &Arc<AppState>, 213 200 http: &reqwest::Client, 214 201 did: &Did<'static>, 215 - ) -> Result<RepoState> { 202 + ) -> Result<RepoState<'static>> { 216 203 debug!("backfilling {}", did); 217 204 218 205 let db = &app_state.db; ··· 220 207 let state_bytes = Db::get(db.repos.clone(), did_key) 221 208 .await? 222 209 .ok_or_else(|| miette::miette!("!!!THIS IS A BUG!!! repo state for {did} missing"))?; 223 - let mut state: RepoState = rmp_serde::from_slice(&state_bytes).into_diagnostic()?; 210 + let mut state: RepoState<'static> = rmp_serde::from_slice::<RepoState>(&state_bytes) 211 + .into_diagnostic()? 212 + .into_static(); 224 213 let previous_state = state.clone(); 225 214 226 215 // 1. resolve pds ··· 265 254 Err(XrpcError::Xrpc(e)) => { 266 255 if matches!(e, GetRepoError::RepoNotFound(_)) { 267 256 warn!("repo {did} not found, deleting"); 268 - ops::delete_repo(db, did)?; 257 + let mut batch = db.inner.batch(); 258 + ops::delete_repo(&mut batch, db, did)?; 259 + batch.commit().into_diagnostic()?; 269 260 return Ok(previous_state); // stop backfill 270 261 } 271 262 ··· 353 344 354 345 // 6. 
insert records into db 355 346 let start = Instant::now(); 356 - let (_state, added_records, added_blocks, collection_counts, count) = { 347 + let (_state, added_records, added_blocks, count) = { 357 348 let app_state = app_state.clone(); 358 349 let did = did.clone(); 359 350 let rev = commit.rev; ··· 363 354 let mut count = 0; 364 355 let mut added_records = 0; 365 356 let mut added_blocks = 0; 366 - let mut collection_counts: HashMap<SmolStr, i64> = HashMap::new(); 357 + let mut collection_counts: HashMap<SmolStr, u64> = HashMap::new(); 367 358 let mut batch = app_state.db.inner.batch(); 368 359 360 + // pre-load existing record CIDs for this DID to detect duplicates/updates 361 + let prefix = keys::record_prefix(&did); 362 + let prefix_len = prefix.len(); 363 + let mut existing_cids: HashMap<SmolStr, SmolStr> = HashMap::new(); 364 + for guard in app_state.db.records.prefix(&prefix) { 365 + let (key, cid_bytes) = guard.into_inner().into_diagnostic()?; 366 + // extract path (collection/rkey) from key by skipping the DID prefix 367 + let path = String::from_utf8_lossy(&key[prefix_len..]).to_smolstr(); 368 + let cid = String::from_utf8_lossy(&cid_bytes).to_smolstr(); 369 + existing_cids.insert(path, cid); 370 + } 371 + 369 372 for (key, cid) in leaves { 370 373 let val_bytes = tokio::runtime::Handle::current() 371 374 .block_on(jacquard_repo::BlockStore::get(storage.as_ref(), &cid)) 372 375 .into_diagnostic()?; 373 376 374 377 if let Some(val) = val_bytes { 375 - let parts: Vec<&str> = key.splitn(2, '/').collect(); 376 - if parts.len() == 2 { 377 - let collection = parts[0]; 378 - let rkey = parts[1]; 378 + let (collection, rkey) = ops::parse_path(&key)?; 379 + let collection = collection.to_smolstr(); 380 + let cid = Cid::ipld(cid); 379 381 380 - let db_key = keys::record_key(&did, collection, rkey); 381 - let cid_str = cid.to_smolstr(); 382 + // check if this record already exists with same CID 383 + let path = format_smolstr!("{collection}{}{rkey}", keys::SEP as 
char); 384 + let (action, is_new) = if let Some(existing_cid) = existing_cids.get(&path) 385 + { 386 + if existing_cid == cid.as_str() { 387 + continue; // skip unchanged record 388 + } 389 + ("update", false) 390 + } else { 391 + ("create", true) 392 + }; 382 393 383 - let val_vec: Vec<u8> = val.to_vec(); 384 - batch.insert( 385 - &app_state.db.blocks, 386 - keys::block_key(&cid_str), 387 - val_vec.clone(), 388 - ); 394 + let db_key = keys::record_key(&did, &collection, rkey); 389 395 390 - batch.insert( 391 - &app_state.db.records, 392 - db_key, 393 - cid_str.as_bytes().to_vec(), 394 - ); 396 + batch.insert( 397 + &app_state.db.blocks, 398 + keys::block_key(cid.as_str()), 399 + val.as_ref(), 400 + ); 401 + batch.insert(&app_state.db.records, db_key, cid.as_str().as_bytes()); 395 402 403 + added_blocks += 1; 404 + if is_new { 396 405 added_records += 1; 397 - added_blocks += 1; 398 - *collection_counts 399 - .entry(collection.to_smolstr()) 400 - .or_default() += 1; 406 + *collection_counts.entry(collection.clone()).or_default() += 1; 407 + } 401 408 402 - let event_id = 403 - app_state.db.next_event_id.fetch_add(1, Ordering::SeqCst); 404 - let evt = StoredEvent::Record { 405 - live: false, 406 - did: did.clone().into_static(), 407 - rev: rev.as_str().into(), 408 - collection: collection.into(), 409 - rkey: rkey.into(), 410 - action: "create".into(), 411 - cid: Some(cid_str), 412 - }; 409 + let event_id = app_state.db.next_event_id.fetch_add(1, Ordering::SeqCst); 410 + let evt = StoredEvent { 411 + did: TrimmedDid::from(&did), 412 + rev: CowStr::Borrowed(rev.as_str()), 413 + collection: CowStr::Borrowed(collection.as_str()), 414 + rkey: CowStr::Borrowed(rkey), 415 + action: CowStr::Borrowed(action), 416 + cid: Some(cid), 417 + }; 413 418 414 - let bytes = rmp_serde::to_vec(&evt).into_diagnostic()?; 415 - batch.insert( 416 - &app_state.db.events, 417 - keys::event_key(event_id as i64), 418 - bytes, 419 - ); 419 + let bytes = 
rmp_serde::to_vec(&evt).into_diagnostic()?; 420 + batch.insert(&app_state.db.events, keys::event_key(event_id), bytes); 420 421 421 - count += 1; 422 - } 422 + count += 1; 423 423 } 424 424 } 425 425 426 426 // 6. update status to synced 427 427 state.status = RepoStatus::Synced; 428 - state.rev = rev.as_str().into(); 429 - state.data = commit.data.to_smolstr(); 428 + state.rev = Some(rev); 429 + state.data = Some(Cid::ipld(parsed.root)); 430 430 state.last_updated_at = chrono::Utc::now().timestamp(); 431 431 432 - let did_key = keys::repo_key(&did); 433 - let bytes = rmp_serde::to_vec(&state).into_diagnostic()?; 434 - batch.insert(&app_state.db.repos, did_key, bytes); 432 + batch.insert( 433 + &app_state.db.repos, 434 + keys::repo_key(&did), 435 + ser_repo_state(&state)?, 436 + ); 437 + 438 + // add the counts 439 + for (col, cnt) in collection_counts { 440 + db::set_record_count(&mut batch, &app_state.db, &did, &col, cnt); 441 + } 435 442 436 443 batch.commit().into_diagnostic()?; 437 444 438 - Ok::<_, miette::Report>(( 439 - state, 440 - added_records, 441 - added_blocks, 442 - collection_counts, 443 - count, 444 - )) 445 + Ok::<_, miette::Report>((state, added_records, added_blocks, count)) 445 446 }) 446 447 .await 447 448 .into_diagnostic()?? 
··· 456 457 457 458 // do the counts 458 459 if added_records > 0 { 459 - tokio::spawn({ 460 - let state = app_state.clone(); 461 - let did = did.clone(); 462 - let records_fut = state 463 - .db 464 - .increment_count(keys::count_keyspace_key("records"), added_records); 465 - let blocks_fut = state 466 - .db 467 - .increment_count(keys::count_keyspace_key("blocks"), added_blocks); 468 - let events_fut = state 469 - .db 470 - .increment_count(keys::count_keyspace_key("events"), added_records); 471 - let collections_futs = collection_counts.into_iter().map(|(col, cnt)| { 472 - state 473 - .db 474 - .increment_count(keys::count_collection_key(&did, &col), cnt) 475 - }); 476 - futures::future::join_all( 477 - once(records_fut) 478 - .chain(once(blocks_fut)) 479 - .chain(once(events_fut)) 480 - .chain(collections_futs), 481 - ) 482 - }); 460 + app_state 461 + .db 462 + .update_count_async("records", added_records) 463 + .await; 464 + app_state 465 + .db 466 + .update_count_async("blocks", added_blocks) 467 + .await; 483 468 } 484 469 trace!( 485 470 "committed backfill batch for {did} in {:?}",
-9
src/buffer/mod.rs
··· 1 - use jacquard::{api::com_atproto::sync::subscribe_repos::SubscribeReposMessage, types::did::Did}; 2 - 3 - pub mod processor; 4 - 5 - pub struct BufferedMessage { 6 - pub did: Did<'static>, 7 - pub msg: SubscribeReposMessage<'static>, 8 - pub buffered_at: i64, 9 - }
-213
src/buffer/processor.rs
··· 1 - use crate::db::{keys, Db}; 2 - use crate::ops; 3 - use crate::state::AppState; 4 - use crate::types::{AccountEvt, IdentityEvt}; 5 - 6 - use super::BufferedMessage; 7 - use jacquard::api::com_atproto::sync::subscribe_repos::SubscribeReposMessage; 8 - use jacquard::types::did::Did; 9 - use jacquard_common::IntoStatic; 10 - use miette::{IntoDiagnostic, Result}; 11 - use smol_str::ToSmolStr; 12 - use std::collections::{HashMap, VecDeque}; 13 - use std::sync::Arc; 14 - use tokio::sync::mpsc; 15 - use tokio::task::spawn_blocking; 16 - use tracing::{debug, error, info, trace, warn}; 17 - 18 - pub struct BufferProcessor { 19 - state: Arc<AppState>, 20 - rx: mpsc::UnboundedReceiver<BufferedMessage>, 21 - } 22 - 23 - impl BufferProcessor { 24 - pub fn new(state: Arc<AppState>, rx: mpsc::UnboundedReceiver<BufferedMessage>) -> Self { 25 - Self { state, rx } 26 - } 27 - 28 - pub async fn run(mut self) -> Result<()> { 29 - let mut queues: HashMap<Did<'static>, VecDeque<BufferedMessage>> = HashMap::new(); 30 - 31 - // recover from DB 32 - let recovered = self.recover_from_db().await?; 33 - if !recovered.is_empty() { 34 - info!("recovered {} buffered messages from db", recovered.len()); 35 - for msg in recovered { 36 - queues.entry(msg.did.clone()).or_default().push_back(msg); 37 - } 38 - } 39 - 40 - let mut to_remove: Vec<Did<'static>> = Vec::new(); 41 - 42 - loop { 43 - while let Ok(msg) = self.rx.try_recv() { 44 - queues.entry(msg.did.clone()).or_default().push_back(msg); 45 - } 46 - 47 - for (did, queue) in &mut queues { 48 - if self.state.blocked_dids.contains_sync(did) { 49 - continue; 50 - } 51 - 52 - while let Some(buffered) = queue.pop_front() { 53 - if let Err(e) = self.process_message(buffered).await { 54 - error!("failed to process buffered message for {did}: {e}"); 55 - Db::check_poisoned_report(&e); 56 - } 57 - } 58 - 59 - if queue.is_empty() { 60 - to_remove.push(did.clone()); 61 - } 62 - } 63 - 64 - for did in to_remove.drain(..) 
{ 65 - queues.remove(&did); 66 - } 67 - 68 - // wait until we receive a new message 69 - match self.rx.recv().await { 70 - Some(msg) => { 71 - queues.entry(msg.did.clone()).or_default().push_back(msg); 72 - } 73 - None => { 74 - debug!("buffer processor channel closed, exiting"); 75 - break; 76 - } 77 - } 78 - } 79 - 80 - Ok(()) 81 - } 82 - 83 - async fn process_message(&self, buffered: BufferedMessage) -> Result<()> { 84 - let did = buffered.did; 85 - let buffered_at = buffered.buffered_at; 86 - 87 - match buffered.msg { 88 - SubscribeReposMessage::Commit(commit) => { 89 - trace!("processing buffered commit for {did}"); 90 - let state = self.state.clone(); 91 - tokio::task::spawn_blocking(move || ops::apply_commit(&state.db, &commit, true)) 92 - .await 93 - .into_diagnostic()??; 94 - } 95 - SubscribeReposMessage::Identity(identity) => { 96 - debug!("processing buffered identity for {did}"); 97 - let handle = identity.handle.as_ref().map(|h| h.as_str().to_smolstr()); 98 - let evt = IdentityEvt { 99 - did: did.clone(), 100 - handle, 101 - }; 102 - ops::emit_identity_event(&self.state.db, evt); 103 - } 104 - SubscribeReposMessage::Account(account) => { 105 - debug!("processing buffered account for {did}"); 106 - let evt = AccountEvt { 107 - did: did.clone(), 108 - active: account.active, 109 - status: account.status.as_ref().map(|s| s.to_smolstr()), 110 - }; 111 - ops::emit_account_event(&self.state.db, evt); 112 - 113 - let did = did.clone(); 114 - let state = self.state.clone(); 115 - let account = account.clone(); 116 - 117 - tokio::task::spawn_blocking(move || -> Result<()> { 118 - if !account.active { 119 - use jacquard::api::com_atproto::sync::subscribe_repos::AccountStatus; 120 - if let Some(status) = &account.status { 121 - match status { 122 - AccountStatus::Deleted => { 123 - info!("account {did} deleted, wiping data"); 124 - ops::delete_repo(&state.db, &did)?; 125 - } 126 - AccountStatus::Takendown => { 127 - ops::update_repo_status( 128 - &state.db, 129 - 
&did, 130 - crate::types::RepoStatus::Takendown, 131 - )?; 132 - } 133 - AccountStatus::Suspended => { 134 - ops::update_repo_status( 135 - &state.db, 136 - &did, 137 - crate::types::RepoStatus::Suspended, 138 - )?; 139 - } 140 - AccountStatus::Deactivated => { 141 - ops::update_repo_status( 142 - &state.db, 143 - &did, 144 - crate::types::RepoStatus::Deactivated, 145 - )?; 146 - } 147 - AccountStatus::Throttled | AccountStatus::Desynchronized => { 148 - let status_str = status.as_str().to_smolstr(); 149 - ops::update_repo_status( 150 - &state.db, 151 - &did, 152 - crate::types::RepoStatus::Error(status_str), 153 - )?; 154 - } 155 - AccountStatus::Other(s) => { 156 - warn!("unknown account status for {did}: {s}"); 157 - } 158 - } 159 - } else { 160 - warn!("account {did} inactive but no status provided"); 161 - } 162 - } else { 163 - // active account, clear any error/suspension states if they exist 164 - // we set it to Synced because we are receiving live events for it 165 - ops::update_repo_status(&state.db, &did, crate::types::RepoStatus::Synced)?; 166 - } 167 - Ok(()) 168 - }) 169 - .await 170 - .into_diagnostic()??; 171 - } 172 - _ => { 173 - warn!("unknown message type in buffer for {did}"); 174 - } 175 - } 176 - 177 - // remove from DB buffer 178 - self.remove_from_db_buffer(&did, buffered_at).await?; 179 - 180 - Ok(()) 181 - } 182 - 183 - async fn recover_from_db(&self) -> Result<Vec<BufferedMessage>> { 184 - let state = self.state.clone(); 185 - 186 - spawn_blocking(move || { 187 - let mut recovered = Vec::new(); 188 - for item in state.db.buffer.iter() { 189 - let (key, value) = item.into_inner().into_diagnostic()?; 190 - let (did, ts) = keys::parse_buffer_key(&key)?; 191 - let msg = 192 - rmp_serde::from_slice::<SubscribeReposMessage>(&value).into_diagnostic()?; 193 - recovered.push(BufferedMessage { 194 - did, 195 - msg: msg.into_static(), 196 - buffered_at: ts, 197 - }); 198 - } 199 - // ensure chronological order across all DIDs 200 - 
recovered.sort_by_key(|m| m.buffered_at); 201 - Ok(recovered) 202 - }) 203 - .await 204 - .into_diagnostic() 205 - .flatten() 206 - } 207 - 208 - async fn remove_from_db_buffer(&self, did: &Did<'_>, buffered_at: i64) -> Result<()> { 209 - let key = keys::buffer_key(did, buffered_at); 210 - self.state.db.buffer.remove(key).into_diagnostic()?; 211 - Ok(()) 212 - } 213 - }
+17 -37
src/crawler/mod.rs
··· 1 1 use crate::db::{keys, ser_repo_state, Db}; 2 + use crate::ops::send_backfill_req; 2 3 use crate::state::AppState; 3 - use crate::types::{RepoState, RepoStatus}; 4 + use crate::types::RepoState; 4 5 use jacquard::api::com_atproto::sync::list_repos::{ListRepos, ListReposOutput}; 5 6 use jacquard::prelude::*; 6 - use jacquard::types::did::Did; 7 7 use jacquard_common::CowStr; 8 8 use miette::{IntoDiagnostic, Result}; 9 9 use smol_str::SmolStr; ··· 74 74 75 75 // 3. process repos 76 76 for repo in output.repos { 77 - let did_str = smol_str::SmolStr::from(repo.did.as_str()); 78 77 let did_key = keys::repo_key(&repo.did); 79 78 80 79 // check if known 81 - if !Db::contains_key(db.repos.clone(), did_key).await? { 82 - debug!("crawler found new repo: {did_str}"); 80 + if !Db::contains_key(db.repos.clone(), &did_key).await? { 81 + debug!("crawler found new repo: {}", repo.did); 83 82 84 - // create state (backfilling) 85 - let mut state = RepoState::new(repo.did.to_owned()); 86 - state.status = RepoStatus::Backfilling; 87 - 88 - batch.insert(&db.repos, did_key, ser_repo_state(&state)?); 89 - batch.insert(&db.pending, did_key, Vec::new()); 90 - to_queue.push(did_str); 83 + let state = RepoState::backfilling(&repo.did); 84 + batch.insert(&db.repos, &did_key, ser_repo_state(&state)?); 85 + batch.insert(&db.pending, &did_key, Vec::new()); 86 + to_queue.push(repo.did.clone()); 91 87 } 92 88 } 93 89 94 - // update counts if we found new repos 95 - if !to_queue.is_empty() { 96 - let count = to_queue.len() as i64; 97 - let repos_fut = self 98 - .state 99 - .db 100 - .increment_count(keys::count_keyspace_key("repos"), count); 101 - let pending_fut = self 102 - .state 103 - .db 104 - .increment_count(keys::count_keyspace_key("pending"), count); 105 - tokio::spawn(futures::future::join_all([repos_fut, pending_fut])); 106 - } 107 - 108 90 // 4. 
update cursor 109 91 if let Some(new_cursor) = output.cursor { 110 92 cursor = Some(new_cursor.as_str().into()); ··· 124 106 .await 125 107 .into_diagnostic()??; 126 108 109 + // update counts if we found new repos 110 + if !to_queue.is_empty() { 111 + let count = to_queue.len() as i64; 112 + self.state.db.update_count_async("repos", count).await; 113 + self.state.db.update_count_async("pending", count).await; 114 + } 115 + 127 116 // 5. queue for backfill 128 - for did_str in to_queue { 129 - let did = match Did::new_owned(did_str.as_str()) { 130 - Ok(d) => d, 131 - Err(e) => { 132 - error!("got invalid DID ({did_str}) from relay, skipping this: {e}"); 133 - continue; 134 - } 135 - }; 136 - if let Err(e) = self.state.backfill_tx.send(did) { 137 - error!("crawler failed to queue {did_str}: {e}"); 138 - } 117 + for did in to_queue { 118 + send_backfill_req(&self.state, did)?; 139 119 } 140 120 141 121 if cursor.is_none() {
+29 -49
src/db/keys.rs
··· 1 1 use jacquard_common::types::string::Did; 2 - use miette::{Context, IntoDiagnostic, Result}; 2 + 3 + use crate::db::types::TrimmedDid; 3 4 4 5 /// separator used for composite keys 5 6 pub const SEP: u8 = 0x00; 6 7 7 - pub fn did_prefix<'a>(did: &'a Did<'a>) -> &'a str { 8 - did.as_str().trim_start_matches("did:") 9 - } 10 - 11 - pub fn reconstruct_did<'a>(trimmed_did: &'a str) -> Result<Did<'static>> { 12 - Did::new_owned(format!("did:{trimmed_did}")) 13 - .into_diagnostic() 14 - .wrap_err("expected did to be trimmed") 15 - } 8 + pub const CURSOR_KEY: &[u8] = b"firehose_cursor"; 16 9 17 10 // Key format: {DID} (trimmed) 18 - pub fn repo_key<'a>(did: &'a Did) -> &'a [u8] { 19 - did_prefix(did).as_bytes() 11 + pub fn repo_key<'a>(did: &'a Did) -> TrimmedDid<'a> { 12 + TrimmedDid::from(did) 20 13 } 21 14 22 15 // key format: {DID}\x00{Collection}\x00{RKey} (DID trimmed) 23 16 pub fn record_key(did: &Did, collection: &str, rkey: &str) -> Vec<u8> { 24 - let prefix = did_prefix(did); 25 - let mut key = Vec::with_capacity(prefix.len() + collection.len() + rkey.len() + 2); 26 - key.extend_from_slice(prefix.as_bytes()); 17 + let repo = TrimmedDid::from(did); 18 + let mut key = Vec::with_capacity(repo.len() + collection.len() + rkey.len() + 2); 19 + key.extend_from_slice(repo.as_bytes()); 27 20 key.push(SEP); 28 21 key.extend_from_slice(collection.as_bytes()); 29 22 key.push(SEP); ··· 31 24 key 32 25 } 33 26 27 + // prefix format: {DID}\x00 (DID trimmed) - for scanning all records of a DID 28 + pub fn record_prefix(did: &Did) -> Vec<u8> { 29 + let repo = TrimmedDid::from(did); 30 + let mut prefix = Vec::with_capacity(repo.len() + 1); 31 + prefix.extend_from_slice(repo.as_bytes()); 32 + prefix.push(SEP); 33 + prefix 34 + } 35 + 34 36 // key format: {DID} 35 37 36 38 // key format: {SEQ} 37 - pub fn event_key(seq: i64) -> [u8; 8] { 39 + pub fn event_key(seq: u64) -> [u8; 8] { 38 40 seq.to_be_bytes() 39 41 } 40 42 ··· 43 45 cid.as_bytes() 44 46 } 45 47 48 + pub const 
COUNT_KS_PREFIX: &[u8] = &[b'k', SEP]; 49 + 46 50 // count keys for the counts keyspace 47 51 // key format: k\x00{keyspace_name} 48 52 pub fn count_keyspace_key(name: &str) -> Vec<u8> { 49 - let mut key = Vec::with_capacity(2 + name.len()); 50 - key.push(b'k'); 51 - key.push(SEP); 53 + let mut key = Vec::with_capacity(COUNT_KS_PREFIX.len() + name.len()); 54 + key.extend_from_slice(COUNT_KS_PREFIX); 52 55 key.extend_from_slice(name.as_bytes()); 53 56 key 54 57 } 55 58 59 + pub const COUNT_COLLECTION_PREFIX: &[u8] = &[b'r', SEP]; 60 + 56 61 // key format: r\x00{DID}\x00{collection} (DID trimmed) 57 62 pub fn count_collection_key(did: &Did, collection: &str) -> Vec<u8> { 58 - let prefix = did_prefix(did); 59 - let mut key = Vec::with_capacity(2 + prefix.len() + 1 + collection.len()); 60 - key.push(b'r'); 61 - key.push(SEP); 62 - key.extend_from_slice(prefix.as_bytes()); 63 + let repo = TrimmedDid::from(did); 64 + let mut key = 65 + Vec::with_capacity(COUNT_COLLECTION_PREFIX.len() + repo.len() + 1 + collection.len()); 66 + key.extend_from_slice(COUNT_COLLECTION_PREFIX); 67 + key.extend_from_slice(repo.as_bytes()); 63 68 key.push(SEP); 64 69 key.extend_from_slice(collection.as_bytes()); 65 70 key 66 71 } 67 - 68 - // key format: {DID}\x00{timestamp} (for buffer entries) 69 - pub fn buffer_key(did: &Did, timestamp: i64) -> Vec<u8> { 70 - let mut key = Vec::with_capacity(did.len() + 1 + 8); 71 - key.extend_from_slice(did_prefix(did).as_bytes()); 72 - key.push(SEP); 73 - key.extend_from_slice(&timestamp.to_be_bytes()); 74 - key 75 - } 76 - 77 - pub fn parse_buffer_key(key: &[u8]) -> Result<(Did<'static>, i64)> { 78 - let pos = key 79 - .iter() 80 - .rposition(|&b| b == SEP) 81 - .ok_or_else(|| miette::miette!("buffer key invalid, no seperator found"))?; 82 - let did_bytes = &key[..pos]; 83 - let ts_bytes = &key[pos + 1..]; 84 - let timestamp = i64::from_be_bytes( 85 - ts_bytes 86 - .try_into() 87 - .map_err(|e| miette::miette!("buffer key invalid, {e}"))?, 88 - ); 89 - 
let did = reconstruct_did(&String::from_utf8_lossy(did_bytes))?; 90 - Ok((did, timestamp)) 91 - }
+150 -78
src/db/mod.rs
··· 1 1 use crate::types::{BroadcastEvent, RepoState}; 2 2 use fjall::{Database, Keyspace, KeyspaceCreateOptions, OwnedWriteBatch, PersistMode, Slice}; 3 - use futures::FutureExt; 4 3 use jacquard::IntoStatic; 5 4 use jacquard_common::types::string::Did; 6 5 use miette::{Context, IntoDiagnostic, Result}; 7 - use std::future::Future; 6 + use scc::HashMap; 7 + use smol_str::SmolStr; 8 8 use std::path::Path; 9 9 use std::sync::Arc; 10 10 11 11 pub mod keys; 12 + pub mod types; 12 13 13 14 use std::sync::atomic::AtomicU64; 14 15 use tokio::sync::broadcast; ··· 20 21 pub records: Keyspace, 21 22 pub blocks: Keyspace, 22 23 pub cursors: Keyspace, 23 - pub buffer: Keyspace, 24 24 pub pending: Keyspace, 25 25 pub resync: Keyspace, 26 26 pub events: Keyspace, 27 27 pub counts: Keyspace, 28 28 pub event_tx: broadcast::Sender<BroadcastEvent>, 29 29 pub next_event_id: Arc<AtomicU64>, 30 + pub counts_map: HashMap<SmolStr, u64>, 30 31 } 31 32 32 33 impl Db { ··· 62 63 .max_memtable_size(32 * 2_u64.pow(20)), 63 64 )?; 64 65 let cursors = open_ks("cursors", opts().expect_point_read_hits(true))?; 65 - let buffer = open_ks("buffer", opts())?; 66 66 let pending = open_ks("pending", opts())?; 67 67 let resync = open_ks("resync", opts())?; 68 68 let events = open_ks("events", opts())?; ··· 79 79 ); 80 80 } 81 81 82 + // load counts into memory 83 + let counts_map = HashMap::new(); 84 + for guard in counts.prefix(keys::COUNT_KS_PREFIX) { 85 + let (k, v) = guard.into_inner().into_diagnostic()?; 86 + let name = std::str::from_utf8(&k[keys::COUNT_KS_PREFIX.len()..]) 87 + .into_diagnostic() 88 + .wrap_err("expected valid utf8 for ks count key")?; 89 + let _ = counts_map.insert_sync( 90 + SmolStr::new(name), 91 + u64::from_be_bytes(v.as_ref().try_into().unwrap()), 92 + ); 93 + } 94 + 82 95 let (event_tx, _) = broadcast::channel(10000); 83 96 84 97 Ok(Self { ··· 87 100 records, 88 101 blocks, 89 102 cursors, 90 - buffer, 91 103 pending, 92 104 resync, 93 105 events, 94 106 counts, 95 107 
event_tx, 108 + counts_map, 96 109 next_event_id: Arc::new(AtomicU64::new(last_id + 1)), 97 110 }) 98 111 } ··· 109 122 .into_diagnostic()? 110 123 } 111 124 125 + #[allow(dead_code)] 112 126 pub async fn insert( 113 127 ks: Keyspace, 114 128 key: impl AsRef<[u8]>, ··· 136 150 .into_diagnostic()? 137 151 } 138 152 139 - pub fn increment_count( 140 - &self, 141 - key: impl AsRef<[u8]>, 142 - delta: i64, 143 - ) -> impl Future<Output = Result<i64>> + Send + 'static { 144 - let key = key.as_ref().to_vec(); 145 - let counts = self.counts.clone(); 146 - tokio::task::spawn_blocking(move || { 147 - let current = counts 148 - .get(&key) 149 - .into_diagnostic()? 150 - .map(|v| { 151 - let mut buf = [0u8; 8]; 152 - if v.len() == 8 { 153 - buf.copy_from_slice(&v); 154 - i64::from_be_bytes(buf) 155 - } else { 156 - 0 157 - } 158 - }) 159 - .unwrap_or(0); 160 - let new_val = current.saturating_add(delta); 161 - counts 162 - .insert(key, new_val.to_be_bytes()) 163 - .into_diagnostic()?; 164 - Ok(new_val) 165 - }) 166 - .map(|res| res.into_diagnostic().flatten()) 153 + pub fn update_count(&self, key: &str, delta: i64) { 154 + let mut entry = self.counts_map.entry_sync(SmolStr::new(key)).or_insert(0); 155 + *entry = (*entry as i64).saturating_add(delta) as u64; 167 156 } 168 157 169 - pub async fn get_count(&self, key: impl AsRef<[u8]>) -> Result<i64> { 170 - let key = key.as_ref().to_vec(); 171 - let counts = self.counts.clone(); 172 - tokio::task::spawn_blocking(move || { 173 - Ok(counts 174 - .get(&key) 175 - .into_diagnostic()? 
176 - .map(|v| { 177 - let mut buf = [0u8; 8]; 178 - if v.len() == 8 { 179 - buf.copy_from_slice(&v); 180 - i64::from_be_bytes(buf) 181 - } else { 182 - 0 183 - } 184 - }) 185 - .unwrap_or(0)) 186 - }) 187 - .await 188 - .into_diagnostic() 189 - .flatten() 158 + pub async fn update_count_async(&self, key: &str, delta: i64) { 159 + let mut entry = self 160 + .counts_map 161 + .entry_async(SmolStr::new(key)) 162 + .await 163 + .or_insert(0); 164 + *entry = (*entry as i64).saturating_add(delta) as u64; 165 + } 166 + 167 + pub async fn get_count(&self, key: &str) -> u64 { 168 + self.counts_map 169 + .read_async(key, |_, v| *v) 170 + .await 171 + .unwrap_or(0) 190 172 } 191 173 192 174 pub fn update_repo_state<F, T>( 193 - mut batch: OwnedWriteBatch, 175 + batch: &mut OwnedWriteBatch, 194 176 repos: &Keyspace, 195 177 did: &Did<'_>, 196 178 f: F, 197 - ) -> Result<(Option<(RepoState, T)>, fjall::OwnedWriteBatch)> 179 + ) -> Result<Option<(RepoState<'static>, T)>> 198 180 where 199 181 F: FnOnce(&mut RepoState, (&[u8], &mut fjall::OwnedWriteBatch)) -> Result<(bool, T)>, 200 182 { 201 183 let key = keys::repo_key(did); 202 - if let Some(bytes) = repos.get(key).into_diagnostic()? { 203 - let mut state: RepoState = deser_repo_state(bytes)?; 204 - let (changed, result) = f(&mut state, (key, &mut batch))?; 184 + if let Some(bytes) = repos.get(&key).into_diagnostic()? 
{ 185 + let mut state: RepoState = deser_repo_state(bytes.as_ref())?.into_static(); 186 + let (changed, result) = f(&mut state, (key.as_bytes(), batch))?; 205 187 if changed { 206 188 batch.insert(repos, key, ser_repo_state(&state)?); 207 189 } 208 - Ok((Some((state, result)), batch)) 190 + Ok(Some((state, result))) 209 191 } else { 210 - Ok((None, batch)) 192 + Ok(None) 211 193 } 212 194 } 213 195 ··· 215 197 &self, 216 198 did: &Did<'_>, 217 199 f: F, 218 - ) -> Result<Option<(RepoState, T)>> 200 + ) -> Result<Option<(RepoState<'static>, T)>> 219 201 where 220 202 F: FnOnce(&mut RepoState, (&[u8], &mut fjall::OwnedWriteBatch)) -> Result<(bool, T)> 221 203 + Send 222 204 + 'static, 223 205 T: Send + 'static, 224 206 { 225 - let batch = self.inner.batch(); 207 + let mut batch = self.inner.batch(); 226 208 let repos = self.repos.clone(); 227 209 let did = did.clone().into_static(); 228 210 229 211 tokio::task::spawn_blocking(move || { 230 - let (Some((state, t)), batch) = Self::update_repo_state(batch, &repos, &did, f)? else { 212 + let Some((state, t)) = Self::update_repo_state(&mut batch, &repos, &did, f)? else { 231 213 return Ok(None); 232 214 }; 233 215 batch.commit().into_diagnostic()?; ··· 236 218 .await 237 219 .into_diagnostic()? 238 220 } 221 + } 239 222 240 - pub fn check_poisoned(e: &fjall::Error) { 241 - if matches!(e, fjall::Error::Poisoned) { 242 - error!("!!! DATABASE POISONED !!! exiting"); 243 - std::process::exit(10); 244 - } 245 - } 223 + pub fn set_firehose_cursor(db: &Db, cursor: i64) -> Result<()> { 224 + db.cursors 225 + .insert(keys::CURSOR_KEY, cursor.to_be_bytes()) 226 + .into_diagnostic() 227 + } 246 228 247 - pub fn check_poisoned_report(e: &miette::Report) { 248 - let Some(err) = e.downcast_ref::<fjall::Error>() else { 249 - return; 250 - }; 251 - Self::check_poisoned(err); 252 - } 229 + pub async fn get_firehose_cursor(db: &Db) -> Result<Option<i64>> { 230 + Db::get(db.cursors.clone(), keys::CURSOR_KEY) 231 + .await? 
232 + .map(|v| { 233 + Ok(i64::from_be_bytes( 234 + v.as_ref() 235 + .try_into() 236 + .into_diagnostic() 237 + .wrap_err("cursor is not 8 bytes")?, 238 + )) 239 + }) 240 + .transpose() 253 241 } 254 242 255 243 pub fn ser_repo_state(state: &RepoState) -> Result<Vec<u8>> { 256 244 rmp_serde::to_vec(&state).into_diagnostic() 257 245 } 258 246 259 - pub fn deser_repo_state(bytes: impl AsRef<[u8]>) -> Result<RepoState> { 260 - rmp_serde::from_slice(bytes.as_ref()).into_diagnostic() 247 + pub fn deser_repo_state<'b>(bytes: &'b [u8]) -> Result<RepoState<'b>> { 248 + rmp_serde::from_slice(bytes).into_diagnostic() 249 + } 250 + 251 + pub fn check_poisoned(e: &fjall::Error) { 252 + if matches!(e, fjall::Error::Poisoned) { 253 + error!("!!! DATABASE POISONED !!! exiting"); 254 + std::process::exit(10); 255 + } 256 + } 257 + 258 + pub fn check_poisoned_report(e: &miette::Report) { 259 + let Some(err) = e.downcast_ref::<fjall::Error>() else { 260 + return; 261 + }; 262 + self::check_poisoned(err); 263 + } 264 + 265 + pub fn set_ks_count(batch: &mut OwnedWriteBatch, db: &Db, name: &str, count: u64) { 266 + let key = keys::count_keyspace_key(name); 267 + batch.insert(&db.counts, key, count.to_be_bytes()); 268 + } 269 + 270 + pub fn persist_counts(db: &Db) -> Result<()> { 271 + let mut batch = db.inner.batch(); 272 + db.counts_map.iter_sync(|k, v| { 273 + set_ks_count(&mut batch, db, k, *v); 274 + true 275 + }); 276 + batch.commit().into_diagnostic() 277 + } 278 + 279 + pub fn set_record_count( 280 + batch: &mut OwnedWriteBatch, 281 + db: &Db, 282 + did: &Did<'_>, 283 + collection: &str, 284 + count: u64, 285 + ) { 286 + let key = keys::count_collection_key(did, collection); 287 + batch.insert(&db.counts, key, count.to_be_bytes()); 288 + } 289 + 290 + pub fn update_record_count( 291 + batch: &mut OwnedWriteBatch, 292 + db: &Db, 293 + did: &Did<'_>, 294 + collection: &str, 295 + delta: i64, 296 + ) -> Result<()> { 297 + let key = keys::count_collection_key(did, collection); 298 + 
let count = db 299 + .counts 300 + .get(&key) 301 + .into_diagnostic()? 302 + .map(|v| -> Result<_> { 303 + Ok(u64::from_be_bytes( 304 + v.as_ref() 305 + .try_into() 306 + .into_diagnostic() 307 + .wrap_err("expected to be count (8 bytes)")?, 308 + )) 309 + }) 310 + .transpose()? 311 + .unwrap_or(0); 312 + let new_count = (count as i64).saturating_add(delta) as u64; 313 + batch.insert(&db.counts, key, new_count.to_be_bytes()); 314 + Ok(()) 315 + } 316 + 317 + pub fn get_record_count(db: &Db, did: &Did<'_>, collection: &str) -> Result<u64> { 318 + let key = keys::count_collection_key(did, collection); 319 + let count = db 320 + .counts 321 + .get(&key) 322 + .into_diagnostic()? 323 + .map(|v| -> Result<_> { 324 + Ok(u64::from_be_bytes( 325 + v.as_ref() 326 + .try_into() 327 + .into_diagnostic() 328 + .wrap_err("expected to be count (8 bytes)")?, 329 + )) 330 + }) 331 + .transpose()?; 332 + Ok(count.unwrap_or(0)) 261 333 }
+95
src/db/types.rs
··· 1 + use fjall::UserKey; 2 + use jacquard::{CowStr, IntoStatic}; 3 + use jacquard_common::types::string::Did; 4 + use miette::{Context, IntoDiagnostic, Result}; 5 + use serde::{Deserialize, Deserializer, Serialize, Serializer}; 6 + use smol_str::format_smolstr; 7 + 8 + #[derive(Clone, Debug)] 9 + pub struct TrimmedDid<'s>(CowStr<'s>); 10 + 11 + impl<'s> TrimmedDid<'s> { 12 + pub fn as_str(&self) -> &str { 13 + self.0.as_ref() 14 + } 15 + 16 + pub fn as_bytes(&self) -> &[u8] { 17 + self.0.as_bytes() 18 + } 19 + 20 + pub fn len(&self) -> usize { 21 + self.0.len() 22 + } 23 + 24 + pub fn into_static(self) -> TrimmedDid<'static> { 25 + TrimmedDid(self.0.into_static()) 26 + } 27 + 28 + pub fn to_did(&self) -> Did<'static> { 29 + Did::new_owned(format_smolstr!("did:{}", self.0)).expect("expected valid trimmed did") 30 + } 31 + } 32 + 33 + impl<'a> TryFrom<&'a [u8]> for TrimmedDid<'a> { 34 + type Error = miette::Report; 35 + 36 + fn try_from(value: &'a [u8]) -> Result<Self> { 37 + let s = std::str::from_utf8(value) 38 + .into_diagnostic() 39 + .wrap_err("expected trimmed did to be valid utf-8")?; 40 + 41 + // validate using Did::new with stack-allocated buffer 42 + const PREFIX: &[u8] = b"did:"; 43 + const MAX_DID_LEN: usize = 2048; 44 + let full_len = PREFIX.len() + value.len(); 45 + if full_len > MAX_DID_LEN { 46 + miette::bail!("trimmed did too long"); 47 + } 48 + let mut buf = [0u8; MAX_DID_LEN]; 49 + buf[..PREFIX.len()].copy_from_slice(PREFIX); 50 + buf[PREFIX.len()..full_len].copy_from_slice(value); 51 + let full_did = std::str::from_utf8(&buf[..full_len]).expect("already validated utf-8"); 52 + Did::new(full_did) 53 + .into_diagnostic() 54 + .wrap_err("expected trimmed did to be valid did")?; 55 + 56 + Ok(TrimmedDid(CowStr::Borrowed(s))) 57 + } 58 + } 59 + 60 + impl<'a> AsRef<[u8]> for TrimmedDid<'a> { 61 + fn as_ref(&self) -> &[u8] { 62 + self.as_bytes() 63 + } 64 + } 65 + 66 + impl<'a> Into<UserKey> for TrimmedDid<'a> { 67 + fn into(self) -> UserKey { 68 + 
UserKey::new(self.as_bytes()) 69 + } 70 + } 71 + 72 + impl<'a> Into<UserKey> for &TrimmedDid<'a> { 73 + fn into(self) -> UserKey { 74 + UserKey::new(self.as_bytes()) 75 + } 76 + } 77 + 78 + impl<'a> From<&'a Did<'a>> for TrimmedDid<'a> { 79 + fn from(did: &'a Did<'a>) -> Self { 80 + TrimmedDid(CowStr::Borrowed(did.as_str().trim_start_matches("did:"))) 81 + } 82 + } 83 + 84 + impl Serialize for TrimmedDid<'_> { 85 + fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { 86 + self.0.serialize(serializer) 87 + } 88 + } 89 + 90 + impl<'de> Deserialize<'de> for TrimmedDid<'de> { 91 + fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { 92 + let s = <&'de str>::deserialize(deserializer)?; 93 + Ok(TrimmedDid(CowStr::Borrowed(s))) 94 + } 95 + }
+117
src/ingest/firehose.rs
··· 1 + use crate::db::{self, keys, Db}; 2 + use crate::ingest::BufferTx; 3 + use crate::state::AppState; 4 + use jacquard::api::com_atproto::sync::subscribe_repos::{SubscribeRepos, SubscribeReposMessage}; 5 + use jacquard::types::did::Did; 6 + use jacquard_common::xrpc::{SubscriptionClient, TungsteniteSubscriptionClient}; 7 + use miette::Result; 8 + use n0_future::StreamExt; 9 + use std::sync::atomic::Ordering; 10 + use std::sync::Arc; 11 + use std::time::Duration; 12 + use tracing::{error, info}; 13 + use url::Url; 14 + 15 + pub struct FirehoseIngestor { 16 + state: Arc<AppState>, 17 + buffer_tx: BufferTx, 18 + relay_host: Url, 19 + full_network: bool, 20 + } 21 + 22 + impl FirehoseIngestor { 23 + pub fn new( 24 + state: Arc<AppState>, 25 + buffer_tx: BufferTx, 26 + relay_host: Url, 27 + full_network: bool, 28 + ) -> Self { 29 + Self { 30 + state, 31 + buffer_tx, 32 + relay_host, 33 + full_network, 34 + } 35 + } 36 + 37 + pub async fn run(mut self) -> Result<()> { 38 + loop { 39 + // 1. load cursor 40 + let current_cursor = self.state.cur_firehose.load(Ordering::SeqCst); 41 + let start_cursor = if current_cursor > 0 { 42 + Some(current_cursor) 43 + } else { 44 + db::get_firehose_cursor(&self.state.db).await? 45 + }; 46 + match start_cursor { 47 + Some(c) => info!("resuming from cursor: {c}"), 48 + None => info!("no cursor found, live tailing"), 49 + } 50 + 51 + if let Some(c) = start_cursor { 52 + self.state.cur_firehose.store(c, Ordering::SeqCst); 53 + } 54 + 55 + // 2. 
connect 56 + let client = TungsteniteSubscriptionClient::from_base_uri(self.relay_host.clone()); 57 + let params = SubscribeRepos::new; 58 + let params = start_cursor 59 + .map(|c| params().cursor(c)) 60 + .unwrap_or_else(params) 61 + .build(); 62 + 63 + let stream = match client.subscribe(&params).await { 64 + Ok(s) => s, 65 + Err(e) => { 66 + error!("failed to connect to firehose: {e}, retrying in 5s..."); 67 + tokio::time::sleep(Duration::from_secs(5)).await; 68 + continue; 69 + } 70 + }; 71 + 72 + let (_sink, mut messages) = stream.into_stream(); 73 + 74 + info!("firehose connected"); 75 + 76 + // 3. process loop 77 + while let Some(msg_res) = messages.next().await { 78 + match msg_res { 79 + Ok(msg) => self.handle_message(msg).await, 80 + Err(e) => { 81 + error!("firehose stream error: {e}"); 82 + break; 83 + } 84 + } 85 + } 86 + 87 + error!("firehose disconnected, reconnecting in 5s..."); 88 + tokio::time::sleep(Duration::from_secs(5)).await; 89 + } 90 + } 91 + 92 + async fn handle_message(&mut self, msg: SubscribeReposMessage<'static>) { 93 + let did = match &msg { 94 + SubscribeReposMessage::Commit(commit) => &commit.repo, 95 + SubscribeReposMessage::Identity(identity) => &identity.did, 96 + SubscribeReposMessage::Account(account) => &account.did, 97 + SubscribeReposMessage::Sync(sync) => &sync.did, 98 + _ => return, 99 + }; 100 + 101 + if !self.should_process(did).await.unwrap_or(false) { 102 + return; 103 + } 104 + 105 + if let Err(e) = self.buffer_tx.send(msg) { 106 + error!("failed to send message to buffer processor: {e}"); 107 + } 108 + } 109 + 110 + async fn should_process(&self, did: &Did<'_>) -> Result<bool> { 111 + if self.full_network { 112 + return Ok(true); 113 + } 114 + let did_key = keys::repo_key(did); 115 + Db::contains_key(self.state.db.repos.clone(), did_key).await 116 + } 117 + }
+8 -131
src/ingest/mod.rs
use jacquard_api::com_atproto::sync::subscribe_repos::SubscribeReposMessage;
use tokio::sync::mpsc;

pub mod firehose;
pub mod worker;

/// A firehose message held in the ingest buffer, owned (`'static`) so it can
/// cross the channel between the ingestor and the worker.
// NOTE(review): imported as `jacquard_api::...` while sibling modules use
// `jacquard::api::...` for the same type — confirm both paths resolve to the
// same crate re-export.
pub type BufferedMessage = SubscribeReposMessage<'static>;

/// Sending half of the ingest buffer channel (ingestor -> worker).
pub type BufferTx = mpsc::UnboundedSender<BufferedMessage>;
/// Receiving half of the ingest buffer channel.
// NOTE(review): marked dead_code — presumably consumers name the concrete
// receiver type directly; confirm the alias is still wanted.
#[allow(dead_code)]
pub type BufferRx = mpsc::UnboundedReceiver<BufferedMessage>;
+302
src/ingest/worker.rs
··· 1 + use crate::db::{self, keys}; 2 + use crate::ingest::BufferedMessage; 3 + use crate::ops::{self, send_backfill_req}; 4 + use crate::state::AppState; 5 + use crate::types::{AccountEvt, IdentityEvt, RepoState, RepoStatus}; 6 + use jacquard::api::com_atproto::sync::subscribe_repos::SubscribeReposMessage; 7 + 8 + use fjall::OwnedWriteBatch; 9 + use jacquard::cowstr::ToCowStr; 10 + use jacquard::types::did::Did; 11 + use jacquard_common::IntoStatic; 12 + use miette::{IntoDiagnostic, Result}; 13 + use smol_str::ToSmolStr; 14 + use std::collections::HashSet; 15 + use std::sync::Arc; 16 + use std::time::Duration; 17 + use tokio::sync::mpsc; 18 + use tracing::{debug, error, trace, warn}; 19 + 20 + #[derive(Debug, Clone, Copy)] 21 + enum ProcessResult { 22 + Deleted, 23 + Ok, 24 + } 25 + 26 + enum RepoCheckResult { 27 + Syncing, 28 + Ok(RepoState<'static>), 29 + } 30 + 31 + pub struct FirehoseWorker { 32 + state: Arc<AppState>, 33 + rx: mpsc::UnboundedReceiver<BufferedMessage>, 34 + } 35 + 36 + impl FirehoseWorker { 37 + pub fn new(state: Arc<AppState>, rx: mpsc::UnboundedReceiver<BufferedMessage>) -> Self { 38 + Self { state, rx } 39 + } 40 + 41 + pub fn run(mut self, handle: tokio::runtime::Handle) -> Result<()> { 42 + const BUF_SIZE: usize = 500; 43 + let mut buf = Vec::<BufferedMessage>::with_capacity(BUF_SIZE); 44 + let mut failed = Vec::<BufferedMessage>::new(); 45 + 46 + loop { 47 + let mut batch = self.state.db.inner.batch(); 48 + let mut deleted = HashSet::new(); 49 + 50 + for msg in buf.drain(..) 
{ 51 + let (did, seq) = match &msg { 52 + SubscribeReposMessage::Commit(c) => (&c.repo, c.seq), 53 + SubscribeReposMessage::Identity(i) => (&i.did, i.seq), 54 + SubscribeReposMessage::Account(a) => (&a.did, a.seq), 55 + SubscribeReposMessage::Sync(s) => (&s.did, s.seq), 56 + _ => continue, 57 + }; 58 + 59 + if self.state.blocked_dids.contains_sync(did) { 60 + failed.push(msg); 61 + continue; 62 + } 63 + if deleted.contains(did) { 64 + continue; 65 + } 66 + 67 + match Self::process_message(&self.state, &mut batch, &msg, did) { 68 + Ok(ProcessResult::Ok) => {} 69 + Ok(ProcessResult::Deleted) => { 70 + deleted.insert(did.clone()); 71 + } 72 + Err(e) => { 73 + error!("failed to process buffered message for {did}: {e}"); 74 + db::check_poisoned_report(&e); 75 + failed.push(msg); 76 + } 77 + } 78 + 79 + self.state 80 + .cur_firehose 81 + .store(seq, std::sync::atomic::Ordering::SeqCst); 82 + } 83 + 84 + // commit all changes to db 85 + batch.commit().into_diagnostic()?; 86 + self.state 87 + .db 88 + .inner 89 + .persist(fjall::PersistMode::Buffer) 90 + .into_diagnostic()?; 91 + 92 + // add failed back to buf here so the ordering is preserved 93 + if !failed.is_empty() { 94 + buf.append(&mut failed); 95 + } 96 + 97 + // wait until we receive some messages 98 + // this does mean we will have an up to 1 second delay, before we send events to consumers 99 + // but thats reasonable imo, could also be configured of course 100 + let _ = handle.block_on(tokio::time::timeout( 101 + Duration::from_secs(1), 102 + self.rx.recv_many(&mut buf, BUF_SIZE), 103 + )); 104 + if buf.is_empty() { 105 + if self.rx.is_closed() { 106 + error!("ingestor crashed? 
shutting down buffer processor"); 107 + break; 108 + } 109 + continue; 110 + } 111 + } 112 + 113 + Ok(()) 114 + } 115 + 116 + fn process_message( 117 + state: &AppState, 118 + batch: &mut OwnedWriteBatch, 119 + msg: &BufferedMessage, 120 + did: &Did, 121 + ) -> Result<ProcessResult> { 122 + let RepoCheckResult::Ok(repo_state) = Self::check_repo_state(batch, state, did)? else { 123 + return Ok(ProcessResult::Ok); 124 + }; 125 + 126 + match msg { 127 + SubscribeReposMessage::Commit(commit) => { 128 + trace!("processing buffered commit for {did}"); 129 + 130 + if matches!(repo_state.rev, Some(ref rev) if commit.rev.as_str() <= rev.as_str()) { 131 + debug!( 132 + "skipping replayed event for {}: {} <= {}", 133 + did, 134 + commit.rev, 135 + repo_state.rev.as_ref().expect("we checked in if") 136 + ); 137 + return Ok(ProcessResult::Ok); 138 + } 139 + 140 + if let (Some(prev_repo), Some(prev_commit)) = (&repo_state.data, &commit.prev_data) 141 + && prev_repo != &prev_commit.0 142 + { 143 + warn!( 144 + "gap detected for {}: prev {} != stored {}. 
triggering backfill", 145 + did, prev_repo, prev_commit.0 146 + ); 147 + 148 + let mut batch = state.db.inner.batch(); 149 + ops::update_repo_status( 150 + &mut batch, 151 + &state.db, 152 + did, 153 + repo_state, 154 + RepoStatus::Backfilling, 155 + )?; 156 + batch.commit().into_diagnostic()?; 157 + 158 + send_backfill_req(state, did.clone().into_static())?; 159 + 160 + return Ok(ProcessResult::Ok); 161 + } 162 + 163 + ops::apply_commit(batch, &state.db, repo_state, &commit)?(); 164 + } 165 + SubscribeReposMessage::Identity(identity) => { 166 + debug!("processing buffered identity for {did}"); 167 + let handle = identity 168 + .handle 169 + .as_ref() 170 + .map(|h| h.to_cowstr().into_static()); 171 + 172 + let evt = IdentityEvt { 173 + did: did.clone().into_static(), 174 + handle, 175 + }; 176 + ops::emit_identity_event(&state.db, evt); 177 + } 178 + SubscribeReposMessage::Account(account) => { 179 + debug!("processing buffered account for {did}"); 180 + let evt = AccountEvt { 181 + did: did.clone().into_static(), 182 + active: account.active, 183 + status: account.status.as_ref().map(|s| s.to_cowstr().into_static()), 184 + }; 185 + 186 + if !account.active { 187 + use jacquard::api::com_atproto::sync::subscribe_repos::AccountStatus; 188 + match &account.status { 189 + Some(AccountStatus::Deleted) => { 190 + debug!("account {did} deleted, wiping data"); 191 + ops::delete_repo(batch, &state.db, did)?; 192 + return Ok(ProcessResult::Deleted); 193 + } 194 + status => { 195 + let status = match status { 196 + Some(status) => match status { 197 + AccountStatus::Deleted => { 198 + unreachable!("deleted account status is handled before") 199 + } 200 + AccountStatus::Takendown => RepoStatus::Takendown, 201 + AccountStatus::Suspended => RepoStatus::Suspended, 202 + AccountStatus::Deactivated => RepoStatus::Deactivated, 203 + AccountStatus::Throttled => { 204 + RepoStatus::Error("throttled".into()) 205 + } 206 + AccountStatus::Desynchronized => { 207 + 
RepoStatus::Error("desynchronized".into()) 208 + } 209 + AccountStatus::Other(s) => { 210 + warn!( 211 + "unknown account status for {did}, will put in error state: {s}" 212 + ); 213 + RepoStatus::Error(s.to_smolstr()) 214 + } 215 + }, 216 + None => { 217 + warn!("account {did} inactive but no status provided"); 218 + RepoStatus::Error("unknown".into()) 219 + } 220 + }; 221 + ops::update_repo_status(batch, &state.db, did, repo_state, status)?; 222 + } 223 + } 224 + } else { 225 + // normally we would initiate backfill here 226 + // but we don't have to do anything because: 227 + // 1. we handle changing repo status to Synced before this (in check repo state) 228 + // 2. initiating backfilling is also handled there 229 + } 230 + 231 + ops::emit_account_event(&state.db, evt); 232 + } 233 + _ => { 234 + warn!("unknown message type in buffer for {did}"); 235 + } 236 + } 237 + 238 + Ok(ProcessResult::Ok) 239 + } 240 + 241 + fn check_repo_state( 242 + batch: &mut OwnedWriteBatch, 243 + state: &AppState, 244 + did: &Did<'_>, 245 + ) -> Result<RepoCheckResult> { 246 + // check if we have this repo 247 + let repo_key = keys::repo_key(&did); 248 + let Some(state_bytes) = state.db.repos.get(&repo_key).into_diagnostic()? else { 249 + // we don't know this repo, but we are receiving events for it 250 + // this means we should backfill it before processing its events 251 + debug!("discovered new account {did} from firehose, queueing backfill"); 252 + 253 + let new_state = RepoState::backfilling(did); 254 + // using a separate batch here since we want to make it known its being backfilled 255 + // immediately. 
we could use the batch for the unit of work we are doing but 
256 + // then we wouldn't be able to start backfilling until the unit of work is done 
257 + let mut batch = state.db.inner.batch(); 
258 + 
259 + batch.insert( 
260 + &state.db.repos, 
261 + &repo_key, 
262 + crate::db::ser_repo_state(&new_state)?, 
263 + ); 
264 + batch.insert(&state.db.pending, &repo_key, &[]); 
265 + batch.commit().into_diagnostic()?; 
266 + 
267 + send_backfill_req(state, did.clone().into_static())?; 
268 + 
269 + return Ok(RepoCheckResult::Syncing); 
270 + }; 
271 + let mut repo_state = crate::db::deser_repo_state(&state_bytes)?.into_static(); 
272 + 
273 + // if we are backfilling or it is new, DON'T mark it as synced yet 
274 + // the backfill worker will do that when it finishes 
275 + match &repo_state.status { 
276 + RepoStatus::Synced => Ok(RepoCheckResult::Ok(repo_state)), 
277 + RepoStatus::Backfilling | RepoStatus::Error(_) => { 
278 + // repo is being backfilled or is in error state 
279 + // we don't touch the state because the backfill worker will do that 
280 + // we should not really get here because the backfill worker should have marked it as 
281 + // being worked on (blocked repos) meaning we would have returned earlier 
282 + debug!( 
283 + "ignoring active status for {did} as it is {:?}", 
284 + repo_state.status 
285 + ); 
286 + Ok(RepoCheckResult::Syncing) 
287 + } 
288 + RepoStatus::Deactivated | RepoStatus::Suspended | RepoStatus::Takendown => { 
289 + // if it was in deactivated/takendown/suspended state, we can mark it as synced 
290 + // because we are receiving live events now 
291 + repo_state = ops::update_repo_status( 
292 + batch, 
293 + &state.db, 
294 + &did, 
295 + repo_state, 
296 + RepoStatus::Synced, 
297 + )?; 
298 + Ok(RepoCheckResult::Ok(repo_state)) 
299 + } 
300 + } 
301 + } 
302 + }
+37 -25
src/main.rs
··· 1 1 mod api; 2 2 mod backfill; 3 - mod buffer; 4 3 mod config; 5 4 mod crawler; 6 5 mod db; ··· 10 9 mod state; 11 10 mod types; 12 11 13 - use crate::backfill::Worker; 14 - use crate::buffer::processor::BufferProcessor; 15 12 use crate::config::Config; 16 13 use crate::crawler::Crawler; 17 - use crate::db::Db; 18 - use crate::ingest::Ingestor; 14 + use crate::db::set_firehose_cursor; 15 + use crate::ingest::firehose::FirehoseIngestor; 19 16 use crate::state::AppState; 17 + use crate::{backfill::BackfillWorker, ingest::worker::FirehoseWorker}; 20 18 use futures::{future::BoxFuture, FutureExt, TryFutureExt}; 21 19 use miette::IntoDiagnostic; 22 20 use mimalloc::MiMalloc; 23 21 use std::sync::atomic::Ordering; 24 22 use std::sync::Arc; 25 - use tokio::task::spawn_blocking; 23 + use tokio::{sync::mpsc, task::spawn_blocking}; 26 24 use tracing::{error, info}; 27 25 28 26 #[global_allocator] ··· 37 35 38 36 info!("{cfg}"); 39 37 40 - let (state, backfill_rx, buffer_rx) = AppState::new(&cfg)?; 38 + let (state, backfill_rx) = AppState::new(&cfg)?; 39 + let (buffer_tx, buffer_rx) = mpsc::unbounded_channel(); 41 40 let state = Arc::new(state); 42 41 43 42 tokio::spawn( ··· 54 53 tokio::spawn({ 55 54 let state = state.clone(); 56 55 let timeout = cfg.repo_fetch_timeout; 57 - Worker::new(state, backfill_rx, timeout, cfg.backfill_concurrency_limit).run() 56 + BackfillWorker::new(state, backfill_rx, timeout, cfg.backfill_concurrency_limit).run() 58 57 }); 59 58 60 - let buffer_processor_task = tokio::spawn({ 59 + let firehose_worker = std::thread::spawn({ 61 60 let state = state.clone(); 62 - BufferProcessor::new(state, buffer_rx).run() 61 + let handle = tokio::runtime::Handle::current(); 62 + move || FirehoseWorker::new(state, buffer_rx).run(handle) 63 63 }); 64 64 65 65 if let Err(e) = spawn_blocking({ ··· 70 70 .into_diagnostic()? 
71 71 { 72 72 error!("failed to queue pending backfills: {e}"); 73 - Db::check_poisoned_report(&e); 73 + db::check_poisoned_report(&e); 74 74 } 75 75 76 76 if let Err(e) = spawn_blocking({ ··· 81 81 .into_diagnostic()? 82 82 { 83 83 error!("failed to queue gone backfills: {e}"); 84 - Db::check_poisoned_report(&e); 84 + db::check_poisoned_report(&e); 85 85 } 86 86 87 87 std::thread::spawn({ ··· 127 127 loop { 128 128 std::thread::sleep(persist_interval); 129 129 130 + // persist firehose cursor 130 131 let seq = state.cur_firehose.load(Ordering::SeqCst); 131 - const CURSOR_KEY: &[u8] = b"firehose_cursor"; 132 - if let Err(e) = state 133 - .db 134 - .cursors 135 - .insert(CURSOR_KEY, seq.to_string().into_bytes()) 136 - { 132 + if let Err(e) = set_firehose_cursor(&state.db, seq) { 137 133 error!("failed to save cursor: {e}"); 138 - Db::check_poisoned(&e); 134 + db::check_poisoned_report(&e); 139 135 } 140 136 137 + // persist counts 138 + // TODO: make this more durable 139 + if let Err(e) = db::persist_counts(&state.db) { 140 + error!("failed to persist counts: {e}"); 141 + db::check_poisoned_report(&e); 142 + } 143 + 144 + // persist journal 141 145 if let Err(e) = state.db.persist() { 142 146 error!("db persist failed: {e}"); 143 - Db::check_poisoned_report(&e); 147 + db::check_poisoned_report(&e); 144 148 } 145 149 } 146 150 } ··· 152 156 .run() 153 157 .inspect_err(|e| { 154 158 error!("crawler died: {e}"); 155 - Db::check_poisoned_report(&e); 159 + db::check_poisoned_report(&e); 156 160 }), 157 161 ); 158 162 } 159 163 160 - let ingestor = Ingestor::new(state.clone(), cfg.relay_host, cfg.full_network); 164 + let ingestor = 165 + FirehoseIngestor::new(state.clone(), buffer_tx, cfg.relay_host, cfg.full_network); 161 166 162 167 let res = futures::future::try_join_all::<[BoxFuture<_>; _]>([ 163 - Box::pin(buffer_processor_task.map(|r| r.into_diagnostic().flatten())), 168 + Box::pin( 169 + tokio::task::spawn_blocking(move || { 170 + firehose_worker 171 + .join() 172 
+ .map_err(|e| miette::miette!("buffer processor thread died: {e:?}")) 173 + }) 174 + .map(|r| r.into_diagnostic().flatten().flatten()), 175 + ), 164 176 Box::pin(ingestor.run()), 165 177 ]); 166 178 if let Err(e) = res.await { 167 179 error!("ingestor or buffer processor died: {e}"); 168 - Db::check_poisoned_report(&e); 180 + db::check_poisoned_report(&e); 169 181 } 170 182 171 183 if let Err(e) = state.db.persist() { 172 - Db::check_poisoned_report(&e); 184 + db::check_poisoned_report(&e); 173 185 return Err(e); 174 186 } 175 187
+126 -99
src/ops.rs
··· 1 - use crate::db::{keys, Db}; 2 - use crate::types::{AccountEvt, BroadcastEvent, IdentityEvt, MarshallableEvt, StoredEvent}; 1 + use crate::db::types::TrimmedDid; 2 + use crate::db::{self, keys, ser_repo_state, Db}; 3 + use crate::state::AppState; 4 + use crate::types::{ 5 + AccountEvt, BroadcastEvent, IdentityEvt, MarshallableEvt, RepoState, RepoStatus, ResyncState, 6 + StoredEvent, 7 + }; 8 + use fjall::OwnedWriteBatch; 3 9 use jacquard::api::com_atproto::sync::subscribe_repos::Commit; 4 10 use jacquard::cowstr::ToCowStr; 5 - use jacquard::IntoStatic; 11 + use jacquard::types::cid::Cid; 12 + use jacquard::CowStr; 6 13 use jacquard_repo::car::reader::parse_car_bytes; 7 - use miette::{IntoDiagnostic, Result}; 8 - use smol_str::{SmolStr, ToSmolStr}; 14 + use miette::{Context, IntoDiagnostic, Result}; 9 15 use std::collections::HashMap; 10 16 use std::sync::atomic::Ordering; 11 17 use std::time::Instant; 12 18 use tracing::{debug, trace}; 19 + 20 + pub fn send_backfill_req(state: &AppState, did: jacquard::types::did::Did<'static>) -> Result<()> { 21 + state 22 + .backfill_tx 23 + .send(did.clone()) 24 + .map_err(|_| miette::miette!("failed to send backfill request for {did}"))?; 25 + let _ = state.blocked_dids.insert_sync(did); 26 + Ok(()) 27 + } 13 28 14 29 // emitting identity is ephemeral 15 30 // we dont replay these, consumers can just fetch identity themselves if they need it ··· 37 52 let _ = db.event_tx.send(BroadcastEvent::Ephemeral(marshallable)); 38 53 } 39 54 40 - pub fn delete_repo(db: &Db, did: &jacquard::types::did::Did) -> Result<()> { 55 + pub fn delete_repo<'batch>( 56 + batch: &'batch mut OwnedWriteBatch, 57 + db: &Db, 58 + did: &jacquard::types::did::Did, 59 + ) -> Result<()> { 41 60 debug!("deleting repo {did}"); 42 - let mut batch = db.inner.batch(); 43 61 let repo_key = keys::repo_key(did); 44 62 45 63 // 1. 
delete from repos, pending, resync 46 - batch.remove(&db.repos, repo_key); 47 - batch.remove(&db.pending, repo_key); 48 - batch.remove(&db.resync, repo_key); 49 - 50 - // 2. delete from buffer (prefix: repo_key + SEP) 51 - let mut buffer_prefix = repo_key.to_vec(); 52 - buffer_prefix.push(keys::SEP); 53 - for guard in db.buffer.prefix(&buffer_prefix) { 54 - let k = guard.key().into_diagnostic()?; 55 - batch.remove(&db.buffer, k); 56 - } 64 + batch.remove(&db.repos, &repo_key); 65 + batch.remove(&db.pending, &repo_key); 66 + batch.remove(&db.resync, &repo_key); 57 67 58 - // 3. delete from records (prefix: repo_key + SEP) 59 - let mut records_prefix = repo_key.to_vec(); 68 + // 2. delete from records (prefix: repo_key + SEP) 69 + let mut records_prefix = repo_key.as_bytes().to_vec(); 60 70 records_prefix.push(keys::SEP); 61 - let mut deleted_count = 0; 62 - 63 71 for guard in db.records.prefix(&records_prefix) { 64 72 let k = guard.key().into_diagnostic()?; 65 73 batch.remove(&db.records, k); 66 - deleted_count += 1; 67 74 } 68 75 69 - // 4. reset collection counts 76 + // 3. 
reset collection counts 70 77 let mut count_prefix = Vec::new(); 71 78 count_prefix.push(b'r'); 72 79 count_prefix.push(keys::SEP); 73 - count_prefix.extend_from_slice(keys::did_prefix(did).as_bytes()); 80 + count_prefix.extend_from_slice(TrimmedDid::from(did).as_bytes()); 74 81 count_prefix.push(keys::SEP); 75 82 76 83 for guard in db.counts.prefix(&count_prefix) { ··· 78 85 batch.remove(&db.counts, k); 79 86 } 80 87 81 - batch.commit().into_diagnostic()?; 82 - 83 - // update global record count 84 - if deleted_count > 0 { 85 - tokio::spawn(db.increment_count(keys::count_keyspace_key("records"), -deleted_count)); 86 - } 87 - 88 88 Ok(()) 89 89 } 90 90 91 - pub fn update_repo_status( 91 + pub fn update_repo_status<'batch, 's>( 92 + batch: &'batch mut OwnedWriteBatch, 92 93 db: &Db, 93 94 did: &jacquard::types::did::Did, 94 - status: crate::types::RepoStatus, 95 - ) -> Result<()> { 96 - debug!("updating repo status for {did} to {status:?}"); 97 - let (updated, batch) = 98 - Db::update_repo_state(db.inner.batch(), &db.repos, did, |state, _val| { 99 - state.status = status.clone(); 100 - state.last_updated_at = chrono::Utc::now().timestamp(); 101 - Ok((true, ())) 102 - })?; 95 + mut repo_state: RepoState<'s>, 96 + new_status: RepoStatus, 97 + ) -> Result<RepoState<'s>> { 98 + debug!("updating repo status for {did} to {new_status:?}"); 103 99 104 - if updated.is_some() { 105 - batch.commit().into_diagnostic()?; 100 + let key = keys::repo_key(did); 101 + 102 + // manage queues 103 + match &new_status { 104 + RepoStatus::Synced => { 105 + batch.remove(&db.pending, &key); 106 + batch.remove(&db.resync, &key); 107 + } 108 + RepoStatus::Backfilling => { 109 + batch.insert(&db.pending, &key, &[]); 110 + batch.remove(&db.resync, &key); 111 + } 112 + RepoStatus::Error(msg) => { 113 + batch.remove(&db.pending, &key); 114 + let resync_state = ResyncState::Error { 115 + message: msg.clone(), 116 + retry_count: 0, 117 + next_retry: chrono::Utc::now().timestamp(), 118 + }; 119 + 
batch.insert( 120 + &db.resync, 121 + &key, 122 + rmp_serde::to_vec(&resync_state).into_diagnostic()?, 123 + ); 124 + } 125 + RepoStatus::Deactivated | RepoStatus::Takendown | RepoStatus::Suspended => { 126 + batch.remove(&db.pending, &key); 127 + let resync_state = ResyncState::Gone { 128 + status: new_status.clone(), 129 + }; 130 + batch.insert( 131 + &db.resync, 132 + &key, 133 + rmp_serde::to_vec(&resync_state).into_diagnostic()?, 134 + ); 135 + } 106 136 } 107 - Ok(()) 137 + 138 + repo_state.status = new_status; 139 + repo_state.last_updated_at = chrono::Utc::now().timestamp(); 140 + 141 + batch.insert(&db.repos, &key, ser_repo_state(&repo_state)?); 142 + 143 + Ok(repo_state) 108 144 } 109 145 110 - pub fn apply_commit(db: &Db, commit: &Commit<'_>, live: bool) -> Result<()> { 146 + pub fn apply_commit<'batch, 'db>( 147 + batch: &'batch mut OwnedWriteBatch, 148 + db: &'db Db, 149 + mut repo_state: RepoState, 150 + commit: &Commit<'_>, 151 + ) -> Result<impl FnOnce() + use<'db>> { 111 152 let did = &commit.repo; 112 153 debug!("applying commit {} for {did}", &commit.commit); 113 154 ··· 121 162 122 163 trace!("parsed car for {did} in {:?}", start.elapsed()); 123 164 124 - let (_, mut batch) = Db::update_repo_state(db.inner.batch(), &db.repos, did, |state, _| { 125 - state.rev = commit.rev.as_str().into(); 126 - state.data = parsed.root.to_smolstr(); 127 - state.last_updated_at = chrono::Utc::now().timestamp(); 128 - Ok((true, ())) 129 - })?; 165 + repo_state.rev = Some(commit.rev.clone()); 166 + repo_state.data = Some(Cid::ipld(parsed.root)); 167 + repo_state.last_updated_at = chrono::Utc::now().timestamp(); 168 + 169 + batch.insert(&db.repos, keys::repo_key(did), ser_repo_state(&repo_state)?); 130 170 131 171 // store all blocks in the CAS 132 172 for (cid, bytes) in &parsed.blocks { ··· 140 180 // 2. 
iterate ops and update records index 141 181 let mut records_delta = 0; 142 182 let mut events_count = 0; 143 - let mut collection_deltas: HashMap<SmolStr, i64> = HashMap::new(); 183 + let mut collection_deltas: HashMap<&str, i64> = HashMap::new(); 144 184 145 185 for op in &commit.ops { 146 - let parts: Vec<&str> = op.path.splitn(2, '/').collect(); 147 - if parts.len() != 2 { 148 - continue; 149 - } 150 - let collection = parts[0]; 151 - let rkey = parts[1]; 152 - 186 + let (collection, rkey) = parse_path(&op.path)?; 153 187 let db_key = keys::record_key(did, collection, rkey); 154 188 155 189 let event_id = db.next_event_id.fetch_add(1, Ordering::SeqCst); 156 190 157 - let mut cid_str = None; 158 - 159 191 match op.action.as_str() { 160 192 "create" | "update" => { 161 193 let Some(cid) = &op.cid else { ··· 163 195 }; 164 196 let s = smol_str::SmolStr::from(cid.as_str()); 165 197 batch.insert(&db.records, db_key, s.as_bytes().to_vec()); 166 - cid_str = Some(s); 167 198 168 199 // accumulate counts 169 200 if op.action.as_str() == "create" { 170 201 records_delta += 1; 171 - *collection_deltas 172 - .entry(collection.to_smolstr()) 173 - .or_default() += 1; 202 + *collection_deltas.entry(collection).or_default() += 1; 174 203 } 175 204 } 176 205 "delete" => { ··· 178 207 179 208 // accumulate counts 180 209 records_delta -= 1; 181 - *collection_deltas 182 - .entry(collection.to_smolstr()) 183 - .or_default() -= 1; 210 + *collection_deltas.entry(collection).or_default() -= 1; 184 211 } 185 212 _ => {} 186 213 } 187 214 188 - let evt = StoredEvent::Record { 189 - live, 190 - did: did.clone().into_static(), 191 - rev: commit.rev.as_str().into(), 192 - collection: collection.into(), 193 - rkey: rkey.into(), 194 - action: op.action.as_str().into(), 195 - cid: cid_str, 215 + let evt = StoredEvent { 216 + did: TrimmedDid::from(did), 217 + rev: CowStr::Borrowed(commit.rev.as_str()), 218 + collection: CowStr::Borrowed(collection), 219 + rkey: CowStr::Borrowed(rkey), 220 + 
action: CowStr::Borrowed(op.action.as_str()), 221 + cid: op.cid.as_ref().map(|c| c.0.clone()), 196 222 }; 197 223 198 224 let bytes = rmp_serde::to_vec(&evt).into_diagnostic()?; 199 - batch.insert(&db.events, keys::event_key(event_id as i64), bytes); 225 + batch.insert(&db.events, keys::event_key(event_id), bytes); 200 226 events_count += 1; 201 227 } 202 228 203 229 let start = Instant::now(); 204 230 205 - batch.commit().into_diagnostic()?; 206 231 trace!("committed sync batch for {did} in {:?}", start.elapsed()); 207 232 233 + // update counts 208 234 let blocks_count = parsed.blocks.len() as i64; 209 - tokio::spawn({ 210 - let blocks_fut = (blocks_count > 0) 211 - .then(|| db.increment_count(keys::count_keyspace_key("blocks"), blocks_count)); 212 - let records_fut = (records_delta != 0) 213 - .then(|| db.increment_count(keys::count_keyspace_key("records"), records_delta)); 214 - let events_fut = (events_count > 0) 215 - .then(|| db.increment_count(keys::count_keyspace_key("events"), events_count)); 216 - let collections_fut = collection_deltas 217 - .into_iter() 218 - .map(|(col, delta)| db.increment_count(keys::count_collection_key(&did, &col), delta)) 219 - .collect::<Vec<_>>(); 220 - futures::future::join_all( 221 - blocks_fut 222 - .into_iter() 223 - .chain(records_fut) 224 - .chain(events_fut) 225 - .chain(collections_fut), 226 - ) 227 - }); 235 + for (col, delta) in collection_deltas { 236 + db::update_record_count(batch, db, did, col, delta)?; 237 + } 228 238 229 239 let _ = db.event_tx.send(BroadcastEvent::Persisted( 230 240 db.next_event_id.load(Ordering::SeqCst) - 1, 231 241 )); 232 242 233 - Ok(()) 243 + Ok(move || { 244 + if blocks_count > 0 { 245 + db.update_count("blocks", blocks_count); 246 + } 247 + if records_delta != 0 { 248 + db.update_count("records", records_delta); 249 + } 250 + if events_count > 0 { 251 + db.update_count("events", events_count); 252 + } 253 + }) 254 + } 255 + 256 + pub fn parse_path(path: &str) -> Result<(&str, &str)> { 
257 + let mut parts = path.splitn(2, '/'); 258 + let collection = parts.next().wrap_err("missing collection")?; 259 + let rkey = parts.next().wrap_err("missing rkey")?; 260 + Ok((collection, rkey)) 234 261 }
+1 -9
src/state.rs
··· 5 5 6 6 use miette::Result; 7 7 8 - use crate::buffer::BufferedMessage; 9 8 use crate::{config::Config, db::Db, resolver::Resolver}; 10 9 11 10 pub type BackfillTx = mpsc::UnboundedSender<Did<'static>>; 12 11 pub type BackfillRx = mpsc::UnboundedReceiver<Did<'static>>; 13 12 14 - pub type BufferTx = mpsc::UnboundedSender<BufferedMessage>; 15 - pub type BufferRx = mpsc::UnboundedReceiver<BufferedMessage>; 16 - 17 13 pub struct AppState { 18 14 pub db: Db, 19 15 pub backfill_tx: BackfillTx, 20 16 pub resolver: Resolver, 21 17 pub cur_firehose: AtomicI64, 22 18 pub blocked_dids: scc::HashSet<Did<'static>>, 23 - pub buffer_tx: BufferTx, 24 19 } 25 20 26 21 impl AppState { 27 - pub fn new(config: &Config) -> Result<(Self, BackfillRx, BufferRx)> { 22 + pub fn new(config: &Config) -> Result<(Self, BackfillRx)> { 28 23 let db = Db::open( 29 24 &config.database_path, 30 25 config.cache_size, ··· 32 27 )?; 33 28 let resolver = Resolver::new(config.plc_url.clone()); 34 29 let (backfill_tx, backfill_rx) = mpsc::unbounded_channel(); 35 - let (buffer_tx, buffer_rx) = mpsc::unbounded_channel(); 36 30 37 31 Ok(( 38 32 Self { ··· 41 35 resolver, 42 36 cur_firehose: AtomicI64::new(0), 43 37 blocked_dids: scc::HashSet::new(), 44 - buffer_tx, 45 38 }, 46 39 backfill_rx, 47 - buffer_rx, 48 40 )) 49 41 } 50 42 }
+70 -32
src/types.rs
··· 1 + use std::fmt::Display; 2 + 3 + use jacquard::{ 4 + types::{cid::Cid, tid::Tid}, 5 + CowStr, IntoStatic, 6 + }; 1 7 use jacquard_common::types::string::Did; 2 8 use serde::{Deserialize, Serialize}; 3 9 use serde_json::Value; 4 10 use smol_str::SmolStr; 5 11 6 - // from src/state.rs 12 + use crate::db::types::TrimmedDid; 7 13 8 14 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 9 15 pub enum RepoStatus { 10 - New, 11 16 Backfilling, 12 17 Synced, 13 18 Error(SmolStr), ··· 16 21 Suspended, 17 22 } 18 23 24 + impl Display for RepoStatus { 25 + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 26 + match self { 27 + RepoStatus::Backfilling => write!(f, "backfilling"), 28 + RepoStatus::Synced => write!(f, "synced"), 29 + RepoStatus::Error(e) => write!(f, "error({e})"), 30 + RepoStatus::Deactivated => write!(f, "deactivated"), 31 + RepoStatus::Takendown => write!(f, "takendown"), 32 + RepoStatus::Suspended => write!(f, "suspended"), 33 + } 34 + } 35 + } 36 + 19 37 #[derive(Debug, Clone, Serialize, Deserialize)] 20 - pub struct RepoState { 21 - pub did: SmolStr, 38 + #[serde(bound(deserialize = "'i: 'de"))] 39 + pub struct RepoState<'i> { 40 + #[serde(borrow)] 41 + pub did: TrimmedDid<'i>, 22 42 pub status: RepoStatus, 23 - pub rev: SmolStr, 24 - pub data: SmolStr, 43 + pub rev: Option<Tid>, 44 + #[serde(borrow)] 45 + pub data: Option<Cid<'i>>, 25 46 pub last_seq: Option<i64>, 26 47 pub last_updated_at: i64, // unix timestamp 27 48 pub handle: Option<SmolStr>, 28 49 } 29 50 30 - impl RepoState { 31 - pub fn new(did: Did) -> Self { 51 + impl<'i> RepoState<'i> { 52 + pub fn backfilling(did: &'i Did<'i>) -> Self { 32 53 Self { 33 - did: did.as_str().into(), 34 - status: RepoStatus::New, 35 - rev: "".into(), 36 - data: "".into(), 54 + did: TrimmedDid::from(did), 55 + status: RepoStatus::Backfilling, 56 + rev: None, 57 + data: None, 37 58 last_seq: None, 38 59 last_updated_at: chrono::Utc::now().timestamp(), 39 60 handle: None, ··· 41 
62 } 42 63 } 43 64 65 + impl<'i> IntoStatic for RepoState<'i> { 66 + type Output = RepoState<'static>; 67 + 68 + fn into_static(self) -> Self::Output { 69 + RepoState { 70 + did: self.did.into_static(), 71 + status: self.status, 72 + rev: self.rev, 73 + data: self.data.map(|c| c.into_static()), 74 + last_seq: self.last_seq, 75 + last_updated_at: self.last_updated_at, 76 + handle: self.handle, 77 + } 78 + } 79 + } 80 + 44 81 // from src/backfill/resync_state.rs 45 82 46 83 #[derive(Debug, Clone, Serialize, Deserialize)] ··· 86 123 87 124 #[derive(Clone, Debug)] 88 125 pub enum BroadcastEvent { 126 + #[allow(dead_code)] 89 127 Persisted(u64), 90 128 Ephemeral(MarshallableEvt<'static>), 91 129 } ··· 95 133 pub live: bool, 96 134 #[serde(borrow)] 97 135 pub did: Did<'i>, 98 - pub rev: SmolStr, 99 - pub collection: SmolStr, 100 - pub rkey: SmolStr, 101 - pub action: SmolStr, 136 + pub rev: CowStr<'i>, 137 + pub collection: CowStr<'i>, 138 + pub rkey: CowStr<'i>, 139 + pub action: CowStr<'i>, 102 140 #[serde(skip_serializing_if = "Option::is_none")] 103 141 pub record: Option<Value>, 104 142 #[serde(skip_serializing_if = "Option::is_none")] 105 - pub cid: Option<SmolStr>, 143 + pub cid: Option<Cid<'i>>, 106 144 } 107 145 108 146 #[derive(Debug, Serialize, Deserialize, Clone)] ··· 110 148 #[serde(borrow)] 111 149 pub did: Did<'i>, 112 150 #[serde(skip_serializing_if = "Option::is_none")] 113 - pub handle: Option<SmolStr>, 151 + pub handle: Option<CowStr<'i>>, 114 152 } 115 153 116 154 #[derive(Debug, Serialize, Deserialize, Clone)] ··· 119 157 pub did: Did<'i>, 120 158 pub active: bool, 121 159 #[serde(skip_serializing_if = "Option::is_none")] 122 - pub status: Option<SmolStr>, 160 + pub status: Option<CowStr<'i>>, 123 161 } 124 162 125 163 #[derive(Debug, Serialize, Deserialize, Clone)] 126 - pub enum StoredEvent<'i> { 127 - Record { 128 - live: bool, 129 - #[serde(borrow)] 130 - did: Did<'i>, 131 - rev: SmolStr, 132 - collection: SmolStr, 133 - rkey: SmolStr, 134 - 
action: SmolStr, 135 - cid: Option<SmolStr>, 136 - }, 164 + #[serde(bound(deserialize = "'i: 'de"))] 165 + pub struct StoredEvent<'i> { 166 + #[serde(borrow)] 167 + pub did: TrimmedDid<'i>, 168 + #[serde(borrow)] 169 + pub rev: CowStr<'i>, 170 + #[serde(borrow)] 171 + pub collection: CowStr<'i>, 172 + #[serde(borrow)] 173 + pub rkey: CowStr<'i>, 137 174 #[serde(borrow)] 138 - Identity(IdentityEvt<'i>), 175 + pub action: CowStr<'i>, 139 176 #[serde(borrow)] 140 - Account(AccountEvt<'i>), 177 + #[serde(skip_serializing_if = "Option::is_none")] 178 + pub cid: Option<Cid<'i>>, 141 179 }