AT Protocol indexer with flexible filtering, XRPC queries, and a cursor-backed event stream, built on fjall
at-protocol atproto indexer rust fjall

[ingest] shard worker threads and replace backfill channel with notify

Dispatch firehose messages to worker shards by consistent DID hash.
Replace the mpsc backfill channel with Notify + pending-keyspace polling.
Buffer live commits for repos mid-backfill in resync_buffer; they are drained
on completion via BackfillFinished messages routed to the owning shard.

ptr.pet 68cc0286 5996ab14

verified
+583 -390
+3 -11
src/api/repo.rs
··· 1 1 use crate::api::AppState; 2 2 use crate::db::{Db, keys, ser_repo_state}; 3 - use crate::ops::send_backfill_req; 4 3 use crate::types::RepoState; 5 4 use axum::{Json, Router, extract::State, http::StatusCode, routing::post}; 6 5 use jacquard::types::did::Did; ··· 25 24 let db = &state.db; 26 25 let mut batch = db.inner.batch(); 27 26 let mut added = 0; 28 - let mut to_backfill = Vec::new(); 29 27 30 28 for did_str in req.dids { 31 29 let did = Did::new_owned(did_str.as_str()) ··· 43 41 batch.insert(&db.pending, &did_key, Vec::new()); 44 42 45 43 added += 1; 46 - 47 - let jacquard_did = Did::new_owned(did.as_str()) 48 - .map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?; 49 - to_backfill.push(jacquard_did); 50 44 } 51 45 } 52 46 ··· 55 49 .await 56 50 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 57 51 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?; 52 + 58 53 state.db.update_count_async("repos", added).await; 59 54 state.db.update_count_async("pending", added).await; 60 55 61 - // trigger backfill 62 - for did in to_backfill { 63 - send_backfill_req(&state, did) 64 - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; 65 - } 56 + // trigger backfill worker 57 + state.notify_backfill(); 66 58 } 67 59 Ok(StatusCode::OK) 68 60 }
+10 -40
src/backfill/manager.rs
··· 7 7 use std::time::Duration; 8 8 use tracing::{debug, error, info}; 9 9 10 - pub fn queue_pending_backfills(state: &AppState) -> Result<()> { 11 - info!("scanning for pending backfills..."); 12 - let mut count = 0; 13 - 14 - for guard in state.db.pending.iter() { 15 - let key = guard.key().into_diagnostic()?; 16 - let did = match TrimmedDid::try_from(key.as_ref()) { 17 - Ok(did) => did.to_did(), 18 - Err(e) => { 19 - error!("invalid did in db, skipping: {e}"); 20 - continue; 21 - } 22 - }; 23 - 24 - debug!("queuing did {did}"); 25 - if let Err(e) = state.backfill_tx.send(did.clone()) { 26 - error!("failed to queue pending backfill for did:{did}: {e}"); 27 - } else { 28 - count += 1; 29 - } 30 - } 31 - 32 - info!("queued {count} pending backfills"); 33 - Ok(()) 34 - } 35 - 36 10 pub fn queue_gone_backfills(state: &Arc<AppState>) -> Result<()> { 37 - info!("scanning for deactivated/takendown repos to retry..."); 11 + debug!("scanning for deactivated/takendown repos to retry..."); 38 12 let mut count = 0; 39 13 40 14 for guard in state.db.resync.iter() { ··· 49 23 50 24 if let Ok(resync_state) = rmp_serde::from_slice::<ResyncState>(&val) { 51 25 if matches!(resync_state, ResyncState::Gone { .. 
}) { 52 - info!("queuing retry for gone repo: {did}"); 26 + debug!("queuing retry for gone repo: {did}"); 53 27 54 28 // move back to pending 55 29 let mut batch = state.db.inner.batch(); ··· 64 38 batch.insert(&state.db.repos, &repo_key, ser_repo_state(&repo_state)?); 65 39 } 66 40 41 + state.db.update_count("resync", -1); 42 + state.db.update_count("pending", 1); 67 43 batch.commit().into_diagnostic()?; 68 44 69 - if let Err(e) = state.backfill_tx.send(did.clone()) { 70 - error!("failed to queue retry for {did}: {e}"); 71 - } else { 72 - count += 1; 73 - } 45 + state.notify_backfill(); 46 + count += 1; 74 47 } 75 48 } 76 49 } ··· 110 83 rmp_serde::from_slice::<ResyncState>(&value) 111 84 { 112 85 if next_retry <= now { 113 - info!("retrying backfill for {did}"); 86 + debug!("retrying backfill for {did}"); 114 87 115 88 // move back to pending 89 + state.db.update_count("pending", 1); 116 90 if let Err(e) = db.pending.insert(key, Vec::new()) { 117 91 error!("failed to move {did} to pending: {e}"); 118 92 db::check_poisoned(&e); 119 93 continue; 120 94 } 121 95 122 - // queue 123 - if let Err(e) = state.backfill_tx.send(did.clone()) { 124 - error!("failed to queue retry for {did}: {e}"); 125 - } else { 126 - count += 1; 127 - } 96 + state.notify_backfill(); 97 + count += 1; 128 98 } 129 99 } 130 100 }
+96 -30
src/backfill/mod.rs
··· 1 1 use crate::db::types::{DbAction, DbRkey, DbTid, TrimmedDid}; 2 2 use crate::db::{self, Db, keys, ser_repo_state}; 3 3 use crate::ops; 4 - use crate::state::{AppState, BackfillRx}; 4 + use crate::state::AppState; 5 5 use crate::types::{AccountEvt, BroadcastEvent, RepoState, RepoStatus, ResyncState, StoredEvent}; 6 - use futures::TryFutureExt; 6 + 7 7 use jacquard::api::com_atproto::sync::get_repo::{GetRepo, GetRepoError}; 8 8 use jacquard::error::{ClientError, ClientErrorKind}; 9 9 use jacquard::types::cid::Cid; ··· 24 24 25 25 pub mod manager; 26 26 27 + use crate::ingest::{BufferTx, IngestMessage}; 28 + 27 29 pub struct BackfillWorker { 28 30 state: Arc<AppState>, 29 - rx: BackfillRx, 31 + buffer_tx: BufferTx, 30 32 http: reqwest::Client, 31 33 semaphore: Arc<Semaphore>, 32 34 verify_signatures: bool, 35 + in_flight: Arc<scc::HashSet<Did<'static>>>, 33 36 } 34 37 35 38 impl BackfillWorker { 36 39 pub fn new( 37 40 state: Arc<AppState>, 38 - rx: BackfillRx, 41 + buffer_tx: BufferTx, 39 42 timeout: Duration, 40 43 concurrency_limit: usize, 41 44 verify_signatures: bool, 42 45 ) -> Self { 43 46 Self { 44 47 state, 45 - rx, 48 + buffer_tx, 46 49 http: reqwest::Client::builder() 47 50 .timeout(timeout) 48 51 .zstd(true) ··· 52 55 .expect("failed to build http client"), 53 56 semaphore: Arc::new(Semaphore::new(concurrency_limit)), 54 57 verify_signatures, 58 + in_flight: Arc::new(scc::HashSet::new()), 55 59 } 56 60 } 61 + } 57 62 58 - pub async fn run(mut self) { 63 + struct InFlightGuard { 64 + did: Did<'static>, 65 + set: Arc<scc::HashSet<Did<'static>>>, 66 + } 67 + 68 + impl Drop for InFlightGuard { 69 + fn drop(&mut self) { 70 + let _ = self.set.remove_sync(&self.did); 71 + } 72 + } 73 + 74 + impl BackfillWorker { 75 + pub async fn run(self) { 59 76 info!("backfill worker started"); 60 - while let Some(did) = self.rx.recv().await { 61 - let permit = self 62 - .semaphore 63 - .clone() 64 - .acquire_owned() 65 - .await 66 - .expect("semaphore closed"); 77 + 
loop { 78 + let mut spawned = 0; 79 + 80 + for guard in self.state.db.pending.iter() { 81 + let key = match guard.key() { 82 + Ok(k) => k, 83 + Err(e) => { 84 + error!("failed to read pending key: {e}"); 85 + db::check_poisoned(&e); 86 + continue; 87 + } 88 + }; 67 89 68 - tokio::spawn( 69 - Self::process_did_wrapper( 70 - self.state.clone(), 71 - self.http.clone(), 72 - did.clone(), 73 - permit, 74 - self.verify_signatures, 75 - ) 76 - .inspect_err(move |e| { 77 - error!("backfill process failed for {did}: {e}"); 78 - db::check_poisoned_report(e); 79 - }), 80 - ); 90 + let did = match TrimmedDid::try_from(key.as_ref()) { 91 + Ok(d) => d.to_did(), 92 + Err(e) => { 93 + error!("invalid did in pending: {e}"); 94 + continue; 95 + } 96 + }; 97 + 98 + if self.in_flight.contains_sync(&did) { 99 + continue; 100 + } 101 + let _ = self.in_flight.insert_sync(did.clone().into_static()); 102 + 103 + let permit = match self.semaphore.clone().try_acquire_owned() { 104 + Ok(p) => p, 105 + Err(_) => break, 106 + }; 107 + 108 + let guard = InFlightGuard { 109 + did: did.clone().into_static(), 110 + set: self.in_flight.clone(), 111 + }; 112 + 113 + let state = self.state.clone(); 114 + let http = self.http.clone(); 115 + let buffer_tx_clone = self.buffer_tx.clone(); 116 + let did_clone = did.clone(); 117 + let verify = self.verify_signatures; 118 + 119 + tokio::spawn(async move { 120 + let _guard = guard; 121 + Self::process_did_wrapper( 122 + state, 123 + http, 124 + buffer_tx_clone, 125 + did_clone.clone(), 126 + permit, 127 + verify, 128 + ) 129 + .await 130 + .inspect_err(move |e| { 131 + error!("backfill process failed for {did_clone}: {e}"); 132 + db::check_poisoned_report(e); 133 + }) 134 + }); 135 + 136 + spawned += 1; 137 + } 138 + 139 + if spawned == 0 { 140 + self.state.backfill_notify.notified().await; 141 + } 81 142 } 82 143 } 83 144 84 145 async fn process_did_wrapper( 85 146 state: Arc<AppState>, 86 147 http: reqwest::Client, 148 + buffer_tx: BufferTx, 87 149 did: 
Did<'static>, 88 150 _permit: tokio::sync::OwnedSemaphorePermit, 89 151 verify_signatures: bool, ··· 132 194 }) 133 195 .await 134 196 .into_diagnostic()??; 197 + 198 + // Notify completion to worker shard 199 + if let Err(e) = buffer_tx.send(IngestMessage::BackfillFinished(did.clone())) { 200 + error!("failed to send BackfillFinished for {did}: {e}"); 201 + } 135 202 } 136 203 Err(e) => { 137 204 let mut was_ratelimited = false; ··· 232 299 } 233 300 } 234 301 235 - // unblock buffer processing for this DID 236 - state.blocked_dids.remove_async(&did).await; 302 + // wake worker to pick up more 303 + state.backfill_notify.notify_one(); 237 304 Ok(()) 238 305 } 239 306 ··· 512 579 count += 1; 513 580 } 514 581 515 - // 6. update status to synced 516 - state.status = RepoStatus::Synced; 582 + // 6. update data, status is updated in worker shard 517 583 state.rev = Some(rev.clone().into()); 518 584 state.data = Some(root_commit.data); 519 585 state.last_updated_at = chrono::Utc::now().timestamp();
+3 -4
src/crawler/mod.rs
··· 1 1 use crate::db::{Db, keys, ser_repo_state}; 2 - use crate::ops::send_backfill_req; 3 2 use crate::state::AppState; 4 3 use crate::types::RepoState; 5 4 use jacquard::api::com_atproto::sync::list_repos::{ListRepos, ListReposOutput}; ··· 120 119 self.state.db.update_count_async("pending", count).await; 121 120 } 122 121 123 - // 5. queue for backfill 124 - for did in to_queue { 125 - send_backfill_req(&self.state, did)?; 122 + // 5. notify backfill worker 123 + if !to_queue.is_empty() { 124 + self.state.notify_backfill(); 126 125 } 127 126 128 127 if cursor.is_none() {
+4 -4
src/ingest/firehose.rs
··· 1 1 use crate::db::{self, Db, keys}; 2 - use crate::ingest::BufferTx; 2 + use crate::ingest::{BufferTx, IngestMessage}; 3 3 use crate::state::AppState; 4 4 use jacquard::api::com_atproto::sync::subscribe_repos::{SubscribeRepos, SubscribeReposMessage}; 5 5 use jacquard::types::did::Did; ··· 17 17 buffer_tx: BufferTx, 18 18 relay_host: Url, 19 19 full_network: bool, 20 - verify_signatures: bool, 20 + _verify_signatures: bool, 21 21 } 22 22 23 23 impl FirehoseIngestor { ··· 33 33 buffer_tx, 34 34 relay_host, 35 35 full_network, 36 - verify_signatures, 36 + _verify_signatures: verify_signatures, 37 37 } 38 38 } 39 39 ··· 114 114 // }); 115 115 // } 116 116 117 - if let Err(e) = self.buffer_tx.send(msg) { 117 + if let Err(e) = self.buffer_tx.send(IngestMessage::Firehose(msg)) { 118 118 error!("failed to send message to buffer processor: {e}"); 119 119 } 120 120 }
+9 -1
src/ingest/mod.rs
··· 4 4 pub mod firehose; 5 5 pub mod worker; 6 6 7 - pub type BufferedMessage = SubscribeReposMessage<'static>; 7 + use jacquard::types::did::Did; 8 + 9 + #[derive(Debug)] 10 + pub enum IngestMessage { 11 + Firehose(SubscribeReposMessage<'static>), 12 + BackfillFinished(Did<'static>), 13 + } 14 + 15 + pub type BufferedMessage = IngestMessage; 8 16 9 17 pub type BufferTx = mpsc::UnboundedSender<BufferedMessage>; 10 18 #[allow(dead_code)]
+405 -248
src/ingest/worker.rs
··· 1 1 use crate::db::{self, keys}; 2 - use crate::ingest::BufferedMessage; 3 - use crate::ops::{self, send_backfill_req}; 2 + use crate::ingest::{BufferedMessage, IngestMessage}; 3 + use crate::ops; 4 4 use crate::resolver::NoSigningKeyError; 5 5 use crate::state::AppState; 6 6 use crate::types::{AccountEvt, BroadcastEvent, IdentityEvt, RepoState, RepoStatus}; 7 7 use jacquard::api::com_atproto::sync::subscribe_repos::SubscribeReposMessage; 8 8 9 9 use fjall::OwnedWriteBatch; 10 - use futures::future::join_all; 10 + 11 11 use jacquard::cowstr::ToCowStr; 12 + use jacquard::types::crypto::PublicKey; 12 13 use jacquard::types::did::Did; 14 + use jacquard_api::com_atproto::sync::subscribe_repos::Commit; 13 15 use jacquard_common::IntoStatic; 14 - use jacquard_common::types::crypto::PublicKey; 15 16 use jacquard_repo::error::CommitError; 16 - use miette::{Diagnostic, IntoDiagnostic, Result}; 17 + use miette::{Context, Diagnostic, IntoDiagnostic, Result}; 17 18 use smol_str::ToSmolStr; 18 - use std::collections::{HashMap, HashSet}; 19 + use std::collections::{HashMap, HashSet, hash_map::DefaultHasher}; 20 + use std::hash::{Hash, Hasher}; 19 21 use std::sync::Arc; 20 - use std::time::Duration; 21 22 use tokio::sync::mpsc; 22 - use tracing::{debug, error, trace, warn}; 23 + use tracing::{debug, error, info, trace, warn}; 23 24 24 25 #[derive(Debug)] 25 26 struct KeyFetchError(miette::Report); ··· 70 71 } 71 72 } 72 73 73 - #[derive(Debug, Clone, Copy)] 74 - enum ProcessResult { 74 + #[derive(Debug)] 75 + enum RepoProcessResult<'s, 'c> { 75 76 Deleted, 76 - Ok, 77 - } 78 - 79 - enum RepoCheckResult { 80 - Syncing, 81 - Ok(RepoState<'static>), 77 + Syncing(Option<&'c Commit<'c>>), 78 + Ok(RepoState<'s>), 82 79 } 83 80 84 81 pub struct FirehoseWorker { 85 82 state: Arc<AppState>, 86 83 rx: mpsc::UnboundedReceiver<BufferedMessage>, 87 84 verify_signatures: bool, 85 + num_shards: usize, 86 + } 87 + 88 + struct WorkerContext<'a> { 89 + verify_signatures: bool, 90 + state: &'a 
AppState, 91 + repo_cache: &'a mut HashMap<Did<'static>, RepoState<'static>>, 92 + batch: &'a mut OwnedWriteBatch, 93 + added_blocks: &'a mut i64, 94 + records_delta: &'a mut i64, 95 + broadcast_events: &'a mut Vec<BroadcastEvent>, 96 + handle: &'a tokio::runtime::Handle, 88 97 } 89 98 90 99 impl FirehoseWorker { ··· 97 106 state, 98 107 rx, 99 108 verify_signatures, 109 + num_shards: 64, 100 110 } 101 111 } 102 112 113 + // starts the worker threads and the main dispatch loop 114 + // the dispatch loop reads from the firehose channel and distributes messages to shards 115 + // based on the consistent hash of the DID 103 116 pub fn run(mut self, handle: tokio::runtime::Handle) -> Result<()> { 104 - const BUF_SIZE: usize = 500; 105 - let mut buf = Vec::<BufferedMessage>::with_capacity(BUF_SIZE); 106 - let mut failed = Vec::<BufferedMessage>::new(); 117 + let mut shards = Vec::with_capacity(self.num_shards); 118 + 119 + for i in 0..self.num_shards { 120 + let (tx, rx) = mpsc::unbounded_channel(); 121 + shards.push(tx); 122 + 123 + let state = self.state.clone(); 124 + let verify = self.verify_signatures; 125 + let handle = handle.clone(); 126 + 127 + std::thread::Builder::new() 128 + .name(format!("ingest-shard-{}", i)) 129 + .spawn(move || { 130 + Self::worker_thread(i, rx, state, verify, handle); 131 + }) 132 + .into_diagnostic()?; 133 + } 134 + 135 + info!("started {} ingest shards", self.num_shards); 107 136 108 137 let _g = handle.enter(); 138 + 139 + // dispatch loop 140 + while let Some(msg) = self.rx.blocking_recv() { 141 + let did = match &msg { 142 + IngestMessage::Firehose(m) => match m { 143 + SubscribeReposMessage::Commit(c) => &c.repo, 144 + SubscribeReposMessage::Identity(i) => &i.did, 145 + SubscribeReposMessage::Account(a) => &a.did, 146 + SubscribeReposMessage::Sync(s) => &s.did, 147 + _ => continue, 148 + }, 149 + IngestMessage::BackfillFinished(did) => did, 150 + }; 151 + 152 + let mut hasher = DefaultHasher::new(); 153 + did.hash(&mut hasher); 
154 + let hash = hasher.finish(); 155 + let shard_idx = (hash as usize) % self.num_shards; 156 + 157 + if let Err(e) = shards[shard_idx].send(msg) { 158 + error!("failed to send message to shard {shard_idx}: {e}"); 159 + // break if send fails; receiver likely closed 160 + break; 161 + } 162 + } 163 + 164 + error!("firehose worker dispatcher shutting down"); 165 + 166 + Ok(()) 167 + } 168 + 169 + // synchronous worker loop running on a dedicated thread 170 + // pulls messages from the channel, builds batches, and processes them 171 + // enters the tokio runtime only when necessary (key resolution) 172 + fn worker_thread( 173 + id: usize, 174 + mut rx: mpsc::UnboundedReceiver<BufferedMessage>, 175 + state: Arc<AppState>, 176 + verify_signatures: bool, 177 + handle: tokio::runtime::Handle, 178 + ) { 179 + let _guard = handle.enter(); 180 + debug!("shard {id} started"); 181 + 109 182 let mut repo_cache = HashMap::new(); 110 183 let mut deleted = HashSet::new(); 111 - let mut broadcast_events = Vec::<BroadcastEvent>::with_capacity(BUF_SIZE); 184 + let mut broadcast_events = Vec::new(); 112 185 113 - loop { 114 - let mut batch = self.state.db.inner.batch(); 186 + while let Some(msg) = rx.blocking_recv() { 187 + let mut batch = state.db.inner.batch(); 115 188 repo_cache.clear(); 116 189 deleted.clear(); 117 190 broadcast_events.clear(); 118 191 119 - // resolve signing keys for commits and syncs if verification is enabled 120 - let keys = if self.verify_signatures { 121 - let dids: HashSet<Did> = buf 122 - .iter() 123 - .filter_map(|msg| match msg { 124 - SubscribeReposMessage::Commit(c) => Some(c.repo.clone()), 125 - SubscribeReposMessage::Sync(s) => Some(s.did.clone()), 126 - _ => None, 127 - }) 128 - .collect(); 192 + let mut added_blocks = 0; 193 + let mut records_delta = 0; 129 194 130 - let futures = dids.into_iter().map(|did| async { 131 - let res = self.state.resolver.resolve_signing_key(&did).await; 132 - (did, res) 133 - }); 195 + let mut ctx = WorkerContext { 
196 + state: &state, 197 + repo_cache: &mut repo_cache, 198 + batch: &mut batch, 199 + added_blocks: &mut added_blocks, 200 + records_delta: &mut records_delta, 201 + broadcast_events: &mut broadcast_events, 202 + handle: &handle, 203 + verify_signatures, 204 + }; 134 205 135 - handle.block_on(join_all(futures)).into_iter().collect() 136 - } else { 137 - HashMap::new() 138 - }; 206 + match msg { 207 + IngestMessage::BackfillFinished(did) => { 208 + debug!("backfill finished for {did}, verifying state and draining buffer"); 139 209 140 - let mut added_blocks = 0; 141 - let mut records_delta = 0; 142 - for msg in buf.drain(..) { 143 - let (did, seq) = match &msg { 144 - SubscribeReposMessage::Commit(c) => (&c.repo, c.seq), 145 - SubscribeReposMessage::Identity(i) => (&i.did, i.seq), 146 - SubscribeReposMessage::Account(a) => (&a.did, a.seq), 147 - SubscribeReposMessage::Sync(s) => (&s.did, s.seq), 148 - _ => continue, 149 - }; 210 + // load repo state to transition status and draining buffer 211 + let repo_key = keys::repo_key(&did); 212 + if let Ok(Some(state_bytes)) = state.db.repos.get(&repo_key).into_diagnostic() { 213 + match crate::db::deser_repo_state(&state_bytes) { 214 + Ok(repo_state) => { 215 + let repo_state = repo_state.into_static(); 150 216 151 - if self.state.blocked_dids.contains_sync(did) { 152 - failed.push(msg); 153 - continue; 154 - } 155 - if deleted.contains(did) { 156 - continue; 217 + match Self::drain_resync_buffer(&mut ctx, &did, repo_state) { 218 + Ok(res) => match res { 219 + RepoProcessResult::Ok(s) => { 220 + // TODO: there might be a race condition here where we get a new commit 221 + // while the resync buffer is being drained, we should handle that probably 222 + // but also it should still be fine since we'll sync eventually anyway 223 + match ops::update_repo_status( 224 + &mut batch, 225 + &state.db, 226 + &did, 227 + s, 228 + RepoStatus::Synced, 229 + ) { 230 + Ok(s) => { 231 + repo_cache.insert(did.clone(), s.into_static()); 232 
+ } 233 + Err(e) => { 234 + // this can only fail if serde retry fails which would be really weird 235 + error!( 236 + "failed to transition {did} to synced: {e}" 237 + ); 238 + } 239 + } 240 + } 241 + RepoProcessResult::Deleted => { 242 + deleted.insert(did.clone()); 243 + } 244 + // we don't have to handle this since drain_resync_buffer doesn't delete 245 + // the commits from the resync buffer so they will get retried later 246 + RepoProcessResult::Syncing(_) => {} 247 + }, 248 + Err(e) => { 249 + error!("failed to drain resync buffer for {did}: {e}") 250 + } 251 + }; 252 + } 253 + Err(e) => error!("failed to deser repo state for {did}: {e}"), 254 + } 255 + } 157 256 } 257 + IngestMessage::Firehose(msg) => { 258 + let (did, seq) = match &msg { 259 + SubscribeReposMessage::Commit(c) => (&c.repo, c.seq), 260 + SubscribeReposMessage::Identity(i) => (&i.did, i.seq), 261 + SubscribeReposMessage::Account(a) => (&a.did, a.seq), 262 + SubscribeReposMessage::Sync(s) => (&s.did, s.seq), 263 + _ => continue, 264 + }; 158 265 159 - match self.process_message( 160 - &mut repo_cache, 161 - &mut batch, 162 - &mut added_blocks, 163 - &mut records_delta, 164 - &mut broadcast_events, 165 - &msg, 166 - did, 167 - &keys, 168 - ) { 169 - Ok(ProcessResult::Ok) => {} 170 - Ok(ProcessResult::Deleted) => { 171 - deleted.insert(did.clone()); 266 + if deleted.contains(did) { 267 + continue; 172 268 } 173 - Err(e) => { 174 - error!("error processing message for {did}: {e}"); 175 - db::check_poisoned_report(&e); 176 - // dont retry commit or sync on key fetch errors 177 - // since we'll just try again later if we get commit or sync again 178 - if e.downcast_ref::<KeyFetchError>().is_none() 179 - && e.downcast_ref::<CommitError>().is_none() 180 - && e.downcast_ref::<NoSigningKeyError>().is_none() 181 - { 182 - failed.push(msg); 269 + 270 + match Self::process_message(&mut ctx, &msg, did) { 271 + Ok(RepoProcessResult::Ok(_)) => {} 272 + Ok(RepoProcessResult::Deleted) => { 273 + 
deleted.insert(did.clone()); 274 + } 275 + Ok(RepoProcessResult::Syncing(Some(commit))) => { 276 + if let Err(e) = ops::persist_to_resync_buffer(&state.db, did, commit) { 277 + error!("failed to persist commit to resync_buffer for {did}: {e}"); 278 + } 279 + } 280 + Ok(RepoProcessResult::Syncing(None)) => {} 281 + Err(e) => { 282 + error!("error processing message for {did}: {e}"); 283 + db::check_poisoned_report(&e); 284 + if Self::check_if_retriable_failure(&e) { 285 + if let SubscribeReposMessage::Commit(commit) = &msg { 286 + if let Err(e) = 287 + ops::persist_to_resync_buffer(&state.db, did, commit) 288 + { 289 + error!( 290 + "failed to persist commit to resync_buffer for {did}: {e}" 291 + ); 292 + } 293 + } 294 + } 183 295 } 184 296 } 185 - } 186 297 187 - self.state 188 - .cur_firehose 189 - .store(seq, std::sync::atomic::Ordering::SeqCst); 298 + state 299 + .cur_firehose 300 + .store(seq, std::sync::atomic::Ordering::SeqCst); 301 + } 190 302 } 191 303 192 - // commit all changes to db 193 - batch.commit().into_diagnostic()?; 304 + if let Err(e) = batch.commit() { 305 + error!("failed to commit batch in shard {id}: {e}"); 306 + } 194 307 195 308 if added_blocks > 0 { 196 - self.state.db.update_count("blocks", added_blocks); 309 + state.db.update_count("blocks", added_blocks); 197 310 } 198 311 if records_delta != 0 { 199 - self.state.db.update_count("records", records_delta); 312 + state.db.update_count("records", records_delta); 200 313 } 201 314 for evt in broadcast_events.drain(..) 
{ 202 - let _ = self.state.db.event_tx.send(evt); 203 - } 204 - 205 - self.state 206 - .db 207 - .inner 208 - .persist(fjall::PersistMode::Buffer) 209 - .into_diagnostic()?; 210 - 211 - // add failed back to buf here so the ordering is preserved 212 - if !failed.is_empty() { 213 - buf.append(&mut failed); 315 + let _ = state.db.event_tx.send(evt); 214 316 } 215 317 216 - // wait until we receive some messages 217 - // this does mean we will have an up to 1 second delay, before we send events to consumers 218 - // but thats reasonable imo, could also be configured of course 219 - let _ = handle.block_on(async { 220 - tokio::time::timeout( 221 - Duration::from_secs(1), 222 - self.rx.recv_many(&mut buf, BUF_SIZE), 223 - ) 224 - .await 225 - }); 226 - if buf.is_empty() { 227 - if self.rx.is_closed() { 228 - error!("ingestor crashed? shutting down buffer processor"); 229 - break; 230 - } 231 - continue; 232 - } 318 + state.db.inner.persist(fjall::PersistMode::Buffer).ok(); 233 319 } 320 + } 234 321 235 - Ok(()) 322 + // dont retry commit or sync on key fetch errors 323 + // since we'll just try again later if we get commit or sync again 324 + fn check_if_retriable_failure(e: &miette::Report) -> bool { 325 + e.downcast_ref::<KeyFetchError>().is_none() 326 + && e.downcast_ref::<CommitError>().is_none() 327 + && e.downcast_ref::<NoSigningKeyError>().is_none() 236 328 } 237 329 238 - fn process_message( 239 - &self, 240 - repo_cache: &mut HashMap<Did<'static>, RepoState<'static>>, 241 - batch: &mut OwnedWriteBatch, 242 - added_blocks: &mut i64, 243 - records_delta: &mut i64, 244 - broadcast_events: &mut Vec<BroadcastEvent>, 245 - msg: &BufferedMessage, 330 + fn process_message<'s, 'c>( 331 + ctx: &mut WorkerContext, 332 + msg: &'c SubscribeReposMessage<'static>, 246 333 did: &Did, 247 - keys: &HashMap<Did<'static>, Result<PublicKey<'static>>>, 248 - ) -> Result<ProcessResult> { 249 - let state = &self.state; 250 - let verify_signatures = self.verify_signatures; 251 - 252 - 
let RepoCheckResult::Ok(repo_state) = 253 - Self::check_repo_state(repo_cache, batch, state, did, msg)? 254 - else { 255 - return Ok(ProcessResult::Ok); 256 - }; 257 - 258 - let get_key = || { 259 - if verify_signatures { 260 - let key = keys.get(did).ok_or_else(|| { 261 - KeyFetchError(miette::miette!( 262 - "!!! THIS IS A BUG !!! missing pubkey for {did}" 263 - )) 264 - })?; 265 - match key { 266 - Ok(key) => Ok(Some(key)), 267 - Err(e) => { 268 - return Err(KeyFetchError(miette::miette!( 269 - "failed to get pubkey for {did}: {e}" 270 - ))); 271 - } 272 - } 273 - } else { 274 - Ok(None) 334 + ) -> Result<RepoProcessResult<'s, 'c>> { 335 + let check_repo_res = Self::check_repo_state(ctx, did, msg)?; 336 + let mut repo_state = match check_repo_res { 337 + RepoProcessResult::Syncing(_) | RepoProcessResult::Deleted => { 338 + return Ok(check_repo_res); 275 339 } 340 + RepoProcessResult::Ok(s) => s, 276 341 }; 277 342 278 343 match msg { 279 344 SubscribeReposMessage::Commit(commit) => { 280 345 trace!("processing buffered commit for {did}"); 281 346 282 - if matches!(repo_state.rev, Some(ref rev) if commit.rev.as_str() <= rev.to_tid().as_str()) 283 - { 284 - debug!( 285 - "skipping replayed event for {}: {} <= {}", 286 - did, 287 - commit.rev, 288 - repo_state 289 - .rev 290 - .as_ref() 291 - .map(|r| r.to_tid()) 292 - .expect("we checked in if") 293 - ); 294 - return Ok(ProcessResult::Ok); 295 - } 296 - 297 - if let (Some(repo), Some(prev_commit)) = (&repo_state.data, &commit.prev_data) 298 - && repo != &prev_commit.0.to_ipld().expect("valid cid") 299 - { 300 - warn!( 301 - "gap detected for {}: repo {} != commit prev {}. 
triggering backfill", 302 - did, repo, prev_commit.0 303 - ); 304 - 305 - let mut batch = state.db.inner.batch(); 306 - ops::update_repo_status( 307 - &mut batch, 308 - &state.db, 309 - did, 310 - repo_state, 311 - RepoStatus::Backfilling, 312 - )?; 313 - batch.commit().into_diagnostic()?; 314 - send_backfill_req(state, did.clone().into_static())?; 315 - 316 - return Ok(ProcessResult::Ok); 317 - } 318 - 319 - let res = ops::apply_commit(batch, &state.db, repo_state, &commit, get_key()?)?; 320 - repo_cache.insert(did.clone().into_static(), res.repo_state); 321 - *added_blocks += res.blocks_count; 322 - *records_delta += res.records_delta; 323 - broadcast_events.push(BroadcastEvent::Persisted( 324 - self.state 325 - .db 326 - .next_event_id 327 - .load(std::sync::atomic::Ordering::SeqCst) 328 - - 1, 329 - )); 347 + return Self::process_commit(ctx, did, repo_state, commit); 330 348 } 331 349 SubscribeReposMessage::Sync(sync) => { 332 350 debug!("processing buffered sync for {did}"); 333 351 334 - match ops::verify_sync_event(sync.blocks.as_ref(), get_key()?) 
{ 352 + match ops::verify_sync_event( 353 + sync.blocks.as_ref(), 354 + Self::fetch_key(ctx, did)?.as_ref(), 355 + ) { 335 356 Ok((root, rev)) => { 336 357 if let Some(current_data) = &repo_state.data { 337 358 if current_data == &root.to_ipld().expect("valid cid") { 338 359 debug!("skipping noop sync for {did}"); 339 - return Ok(ProcessResult::Ok); 360 + return Ok(RepoProcessResult::Ok(repo_state)); 340 361 } 341 362 } 342 363 343 364 if let Some(current_rev) = &repo_state.rev { 344 365 if rev.as_str() <= current_rev.to_tid().as_str() { 345 366 debug!("skipping replayed sync for {did}"); 346 - return Ok(ProcessResult::Ok); 367 + return Ok(RepoProcessResult::Ok(repo_state)); 347 368 } 348 369 } 349 370 350 371 warn!("sync event for {did}: triggering backfill"); 351 - let mut batch = state.db.inner.batch(); 352 - ops::update_repo_status( 372 + let mut batch = ctx.state.db.inner.batch(); 373 + repo_state = ops::update_repo_status( 353 374 &mut batch, 354 - &state.db, 375 + &ctx.state.db, 355 376 did, 356 377 repo_state, 357 378 RepoStatus::Backfilling, 358 379 )?; 380 + ctx.state.db.update_count("pending", 1); 359 381 batch.commit().into_diagnostic()?; 360 - 361 - send_backfill_req(state, did.clone().into_static())?; 362 - return Ok(ProcessResult::Ok); 382 + ctx.state.notify_backfill(); 383 + return Ok(RepoProcessResult::Ok(repo_state)); 363 384 } 364 385 Err(e) => { 365 386 error!("failed to process sync event for {did}: {e}"); ··· 377 398 did: did.clone().into_static(), 378 399 handle, 379 400 }; 380 - broadcast_events.push(ops::make_identity_event(&state.db, evt)); 401 + ctx.broadcast_events 402 + .push(ops::make_identity_event(&ctx.state.db, evt)); 381 403 } 382 404 SubscribeReposMessage::Account(account) => { 383 405 debug!("processing buffered account for {did}"); ··· 392 414 match &account.status { 393 415 Some(AccountStatus::Deleted) => { 394 416 debug!("account {did} deleted, wiping data"); 395 - ops::delete_repo(batch, &state.db, did)?; 396 - return 
Ok(ProcessResult::Deleted); 417 + ops::delete_repo(ctx.batch, &ctx.state.db, did)?; 418 + return Ok(RepoProcessResult::Deleted); 397 419 } 398 420 status => { 399 421 let target_status = match status { ··· 425 447 426 448 if repo_state.status == target_status { 427 449 debug!("account status unchanged for {did}: {target_status:?}"); 428 - return Ok(ProcessResult::Ok); 450 + return Ok(RepoProcessResult::Ok(repo_state)); 429 451 } 430 452 431 - let new_state = ops::update_repo_status( 432 - batch, 433 - &state.db, 453 + repo_state = ops::update_repo_status( 454 + ctx.batch, 455 + &ctx.state.db, 434 456 did, 435 457 repo_state, 436 458 target_status, 437 459 )?; 438 - repo_cache.insert(did.clone().into_static(), new_state); 460 + ctx.repo_cache.insert( 461 + did.clone().into_static(), 462 + repo_state.clone().into_static(), 463 + ); 439 464 } 440 465 } 441 466 } else { ··· 444 469 // 1. we handle changing repo status to Synced before this (in check repo state) 445 470 // 2. initiating backfilling is also handled there 446 471 } 447 - 448 - broadcast_events.push(ops::make_account_event(&state.db, evt)); 472 + ctx.broadcast_events 473 + .push(ops::make_account_event(&ctx.state.db, evt)); 449 474 } 450 475 _ => { 451 476 warn!("unknown message type in buffer for {did}"); 452 477 } 453 478 } 454 479 455 - Ok(ProcessResult::Ok) 480 + Ok(RepoProcessResult::Ok(repo_state)) 456 481 } 457 482 458 - fn check_repo_state( 459 - repo_cache: &mut HashMap<Did<'static>, RepoState<'static>>, 460 - batch: &mut OwnedWriteBatch, 461 - state: &AppState, 483 + fn process_commit<'c, 'ns, 's: 'ns>( 484 + ctx: &mut WorkerContext, 485 + did: &Did, 486 + repo_state: RepoState<'s>, 487 + commit: &'c Commit<'c>, 488 + ) -> Result<RepoProcessResult<'ns, 'c>> { 489 + // check for replayed events (already seen revision) 490 + if matches!(repo_state.rev, Some(ref rev) if commit.rev.as_str() <= rev.to_tid().as_str()) { 491 + debug!( 492 + "skipping replayed event for {}: {} <= {}", 493 + did, 494 + 
commit.rev, 495 + repo_state 496 + .rev 497 + .as_ref() 498 + .map(|r| r.to_tid()) 499 + .expect("we checked in if") 500 + ); 501 + return Ok(RepoProcessResult::Ok(repo_state)); 502 + } 503 + 504 + if let (Some(repo), Some(prev_commit)) = (&repo_state.data, &commit.prev_data) 505 + && repo 506 + != &prev_commit 507 + .0 508 + .to_ipld() 509 + .into_diagnostic() 510 + .wrap_err("invalid cid from relay")? 511 + { 512 + warn!( 513 + "gap detected for {}: repo {} != commit prev {}. triggering backfill", 514 + did, repo, prev_commit.0 515 + ); 516 + 517 + let mut batch = ctx.state.db.inner.batch(); 518 + let repo_state = ops::update_repo_status( 519 + &mut batch, 520 + &ctx.state.db, 521 + did, 522 + repo_state, 523 + RepoStatus::Backfilling, 524 + )?; 525 + ctx.state.db.update_count("pending", 1); 526 + batch.commit().into_diagnostic()?; 527 + ctx.repo_cache 528 + .insert(did.clone().into_static(), repo_state.clone().into_static()); 529 + ctx.state.notify_backfill(); 530 + return Ok(RepoProcessResult::Syncing(Some(commit))); 531 + } 532 + 533 + let res = ops::apply_commit( 534 + ctx.batch, 535 + &ctx.state.db, 536 + repo_state, 537 + &commit, 538 + Self::fetch_key(ctx, did)?.as_ref(), 539 + )?; 540 + let repo_state = res.repo_state; 541 + ctx.repo_cache 542 + .insert(did.clone().into_static(), repo_state.clone().into_static()); 543 + *ctx.added_blocks += res.blocks_count; 544 + *ctx.records_delta += res.records_delta; 545 + ctx.broadcast_events.push(BroadcastEvent::Persisted( 546 + ctx.state 547 + .db 548 + .next_event_id 549 + .load(std::sync::atomic::Ordering::SeqCst) 550 + - 1, 551 + )); 552 + 553 + Ok(RepoProcessResult::Ok(repo_state)) 554 + } 555 + 556 + // checks the current state of the repo in the database 557 + // if the repo is new, creates initial state and triggers backfill 558 + // handles transitions between states (backfilling -> synced, etc) 559 + fn check_repo_state<'s, 'c>( 560 + ctx: &mut WorkerContext, 462 561 did: &Did<'_>, 463 - msg: 
&BufferedMessage, 464 - ) -> Result<RepoCheckResult> { 562 + msg: &'c SubscribeReposMessage<'static>, 563 + ) -> Result<RepoProcessResult<'s, 'c>> { 465 564 // check if we have this repo 466 - if let Some(state) = repo_cache.get(did) { 467 - return Ok(RepoCheckResult::Ok(state.clone())); 565 + if let Some(state) = ctx.repo_cache.get(did) { 566 + return Ok(RepoProcessResult::Ok(state.clone())); 468 567 } 469 568 470 569 let repo_key = keys::repo_key(&did); 471 - let Some(state_bytes) = state.db.repos.get(&repo_key).into_diagnostic()? else { 570 + let Some(state_bytes) = ctx.state.db.repos.get(&repo_key).into_diagnostic()? else { 472 571 // we don't know this repo, but we are receiving events for it 473 572 // this means we should backfill it before processing its events 474 573 debug!("discovered new account {did} from firehose, queueing backfill"); ··· 477 576 // using a separate batch here since we want to make it known its being backfilled 478 577 // immediately. we could use the batch for the unit of work we are doing but 479 578 // then we wouldn't be able to start backfilling until the unit of work is done 480 - let mut batch = state.db.inner.batch(); 481 579 580 + let mut batch = ctx.state.db.inner.batch(); 482 581 batch.insert( 483 - &state.db.repos, 582 + &ctx.state.db.repos, 484 583 &repo_key, 485 584 crate::db::ser_repo_state(&new_state)?, 486 585 ); 487 - batch.insert(&state.db.pending, &repo_key, &[]); 586 + batch.insert(&ctx.state.db.pending, &repo_key, &[]); 587 + ctx.state.db.update_count("repos", 1); 588 + ctx.state.db.update_count("pending", 1); 488 589 batch.commit().into_diagnostic()?; 489 590 490 - send_backfill_req(state, did.clone().into_static())?; 591 + ctx.state.notify_backfill(); 491 592 492 - return Ok(RepoCheckResult::Syncing); 593 + return Ok(RepoProcessResult::Syncing(None)); 493 594 }; 494 595 let mut repo_state = crate::db::deser_repo_state(&state_bytes)?.into_static(); 495 596 496 597 // if we are backfilling or it is new, DON'T 
mark it as synced yet 497 598 // the backfill worker will do that when it finishes 498 599 match &repo_state.status { 499 - RepoStatus::Synced => Ok(RepoCheckResult::Ok(repo_state)), 600 + RepoStatus::Synced => { 601 + // lazy drain: if there are buffered commits, drain them now 602 + if ops::has_buffered_commits(&ctx.state.db, did) { 603 + Self::drain_resync_buffer(ctx, did, repo_state) 604 + } else { 605 + Ok(RepoProcessResult::Ok(repo_state)) 606 + } 607 + } 500 608 RepoStatus::Backfilling | RepoStatus::Error(_) => { 501 - // repo is being backfilled or is in error state 502 - // we dont touch the state because the backfill worker will do that 503 - // we should not really get here because the backfill worker should have marked it as 504 - // being worked on (blocked repos) meaning we would have returned earlier 505 609 debug!( 506 610 "ignoring active status for {did} as it is {:?}", 507 611 repo_state.status 508 612 ); 509 - Ok(RepoCheckResult::Syncing) 613 + Ok(RepoProcessResult::Syncing(None)) 510 614 } 511 615 RepoStatus::Deactivated | RepoStatus::Suspended | RepoStatus::Takendown => { 512 616 // if it was in deactivated/takendown/suspended state, we can mark it as synced ··· 514 618 // UNLESS it is an account status event that keeps it deactivated 515 619 if let SubscribeReposMessage::Account(acc) = msg { 516 620 if !acc.active { 517 - return Ok(RepoCheckResult::Ok(repo_state)); 621 + return Ok(RepoProcessResult::Ok(repo_state)); 518 622 } 519 623 } 520 - 521 624 repo_state = ops::update_repo_status( 522 - batch, 523 - &state.db, 524 - &did, 625 + ctx.batch, 626 + &ctx.state.db, 627 + did, 525 628 repo_state, 526 629 RepoStatus::Synced, 527 630 )?; 528 - repo_cache.insert(did.clone().into_static(), repo_state.clone()); 529 - Ok(RepoCheckResult::Ok(repo_state)) 631 + ctx.repo_cache 632 + .insert(did.clone().into_static(), repo_state.clone()); 633 + Ok(RepoProcessResult::Ok(repo_state)) 634 + } 635 + } 636 + } 637 + 638 + fn drain_resync_buffer<'s>( 639 + 
ctx: &mut WorkerContext, 640 + did: &Did, 641 + mut repo_state: RepoState<'s>, 642 + ) -> Result<RepoProcessResult<'s, 'static>> { 643 + let prefix = keys::resync_buffer_prefix(did); 644 + 645 + for guard in ctx.state.db.resync_buffer.prefix(&prefix) { 646 + let (key, value) = guard.into_inner().into_diagnostic()?; 647 + let commit: Commit = rmp_serde::from_slice(&value).into_diagnostic()?; 648 + 649 + let res = Self::process_commit(ctx, did, repo_state, &commit); 650 + let res = match res { 651 + Ok(r) => r, 652 + Err(e) => { 653 + if !Self::check_if_retriable_failure(&e) { 654 + ctx.batch.remove(&ctx.state.db.resync_buffer, key); 655 + } 656 + return Err(e); 657 + } 658 + }; 659 + match res { 660 + RepoProcessResult::Ok(rs) => { 661 + ctx.batch.remove(&ctx.state.db.resync_buffer, key); 662 + repo_state = rs; 663 + } 664 + RepoProcessResult::Syncing(_) => { 665 + return Ok(RepoProcessResult::Syncing(None)); 666 + } 667 + RepoProcessResult::Deleted => { 668 + ctx.batch.remove(&ctx.state.db.resync_buffer, key); 669 + return Ok(RepoProcessResult::Deleted); 670 + } 530 671 } 672 + } 673 + 674 + Ok(RepoProcessResult::Ok(repo_state)) 675 + } 676 + 677 + fn fetch_key(ctx: &WorkerContext, did: &Did) -> Result<Option<PublicKey<'static>>> { 678 + if ctx.verify_signatures { 679 + let key = ctx 680 + .handle 681 + .block_on(ctx.state.resolver.resolve_signing_key(did)) 682 + .map_err(|e| { 683 + KeyFetchError(miette::miette!("failed to get pubkey for {did}: {e}")) 684 + })?; 685 + Ok(Some(key)) 686 + } else { 687 + Ok(None) 531 688 } 532 689 } 533 690 }
+19 -25
src/main.rs
··· 24 24 25 25 info!("{cfg}"); 26 26 27 - let (state, backfill_rx) = AppState::new(&cfg)?; 27 + let state = AppState::new(&cfg)?; 28 28 let (buffer_tx, buffer_rx) = mpsc::unbounded_channel(); 29 29 let state = Arc::new(state); 30 - 31 - tokio::spawn( 32 - api::serve(state.clone(), cfg.api_port).inspect_err(|e| error!("API server failed: {e}")), 33 - ); 34 - 35 - if cfg.enable_debug { 36 - tokio::spawn( 37 - api::serve_debug(state.clone(), cfg.debug_port) 38 - .inspect_err(|e| error!("debug server failed: {e}")), 39 - ); 40 - } 41 30 42 31 if !cfg.disable_backfill { 43 32 tokio::spawn({ ··· 45 34 let timeout = cfg.repo_fetch_timeout; 46 35 BackfillWorker::new( 47 36 state, 48 - backfill_rx, 37 + buffer_tx.clone(), 49 38 timeout, 50 39 cfg.backfill_concurrency_limit, 51 40 matches!( ··· 55 44 ) 56 45 .run() 57 46 }); 58 - } 59 - 60 - if let Err(e) = spawn_blocking({ 61 - let state = state.clone(); 62 - move || hydrant::backfill::manager::queue_pending_backfills(&state) 63 - }) 64 - .await 65 - .into_diagnostic()? 
66 - { 67 - error!("failed to queue pending backfills: {e}"); 68 - db::check_poisoned_report(&e); 69 47 } 70 48 71 49 if let Err(e) = spawn_blocking({ ··· 156 134 ); 157 135 } 158 136 159 - let tasks = if !cfg.disable_firehose { 137 + let mut tasks = if !cfg.disable_firehose { 160 138 let firehose_worker = std::thread::spawn({ 161 139 let state = state.clone(); 162 140 let handle = tokio::runtime::Handle::current(); ··· 195 173 // essentially we just want to keep the main thread alive for the other components 196 174 vec![Box::pin(futures::future::pending::<miette::Result<()>>()) as BoxFuture<_>] 197 175 }; 176 + 177 + let state_api = state.clone(); 178 + tasks.push(Box::pin(async move { 179 + api::serve(state_api, cfg.api_port) 180 + .await 181 + .map_err(|e| miette::miette!("API server failed: {e}")) 182 + }) as BoxFuture<_>); 183 + 184 + if cfg.enable_debug { 185 + let state_debug = state.clone(); 186 + tasks.push(Box::pin(async move { 187 + api::serve_debug(state_debug, cfg.debug_port) 188 + .await 189 + .map_err(|e| miette::miette!("debug server failed: {e}")) 190 + }) as BoxFuture<_>); 191 + } 198 192 199 193 let res = futures::future::select_all(tasks); 200 194 if let (Err(e), _, _) = res.await {
+21 -7
src/ops.rs
··· 1 1 use crate::db::types::{DbAction, DbRkey, DbTid, TrimmedDid}; 2 2 use crate::db::{self, Db, keys, ser_repo_state}; 3 - use crate::state::AppState; 4 3 use crate::types::{ 5 4 AccountEvt, BroadcastEvent, IdentityEvt, MarshallableEvt, RepoState, RepoStatus, ResyncState, 6 5 StoredEvent, ··· 10 9 use jacquard::IntoStatic; 11 10 12 11 use jacquard::types::cid::Cid; 12 + use jacquard::types::did::Did; 13 13 use jacquard_api::com_atproto::sync::subscribe_repos::Commit; 14 14 use jacquard_common::types::crypto::PublicKey; 15 15 use jacquard_repo::car::reader::parse_car_bytes; ··· 19 19 use std::time::Instant; 20 20 use tracing::{debug, trace, warn}; 21 21 22 - pub fn send_backfill_req(state: &AppState, did: jacquard::types::did::Did<'static>) -> Result<()> { 23 - state 24 - .backfill_tx 25 - .send(did.clone()) 26 - .map_err(|_| miette::miette!("failed to send backfill request for {did}"))?; 27 - let _ = state.blocked_dids.insert_sync(did); 22 + pub fn persist_to_resync_buffer(db: &Db, did: &Did, commit: &Commit) -> Result<()> { 23 + let key = keys::resync_buffer_key(did, DbTid::from(&commit.rev)); 24 + let value = rmp_serde::to_vec(commit).into_diagnostic()?; 25 + db.resync_buffer.insert(key, value).into_diagnostic()?; 26 + debug!( 27 + "buffered commit seq {} for {did} to resync_buffer", 28 + commit.seq 29 + ); 28 30 Ok(()) 31 + } 32 + 33 + pub fn has_buffered_commits(db: &Db, did: &Did) -> bool { 34 + let prefix = keys::resync_buffer_prefix(did); 35 + db.resync_buffer.prefix(&prefix).next().is_some() 29 36 } 30 37 31 38 // emitting identity is ephemeral ··· 66 73 batch.remove(&db.repos, &repo_key); 67 74 batch.remove(&db.pending, &repo_key); 68 75 batch.remove(&db.resync, &repo_key); 76 + 77 + let resync_prefix = keys::resync_buffer_prefix(did); 78 + for guard in db.resync_buffer.prefix(&resync_prefix) { 79 + let k = guard.key().into_diagnostic()?; 80 + batch.remove(&db.resync_buffer, k); 81 + } 69 82 70 83 // 2. 
delete from records (all partitions) 71 84 let mut partitions = Vec::new(); ··· 151 164 152 165 Ok(repo_state) 153 166 } 167 + 154 168 pub fn verify_sync_event(blocks: &[u8], key: Option<&PublicKey>) -> Result<(Cid<'static>, String)> { 155 169 let parsed = tokio::task::block_in_place(|| { 156 170 tokio::runtime::Handle::current()
+13 -20
src/state.rs
··· 1 1 use std::sync::atomic::AtomicI64; 2 2 3 - use jacquard_common::types::string::Did; 4 - use tokio::sync::mpsc; 5 - 6 3 use miette::Result; 4 + use tokio::sync::Notify; 7 5 8 6 use crate::{config::Config, db::Db, resolver::Resolver}; 9 7 10 - pub type BackfillTx = mpsc::UnboundedSender<Did<'static>>; 11 - pub type BackfillRx = mpsc::UnboundedReceiver<Did<'static>>; 12 - 13 8 pub struct AppState { 14 9 pub db: Db, 15 - pub backfill_tx: BackfillTx, 16 10 pub resolver: Resolver, 17 11 pub cur_firehose: AtomicI64, 18 - pub blocked_dids: scc::HashSet<Did<'static>>, 12 + pub backfill_notify: Notify, 19 13 } 20 14 21 15 impl AppState { 22 - pub fn new(config: &Config) -> Result<(Self, BackfillRx)> { 16 + pub fn new(config: &Config) -> Result<Self> { 23 17 let db = Db::open( 24 18 &config.database_path, 25 19 config.cache_size, 26 20 config.disable_lz4_compression, 27 21 )?; 28 22 let resolver = Resolver::new(config.plc_url.clone(), config.identity_cache_size); 29 - let (backfill_tx, backfill_rx) = mpsc::unbounded_channel(); 23 + 24 + Ok(Self { 25 + db, 26 + resolver, 27 + cur_firehose: AtomicI64::new(0), 28 + backfill_notify: Notify::new(), 29 + }) 30 + } 30 31 31 - Ok(( 32 - Self { 33 - db, 34 - backfill_tx, 35 - resolver, 36 - cur_firehose: AtomicI64::new(0), 37 - blocked_dids: scc::HashSet::new(), 38 - }, 39 - backfill_rx, 40 - )) 32 + pub fn notify_backfill(&self) { 33 + self.backfill_notify.notify_one(); 41 34 } 42 35 }