···13PDS_HOSTNAME=localhost:3000
14PLC_URL=plc.directory
15000016# Notification Service Configuration
17# At least one notification channel should be configured for user notifications to work.
18# Email notifications (via sendmail/msmtp)
···13PDS_HOSTNAME=localhost:3000
14PLC_URL=plc.directory
1516+# A comma-separated list of WebSocket URLs for firehose relays to push updates to.
17+# e.g., RELAYS=wss://relay.bsky.social,wss://another-relay.com
18+RELAYS=
19+20# Notification Service Configuration
21# At least one notification channel should be configured for user notifications to work.
22# Email notifications (via sendmail/msmtp)
···8async-trait = "0.1.89"
9aws-config = "1.8.11"
10aws-sdk-s3 = "1.116.0"
11+axum = { version = "0.8.7", features = ["ws", "macros"] }
12base64 = "0.22.1"
13bcrypt = "0.17.1"
14bytes = "1.11.0"
15chrono = { version = "0.4.42", features = ["serde"] }
16cid = "0.11.1"
17dotenvy = "0.15.7"
18+futures = "0.3.30"
19jacquard = { version = "0.9.3", default-features = false, features = ["api", "api_bluesky", "api_full", "derive", "dns"] }
20jacquard-axum = "0.9.2"
21jacquard-repo = "0.9.2"
···26rand = "0.8.5"
27reqwest = { version = "0.12.24", features = ["json"] }
28serde = { version = "1.0.228", features = ["derive"] }
29+serde_bytes = "0.11.14"
30serde_ipld_dagcbor = "0.6.4"
31serde_json = "1.0.145"
32sha2 = "0.10.9"
···35tokio = { version = "1.48.0", features = ["macros", "rt-multi-thread", "time", "signal", "process"] }
36tracing = "0.1.43"
37tracing-subscriber = "0.3.22"
38+tokio-tungstenite = { version = "0.28.0", features = ["native-tls"] }
39uuid = { version = "1.19.0", features = ["v4", "fast-rng"] }
4041[dev-dependencies]
42ctor = "0.6.3"
43+iroh-car = "0.5.1"
44testcontainers = "0.26.0"
45testcontainers-modules = { version = "0.14.0", features = ["postgres"] }
46wiremock = "0.6.5"
+23-24
TODO.md
···43 - [x] Implement `com.atproto.server.confirmEmail`.
4445## Repository Operations (`com.atproto.repo`)
46-- [ ] Record CRUD
47 - [x] Implement `com.atproto.repo.createRecord`.
48- - [ ] Validate schema against Lexicon (just structure, not complex logic).
49 - [x] Generate `rkey` (TID) if not provided.
50 - [x] Handle MST (Merkle Search Tree) insertion.
51- - [ ] **Trigger Firehose Event**.
52 - [x] Implement `com.atproto.repo.putRecord`.
53 - [x] Implement `com.atproto.repo.getRecord`.
54 - [x] Implement `com.atproto.repo.deleteRecord`.
···57 - [x] Implement `com.atproto.repo.applyWrites` (Batch writes).
58 - [ ] Implement `com.atproto.repo.importRepo` (Migration).
59 - [x] Implement `com.atproto.repo.listMissingBlobs`.
60-- [ ] Blob Management
61 - [x] Implement `com.atproto.repo.uploadBlob`.
62 - [x] Store blob (S3).
62 - [x] Return `blob` ref (CID + MimeType).
6465## Sync & Federation (`com.atproto.sync`)
66-- [ ] The Firehose (WebSocket)
67- - [ ] Implement `com.atproto.sync.subscribeRepos`.
68- - [ ] Broadcast real-time commit events.
69- - [ ] Handle cursor replay (backfill).
70-- [ ] Bulk Export
71 - [x] Implement `com.atproto.sync.getRepo` (Return full CAR file of repo).
72 - [x] Implement `com.atproto.sync.getBlocks` (Return specific blocks via CIDs).
73 - [x] Implement `com.atproto.sync.getLatestCommit`.
···75 - [x] Implement `com.atproto.sync.getRepoStatus`.
76 - [x] Implement `com.atproto.sync.listRepos`.
77 - [x] Implement `com.atproto.sync.notifyOfUpdate`.
78-- [ ] Blob Sync
79 - [x] Implement `com.atproto.sync.getBlob`.
80 - [x] Implement `com.atproto.sync.listBlobs`.
81- [x] Crawler Interaction
···110- [ ] Handle this generically.
111112## Infrastructure & Core Components
113-- [ ] Sequencer (Event Log)
114- - [ ] Implement a `Sequencer` (backed by `repo_seq` table? Like in ref impl).
115- - [ ] Implement event formatting (`commit`, `handle`, `identity`, `account`).
116- - [ ] Implement database polling / event emission mechanism.
117- - [ ] Implement cursor-based event replay (`requestSeqRange`).
118-- [ ] Repo Storage & Consistency (in postgres)
119- - [ ] Implement `RepoStorage` for postgres (replaces per-user SQLite).
120- - [ ] Read/Write IPLD blocks to `blocks` table (global deduplication).
121- - [ ] Manage Repo Root in `repos` table.
122- - [ ] Implement Atomic Repo Transactions.
123- - [ ] Ensure `blocks` write, `repo_root` update, `records` index update, and `sequencer` event are committed in a single transaction.
124 - [ ] Implement concurrency control (row-level locking on `repos` table) to prevent concurrent writes to the same repo.
125- [ ] DID Cache
126 - [ ] Implement caching layer for DID resolution (Redis or in-memory).
···138 - [x] Helper functions for common notification types (welcome, password reset, email verification, etc.)
139- [ ] Image Processing
140 - [ ] Implement image resize/formatting pipeline (for blob uploads).
141-- [ ] IPLD & MST
142- - [ ] Implement Merkle Search Tree logic for repo signing.
143- - [ ] Implement CAR (Content Addressable Archive) encoding/decoding.
144- [ ] Validation
145 - [ ] DID PLC Operations (Sign rotation keys).
146- [ ] Fix any remaining TODOs in the code, everywhere, full stop.
···43 - [x] Implement `com.atproto.server.confirmEmail`.
4445## Repository Operations (`com.atproto.repo`)
46+- [x] Record CRUD
47 - [x] Implement `com.atproto.repo.createRecord`.
048 - [x] Generate `rkey` (TID) if not provided.
49 - [x] Handle MST (Merkle Search Tree) insertion.
50+ - [x] **Trigger Firehose Event**.
51 - [x] Implement `com.atproto.repo.putRecord`.
52 - [x] Implement `com.atproto.repo.getRecord`.
53 - [x] Implement `com.atproto.repo.deleteRecord`.
···56 - [x] Implement `com.atproto.repo.applyWrites` (Batch writes).
57 - [ ] Implement `com.atproto.repo.importRepo` (Migration).
58 - [x] Implement `com.atproto.repo.listMissingBlobs`.
59+- [x] Blob Management
60 - [x] Implement `com.atproto.repo.uploadBlob`.
61 - [x] Store blob (S3).
62 - [x] Return `blob` ref (CID + MimeType).
6364## Sync & Federation (`com.atproto.sync`)
65+- [x] The Firehose (WebSocket)
66+ - [x] Implement `com.atproto.sync.subscribeRepos`.
67+ - [x] Broadcast real-time commit events.
68+ - [x] Handle cursor replay (backfill).
69+- [x] Bulk Export
70 - [x] Implement `com.atproto.sync.getRepo` (Return full CAR file of repo).
71 - [x] Implement `com.atproto.sync.getBlocks` (Return specific blocks via CIDs).
72 - [x] Implement `com.atproto.sync.getLatestCommit`.
···74 - [x] Implement `com.atproto.sync.getRepoStatus`.
75 - [x] Implement `com.atproto.sync.listRepos`.
76 - [x] Implement `com.atproto.sync.notifyOfUpdate`.
77+- [x] Blob Sync
78 - [x] Implement `com.atproto.sync.getBlob`.
79 - [x] Implement `com.atproto.sync.listBlobs`.
80- [x] Crawler Interaction
···109- [ ] Handle this generically.
110111## Infrastructure & Core Components
112+- [x] Sequencer (Event Log)
113+ - [x] Implement a `Sequencer` (backed by `repo_seq` table).
114+ - [x] Implement event formatting (`commit`, `handle`, `identity`, `account`).
115+ - [x] Implement database polling / event emission mechanism.
116+ - [x] Implement cursor-based event replay (`requestSeqRange`).
117+- [x] Repo Storage & Consistency (in postgres)
118+ - [x] Implement `RepoStorage` for postgres (replaces per-user SQLite).
119+ - [x] Read/Write IPLD blocks to `blocks` table (global deduplication).
120+ - [x] Manage Repo Root in `repos` table.
121+ - [x] Implement Atomic Repo Transactions.
122+ - [x] Ensure `blocks` write, `repo_root` update, `records` index update, and `sequencer` event are committed in a single transaction.
123 - [ ] Implement concurrency control (row-level locking on `repos` table) to prevent concurrent writes to the same repo.
124- [ ] DID Cache
125 - [ ] Implement caching layer for DID resolution (Redis or in-memory).
···137 - [x] Helper functions for common notification types (welcome, password reset, email verification, etc.)
138- [ ] Image Processing
139 - [ ] Implement image resize/formatting pipeline (for blob uploads).
140+- [x] IPLD & MST
141+ - [x] Implement Merkle Search Tree logic for repo signing.
142+ - [x] Implement CAR (Content Addressable Archive) encoding/decoding.
143- [ ] Validation
144 - [ ] DID PLC Operations (Sign rotation keys).
145- [ ] Fix any remaining TODOs in the code, everywhere, full stop.
+13
migrations/202512211402_repo_sequencer.sql
···0000000000000
-- Sequencer event log backing the firehose (com.atproto.sync.subscribeRepos).
-- Each row is one repo event; `seq` is a strictly increasing cursor used for
-- cursor-based replay.
-- NOTE(review): the application (not a trigger) is assumed to issue
-- NOTIFY 'repo_updates' with the new seq after inserting — confirm in
-- commit_and_log.
CREATE TABLE repo_seq (
    seq BIGSERIAL PRIMARY KEY,
    did TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    event_type TEXT NOT NULL,
    commit_cid TEXT,
    prev_cid TEXT,
    ops JSONB,
    blobs TEXT[],
    -- CIDs of the IPLD blocks written by this commit; required because the
    -- sequencer listener SELECTs `blocks_cids` when loading events.
    blocks_cids TEXT[]
);

-- `seq` is already uniquely indexed via the PRIMARY KEY, so no separate
-- index on it is needed; index `did` for per-repo event queries.
CREATE INDEX idx_repo_seq_did ON repo_seq(did);
···0001use crate::state::AppState;
2use axum::{
3- Json,
4 extract::State,
5- http::StatusCode,
6 response::{IntoResponse, Response},
07};
8use cid::Cid;
9-use jacquard::types::{
10- did::Did,
11- integer::LimitedU32,
12- string::{Nsid, Tid},
13-};
14use jacquard_repo::{commit::Commit, mst::Mst, storage::BlockStore};
15use serde::Deserialize;
16use serde_json::json;
···3132pub async fn delete_record(
33 State(state): State<AppState>,
34- headers: axum::http::HeaderMap,
35 Json(input): Json<DeleteRecordInput>,
36) -> Response {
37- let auth_header = headers.get("Authorization");
38- if auth_header.is_none() {
39- return (
40- StatusCode::UNAUTHORIZED,
41- Json(json!({"error": "AuthenticationRequired"})),
42- )
43- .into_response();
44- }
45- let token = auth_header
46- .unwrap()
47- .to_str()
48- .unwrap_or("")
49- .replace("Bearer ", "");
50-51- let session = sqlx::query!(
52- "SELECT s.did, k.key_bytes FROM sessions s JOIN users u ON s.did = u.did JOIN user_keys k ON u.id = k.user_id WHERE s.access_jwt = $1",
53- token
54- )
55- .fetch_optional(&state.db)
56- .await
57- .unwrap_or(None);
5859- let (did, key_bytes) = match session {
60- Some(row) => (
61- row.did,
62- row.key_bytes,
63- ),
64- None => {
65 return (
66- StatusCode::UNAUTHORIZED,
67- Json(json!({"error": "AuthenticationFailed"})),
68 )
69 .into_response();
70 }
71- };
72-73- if let Err(_) = crate::auth::verify_token(&token, &key_bytes) {
74- return (
75- StatusCode::UNAUTHORIZED,
76- Json(json!({"error": "AuthenticationFailed", "message": "Invalid token signature"})),
77- )
78- .into_response();
79- }
80-81- if input.repo != did {
82- return (StatusCode::FORBIDDEN, Json(json!({"error": "InvalidRepo", "message": "Repo does not match authenticated user"}))).into_response();
83 }
8485- let user_query = sqlx::query!("SELECT id FROM users WHERE did = $1", did)
86- .fetch_optional(&state.db)
87- .await;
88-89- let user_id: uuid::Uuid = match user_query {
90- Ok(Some(row)) => row.id,
91- _ => {
92- return (
93- StatusCode::INTERNAL_SERVER_ERROR,
94- Json(json!({"error": "InternalError", "message": "User not found"})),
95- )
96- .into_response();
97- }
98- };
99-100- let repo_root_query = sqlx::query!("SELECT repo_root_cid FROM repos WHERE user_id = $1", user_id)
101- .fetch_optional(&state.db)
102- .await;
103-104- let current_root_cid = match repo_root_query {
105- Ok(Some(row)) => {
106- let cid_str: String = row.repo_root_cid;
107- Cid::from_str(&cid_str).ok()
108- }
109- _ => None,
110- };
111112- if current_root_cid.is_none() {
113- return (
114- StatusCode::INTERNAL_SERVER_ERROR,
115- Json(json!({"error": "InternalError", "message": "Repo root not found"})),
116- )
117- .into_response();
118- }
119- let current_root_cid = current_root_cid.unwrap();
120-121- let commit_bytes = match state.block_store.get(¤t_root_cid).await {
122 Ok(Some(b)) => b,
123- Ok(None) => return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": "Commit block not found"}))).into_response(),
124- Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": format!("Failed to load commit block: {:?}", e)}))).into_response(),
125 };
126-127 let commit = match Commit::from_cbor(&commit_bytes) {
128 Ok(c) => c,
129- Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": format!("Failed to parse commit: {:?}", e)}))).into_response(),
130 };
131132- let mst_root = commit.data;
133- let store = Arc::new(state.block_store.clone());
134- let mst = Mst::load(store.clone(), mst_root, None);
135-0136 let collection_nsid = match input.collection.parse::<Nsid>() {
137 Ok(n) => n,
138- Err(_) => {
139- return (
140- StatusCode::BAD_REQUEST,
141- Json(json!({"error": "InvalidCollection"})),
142- )
143- .into_response();
144- }
145 };
146-147 let key = format!("{}/{}", collection_nsid, input.rkey);
148149- // TODO: Check swapRecord if provided? Skipping for brevity/robustness
0000000000150151 let new_mst = match mst.delete(&key).await {
152 Ok(m) => m,
···160 Ok(c) => c,
161 Err(e) => {
162 error!("Failed to persist MST: {:?}", e);
163- return (
164- StatusCode::INTERNAL_SERVER_ERROR,
165- Json(json!({"error": "InternalError", "message": "Failed to persist MST"})),
166- )
167- .into_response();
168 }
169 };
170171- let did_obj = match Did::new(&did) {
172- Ok(d) => d,
173- Err(_) => {
174- return (
175- StatusCode::INTERNAL_SERVER_ERROR,
176- Json(json!({"error": "InternalError", "message": "Invalid DID"})),
177- )
178- .into_response();
179- }
180- };
181182- let rev = Tid::now(LimitedU32::MIN);
183-184- let new_commit = Commit::new_unsigned(did_obj, new_mst_root, rev, Some(current_root_cid));
185-186- let new_commit_bytes =
187- match new_commit.to_cbor() {
188- Ok(b) => b,
189- Err(_e) => return (
190- StatusCode::INTERNAL_SERVER_ERROR,
191- Json(
192- json!({"error": "InternalError", "message": "Failed to serialize new commit"}),
193- ),
194- )
195- .into_response(),
196- };
197-198- let new_root_cid = match state.block_store.put(&new_commit_bytes).await {
199- Ok(c) => c,
200- Err(_e) => {
201- return (
202- StatusCode::INTERNAL_SERVER_ERROR,
203- Json(json!({"error": "InternalError", "message": "Failed to save new commit"})),
204- )
205- .into_response();
206- }
207 };
208-209- let update_repo = sqlx::query!("UPDATE repos SET repo_root_cid = $1 WHERE user_id = $2", new_root_cid.to_string(), user_id)
210- .execute(&state.db)
211- .await;
212-213- if let Err(e) = update_repo {
214- error!("Failed to update repo root in DB: {:?}", e);
215- return (
216- StatusCode::INTERNAL_SERVER_ERROR,
217- Json(json!({"error": "InternalError", "message": "Failed to update repo root in DB"})),
218- )
219- .into_response();
220- }
221-222- let record_delete =
223- sqlx::query!("DELETE FROM records WHERE repo_id = $1 AND collection = $2 AND rkey = $3", user_id, input.collection, input.rkey)
224- .execute(&state.db)
225- .await;
226-227- if let Err(e) = record_delete {
228- error!("Error deleting record index: {:?}", e);
229- }
230231 (StatusCode::OK, Json(json!({}))).into_response()
232}
···1+use crate::api::repo::record::utils::{commit_and_log, RecordOp};
2+use crate::api::repo::record::write::prepare_repo_write;
3+use crate::repo::tracking::TrackingBlockStore;
4use crate::state::AppState;
5use axum::{
06 extract::State,
7+ http::{HeaderMap, StatusCode},
8 response::{IntoResponse, Response},
9+ Json,
10};
11use cid::Cid;
12+use jacquard::types::string::Nsid;
000013use jacquard_repo::{commit::Commit, mst::Mst, storage::BlockStore};
14use serde::Deserialize;
15use serde_json::json;
···3031pub async fn delete_record(
32 State(state): State<AppState>,
33+ headers: HeaderMap,
34 Json(input): Json<DeleteRecordInput>,
35) -> Response {
36+ let (did, user_id, current_root_cid) =
37+ match prepare_repo_write(&state, &headers, &input.repo).await {
38+ Ok(res) => res,
39+ Err(err_res) => return err_res,
40+ };
00000000000000004142+ if let Some(swap_commit) = &input.swap_commit {
43+ if Cid::from_str(swap_commit).ok() != Some(current_root_cid) {
000044 return (
45+ StatusCode::CONFLICT,
46+ Json(json!({"error": "InvalidSwap", "message": "Repo has been modified"})),
47 )
48 .into_response();
49 }
00000000000050 }
5152+ let tracking_store = TrackingBlockStore::new(state.block_store.clone());
00000000000000000000000005354+ let commit_bytes = match tracking_store.get(¤t_root_cid).await {
00000000055 Ok(Some(b)) => b,
56+ _ => return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": "Commit block not found"}))).into_response(),
057 };
058 let commit = match Commit::from_cbor(&commit_bytes) {
59 Ok(c) => c,
60+ _ => return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": "Failed to parse commit"}))).into_response(),
61 };
6263+ let mst = Mst::load(
64+ Arc::new(tracking_store.clone()),
65+ commit.data,
66+ None,
67+ );
68 let collection_nsid = match input.collection.parse::<Nsid>() {
69 Ok(n) => n,
70+ Err(_) => return (StatusCode::BAD_REQUEST, Json(json!({"error": "InvalidCollection"}))).into_response(),
00000071 };
072 let key = format!("{}/{}", collection_nsid, input.rkey);
7374+ if let Some(swap_record_str) = &input.swap_record {
75+ let expected_cid = Cid::from_str(swap_record_str).ok();
76+ let actual_cid = mst.get(&key).await.ok().flatten();
77+ if expected_cid != actual_cid {
78+ return (StatusCode::CONFLICT, Json(json!({"error": "InvalidSwap", "message": "Record has been modified or does not exist"}))).into_response();
79+ }
80+ }
81+82+ if mst.get(&key).await.ok().flatten().is_none() {
83+ return (StatusCode::OK, Json(json!({}))).into_response();
84+ }
8586 let new_mst = match mst.delete(&key).await {
87 Ok(m) => m,
···95 Ok(c) => c,
96 Err(e) => {
97 error!("Failed to persist MST: {:?}", e);
98+ return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": "Failed to persist MST"}))).into_response();
000099 }
100 };
101102+ let op = RecordOp::Delete { collection: input.collection, rkey: input.rkey };
103+ let written_cids = tracking_store.get_written_cids();
104+ let written_cids_str = written_cids.iter().map(|c| c.to_string()).collect::<Vec<_>>();
0000000105106+ if let Err(e) = commit_and_log(&state, &did, user_id, Some(current_root_cid), new_mst_root, vec![op], &written_cids_str).await {
107+ return (StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": "InternalError", "message": e}))).into_response();
00000000000000000000000108 };
0000000000000000000000109110 (StatusCode::OK, Json(json!({}))).into_response()
111}
+3-1
src/api/repo/record/mod.rs
···1pub mod batch;
2pub mod delete;
3pub mod read;
04pub mod write;
56pub use batch::apply_writes;
7pub use delete::{DeleteRecordInput, delete_record};
8pub use read::{GetRecordInput, ListRecordsInput, ListRecordsOutput, get_record, list_records};
09pub use write::{
10 CreateRecordInput, CreateRecordOutput, PutRecordInput, PutRecordOutput, create_record,
11- put_record,
12};
···1pub mod batch;
2pub mod delete;
3pub mod read;
4+pub mod utils;
5pub mod write;
67pub use batch::apply_writes;
8pub use delete::{DeleteRecordInput, delete_record};
9pub use read::{GetRecordInput, ListRecordsInput, ListRecordsOutput, get_record, list_records};
10+pub use utils::*;
11pub use write::{
12 CreateRecordInput, CreateRecordOutput, PutRecordInput, PutRecordOutput, create_record,
13+ put_record, prepare_repo_write,
14};
···2425 let state = AppState::new(pool.clone()).await;
2600000000027 let (shutdown_tx, shutdown_rx) = watch::channel(false);
2829 let mut notification_service = NotificationService::new(pool);
···2425 let state = AppState::new(pool.clone()).await;
2627+ bspds::sync::listener::start_sequencer_listener(state.clone()).await;
28+ let relays = std::env::var("RELAYS")
29+ .unwrap_or_default()
30+ .split(',')
31+ .filter(|s| !s.is_empty())
32+ .map(|s| s.to_string())
33+ .collect();
34+ bspds::sync::relay_client::start_relay_clients(state.clone(), relays, None).await;
35+36 let (shutdown_tx, shutdown_rx) = watch::channel(false);
3738 let mut notification_service = NotificationService::new(pool);
···1+use crate::state::AppState;
2+use crate::sync::firehose::SequencedEvent;
3+use sqlx::postgres::PgListener;
4+use tracing::{error, info, warn};
5+6+pub async fn start_sequencer_listener(state: AppState) {
7+ tokio::spawn(async move {
8+ info!("Starting sequencer listener background task");
9+ loop {
10+ if let Err(e) = listen_loop(state.clone()).await {
11+ error!("Sequencer listener failed: {}. Restarting in 5s...", e);
12+ tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
13+ }
14+ }
15+ });
16+}
17+18+async fn listen_loop(state: AppState) -> anyhow::Result<()> {
19+ let mut listener = PgListener::connect_with(&state.db).await?;
20+ listener.listen("repo_updates").await?;
21+ info!("Connected to Postgres and listening for 'repo_updates'");
22+23+ loop {
24+ let notification = listener.recv().await?;
25+ let payload = notification.payload();
26+27+ let seq_id: i64 = match payload.parse() {
28+ Ok(id) => id,
29+ Err(e) => {
30+ warn!("Received invalid payload in repo_updates: '{}'. Error: {}", payload, e);
31+ continue;
32+ }
33+ };
34+35+ let event = sqlx::query_as!(
36+ SequencedEvent,
37+ r#"
38+ SELECT seq, did, created_at, event_type, commit_cid, prev_cid, ops, blobs, blocks_cids
39+ FROM repo_seq
40+ WHERE seq = $1
41+ "#,
42+ seq_id
43+ )
44+ .fetch_optional(&state.db)
45+ .await?;
46+47+ if let Some(event) = event {
48+ let _ = state.firehose_tx.send(event);
49+ } else {
50+ warn!("Received notification for seq {} but could not find row in repo_seq", seq_id);
51+ }
52+ }
53+}
+8-1
src/sync/mod.rs
···2pub mod car;
3pub mod commit;
4pub mod crawl;
00005pub mod repo;
0067pub use blob::{get_blob, list_blobs};
8pub use commit::{get_latest_commit, get_repo_status, list_repos};
9pub use crawl::{notify_of_update, request_crawl};
10-pub use repo::{get_blocks, get_record, get_repo};
0
···2pub mod car;
3pub mod commit;
4pub mod crawl;
5+pub mod firehose;
6+pub mod frame;
7+pub mod listener;
8+pub mod relay_client;
9pub mod repo;
10+pub mod subscribe_repos;
11+pub mod util;
1213pub use blob::{get_blob, list_blobs};
14pub use commit::{get_latest_commit, get_repo_status, list_repos};
15pub use crawl::{notify_of_update, request_crawl};
16+pub use repo::{get_blocks, get_repo, get_record};
17+pub use subscribe_repos::subscribe_repos;