···1515 --wrap-pg "postgresql://user:pass@pg-host:5432/plc-db"
1616 ```
17171818+- Run a fully self-contained mirror using an embedded fjall database (no postgres needed):
1919+2020+ ```bash
2121+ # backfill first
2222+ allegedly backfill --to-fjall ./plc-data
2323+2424+ # then run the mirror
2525+ allegedly mirror --wrap-fjall ./plc-data
2626+ ```
2727+1828- Wrap a plc server, maximalist edition:
19292030 ```bash
···8999- [ ] experimental: websocket version of /export
90100- [x] experimental: accept writes by forwarding them upstream
91101- [ ] experimental: serve a tlog
9292-- [ ] experimental: embed a log database directly for fast and efficient mirroring
102102+- [x] experimental: embed a log database directly for fast and efficient mirroring
93103- [ ] experimental: support multiple upstreams?
9410495105- [ ] new command todo: `zip` or `check` or `diff`: compare two plc logs over some time range
+30-3
src/bin/backfill.rs
···11use allegedly::{
22- Db, Dt, ExportPage, FolderSource, HttpSource, backfill, backfill_to_pg,
22+ Db, Dt, ExportPage, FjallDb, FolderSource, HttpSource, backfill, backfill_to_fjall,
33+ backfill_to_pg,
34 bin::{GlobalArgs, bin_init},
44- full_pages, logo, pages_to_pg, pages_to_stdout, poll_upstream,
55+ full_pages, logo, pages_to_fjall, pages_to_pg, pages_to_stdout, poll_upstream,
56};
67use clap::Parser;
78use reqwest::Url;
···4546 /// only used if `--to-postgres` is present
4647 #[arg(long, action)]
4748 postgres_reset: bool,
4949+ /// Bulk load into a local fjall embedded database
5050+ ///
5151+ /// Pass a directory path for the fjall database
5252+ #[arg(long, conflicts_with_all = ["to_postgres", "postgres_cert", "postgres_reset"])]
5353+ to_fjall: Option<PathBuf>,
5454+ /// Delete all operations from the fjall db before starting
5555+ ///
5656+ /// only used if `--to-fjall` is present
5757+ #[arg(long, action, requires = "to_fjall")]
5858+ fjall_reset: bool,
4859 /// Stop at the week ending before this date
4960 #[arg(long)]
5061 until: Option<Dt>,
···6677 to_postgres,
6778 postgres_cert,
6879 postgres_reset,
8080+ to_fjall,
8181+ fjall_reset,
6982 until,
7083 catch_up,
7184 }: Args,
···143156 }
144157145158 // set up sinks
146146- if let Some(pg_url) = to_postgres {
159159+ if let Some(fjall_path) = to_fjall {
160160+ log::trace!("opening fjall db at {fjall_path:?}...");
161161+ let db = FjallDb::open(&fjall_path)?;
162162+ log::trace!("opened fjall db");
163163+164164+ tasks.spawn(backfill_to_fjall(
165165+ db.clone(),
166166+ fjall_reset,
167167+ bulk_out,
168168+ found_last_tx,
169169+ ));
170170+ if catch_up {
171171+ tasks.spawn(pages_to_fjall(db, full_out));
172172+ }
173173+ } else if let Some(pg_url) = to_postgres {
147174 log::trace!("connecting to postgres...");
148175 let db = Db::new(pg_url.as_str(), postgres_cert).await?;
149176 log::trace!("connected to postgres");
+46-25
src/bin/mirror.rs
···11use allegedly::{
22- Db, ExperimentalConf, ListenConf,
22+ Db, ExperimentalConf, FjallDb, ListenConf,
33 bin::{GlobalArgs, InstrumentationArgs, bin_init},
44- logo, pages_to_pg, poll_upstream, serve,
44+ logo, pages_to_fjall, pages_to_pg, poll_upstream, serve, serve_fjall,
55};
66use clap::Parser;
77use reqwest::Url;
···10101111#[derive(Debug, clap::Args)]
1212pub struct Args {
1313- /// the wrapped did-method-plc server
1313+ /// the wrapped did-method-plc server (not needed when using --wrap-fjall)
1414 #[arg(long, env = "ALLEGEDLY_WRAP")]
1515- wrap: Url,
1515+ wrap: Option<Url>,
1616 /// the wrapped did-method-plc server's database (write access required)
1717- #[arg(long, env = "ALLEGEDLY_WRAP_PG")]
1717+ #[arg(long, env = "ALLEGEDLY_WRAP_PG", conflicts_with = "wrap_fjall")]
1818 wrap_pg: Option<Url>,
1919 /// path to tls cert for the wrapped postgres db, if needed
2020 #[arg(long, env = "ALLEGEDLY_WRAP_PG_CERT")]
2121 wrap_pg_cert: Option<PathBuf>,
2222+ /// path to a local fjall database directory (alternative to postgres)
2323+ #[arg(long, env = "ALLEGEDLY_WRAP_FJALL", conflicts_with_all = ["wrap_pg", "wrap_pg_cert"])]
2424+ wrap_fjall: Option<PathBuf>,
2225 /// wrapping server listen address
2326 #[arg(short, long, env = "ALLEGEDLY_BIND")]
2427 #[clap(default_value = "127.0.0.1:8000")]
···7073 wrap,
7174 wrap_pg,
7275 wrap_pg_cert,
7676+ wrap_fjall,
7377 bind,
7478 acme_domain,
7579 acme_cache_path,
···106110107111 let mut tasks = JoinSet::new();
108112109109- let db = if sync {
110110- let wrap_pg = wrap_pg.ok_or(anyhow::anyhow!(
111111- "a wrapped reference postgres must be provided to sync"
112112- ))?;
113113- let db = Db::new(wrap_pg.as_str(), wrap_pg_cert).await?;
113113+ if let Some(fjall_path) = wrap_fjall {
114114+ let db = FjallDb::open(&fjall_path)?;
114115115115- // TODO: allow starting up with polling backfill from beginning?
116116- log::debug!("getting the latest op from the db...");
116116+ log::debug!("getting the latest op from fjall...");
117117 let latest = db
118118- .get_latest()
119119- .await?
118118+ .get_latest()?
120119 .expect("there to be at least one op in the db. did you backfill?");
121120122121 let (send_page, recv_page) = mpsc::channel(8);
···126125 let throttle = Duration::from_millis(upstream_throttle_ms);
127126128127 tasks.spawn(poll_upstream(Some(latest), poll_url, throttle, send_page));
129129- tasks.spawn(pages_to_pg(db.clone(), recv_page));
130130- Some(db)
128128+ tasks.spawn(pages_to_fjall(db.clone(), recv_page));
129129+130130+ tasks.spawn(serve_fjall(upstream, listen_conf, experimental_conf, db));
131131 } else {
132132- None
133133- };
132132+ let wrap = wrap.ok_or(anyhow::anyhow!(
133133+ "--wrap is required unless using --wrap-fjall"
134134+ ))?;
135135+136136+ let db: Option<Db> = if sync {
137137+ let wrap_pg = wrap_pg.ok_or(anyhow::anyhow!(
138138+ "a wrapped reference postgres (--wrap-pg) or fjall db (--wrap-fjall) must be provided to sync"
139139+ ))?;
140140+ let db = Db::new(wrap_pg.as_str(), wrap_pg_cert).await?;
141141+142142+ log::debug!("getting the latest op from the db...");
143143+ let latest = db
144144+ .get_latest()
145145+ .await?
146146+ .expect("there to be at least one op in the db. did you backfill?");
147147+148148+ let (send_page, recv_page) = mpsc::channel(8);
149149+150150+ let mut poll_url = upstream.clone();
151151+ poll_url.set_path("/export");
152152+ let throttle = Duration::from_millis(upstream_throttle_ms);
153153+154154+ tasks.spawn(poll_upstream(Some(latest), poll_url, throttle, send_page));
155155+ tasks.spawn(pages_to_pg(db.clone(), recv_page));
156156+ Some(db)
157157+ } else {
158158+ None
159159+ };
134160135135- tasks.spawn(serve(
136136- upstream,
137137- wrap,
138138- listen_conf,
139139- experimental_conf,
140140- db.clone(),
141141- ));
161161+ tasks.spawn(serve(upstream, wrap, listen_conf, experimental_conf, db));
162162+ }
142163143164 while let Some(next) = tasks.join_next().await {
144165 match next {
+399
src/doc.rs
···11+use serde::{Deserialize, Serialize};
22+use serde_json::Value;
33+use std::borrow::Cow;
44+use std::collections::BTreeMap;
/// Borrowed-or-owned string alias used throughout this module so document
/// reconstruction can borrow straight out of the source JSON when possible.
pub type CowStr<'a> = Cow<'a, str>;

/// A service entry as stored in PLC operation data:
/// `{"type": ..., "endpoint": ...}` (field names are NOT renamed for serde).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Service<'a> {
    pub r#type: CowStr<'a>,
    pub endpoint: CowStr<'a>,
}

/// The current state of a DID derived from its operation log.
/// Serializes in camelCase to match PLC directory conventions.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DocumentData<'a> {
    pub did: CowStr<'a>,
    pub rotation_keys: Vec<CowStr<'a>>,
    // keyed by key id (e.g. "atproto"); values are `did:key:` strings
    pub verification_methods: BTreeMap<CowStr<'a>, CowStr<'a>>,
    pub also_known_as: Vec<CowStr<'a>>,
    // keyed by service id (e.g. "atproto_pds")
    pub services: BTreeMap<CowStr<'a>, Service<'a>>,
}

/// A W3C DID document rendered from [`DocumentData`]. Output-only
/// (Serialize but not Deserialize).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct DidDocument<'a> {
    #[serde(rename = "@context")]
    pub context: Vec<CowStr<'a>>,
    pub id: CowStr<'a>,
    pub also_known_as: Vec<CowStr<'a>>,
    pub verification_method: Vec<VerificationMethod<'a>>,
    pub service: Vec<DocService<'a>>,
}

/// One entry of a DID document's `verificationMethod` array.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct VerificationMethod<'a> {
    pub id: CowStr<'a>,
    pub r#type: CowStr<'a>,
    pub controller: CowStr<'a>,
    pub public_key_multibase: CowStr<'a>,
}

/// One entry of a DID document's `service` array; `service_endpoint`
/// serializes as `serviceEndpoint` (unlike [`Service`], which uses
/// `endpoint` — they are intentionally different shapes).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct DocService<'a> {
    pub id: CowStr<'a>,
    pub r#type: CowStr<'a>,
    pub service_endpoint: CowStr<'a>,
}
/// Multibase prefix for NIST P-256 (secp256r1) public keys.
const P256_PREFIX: &str = "zDn";
/// Multibase prefix for secp256k1 public keys.
const SECP256K1_PREFIX: &str = "zQ3";

/// Map a key's multibase prefix to the extra JSON-LD context URL its curve
/// requires in the DID document, or `None` for unrecognized prefixes.
fn key_context(multibase: &str) -> Option<&'static str> {
    match multibase {
        m if m.starts_with(P256_PREFIX) => {
            Some("https://w3id.org/security/suites/ecdsa-2019/v1")
        }
        m if m.starts_with(SECP256K1_PREFIX) => {
            Some("https://w3id.org/security/suites/secp256k1-2019/v1")
        }
        _ => None,
    }
}
6464+6565+pub fn format_did_doc<'a>(data: &'a DocumentData<'a>) -> DidDocument<'a> {
6666+ let mut context = vec![
6767+ "https://www.w3.org/ns/did/v1".into(),
6868+ "https://w3id.org/security/multikey/v1".into(),
6969+ ];
7070+7171+ let verification_method = data
7272+ .verification_methods
7373+ .iter()
7474+ .map(|(keyid, did_key)| {
7575+ let multibase: CowStr = did_key.strip_prefix("did:key:").unwrap_or(did_key).into();
7676+7777+ if let Some(ctx) = key_context(&multibase) {
7878+ if !context.iter().any(|c| c == ctx) {
7979+ context.push(ctx.into());
8080+ }
8181+ }
8282+ VerificationMethod {
8383+ id: format!("{}#{keyid}", data.did).into(),
8484+ r#type: "Multikey".into(),
8585+ controller: data.did.clone(),
8686+ public_key_multibase: multibase,
8787+ }
8888+ })
8989+ .collect();
9090+9191+ let service = data
9292+ .services
9393+ .iter()
9494+ .map(|(service_id, svc)| DocService {
9595+ id: format!("#{service_id}").into(),
9696+ r#type: svc.r#type.clone(),
9797+ service_endpoint: svc.endpoint.clone(),
9898+ })
9999+ .collect();
100100+101101+ DidDocument {
102102+ context,
103103+ id: data.did.clone(),
104104+ also_known_as: data.also_known_as.clone(),
105105+ verification_method,
106106+ service,
107107+ }
108108+}
/// Normalize a handle into an `at://` URI.
///
/// Values already carrying `at://` pass through as a borrow; otherwise any
/// leading `http://` / `https://` scheme is dropped and `at://` is prepended
/// (allocating). The two scheme prefixes are mutually exclusive, so the
/// strip order does not matter.
fn ensure_atproto_prefix(s: &str) -> Cow<'_, str> {
    if s.starts_with("at://") {
        s.into()
    } else {
        let bare = s
            .strip_prefix("https://")
            .or_else(|| s.strip_prefix("http://"))
            .unwrap_or(s);
        Cow::Owned(format!("at://{bare}"))
    }
}
/// Ensure a service endpoint carries an explicit scheme, defaulting to
/// `https://` for bare hostnames (legacy `create` ops may omit the scheme).
fn ensure_http_prefix(s: &str) -> Cow<'_, str> {
    let has_scheme = s.starts_with("http://") || s.starts_with("https://");
    if has_scheme {
        Cow::Borrowed(s)
    } else {
        format!("https://{s}").into()
    }
}
/// extract DocumentData from a single operation json blob.
/// returns None for tombstones.
///
/// Handles both the legacy `create` operation shape (flat signing/recovery
/// key, single handle and service) and the current `plc_operation` shape
/// (arrays and maps). Unknown operation types also yield `None`.
pub fn op_to_doc_data<'a>(did: &'a str, op: &'a Value) -> Option<DocumentData<'a>> {
    // TODO: this shouldnt just short circuit to None, we should provide better information about whats missing in an error
    let obj = op.as_object()?;
    let op_type = obj.get("type")?.as_str()?;

    match op_type {
        "plc_tombstone" => None,
        "create" => {
            // legacy shape: all four fields are required flat strings
            let signing_key = obj.get("signingKey")?.as_str()?;
            let recovery_key = obj.get("recoveryKey")?.as_str()?;
            let handle = obj.get("handle")?.as_str()?;
            let service = obj.get("service")?.as_str()?;

            // normalize to the modern shape: the single signing key becomes
            // the "atproto" verification method
            let mut verification_methods = BTreeMap::new();
            verification_methods.insert("atproto".into(), signing_key.into());

            // the single service becomes "atproto_pds", with a scheme added
            // if the legacy value was a bare hostname
            let mut services = BTreeMap::new();
            services.insert(
                "atproto_pds".into(),
                Service {
                    r#type: "AtprotoPersonalDataServer".into(),
                    endpoint: ensure_http_prefix(service),
                },
            );

            Some(DocumentData {
                did: Cow::Borrowed(did),
                // recovery key is listed first, then signing key
                rotation_keys: vec![Cow::Borrowed(recovery_key), Cow::Borrowed(signing_key)],
                verification_methods,
                also_known_as: vec![ensure_atproto_prefix(handle)],
                services,
            })
        }
        "plc_operation" => {
            // modern shape: each top-level field must exist with the right
            // container type, but malformed *entries* inside the containers
            // are silently dropped by the filter_maps below
            let rotation_keys = obj
                .get("rotationKeys")?
                .as_array()?
                .iter()
                .filter_map(|v| v.as_str().map(Cow::Borrowed))
                .collect();

            let verification_methods = obj
                .get("verificationMethods")?
                .as_object()?
                .iter()
                .filter_map(|(k, v)| Some((k.as_str().into(), v.as_str()?.into())))
                .collect();

            let also_known_as = obj
                .get("alsoKnownAs")?
                .as_array()?
                .iter()
                .filter_map(|v| v.as_str().map(Cow::Borrowed))
                .collect();

            // services must deserialize as {"type": ..., "endpoint": ...};
            // entries that don't match the Service shape are dropped
            let services = obj
                .get("services")?
                .as_object()?
                .iter()
                .filter_map(|(k, v)| {
                    let svc: Service = Service::deserialize(v).ok()?;
                    Some((k.as_str().into(), svc))
                })
                .collect();

            Some(DocumentData {
                did: did.into(),
                rotation_keys,
                verification_methods,
                also_known_as,
                services,
            })
        }
        _ => None,
    }
}
206206+207207+/// apply a sequence of operation JSON blobs and return the current document data.
208208+/// returns None if the DID is tombstoned (last op is a tombstone).
209209+pub fn apply_op_log<'a>(did: &'a str, ops: &'a [Value]) -> Option<DocumentData<'a>> {
210210+ // TODO: we don't verify signature chain, we should do that...
211211+ ops.last().and_then(|op| op_to_doc_data(did, op))
212212+}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn normalize_legacy_create() {
        let op = serde_json::json!({
            "type": "create",
            "signingKey": "did:key:zDnaeSigningKey",
            "recoveryKey": "did:key:zQ3shRecoveryKey",
            "handle": "alice.bsky.social",
            "service": "pds.example.com",
            "prev": null,
            "sig": "abc"
        });

        let data = op_to_doc_data("did:plc:test", &op).unwrap();
        // legacy create normalizes to rotationKeys = [recoveryKey, signingKey]
        assert_eq!(data.rotation_keys.len(), 2);
        assert_eq!(data.rotation_keys[0], "did:key:zQ3shRecoveryKey");
        assert_eq!(data.rotation_keys[1], "did:key:zDnaeSigningKey");
        assert_eq!(
            data.verification_methods.get("atproto").unwrap(),
            "did:key:zDnaeSigningKey"
        );
        assert_eq!(data.also_known_as, vec!["at://alice.bsky.social"]);
        let pds = data.services.get("atproto_pds").unwrap();
        assert_eq!(pds.endpoint, "https://pds.example.com");
    }

    #[test]
    fn format_doc_p256_context() {
        let data = DocumentData {
            did: "did:plc:test123".into(),
            rotation_keys: vec!["did:key:zDnaeXYZ".into()],
            verification_methods: {
                let mut m = BTreeMap::new();
                m.insert("atproto".into(), "did:key:zDnaeXYZ".into());
                m
            },
            also_known_as: vec!["at://alice.test".into()],
            services: {
                let mut m = BTreeMap::new();
                m.insert(
                    "atproto_pds".into(),
                    Service {
                        r#type: "AtprotoPersonalDataServer".into(),
                        endpoint: "https://pds.test".into(),
                    },
                );
                m
            },
        };

        let doc = format_did_doc(&data);
        // base two contexts plus the p256 suite context
        assert_eq!(doc.context.len(), 3);
        assert!(
            doc.context
                .iter()
                .any(|c| c == "https://w3id.org/security/suites/ecdsa-2019/v1")
        );
        assert_eq!(doc.verification_method[0].public_key_multibase, "zDnaeXYZ");
        assert_eq!(doc.verification_method[0].id, "did:plc:test123#atproto");
    }

    #[test]
    fn tombstone_returns_none() {
        let op = serde_json::json!({
            "type": "plc_tombstone",
            "prev": "bafyabc",
            "sig": "xyz"
        });
        assert!(op_to_doc_data("did:plc:test", &op).is_none());
    }

    #[test]
    fn apply_log_with_tombstone() {
        let create = serde_json::json!({
            "type": "plc_operation",
            "rotationKeys": ["did:key:zQ3shKey1"],
            "verificationMethods": {"atproto": "did:key:zDnaeKey1"},
            "alsoKnownAs": ["at://alice.test"],
            "services": {
                // `Service` deserializes the *input* field name `endpoint`;
                // `service_endpoint`/`serviceEndpoint` is only the output
                // shape of DocService, and using it here made the service
                // silently fail to deserialize and get dropped
                "atproto_pds": {"type": "AtprotoPersonalDataServer", "endpoint": "https://pds.test"}
            },
            "prev": null,
            "sig": "abc"
        });
        let tombstone = serde_json::json!({
            "type": "plc_tombstone",
            "prev": "bafyabc",
            "sig": "xyz"
        });

        let ops = vec![create.clone()];
        let data = apply_op_log("did:plc:test", &ops).expect("live DID should reconstruct");
        // prove the service actually round-tripped through Service::deserialize
        assert_eq!(
            data.services.get("atproto_pds").unwrap().endpoint,
            "https://pds.test"
        );

        let ops = vec![create, tombstone];
        let result = apply_op_log("did:plc:test", &ops);
        assert!(result.is_none());
    }

    /// Load a fixture exported from the live PLC directory, skipping
    /// nullified entries (they are not part of the canonical op log).
    fn load_fixture(name: &str) -> (String, Vec<Value>) {
        let path = format!("tests/fixtures/{name}");
        let data = std::fs::read_to_string(&path).unwrap_or_else(|e| panic!("{path}: {e}"));
        let entries: Vec<Value> = serde_json::from_str(&data).unwrap();
        let did = entries[0]["did"].as_str().unwrap().to_string();
        let ops: Vec<Value> = entries
            .iter()
            .filter(|e| !e["nullified"].as_bool().unwrap_or(false))
            .map(|e| e["operation"].clone())
            .collect();
        (did, ops)
    }

    #[test]
    fn interop_legacy_dholms() {
        let (did, ops) = load_fixture("log_legacy_dholms.json");
        assert_eq!(did, "did:plc:yk4dd2qkboz2yv6tpubpc6co");

        let data = apply_op_log(&did, &ops).expect("should reconstruct");
        assert_eq!(data.did, did);
        assert_eq!(data.also_known_as, vec!["at://dholms.xyz"]);
        assert_eq!(
            data.services.get("atproto_pds").unwrap().endpoint,
            "https://bsky.social"
        );
        assert_eq!(
            data.verification_methods.get("atproto").unwrap(),
            "did:key:zQ3shXjHeiBuRCKmM36cuYnm7YEMzhGnCmCyW92sRJ9pribSF"
        );

        let doc = format_did_doc(&data);
        assert_eq!(doc.id, did);
        assert!(
            doc.context
                .iter()
                .any(|c| c == "https://w3id.org/security/suites/secp256k1-2019/v1")
        );
    }

    #[test]
    fn interop_bskyapp() {
        let (did, ops) = load_fixture("log_bskyapp.json");
        assert_eq!(did, "did:plc:z72i7hdynmk6r22z27h6tvur");

        let data = apply_op_log(&did, &ops).expect("should reconstruct");
        assert_eq!(data.also_known_as, vec!["at://bsky.app"]);
        assert_eq!(
            data.verification_methods.get("atproto").unwrap(),
            "did:key:zQ3shXjHeiBuRCKmM36cuYnm7YEMzhGnCmCyW92sRJ9pribSF"
        );
        assert_eq!(
            data.services.get("atproto_pds").unwrap().endpoint,
            "https://bsky.social"
        );
    }

    #[test]
    fn interop_tombstone() {
        // note: deliberately does NOT filter nullified entries — the raw
        // final op is the tombstone either way
        let path = "tests/fixtures/log_tombstone.json";
        let data = std::fs::read_to_string(path).unwrap();
        let entries: Vec<Value> = serde_json::from_str(&data).unwrap();
        let did = entries[0]["did"].as_str().unwrap();
        let ops: Vec<Value> = entries.iter().map(|e| e["operation"].clone()).collect();

        assert_eq!(did, "did:plc:6adr3q2labdllanslzhqkqd3");
        let result = apply_op_log(did, &ops);
        assert!(result.is_none(), "tombstoned DID should return None");
    }

    #[test]
    fn interop_nullification() {
        let (did, ops) = load_fixture("log_nullification.json");
        assert_eq!(did, "did:plc:2s2mvm52ttz6r4hocmrq7x27");

        let data = apply_op_log(&did, &ops).expect("should reconstruct");
        assert_eq!(data.did, did);
        assert_eq!(data.rotation_keys.len(), 2);
        assert_eq!(
            data.rotation_keys[0],
            "did:key:zQ3shwPdax6jKMbhtzbueGwSjc7RnjsmPcNB1vQUpbKUCN1t1"
        );
    }
}
+5-1
src/lib.rs
···11use serde::{Deserialize, Serialize};
22+23use tokio::sync::{mpsc, oneshot};
3445mod backfill;
56mod cached_value;
67mod client;
88+pub mod doc;
79mod mirror;
1010+mod plc_fjall;
811mod plc_pg;
912mod poll;
1013mod ratelimit;
···1518pub use backfill::backfill;
1619pub use cached_value::{CachedValue, Fetcher};
1720pub use client::{CLIENT, UA};
1818-pub use mirror::{ExperimentalConf, ListenConf, serve};
2121+pub use mirror::{ExperimentalConf, ListenConf, serve, serve_fjall};
2222+pub use plc_fjall::{FjallDb, backfill_to_fjall, pages_to_fjall};
1923pub use plc_pg::{Db, backfill_to_pg, pages_to_pg};
2024pub use poll::{PageBoundaryState, get_page, poll_upstream};
2125pub use ratelimit::{CreatePlcOpLimiter, GovernorMiddleware, IpLimiters};