···1415It is a superset of the reference PDS, including: passkeys and 2FA (WebAuthn/FIDO2, TOTP, backup codes, trusted devices), SSO login and signup, did:web support (PDS-hosted subdomains or bring-your-own), multi-channel communication (email, discord, telegram, signal) for verification and alerts, granular OAuth scopes with a consent UI showing human-readable descriptions, app passwords with granular permissions (read-only, post-only, or custom scopes), account delegation (letting others manage an account with configurable permission levels), automatic backups (configurable retention and frequency, one-click restore), and a built-in web UI for account management, OAuth consent, repo browsing, and admin.
1617-The PDS itself is a single small binary with no node/npm runtime. It requires postgres and stores blobs on the local filesystem. Valkey is optional (enables distributed rate limiting for multi-node setups).
1819## Quick Start
20
···1415It is a superset of the reference PDS, including: passkeys and 2FA (WebAuthn/FIDO2, TOTP, backup codes, trusted devices), SSO login and signup, did:web support (PDS-hosted subdomains or bring-your-own), multi-channel communication (email, discord, telegram, signal) for verification and alerts, granular OAuth scopes with a consent UI showing human-readable descriptions, app passwords with granular permissions (read-only, post-only, or custom scopes), account delegation (letting others manage an account with configurable permission levels), automatic backups (configurable retention and frequency, one-click restore), and a built-in web UI for account management, OAuth consent, repo browsing, and admin.
1617+The PDS itself is a single small binary with no node/npm runtime. It requires postgres. Blobs are stored on the local filesystem by default (S3 optional). Valkey is optional (supported as an alternative to the built-in cache).
1819## Quick Start
20
···12 let (user_did, _) = setup_new_user("search-target").await;
13 let mut found = false;
14 let mut cursor: Option<String> = None;
15- for _ in 0..10 {
16 let url = match &cursor {
17 Some(c) => format!(
18 "{}/xrpc/com.atproto.admin.searchAccounts?limit=100&cursor={}",
···12 let (user_did, _) = setup_new_user("search-target").await;
13 let mut found = false;
14 let mut cursor: Option<String> = None;
15+ for _ in 0..100 {
16 let url = match &cursor {
17 Some(c) => format!(
18 "{}/xrpc/com.atproto.admin.searchAccounts?limit=100&cursor={}",
+270-72
crates/tranquil-pds/tests/common/mod.rs
···9use serde_json::{Value, json};
10use sqlx::postgres::PgPoolOptions;
11use std::collections::HashMap;
012use std::path::PathBuf;
13use std::sync::{Arc, OnceLock, RwLock};
14#[allow(unused_imports)]
15use std::time::Duration;
16use tokio::net::TcpListener;
17use tokio_util::sync::CancellationToken;
018use tranquil_pds::state::AppState;
19use wiremock::matchers::{method, path};
20use wiremock::{Mock, MockServer, Request, Respond, ResponseTemplate};
···25static MOCK_PLC: OnceLock<MockServer> = OnceLock::new();
26static TEST_DB_POOL: OnceLock<sqlx::PgPool> = OnceLock::new();
27static TEST_TEMP_DIR: OnceLock<PathBuf> = OnceLock::new();
00000000000000002829#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
30use testcontainers::GenericImage;
···139 std::env::var("DATABASE_URL").expect("DATABASE_URL must be set when using external infra");
140 let plc_url = setup_mock_plc_directory().await;
141 unsafe {
142- if std::env::var("S3_ENDPOINT").is_ok() {
143- let s3_endpoint = std::env::var("S3_ENDPOINT").unwrap();
144- std::env::set_var("BLOB_STORAGE_BACKEND", "s3");
145- std::env::set_var("BACKUP_STORAGE_BACKEND", "s3");
146- std::env::set_var("BACKUP_S3_BUCKET", "test-backups");
147- std::env::set_var(
148- "S3_BUCKET",
149- std::env::var("S3_BUCKET").unwrap_or_else(|_| "test-bucket".to_string()),
150- );
151- std::env::set_var(
152- "AWS_ACCESS_KEY_ID",
153- std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "minioadmin".to_string()),
154- );
155- std::env::set_var(
156- "AWS_SECRET_ACCESS_KEY",
157- std::env::var("AWS_SECRET_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string()),
158- );
159- std::env::set_var(
160- "AWS_REGION",
161- std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()),
162- );
163- std::env::set_var("S3_ENDPOINT", &s3_endpoint);
164- } else if std::env::var("BLOB_STORAGE_PATH").is_ok() {
165- std::env::set_var("BLOB_STORAGE_BACKEND", "filesystem");
166- std::env::set_var("BACKUP_STORAGE_BACKEND", "filesystem");
167- } else {
168- panic!("Either S3_ENDPOINT or BLOB_STORAGE_PATH must be set for external-infra");
169- }
170- std::env::set_var("MAX_IMPORT_SIZE", "100000000");
171- std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
172 std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
173 }
174- let mock_server = MockServer::start().await;
175- setup_mock_appview(&mock_server).await;
176- let mock_uri = mock_server.uri();
177- let mock_host = mock_uri.strip_prefix("http://").unwrap_or(&mock_uri);
178- let mock_did = format!("did:web:{}", mock_host.replace(':', "%3A"));
179- setup_mock_did_document(&mock_server, &mock_did, &mock_uri).await;
180- MOCK_APPVIEW.set(mock_server).ok();
181 spawn_app(database_url).await
182}
183···199 std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
200 std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
201 }
202- let mock_server = MockServer::start().await;
203- setup_mock_appview(&mock_server).await;
204- let mock_uri = mock_server.uri();
205- let mock_host = mock_uri.strip_prefix("http://").unwrap_or(&mock_uri);
206- let mock_did = format!("did:web:{}", mock_host.replace(':', "%3A"));
207- setup_mock_did_document(&mock_server, &mock_did, &mock_uri).await;
208- MOCK_APPVIEW.set(mock_server).ok();
209 let container = Postgres::default()
210 .with_tag("18-alpine")
211 .with_label("tranquil_pds_test", "true")
···275 .bucket("test-backups")
276 .send()
277 .await;
278- let mock_server = MockServer::start().await;
279- setup_mock_appview(&mock_server).await;
280- let mock_uri = mock_server.uri();
281- let mock_host = mock_uri.strip_prefix("http://").unwrap_or(&mock_uri);
282- let mock_did = format!("did:web:{}", mock_host.replace(':', "%3A"));
283- setup_mock_did_document(&mock_server, &mock_did, &mock_uri).await;
284- MOCK_APPVIEW.set(mock_server).ok();
285 S3_CONTAINER.set(s3_container).ok();
286 let container = Postgres::default()
287 .with_tag("18-alpine")
···324325async fn setup_mock_appview(_mock_server: &MockServer) {}
326000000000000000000000000000000000000000000000000000000327type PlcOperationStore = Arc<RwLock<HashMap<String, Value>>>;
328329struct PlcPostResponder {
···515 plc_url
516}
517518-async fn spawn_app(database_url: String) -> String {
519 use tranquil_pds::rate_limit::RateLimiters;
520- let pool = PgPoolOptions::new()
521- .max_connections(10)
522- .acquire_timeout(std::time::Duration::from_secs(30))
523- .connect(&database_url)
524- .await
525- .expect("Failed to connect to Postgres. Make sure the database is running.");
526- sqlx::migrate!("./migrations")
527- .run(&pool)
528- .await
529- .expect("Failed to run migrations");
530- let test_pool = PgPoolOptions::new()
531- .max_connections(5)
532- .acquire_timeout(std::time::Duration::from_secs(30))
533- .connect(&database_url)
534- .await
535- .expect("Failed to create test pool");
536- TEST_DB_POOL.set(test_pool).ok();
537 let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
538 let addr = listener.local_addr().unwrap();
539- APP_PORT.set(addr.port()).ok();
540 unsafe {
541 std::env::set_var("PDS_HOSTNAME", format!("pds.test:{}", addr.port()));
542 }
···547 .with_email_update_limit(10000)
548 .with_oauth_authorize_limit(10000)
549 .with_oauth_token_limit(10000);
550- let state = AppState::from_db(pool, CancellationToken::new())
0551 .await
552 .with_rate_limiters(rate_limiters);
000553 tranquil_pds::sync::listener::start_sequencer_listener(state.clone()).await;
554 let app = tranquil_pds::app(state);
555 tokio::spawn(async move {
556 axum::serve(listener, app).await.unwrap();
557 });
558- format!("http://localhost:{}", addr.port())
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000559}
560561#[allow(dead_code)]
···9use serde_json::{Value, json};
10use sqlx::postgres::PgPoolOptions;
11use std::collections::HashMap;
12+use std::net::SocketAddr;
13use std::path::PathBuf;
14use std::sync::{Arc, OnceLock, RwLock};
15#[allow(unused_imports)]
16use std::time::Duration;
17use tokio::net::TcpListener;
18use tokio_util::sync::CancellationToken;
19+use tranquil_pds::cache::{Cache, DistributedRateLimiter};
20use tranquil_pds::state::AppState;
21use wiremock::matchers::{method, path};
22use wiremock::{Mock, MockServer, Request, Respond, ResponseTemplate};
···27static MOCK_PLC: OnceLock<MockServer> = OnceLock::new();
28static TEST_DB_POOL: OnceLock<sqlx::PgPool> = OnceLock::new();
29static TEST_TEMP_DIR: OnceLock<PathBuf> = OnceLock::new();
30+static CLUSTER: OnceLock<Vec<ServerInstance>> = OnceLock::new();
/// Inputs for spawning one test PDS server instance.
#[allow(dead_code)]
pub struct ServerConfig {
    /// Database pool handed to the server's `AppState`.
    pub pool: sqlx::PgPool,
    /// Optional distributed cache + rate limiter pair to install on the
    /// state (used by cluster tests; `None` for single-node tests).
    pub cache: Option<(Arc<dyn Cache>, Arc<dyn DistributedRateLimiter>)>,
}
/// Handle to a running test server: its base URL, bound port, and (for
/// cluster nodes) the cache / rate-limiter handles it was started with.
#[allow(dead_code)]
#[derive(Clone)]
pub struct ServerInstance {
    /// Base URL of the server, e.g. `http://localhost:<port>`.
    pub url: String,
    /// TCP port the server is listening on.
    pub port: u16,
    /// Cache handle; `Some` only when spawned as part of a cluster.
    pub cache: Option<Arc<dyn Cache>>,
    /// Distributed rate limiter handle; `Some` only for cluster nodes.
    pub distributed_rate_limiter: Option<Arc<dyn DistributedRateLimiter>>,
}
4647#[cfg(all(not(feature = "external-infra"), feature = "s3-storage"))]
48use testcontainers::GenericImage;
···157 std::env::var("DATABASE_URL").expect("DATABASE_URL must be set when using external infra");
158 let plc_url = setup_mock_plc_directory().await;
159 unsafe {
160+ configure_external_storage_env();
00000000000000000000000000000161 std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
162 }
163+ register_mock_appview().await;
000000164 spawn_app(database_url).await
165}
166···182 std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
183 std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
184 }
185+ register_mock_appview().await;
000000186 let container = Postgres::default()
187 .with_tag("18-alpine")
188 .with_label("tranquil_pds_test", "true")
···252 .bucket("test-backups")
253 .send()
254 .await;
255+ register_mock_appview().await;
000000256 S3_CONTAINER.set(s3_container).ok();
257 let container = Postgres::default()
258 .with_tag("18-alpine")
···295296async fn setup_mock_appview(_mock_server: &MockServer) {}
297298+async fn register_mock_appview() {
299+ let mock_server = MockServer::start().await;
300+ setup_mock_appview(&mock_server).await;
301+ let mock_uri = mock_server.uri();
302+ let mock_host = mock_uri.strip_prefix("http://").unwrap_or(&mock_uri);
303+ let mock_did = format!("did:web:{}", mock_host.replace(':', "%3A"));
304+ setup_mock_did_document(&mock_server, &mock_did, &mock_uri).await;
305+ MOCK_APPVIEW.set(mock_server).ok();
306+}
/// Point blob and backup storage environment variables at external
/// infrastructure for the `external-infra` test path.
///
/// If `S3_ENDPOINT` is set, both blob and backup storage are switched to
/// the S3 backend, filling in MinIO-style defaults for any bucket,
/// credential, or region variables that are absent. Otherwise a
/// per-process temp directory is created and the filesystem backend is
/// used for both.
///
/// # Safety
/// Mutates process-global environment variables via `std::env::set_var`;
/// callers must ensure no other thread is concurrently reading or writing
/// the environment.
unsafe fn configure_external_storage_env() {
    unsafe {
        if std::env::var("S3_ENDPOINT").is_ok() {
            let s3_endpoint = std::env::var("S3_ENDPOINT").unwrap();
            std::env::set_var("BLOB_STORAGE_BACKEND", "s3");
            std::env::set_var("BACKUP_STORAGE_BACKEND", "s3");
            std::env::set_var("BACKUP_S3_BUCKET", "test-backups");
            // Existing values win; the fallbacks match a default MinIO setup.
            std::env::set_var(
                "S3_BUCKET",
                std::env::var("S3_BUCKET").unwrap_or_else(|_| "test-bucket".to_string()),
            );
            std::env::set_var(
                "AWS_ACCESS_KEY_ID",
                std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "minioadmin".to_string()),
            );
            std::env::set_var(
                "AWS_SECRET_ACCESS_KEY",
                std::env::var("AWS_SECRET_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string()),
            );
            std::env::set_var(
                "AWS_REGION",
                std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()),
            );
            std::env::set_var("S3_ENDPOINT", &s3_endpoint);
        } else {
            // No S3 configured: use filesystem storage under a temp dir
            // keyed by process id so parallel test binaries don't collide.
            let process_dir = std::env::temp_dir().join(format!(
                "tranquil-pds-test-{}",
                std::process::id()
            ));
            let blob_path = process_dir.join("blobs");
            let backup_path = process_dir.join("backups");
            std::fs::create_dir_all(&blob_path).expect("Failed to create blob directory");
            std::fs::create_dir_all(&backup_path).expect("Failed to create backup directory");
            // OnceLock: only the first caller records the temp dir.
            TEST_TEMP_DIR.set(process_dir).ok();
            std::env::set_var("BLOB_STORAGE_BACKEND", "filesystem");
            std::env::set_var("BLOB_STORAGE_PATH", blob_path.to_str().unwrap());
            std::env::set_var("BACKUP_STORAGE_BACKEND", "filesystem");
            std::env::set_var("BACKUP_STORAGE_PATH", backup_path.to_str().unwrap());
        }
        std::env::set_var("MAX_IMPORT_SIZE", "100000000");
        std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
    }
}
351+352type PlcOperationStore = Arc<RwLock<HashMap<String, Value>>>;
353354struct PlcPostResponder {
···540 plc_url
541}
542543+async fn spawn_server(config: ServerConfig) -> ServerInstance {
544 use tranquil_pds::rate_limit::RateLimiters;
00000000000000000545 let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
546 let addr = listener.local_addr().unwrap();
0547 unsafe {
548 std::env::set_var("PDS_HOSTNAME", format!("pds.test:{}", addr.port()));
549 }
···554 .with_email_update_limit(10000)
555 .with_oauth_authorize_limit(10000)
556 .with_oauth_token_limit(10000);
557+ let cache_refs = config.cache.as_ref().map(|(c, r)| (c.clone(), r.clone()));
558+ let mut state = AppState::from_db(config.pool, CancellationToken::new())
559 .await
560 .with_rate_limiters(rate_limiters);
561+ if let Some((cache, distributed_rate_limiter)) = config.cache {
562+ state = state.with_cache(cache, distributed_rate_limiter);
563+ }
564 tranquil_pds::sync::listener::start_sequencer_listener(state.clone()).await;
565 let app = tranquil_pds::app(state);
566 tokio::spawn(async move {
567 axum::serve(listener, app).await.unwrap();
568 });
569+ let (cache, distributed_rate_limiter) = cache_refs
570+ .map(|(c, r)| (Some(c), Some(r)))
571+ .unwrap_or((None, None));
572+ ServerInstance {
573+ url: format!("http://localhost:{}", addr.port()),
574+ port: addr.port(),
575+ cache,
576+ distributed_rate_limiter,
577+ }
578+}
579+580+async fn spawn_app(database_url: String) -> String {
581+ let pool = PgPoolOptions::new()
582+ .max_connections(10)
583+ .acquire_timeout(std::time::Duration::from_secs(30))
584+ .connect(&database_url)
585+ .await
586+ .expect("Failed to connect to Postgres. Make sure the database is running.");
587+ sqlx::migrate!("./migrations")
588+ .run(&pool)
589+ .await
590+ .expect("Failed to run migrations");
591+ let test_pool = PgPoolOptions::new()
592+ .max_connections(2)
593+ .acquire_timeout(std::time::Duration::from_secs(30))
594+ .connect(&database_url)
595+ .await
596+ .expect("Failed to create test pool");
597+ TEST_DB_POOL.set(test_pool).ok();
598+ let instance = spawn_server(ServerConfig { pool, cache: None }).await;
599+ APP_PORT.set(instance.port).ok();
600+ instance.url
601+}
602+603+#[allow(dead_code)]
604+pub async fn spawn_cluster(database_url: String, node_count: usize) -> Vec<ServerInstance> {
605+ use tranquil_ripple::{RippleConfig, RippleEngine};
606+607+ let pool = PgPoolOptions::new()
608+ .max_connections(10)
609+ .acquire_timeout(std::time::Duration::from_secs(30))
610+ .connect(&database_url)
611+ .await
612+ .expect("Failed to connect to Postgres for cluster");
613+ sqlx::migrate!("./migrations")
614+ .run(&pool)
615+ .await
616+ .expect("Failed to run migrations for cluster");
617+ let test_pool = PgPoolOptions::new()
618+ .max_connections(2)
619+ .acquire_timeout(std::time::Duration::from_secs(30))
620+ .connect(&database_url)
621+ .await
622+ .expect("Failed to create test pool for cluster");
623+ TEST_DB_POOL.set(test_pool).ok();
624+625+ let shutdown = CancellationToken::new();
626+627+ let mut ripple_nodes: Vec<(Arc<dyn Cache>, Arc<dyn DistributedRateLimiter>)> =
628+ Vec::with_capacity(node_count);
629+ let mut bound_addrs: Vec<SocketAddr> = Vec::with_capacity(node_count);
630+631+ for i in 0..node_count {
632+ let config = RippleConfig {
633+ bind_addr: "127.0.0.1:0".parse().unwrap(),
634+ seed_peers: bound_addrs.clone(),
635+ machine_id: i as u64 + 1,
636+ gossip_interval_ms: 100,
637+ cache_max_bytes: 64 * 1024 * 1024,
638+ };
639+ let (cache, rate_limiter, addr) = RippleEngine::start(config, shutdown.clone())
640+ .await
641+ .expect("failed to start ripple node");
642+ bound_addrs.push(addr);
643+ ripple_nodes.push((cache, rate_limiter));
644+ }
645+646+ let mut instances: Vec<ServerInstance> = Vec::with_capacity(node_count);
647+ for (cache, rate_limiter) in ripple_nodes {
648+ let server_config = ServerConfig {
649+ pool: pool.clone(),
650+ cache: Some((cache, rate_limiter)),
651+ };
652+ let instance = spawn_server(server_config).await;
653+ instances.push(instance);
654+ }
655+656+ let first = &instances[0];
657+ APP_PORT.set(first.port).ok();
658+659+ tokio::time::sleep(Duration::from_millis(2000)).await;
660+661+ instances
662+}
/// Lazily start (once per process) a three-node PDS cluster and return it.
///
/// The cluster is booted on a dedicated OS thread with its own Tokio
/// runtime that is then parked forever (`std::future::pending`), so the
/// spawned servers outlive any individual test. The first call blocks
/// until the cluster is up; subsequent calls return the cached slice.
#[allow(dead_code)]
pub async fn cluster() -> &'static [ServerInstance] {
    CLUSTER.get_or_init(|| {
        let (tx, rx) = std::sync::mpsc::channel();
        std::thread::spawn(move || {
            unsafe {
                std::env::set_var("TRANQUIL_PDS_ALLOW_INSECURE_SECRETS", "1");
            }
            // If DOCKER_HOST is unset, try the rootless podman socket so
            // testcontainers also works on podman hosts.
            if std::env::var("DOCKER_HOST").is_err()
                && let Ok(runtime_dir) = std::env::var("XDG_RUNTIME_DIR")
            {
                let podman_sock = std::path::Path::new(&runtime_dir).join("podman/podman.sock");
                if podman_sock.exists() {
                    unsafe {
                        std::env::set_var(
                            "DOCKER_HOST",
                            format!("unix://{}", podman_sock.display()),
                        );
                    }
                }
            }
            let rt = tokio::runtime::Runtime::new().unwrap();
            rt.block_on(async move {
                // Cluster tests exercise distributed rate limiting, so make
                // sure it isn't globally disabled for this process.
                unsafe {
                    std::env::remove_var("DISABLE_RATE_LIMITING");
                }
                let database_url = if has_external_infra() {
                    setup_cluster_external_infra().await
                } else {
                    setup_cluster_testcontainers().await
                };
                let nodes = spawn_cluster(database_url, 3).await;
                tx.send(nodes).unwrap();
                // Park forever: keeps the runtime (and servers) alive.
                std::future::pending::<()>().await;
            });
        });
        rx.recv().expect("Failed to start test cluster")
    })
}
/// Prepare environment and mocks for a cluster run against externally
/// provided infrastructure, returning the `DATABASE_URL` to connect to.
async fn setup_cluster_external_infra() -> String {
    let database_url =
        std::env::var("DATABASE_URL").expect("DATABASE_URL must be set when using external infra");
    let plc_url = setup_mock_plc_directory().await;
    // Test-only process-global env mutation; tests run single-process.
    unsafe {
        configure_external_storage_env();
        std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
    }
    register_mock_appview().await;
    database_url
}
/// Prepare environment and mocks for a cluster run, booting a throwaway
/// Postgres testcontainer and returning its connection string.
#[cfg(not(feature = "external-infra"))]
async fn setup_cluster_testcontainers() -> String {
    // Unique temp dir per cluster so repeated runs don't share blob state.
    let temp_dir = std::env::temp_dir().join(format!("tranquil-pds-cluster-{}", uuid::Uuid::new_v4()));
    let blob_path = temp_dir.join("blobs");
    let backup_path = temp_dir.join("backups");
    std::fs::create_dir_all(&blob_path).expect("Failed to create blob temp directory");
    std::fs::create_dir_all(&backup_path).expect("Failed to create backup temp directory");
    TEST_TEMP_DIR.set(temp_dir).ok();
    let plc_url = setup_mock_plc_directory().await;
    // Test-only process-global env mutation; tests run single-process.
    unsafe {
        std::env::set_var("BLOB_STORAGE_BACKEND", "filesystem");
        std::env::set_var("BLOB_STORAGE_PATH", blob_path.to_str().unwrap());
        std::env::set_var("BACKUP_STORAGE_BACKEND", "filesystem");
        std::env::set_var("BACKUP_STORAGE_PATH", backup_path.to_str().unwrap());
        std::env::set_var("MAX_IMPORT_SIZE", "100000000");
        std::env::set_var("SKIP_IMPORT_VERIFICATION", "true");
        std::env::set_var("PLC_DIRECTORY_URL", &plc_url);
    }
    register_mock_appview().await;
    let container = Postgres::default()
        .with_tag("18-alpine")
        .with_label("tranquil_pds_test", "true")
        .start()
        .await
        .expect("Failed to start Postgres for cluster");
    let connection_string = format!(
        "postgres://postgres:postgres@127.0.0.1:{}",
        container
            .get_host_port_ipv4(5432)
            .await
            .expect("Failed to get port")
    );
    // Keep the container handle alive for the rest of the process.
    DB_CONTAINER.set(container).ok();
    connection_string
}
/// Stub for the `external-infra` build: testcontainers are intentionally
/// unavailable, so callers must supply real infrastructure via env vars.
#[cfg(feature = "external-infra")]
async fn setup_cluster_testcontainers() -> String {
    panic!(
        "Testcontainers disabled with external-infra feature. Set DATABASE_URL and BLOB_STORAGE_PATH (or S3_ENDPOINT)."
    );
}
758759#[allow(dead_code)]
+6-1
crates/tranquil-pds/tests/firehose_validation.rs
···800801 tokio::time::sleep(std::time::Duration::from_millis(100)).await;
802803- let outdated_cursor = 1i64;
00000804 let url = format!(
805 "ws://127.0.0.1:{}/xrpc/com.atproto.sync.subscribeRepos?cursor={}",
806 app_port(),
···800801 tokio::time::sleep(std::time::Duration::from_millis(100)).await;
802803+ let pool = get_test_db_pool().await;
804+ let max_seq: i64 = sqlx::query_scalar::<_, i64>("SELECT COALESCE(MAX(seq), 0) FROM repo_seq")
805+ .fetch_one(pool)
806+ .await
807+ .unwrap();
808+ let outdated_cursor = (max_seq - 100).max(1);
809 let url = format!(
810 "ws://127.0.0.1:{}/xrpc/com.atproto.sync.subscribeRepos?cursor={}",
811 app_port(),
+4-2
crates/tranquil-pds/tests/repo_blob.rs
···25async fn test_upload_blob_success() {
26 let client = client();
27 let (token, _) = create_account_and_login(&client).await;
028 let res = client
29 .post(format!(
30 "{}/xrpc/com.atproto.repo.uploadBlob",
···32 ))
33 .header(header::CONTENT_TYPE, "text/plain")
34 .bearer_auth(token)
35- .body("This is our blob data")
36 .send()
37 .await
38 .expect("Failed to send request");
39- assert_eq!(res.status(), StatusCode::OK);
40 let body: Value = res.json().await.expect("Response was not valid JSON");
041 assert!(body["blob"]["ref"]["$link"].as_str().is_some());
42}
43
···25async fn test_upload_blob_success() {
26 let client = client();
27 let (token, _) = create_account_and_login(&client).await;
28+ let blob_data = format!("blob-{}", uuid::Uuid::new_v4());
29 let res = client
30 .post(format!(
31 "{}/xrpc/com.atproto.repo.uploadBlob",
···33 ))
34 .header(header::CONTENT_TYPE, "text/plain")
35 .bearer_auth(token)
36+ .body(blob_data)
37 .send()
38 .await
39 .expect("Failed to send request");
40+ let status = res.status();
41 let body: Value = res.json().await.expect("Response was not valid JSON");
42+ assert_eq!(status, StatusCode::OK, "uploadBlob failed: {body}");
43 assert!(body["blob"]["ref"]["$link"].as_str().is_some());
44}
45
···1+pub mod cache;
2+pub mod config;
3+pub mod crdt;
4+pub mod engine;
5+pub mod eviction;
6+pub mod gossip;
7+pub mod rate_limiter;
8+pub mod transport;
9+10+pub use config::RippleConfig;
11+pub use engine::{RippleEngine, RippleStartError};
···4344## Standalone Containers (No Compose)
4546-If you already have postgres and valkey running on the host (eg., from the [Debian install guide](install-debian.md)), you can run just the app containers.
4748Build the images:
49```sh
···51podman build -t tranquil-pds-frontend:latest ./frontend
52```
5354-Run the backend with host networking (so it can access postgres/valkey on localhost) and mount the blob storage:
55```sh
56podman run -d --name tranquil-pds \
57 --network=host \
···106107```bash
108mkdir -p /etc/containers/systemd
109-mkdir -p /srv/tranquil-pds/{postgres,valkey,blobs,backups,certs,acme,config}
110```
111112## Create Environment File
···127128Copy the quadlet files from the repository:
129```bash
130-cp /opt/tranquil-pds/deploy/quadlets/*.pod /etc/containers/systemd/
131-cp /opt/tranquil-pds/deploy/quadlets/*.container /etc/containers/systemd/
000132```
13300134Note: Systemd doesn't support shell-style variable expansion in `Environment=` lines. The quadlet files expect DATABASE_URL to be set in the environment file.
135136## Create nginx Configuration
···160161```bash
162systemctl daemon-reload
163-systemctl start tranquil-pds-db tranquil-pds-valkey
164sleep 10
165```
166···172173## Obtain Wildcard SSL Certificate
174175-User handles are served as subdomains (eg., `alice.pds.example.com`), so you need a wildcard certificate. Wildcard certs require DNS-01 validation.
176177Create temporary self-signed cert to start services:
178```bash
···195196Follow the prompts to add TXT records to your DNS. Note: manual mode doesn't auto-renew.
197198-For automated renewal, use a DNS provider plugin (eg., cloudflare, route53).
199200Link certificates and restart:
201```bash
···207## Enable All Services
208209```bash
210-systemctl enable tranquil-pds-db tranquil-pds-valkey tranquil-pds-app tranquil-pds-frontend tranquil-pds-nginx
211```
212213## Configure Firewall
···252253```sh
254mkdir -p /srv/tranquil-pds/{data,config}
255-mkdir -p /srv/tranquil-pds/data/{postgres,valkey,blobs,backups,certs,acme}
256```
257258## Clone Repository and Build Images
···346347## Obtain Wildcard SSL Certificate
348349-User handles are served as subdomains (eg., `alice.pds.example.com`), so you need a wildcard certificate. Wildcard certs require DNS-01 validation.
350351Create temporary self-signed cert to start services:
352```sh
···4344## Standalone Containers (No Compose)
4546+If you already have postgres running on the host (eg. from the [Debian install guide](install-debian.md)), you can run just the app containers.
4748Build the images:
49```sh
···51podman build -t tranquil-pds-frontend:latest ./frontend
52```
5354+Run the backend with host networking (so it can access postgres on localhost) and mount the blob storage:
55```sh
56podman run -d --name tranquil-pds \
57 --network=host \
···106107```bash
108mkdir -p /etc/containers/systemd
109+mkdir -p /srv/tranquil-pds/{postgres,blobs,backups,certs,acme,config}
110```
111112## Create Environment File
···127128Copy the quadlet files from the repository:
129```bash
130+cp /opt/tranquil-pds/deploy/quadlets/tranquil-pds.pod /etc/containers/systemd/
131+cp /opt/tranquil-pds/deploy/quadlets/tranquil-pds-db.container /etc/containers/systemd/
132+cp /opt/tranquil-pds/deploy/quadlets/tranquil-pds-app.container /etc/containers/systemd/
133+cp /opt/tranquil-pds/deploy/quadlets/tranquil-pds-frontend.container /etc/containers/systemd/
134+cp /opt/tranquil-pds/deploy/quadlets/tranquil-pds-nginx.container /etc/containers/systemd/
135```
136137+Optional quadlets for valkey and minio are also available in `deploy/quadlets/` if you need them.
138+139Note: Systemd doesn't support shell-style variable expansion in `Environment=` lines. The quadlet files expect DATABASE_URL to be set in the environment file.
140141## Create nginx Configuration
···165166```bash
167systemctl daemon-reload
168+systemctl start tranquil-pds-db
169sleep 10
170```
171···177178## Obtain Wildcard SSL Certificate
User handles are served as subdomains (e.g., `alice.pds.example.com`), so you need a wildcard certificate. Wildcard certs require DNS-01 validation.
181182Create temporary self-signed cert to start services:
183```bash
···200201Follow the prompts to add TXT records to your DNS. Note: manual mode doesn't auto-renew.
For automated renewal, use a DNS provider plugin (e.g., cloudflare, route53).
204205Link certificates and restart:
206```bash
···212## Enable All Services
213214```bash
215+systemctl enable tranquil-pds-db tranquil-pds-app tranquil-pds-frontend tranquil-pds-nginx
216```
217218## Configure Firewall
···257258```sh
259mkdir -p /srv/tranquil-pds/{data,config}
260+mkdir -p /srv/tranquil-pds/data/{postgres,blobs,backups,certs,acme}
261```
262263## Clone Repository and Build Images
···351352## Obtain Wildcard SSL Certificate
User handles are served as subdomains (e.g., `alice.pds.example.com`), so you need a wildcard certificate. Wildcard certs require DNS-01 validation.
355356Create temporary self-signed cert to start services:
357```sh
-8
docs/install-debian.md
···4647We'll set ownership after creating the service user.
4849-## Install valkey
50-51-```bash
52-apt install -y valkey
53-systemctl enable valkey-server
54-systemctl start valkey-server
55-```
56-57## Install deno (for frontend build)
5859```bash
···4647We'll set ownership after creating the service user.
480000000049## Install deno (for frontend build)
5051```bash
-3
docs/install-kubernetes.md
···3If you're reaching for kubernetes for this app, you're experienced enough to know how to spin up:
45- cloudnativepg (or your preferred postgres operator)
6-- valkey
7- a PersistentVolume for blob storage
8- the app itself (it's just a container with some env vars)
9···13- `DATABASE_URL` - postgres connection string
14- `BLOB_STORAGE_PATH` - path to blob storage (mount a PV here)
15- `BACKUP_STORAGE_PATH` - path for repo backups (optional but recommended)
16-- `VALKEY_URL` - redis:// connection string
17- `PDS_HOSTNAME` - your PDS hostname (without protocol)
18- `JWT_SECRET`, `DPOP_SECRET`, `MASTER_KEY` - generate with `openssl rand -base64 48`
19- `CRAWLERS` - typically `https://bsky.network`
···41 </body>
42 </html>
43```
44-
···3If you're reaching for kubernetes for this app, you're experienced enough to know how to spin up:
45- cloudnativepg (or your preferred postgres operator)
06- a PersistentVolume for blob storage
7- the app itself (it's just a container with some env vars)
8···12- `DATABASE_URL` - postgres connection string
13- `BLOB_STORAGE_PATH` - path to blob storage (mount a PV here)
14- `BACKUP_STORAGE_PATH` - path for repo backups (optional but recommended)
015- `PDS_HOSTNAME` - your PDS hostname (without protocol)
16- `JWT_SECRET`, `DPOP_SECRET`, `MASTER_KEY` - generate with `openssl rand -base64 48`
17- `CRAWLERS` - typically `https://bsky.network`
···39 </body>
40 </html>
41```
0