···1-// https://stackoverflow.com/questions/73077972/how-to-deploy-app-service-with-managed-ssl-certificate-using-arm
2-//
3-// TLDR: Azure requires a circular dependency in order to define an app service with a custom domain with SSL enabled.
4-// Terrific user experience. Really makes me love using Azure in my free time.
5-param webAppName string
6-param location string
7-param appServicePlanResourceId string
8-param customHostnames array
9-10-// Managed certificates can only be created once the hostname is added to the web app.
11-resource certificates 'Microsoft.Web/certificates@2022-03-01' = [for (fqdn, i) in customHostnames: {
12- name: '${fqdn}-${webAppName}'
13- location: location
14- properties: {
15- serverFarmId: appServicePlanResourceId
16- canonicalName: fqdn
17- }
18-}]
19-20-// sslState and thumbprint can only be set once the managed certificate is created
21-@batchSize(1)
22-resource customHostname 'Microsoft.web/sites/hostnameBindings@2019-08-01' = [for (fqdn, i) in customHostnames: {
23- name: '${webAppName}/${fqdn}'
24- properties: {
25- siteName: webAppName
26- hostNameType: 'Verified'
27- sslState: 'SniEnabled'
28- thumbprint: certificates[i].properties.thumbprint
29- }
30-}]
···2//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
3//!
4//! Modified for SQLite backend
005use anyhow::Result;
6use chrono::DateTime;
7use chrono::offset::Utc as UtcOffset;
···15 AccountStatus, ActorAccount, ActorJoinAccount, AvailabilityFlags, FormattedAccountStatus,
16 GetAccountAdminStatusOutput, format_account_status,
17};
18-use rsky_pds::schema::pds::account::dsl as AccountSchema;
19-use rsky_pds::schema::pds::actor::dsl as ActorSchema;
20use std::ops::Add;
21use std::time::SystemTime;
22use thiserror::Error;
···253 deadpool_diesel::sqlite::Object,
254 >,
255) -> Result<()> {
256- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
257- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
258- use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
259260 let did = did.to_owned();
261 _ = db
···410 deadpool_diesel::sqlite::Object,
411 >,
412) -> Result<()> {
413- use rsky_pds::schema::pds::actor;
414415 let actor2 = diesel::alias!(actor as actor2);
416
···2//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
3//!
4//! Modified for SQLite backend
5+use crate::schema::pds::account::dsl as AccountSchema;
6+use crate::schema::pds::actor::dsl as ActorSchema;
7use anyhow::Result;
8use chrono::DateTime;
9use chrono::offset::Utc as UtcOffset;
···17 AccountStatus, ActorAccount, ActorJoinAccount, AvailabilityFlags, FormattedAccountStatus,
18 GetAccountAdminStatusOutput, format_account_status,
19};
0020use std::ops::Add;
21use std::time::SystemTime;
22use thiserror::Error;
···253 deadpool_diesel::sqlite::Object,
254 >,
255) -> Result<()> {
256+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
257+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
258+ use crate::schema::pds::repo_root::dsl as RepoRootSchema;
259260 let did = did.to_owned();
261 _ = db
···410 deadpool_diesel::sqlite::Object,
411 >,
412) -> Result<()> {
413+ use crate::schema::pds::actor;
414415 let actor2 = diesel::alias!(actor as actor2);
416
+7-7
src/account_manager/helpers/auth.rs
···22 deadpool_diesel::sqlite::Object,
23 >,
24) -> Result<()> {
25- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
2627 let exp = from_micros_to_utc((payload.exp.as_millis() / 1000) as i64);
28···53 deadpool_diesel::sqlite::Object,
54 >,
55) -> Result<bool> {
56- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
57 db.get()
58 .await?
59 .interact(move |conn| {
···74 deadpool_diesel::sqlite::Object,
75 >,
76) -> Result<bool> {
77- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
78 let did = did.to_owned();
79 db.get()
80 .await?
···97 deadpool_diesel::sqlite::Object,
98 >,
99) -> Result<bool> {
100- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
101102 let did = did.to_owned();
103 let app_pass_name = app_pass_name.to_owned();
···122 deadpool_diesel::sqlite::Object,
123 >,
124) -> Result<Option<models::RefreshToken>> {
125- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
126 let id = id.to_owned();
127 db.get()
128 .await?
···144 deadpool_diesel::sqlite::Object,
145 >,
146) -> Result<()> {
147- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
148 let did = did.to_owned();
149150 db.get()
···175 expires_at,
176 next_id,
177 } = opts;
178- use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;
179180 drop(
181 update(RefreshTokenSchema::refresh_token)
···22 deadpool_diesel::sqlite::Object,
23 >,
24) -> Result<()> {
25+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
2627 let exp = from_micros_to_utc((payload.exp.as_millis() / 1000) as i64);
28···53 deadpool_diesel::sqlite::Object,
54 >,
55) -> Result<bool> {
56+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
57 db.get()
58 .await?
59 .interact(move |conn| {
···74 deadpool_diesel::sqlite::Object,
75 >,
76) -> Result<bool> {
77+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
78 let did = did.to_owned();
79 db.get()
80 .await?
···97 deadpool_diesel::sqlite::Object,
98 >,
99) -> Result<bool> {
100+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
101102 let did = did.to_owned();
103 let app_pass_name = app_pass_name.to_owned();
···122 deadpool_diesel::sqlite::Object,
123 >,
124) -> Result<Option<models::RefreshToken>> {
125+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
126 let id = id.to_owned();
127 db.get()
128 .await?
···144 deadpool_diesel::sqlite::Object,
145 >,
146) -> Result<()> {
147+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
148 let did = did.to_owned();
149150 db.get()
···175 expires_at,
176 next_id,
177 } = opts;
178+ use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
179180 drop(
181 update(RefreshTokenSchema::refresh_token)
+5-5
src/account_manager/helpers/email_token.rs
···17 deadpool_diesel::sqlite::Object,
18 >,
19) -> Result<String> {
20- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
21 let token = get_random_token().to_uppercase();
22 let now = rsky_common::now();
23···56 >,
57) -> Result<()> {
58 let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
59- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
6061 let did = did.to_owned();
62 let token = token.to_owned();
···96 >,
97) -> Result<String> {
98 let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
99- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
100101 let token = token.to_owned();
102 let res = db
···210 deadpool_diesel::sqlite::Object,
211 >,
212) -> Result<()> {
213- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
214 let did = did.to_owned();
215 _ = db
216 .get()
···233 deadpool_diesel::sqlite::Object,
234 >,
235) -> Result<()> {
236- use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
237238 let did = did.to_owned();
239 _ = db
···17 deadpool_diesel::sqlite::Object,
18 >,
19) -> Result<String> {
20+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
21 let token = get_random_token().to_uppercase();
22 let now = rsky_common::now();
23···56 >,
57) -> Result<()> {
58 let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
59+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
6061 let did = did.to_owned();
62 let token = token.to_owned();
···96 >,
97) -> Result<String> {
98 let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
99+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
100101 let token = token.to_owned();
102 let res = db
···210 deadpool_diesel::sqlite::Object,
211 >,
212) -> Result<()> {
213+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
214 let did = did.to_owned();
215 _ = db
216 .get()
···233 deadpool_diesel::sqlite::Object,
234 >,
235) -> Result<()> {
236+ use crate::schema::pds::email_token::dsl as EmailTokenSchema;
237238 let did = did.to_owned();
239 _ = db
+12-12
src/account_manager/helpers/invite.rs
···23 deadpool_diesel::sqlite::Object,
24 >,
25) -> Result<()> {
26- use rsky_pds::schema::pds::actor::dsl as ActorSchema;
27- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
28- use rsky_pds::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
2930 db.get().await?.interact(move |conn| {
31 let invite: Option<models::InviteCode> = InviteCodeSchema::invite_code
···72 >,
73) -> Result<()> {
74 if let Some(invite_code) = invite_code {
75- use rsky_pds::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
7677 _ = db
78 .get()
···100 deadpool_diesel::sqlite::Object,
101 >,
102) -> Result<()> {
103- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
104 let created_at = rsky_common::now();
105106 _ = db
···144 deadpool_diesel::sqlite::Object,
145 >,
146) -> Result<Vec<CodeDetail>> {
147- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
148149 let for_account = for_account.to_owned();
150 let rows = db
···201 deadpool_diesel::sqlite::Object,
202 >,
203) -> Result<Vec<CodeDetail>> {
204- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
205206 let did = did.to_owned();
207 let res: Vec<models::InviteCode> = db
···239 deadpool_diesel::sqlite::Object,
240 >,
241) -> Result<BTreeMap<String, Vec<CodeUse>>> {
242- use rsky_pds::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
243244 let mut uses: BTreeMap<String, Vec<CodeUse>> = BTreeMap::new();
245 if !codes.is_empty() {
···282 if dids.is_empty() {
283 return Ok(BTreeMap::new());
284 }
285- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
286- use rsky_pds::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
287288 let dids = dids.clone();
289 let res: Vec<models::InviteCode> = db
···339 deadpool_diesel::sqlite::Object,
340 >,
341) -> Result<()> {
342- use rsky_pds::schema::pds::account::dsl as AccountSchema;
343344 let disabled: i16 = if disabled { 1 } else { 0 };
345 let did = did.to_owned();
···364 deadpool_diesel::sqlite::Object,
365 >,
366) -> Result<()> {
367- use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
368369 let DisableInviteCodesOpts { codes, accounts } = opts;
370 if !codes.is_empty() {
···23 deadpool_diesel::sqlite::Object,
24 >,
25) -> Result<()> {
26+ use crate::schema::pds::actor::dsl as ActorSchema;
27+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
28+ use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
2930 db.get().await?.interact(move |conn| {
31 let invite: Option<models::InviteCode> = InviteCodeSchema::invite_code
···72 >,
73) -> Result<()> {
74 if let Some(invite_code) = invite_code {
75+ use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
7677 _ = db
78 .get()
···100 deadpool_diesel::sqlite::Object,
101 >,
102) -> Result<()> {
103+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
104 let created_at = rsky_common::now();
105106 _ = db
···144 deadpool_diesel::sqlite::Object,
145 >,
146) -> Result<Vec<CodeDetail>> {
147+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
148149 let for_account = for_account.to_owned();
150 let rows = db
···201 deadpool_diesel::sqlite::Object,
202 >,
203) -> Result<Vec<CodeDetail>> {
204+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
205206 let did = did.to_owned();
207 let res: Vec<models::InviteCode> = db
···239 deadpool_diesel::sqlite::Object,
240 >,
241) -> Result<BTreeMap<String, Vec<CodeUse>>> {
242+ use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
243244 let mut uses: BTreeMap<String, Vec<CodeUse>> = BTreeMap::new();
245 if !codes.is_empty() {
···282 if dids.is_empty() {
283 return Ok(BTreeMap::new());
284 }
285+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
286+ use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;
287288 let dids = dids.clone();
289 let res: Vec<models::InviteCode> = db
···339 deadpool_diesel::sqlite::Object,
340 >,
341) -> Result<()> {
342+ use crate::schema::pds::account::dsl as AccountSchema;
343344 let disabled: i16 = if disabled { 1 } else { 0 };
345 let did = did.to_owned();
···364 deadpool_diesel::sqlite::Object,
365 >,
366) -> Result<()> {
367+ use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
368369 let DisableInviteCodesOpts { codes, accounts } = opts;
370 if !codes.is_empty() {
+6-6
src/account_manager/helpers/password.rs
···21 deadpool_diesel::sqlite::Object,
22 >,
23) -> Result<bool> {
24- use rsky_pds::schema::pds::account::dsl as AccountSchema;
2526 let did = did.to_owned();
27 let found = db
···51 deadpool_diesel::sqlite::Object,
52 >,
53) -> Result<Option<String>> {
54- use rsky_pds::schema::pds::app_password::dsl as AppPasswordSchema;
5556 let did = did.to_owned();
57 let password = password.to_owned();
···91 let password = chunks.join("-");
92 let password_encrypted = hash_app_password(&did, &password).await?;
9394- use rsky_pds::schema::pds::app_password::dsl as AppPasswordSchema;
9596 let created_at = now();
97···129 deadpool_diesel::sqlite::Object,
130 >,
131) -> Result<Vec<(String, String)>> {
132- use rsky_pds::schema::pds::app_password::dsl as AppPasswordSchema;
133134 let did = did.to_owned();
135 db.get()
···151 deadpool_diesel::sqlite::Object,
152 >,
153) -> Result<()> {
154- use rsky_pds::schema::pds::account::dsl as AccountSchema;
155156 db.get()
157 .await?
···174 deadpool_diesel::sqlite::Object,
175 >,
176) -> Result<()> {
177- use rsky_pds::schema::pds::app_password::dsl as AppPasswordSchema;
178179 let did = did.to_owned();
180 let name = name.to_owned();
···21 deadpool_diesel::sqlite::Object,
22 >,
23) -> Result<bool> {
24+ use crate::schema::pds::account::dsl as AccountSchema;
2526 let did = did.to_owned();
27 let found = db
···51 deadpool_diesel::sqlite::Object,
52 >,
53) -> Result<Option<String>> {
54+ use crate::schema::pds::app_password::dsl as AppPasswordSchema;
5556 let did = did.to_owned();
57 let password = password.to_owned();
···91 let password = chunks.join("-");
92 let password_encrypted = hash_app_password(&did, &password).await?;
9394+ use crate::schema::pds::app_password::dsl as AppPasswordSchema;
9596 let created_at = now();
97···129 deadpool_diesel::sqlite::Object,
130 >,
131) -> Result<Vec<(String, String)>> {
132+ use crate::schema::pds::app_password::dsl as AppPasswordSchema;
133134 let did = did.to_owned();
135 db.get()
···151 deadpool_diesel::sqlite::Object,
152 >,
153) -> Result<()> {
154+ use crate::schema::pds::account::dsl as AccountSchema;
155156 db.get()
157 .await?
···174 deadpool_diesel::sqlite::Object,
175 >,
176) -> Result<()> {
177+ use crate::schema::pds::app_password::dsl as AppPasswordSchema;
178179 let did = did.to_owned();
180 let name = name.to_owned();
+1-1
src/account_manager/helpers/repo.rs
···16 >,
17) -> Result<()> {
18 // @TODO balance risk of a race in the case of a long retry
19- use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
2021 let now = rsky_common::now();
22
···16 >,
17) -> Result<()> {
18 // @TODO balance risk of a race in the case of a long retry
19+ use crate::schema::pds::repo_root::dsl as RepoRootSchema;
2021 let now = rsky_common::now();
22
+15-15
src/actor_store/blob.rs
···6768 /// Get metadata for a blob by CID
69 pub async fn get_blob_metadata(&self, cid: Cid) -> Result<GetBlobMetadataOutput> {
70- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
7172 let did = self.did.clone();
73 let found = self
···112113 /// Get all records that reference a specific blob
114 pub async fn get_records_for_blob(&self, cid: Cid) -> Result<Vec<String>> {
115- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
116117 let did = self.did.clone();
118 let res = self
···169170 /// Track a blob that hasn't been associated with any records yet
171 pub async fn track_untethered_blob(&self, metadata: BlobMetadata) -> Result<BlobRef> {
172- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
173174 let did = self.did.clone();
175 self.db.get().await?.interact(move |conn| {
···254255 /// Delete blobs that are no longer referenced by any records
256 pub async fn delete_dereferenced_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
257- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
258- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
259260 // Extract URIs
261 let uris: Vec<String> = writes
···386387 /// Verify a blob and make it permanent
388 pub async fn verify_blob_and_make_permanent(&self, blob: PreparedBlobRef) -> Result<()> {
389- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
390391 let found = self
392 .db
···433434 /// Associate a blob with a record
435 pub async fn associate_blob(&self, blob: PreparedBlobRef, record_uri: String) -> Result<()> {
436- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
437438 let cid = blob.cid.to_string();
439 let did = self.did.clone();
···460461 /// Count all blobs for this actor
462 pub async fn blob_count(&self) -> Result<i64> {
463- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
464465 let did = self.did.clone();
466 self.db
···479480 /// Count blobs associated with records
481 pub async fn record_blob_count(&self) -> Result<i64> {
482- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
483484 let did = self.did.clone();
485 self.db
···501 &self,
502 opts: ListMissingBlobsOpts,
503 ) -> Result<Vec<ListMissingBlobsRefRecordBlob>> {
504- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
505- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
506507 let did = self.did.clone();
508 self.db
···563564 /// List all blobs with optional filtering
565 pub async fn list_blobs(&self, opts: ListBlobsOpts) -> Result<Vec<String>> {
566- use rsky_pds::schema::pds::record::dsl as RecordSchema;
567- use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
568569 let ListBlobsOpts {
570 since,
···617618 /// Get the takedown status of a blob
619 pub async fn get_blob_takedown_status(&self, cid: Cid) -> Result<Option<StatusAttr>> {
620- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
621622 self.db
623 .get()
···653654 /// Update the takedown status of a blob
655 pub async fn update_blob_takedown_status(&self, blob: Cid, takedown: StatusAttr) -> Result<()> {
656- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
657658 let takedown_ref: Option<String> = match takedown.applied {
659 true => takedown.r#ref.map_or_else(|| Some(now()), Some),
···6768 /// Get metadata for a blob by CID
69 pub async fn get_blob_metadata(&self, cid: Cid) -> Result<GetBlobMetadataOutput> {
70+ use crate::schema::pds::blob::dsl as BlobSchema;
7172 let did = self.did.clone();
73 let found = self
···112113 /// Get all records that reference a specific blob
114 pub async fn get_records_for_blob(&self, cid: Cid) -> Result<Vec<String>> {
115+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
116117 let did = self.did.clone();
118 let res = self
···169170 /// Track a blob that hasn't been associated with any records yet
171 pub async fn track_untethered_blob(&self, metadata: BlobMetadata) -> Result<BlobRef> {
172+ use crate::schema::pds::blob::dsl as BlobSchema;
173174 let did = self.did.clone();
175 self.db.get().await?.interact(move |conn| {
···254255 /// Delete blobs that are no longer referenced by any records
256 pub async fn delete_dereferenced_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
257+ use crate::schema::pds::blob::dsl as BlobSchema;
258+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
259260 // Extract URIs
261 let uris: Vec<String> = writes
···386387 /// Verify a blob and make it permanent
388 pub async fn verify_blob_and_make_permanent(&self, blob: PreparedBlobRef) -> Result<()> {
389+ use crate::schema::pds::blob::dsl as BlobSchema;
390391 let found = self
392 .db
···433434 /// Associate a blob with a record
435 pub async fn associate_blob(&self, blob: PreparedBlobRef, record_uri: String) -> Result<()> {
436+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
437438 let cid = blob.cid.to_string();
439 let did = self.did.clone();
···460461 /// Count all blobs for this actor
462 pub async fn blob_count(&self) -> Result<i64> {
463+ use crate::schema::pds::blob::dsl as BlobSchema;
464465 let did = self.did.clone();
466 self.db
···479480 /// Count blobs associated with records
481 pub async fn record_blob_count(&self) -> Result<i64> {
482+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
483484 let did = self.did.clone();
485 self.db
···501 &self,
502 opts: ListMissingBlobsOpts,
503 ) -> Result<Vec<ListMissingBlobsRefRecordBlob>> {
504+ use crate::schema::pds::blob::dsl as BlobSchema;
505+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
506507 let did = self.did.clone();
508 self.db
···563564 /// List all blobs with optional filtering
565 pub async fn list_blobs(&self, opts: ListBlobsOpts) -> Result<Vec<String>> {
566+ use crate::schema::pds::record::dsl as RecordSchema;
567+ use crate::schema::pds::record_blob::dsl as RecordBlobSchema;
568569 let ListBlobsOpts {
570 since,
···617618 /// Get the takedown status of a blob
619 pub async fn get_blob_takedown_status(&self, cid: Cid) -> Result<Option<StatusAttr>> {
620+ use crate::schema::pds::blob::dsl as BlobSchema;
621622 self.db
623 .get()
···653654 /// Update the takedown status of a blob
655 pub async fn update_blob_takedown_status(&self, blob: Cid, takedown: StatusAttr) -> Result<()> {
656+ use crate::schema::pds::blob::dsl as BlobSchema;
657658 let takedown_ref: Option<String> = match takedown.applied {
659 true => takedown.r#ref.map_or_else(|| Some(now()), Some),
+2-2
src/actor_store/mod.rs
···460461 pub async fn destroy(&mut self) -> Result<()> {
462 let did: String = self.did.clone();
463- use rsky_pds::schema::pds::blob::dsl as BlobSchema;
464465 let blob_rows: Vec<String> = self
466 .storage
···499 return Ok(vec![]);
500 }
501 let did: String = self.did.clone();
502- use rsky_pds::schema::pds::record::dsl as RecordSchema;
503504 let cid_strs: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
505 let touched_uri_strs: Vec<String> = touched_uris.iter().map(|t| t.to_string()).collect();
···460461 pub async fn destroy(&mut self) -> Result<()> {
462 let did: String = self.did.clone();
463+ use crate::schema::pds::blob::dsl as BlobSchema;
464465 let blob_rows: Vec<String> = self
466 .storage
···499 return Ok(vec![]);
500 }
501 let did: String = self.did.clone();
502+ use crate::schema::pds::record::dsl as RecordSchema;
503504 let cid_strs: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
505 let touched_uri_strs: Vec<String> = touched_uris.iter().map(|t| t.to_string()).collect();
+2-2
src/actor_store/preference.rs
···36 namespace: Option<String>,
37 scope: AuthScope,
38 ) -> Result<Vec<RefPreferences>> {
39- use rsky_pds::schema::pds::account_pref::dsl as AccountPrefSchema;
4041 let did = self.did.clone();
42 self.db
···99 bail!("Do not have authorization to set preferences.");
100 }
101 // get all current prefs for user and prep new pref rows
102- use rsky_pds::schema::pds::account_pref::dsl as AccountPrefSchema;
103 let all_prefs = AccountPrefSchema::account_pref
104 .filter(AccountPrefSchema::did.eq(&did))
105 .select(AccountPref::as_select())
···36 namespace: Option<String>,
37 scope: AuthScope,
38 ) -> Result<Vec<RefPreferences>> {
39+ use crate::schema::pds::account_pref::dsl as AccountPrefSchema;
4041 let did = self.did.clone();
42 self.db
···99 bail!("Do not have authorization to set preferences.");
100 }
101 // get all current prefs for user and prep new pref rows
102+ use crate::schema::pds::account_pref::dsl as AccountPrefSchema;
103 let all_prefs = AccountPrefSchema::account_pref
104 .filter(AccountPrefSchema::did.eq(&did))
105 .select(AccountPref::as_select())
+17-17
src/actor_store/record.rs
···4344 /// Count the total number of records.
45 pub(crate) async fn record_count(&mut self) -> Result<i64> {
46- use rsky_pds::schema::pds::record::dsl::*;
4748 let other_did = self.did.clone();
49 self.db
···5960 /// List all collections in the repository.
61 pub(crate) async fn list_collections(&self) -> Result<Vec<String>> {
62- use rsky_pds::schema::pds::record::dsl::*;
6364 let other_did = self.did.clone();
65 self.db
···90 rkey_end: Option<String>,
91 include_soft_deleted: Option<bool>,
92 ) -> Result<Vec<RecordsForCollection>> {
93- use rsky_pds::schema::pds::record::dsl as RecordSchema;
94- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
9596 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
97 let mut builder = RecordSchema::record
···149 cid: Option<String>,
150 include_soft_deleted: Option<bool>,
151 ) -> Result<Option<GetRecord>> {
152- use rsky_pds::schema::pds::record::dsl as RecordSchema;
153- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
154155 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
156 let mut builder = RecordSchema::record
···191 cid: Option<String>,
192 include_soft_deleted: Option<bool>,
193 ) -> Result<bool> {
194- use rsky_pds::schema::pds::record::dsl as RecordSchema;
195196 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
197 let mut builder = RecordSchema::record
···219 &self,
220 uri: String,
221 ) -> Result<Option<StatusAttr>> {
222- use rsky_pds::schema::pds::record::dsl as RecordSchema;
223224 let res = self
225 .db
···257258 /// Get the current CID for a record URI.
259 pub(crate) async fn get_current_record_cid(&self, uri: String) -> Result<Option<Cid>> {
260- use rsky_pds::schema::pds::record::dsl as RecordSchema;
261262 let res = self
263 .db
···286 path: String,
287 link_to: String,
288 ) -> Result<Vec<Record>> {
289- use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
290- use rsky_pds::schema::pds::record::dsl as RecordSchema;
291292 let res = self
293 .db
···385 bail!("Expected indexed URI to contain a record key")
386 }
387388- use rsky_pds::schema::pds::record::dsl as RecordSchema;
389390 // Track current version of record
391 let (record, uri) = self
···426 #[tracing::instrument(skip_all)]
427 pub(crate) async fn delete_record(&self, uri: &AtUri) -> Result<()> {
428 tracing::debug!("@LOG DEBUG RecordReader::delete_record, deleting indexed record {uri}");
429- use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
430- use rsky_pds::schema::pds::record::dsl as RecordSchema;
431 let uri = uri.to_string();
432 self.db
433 .get()
···450451 /// Remove backlinks for a URI.
452 pub(crate) async fn remove_backlinks_by_uri(&self, uri: &AtUri) -> Result<()> {
453- use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
454 let uri = uri.to_string();
455 self.db
456 .get()
···470 if backlinks.is_empty() {
471 Ok(())
472 } else {
473- use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
474 self.db
475 .get()
476 .await?
···491 uri: &AtUri,
492 takedown: StatusAttr,
493 ) -> Result<()> {
494- use rsky_pds::schema::pds::record::dsl as RecordSchema;
495496 let takedown_ref: Option<String> = match takedown.applied {
497 true => takedown
···4344 /// Count the total number of records.
45 pub(crate) async fn record_count(&mut self) -> Result<i64> {
46+ use crate::schema::pds::record::dsl::*;
4748 let other_did = self.did.clone();
49 self.db
···5960 /// List all collections in the repository.
61 pub(crate) async fn list_collections(&self) -> Result<Vec<String>> {
62+ use crate::schema::pds::record::dsl::*;
6364 let other_did = self.did.clone();
65 self.db
···90 rkey_end: Option<String>,
91 include_soft_deleted: Option<bool>,
92 ) -> Result<Vec<RecordsForCollection>> {
93+ use crate::schema::pds::record::dsl as RecordSchema;
94+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
9596 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
97 let mut builder = RecordSchema::record
···149 cid: Option<String>,
150 include_soft_deleted: Option<bool>,
151 ) -> Result<Option<GetRecord>> {
152+ use crate::schema::pds::record::dsl as RecordSchema;
153+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
154155 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
156 let mut builder = RecordSchema::record
···191 cid: Option<String>,
192 include_soft_deleted: Option<bool>,
193 ) -> Result<bool> {
194+ use crate::schema::pds::record::dsl as RecordSchema;
195196 let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
197 let mut builder = RecordSchema::record
···219 &self,
220 uri: String,
221 ) -> Result<Option<StatusAttr>> {
222+ use crate::schema::pds::record::dsl as RecordSchema;
223224 let res = self
225 .db
···257258 /// Get the current CID for a record URI.
259 pub(crate) async fn get_current_record_cid(&self, uri: String) -> Result<Option<Cid>> {
260+ use crate::schema::pds::record::dsl as RecordSchema;
261262 let res = self
263 .db
···286 path: String,
287 link_to: String,
288 ) -> Result<Vec<Record>> {
289+ use crate::schema::pds::backlink::dsl as BacklinkSchema;
290+ use crate::schema::pds::record::dsl as RecordSchema;
291292 let res = self
293 .db
···385 bail!("Expected indexed URI to contain a record key")
386 }
387388+ use crate::schema::pds::record::dsl as RecordSchema;
389390 // Track current version of record
391 let (record, uri) = self
···426 #[tracing::instrument(skip_all)]
427 pub(crate) async fn delete_record(&self, uri: &AtUri) -> Result<()> {
428 tracing::debug!("@LOG DEBUG RecordReader::delete_record, deleting indexed record {uri}");
429+ use crate::schema::pds::backlink::dsl as BacklinkSchema;
430+ use crate::schema::pds::record::dsl as RecordSchema;
431 let uri = uri.to_string();
432 self.db
433 .get()
···450451 /// Remove backlinks for a URI.
452 pub(crate) async fn remove_backlinks_by_uri(&self, uri: &AtUri) -> Result<()> {
453+ use crate::schema::pds::backlink::dsl as BacklinkSchema;
454 let uri = uri.to_string();
455 self.db
456 .get()
···470 if backlinks.is_empty() {
471 Ok(())
472 } else {
473+ use crate::schema::pds::backlink::dsl as BacklinkSchema;
474 self.db
475 .get()
476 .await?
···491 uri: &AtUri,
492 takedown: StatusAttr,
493 ) -> Result<()> {
494+ use crate::schema::pds::record::dsl as RecordSchema;
495496 let takedown_ref: Option<String> = match takedown.applied {
497 true => takedown
+10-10
src/actor_store/sql_repo.rs
···53 let cid = *cid;
5455 Box::pin(async move {
56- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
57 let cached = {
58 let cache_guard = self.cache.read().await;
59 cache_guard.get(cid).cloned()
···104 let did: String = self.did.clone();
105106 Box::pin(async move {
107- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
108 let cached = {
109 let mut cache_guard = self.cache.write().await;
110 cache_guard.get_many(cids)?
···202 let did: String = self.did.clone();
203 let bytes_cloned = bytes.clone();
204 Box::pin(async move {
205- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
206207 _ = self
208 .db
···235 let did: String = self.did.clone();
236237 Box::pin(async move {
238- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
239240 let blocks: Vec<RepoBlock> = to_put
241 .map
···277 let now: String = self.now.clone();
278279 Box::pin(async move {
280- use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
281282 let is_create = is_create.unwrap_or(false);
283 if is_create {
···381 let did: String = self.did.clone();
382 let since = since.clone();
383 let cursor = cursor.clone();
384- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
385386 Ok(self
387 .db
···418419 pub async fn count_blocks(&self) -> Result<i64> {
420 let did: String = self.did.clone();
421- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
422423 let res = self
424 .db
···439 /// Proactively cache all blocks from a particular commit (to prevent multiple roundtrips)
440 pub async fn cache_rev(&mut self, rev: String) -> Result<()> {
441 let did: String = self.did.clone();
442- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
443444 let result: Vec<(String, Vec<u8>)> = self
445 .db
···465 return Ok(());
466 }
467 let did: String = self.did.clone();
468- use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
469470 let cid_strings: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
471 _ = self
···483484 pub async fn get_root_detailed(&self) -> Result<CidAndRev> {
485 let did: String = self.did.clone();
486- use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
487488 let res = self
489 .db
···53 let cid = *cid;
5455 Box::pin(async move {
56+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
57 let cached = {
58 let cache_guard = self.cache.read().await;
59 cache_guard.get(cid).cloned()
···104 let did: String = self.did.clone();
105106 Box::pin(async move {
107+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
108 let cached = {
109 let mut cache_guard = self.cache.write().await;
110 cache_guard.get_many(cids)?
···202 let did: String = self.did.clone();
203 let bytes_cloned = bytes.clone();
204 Box::pin(async move {
205+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
206207 _ = self
208 .db
···235 let did: String = self.did.clone();
236237 Box::pin(async move {
238+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
239240 let blocks: Vec<RepoBlock> = to_put
241 .map
···277 let now: String = self.now.clone();
278279 Box::pin(async move {
280+ use crate::schema::pds::repo_root::dsl as RepoRootSchema;
281282 let is_create = is_create.unwrap_or(false);
283 if is_create {
···381 let did: String = self.did.clone();
382 let since = since.clone();
383 let cursor = cursor.clone();
384+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
385386 Ok(self
387 .db
···418419 pub async fn count_blocks(&self) -> Result<i64> {
420 let did: String = self.did.clone();
421+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
422423 let res = self
424 .db
···439 /// Proactively cache all blocks from a particular commit (to prevent multiple roundtrips)
440 pub async fn cache_rev(&mut self, rev: String) -> Result<()> {
441 let did: String = self.did.clone();
442+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
443444 let result: Vec<(String, Vec<u8>)> = self
445 .db
···465 return Ok(());
466 }
467 let did: String = self.did.clone();
468+ use crate::schema::pds::repo_block::dsl as RepoBlockSchema;
469470 let cid_strings: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
471 _ = self
···483484 pub async fn get_root_detailed(&self) -> Result<CidAndRev> {
485 let did: String = self.did.clone();
486+ use crate::schema::pds::repo_root::dsl as RepoRootSchema;
487488 let res = self
489 .db
+2-2
src/auth.rs
···130131 // Extract subject (DID)
132 if let Some(did) = claims.get("sub").and_then(serde_json::Value::as_str) {
133- use rsky_pds::schema::pds::account::dsl as AccountSchema;
134 let did_clone = did.to_owned();
135136 let _did = state
···395396 // Extract subject (DID) from access token
397 if let Some(did) = claims.get("sub").and_then(|v| v.as_str()) {
398- use rsky_pds::schema::pds::account::dsl as AccountSchema;
399400 let did_clone = did.to_owned();
401
···130131 // Extract subject (DID)
132 if let Some(did) = claims.get("sub").and_then(serde_json::Value::as_str) {
133+ use crate::schema::pds::account::dsl as AccountSchema;
134 let did_clone = did.to_owned();
135136 let _did = state
···395396 // Extract subject (DID) from access token
397 if let Some(did) = claims.get("sub").and_then(|v| v.as_str()) {
398+ use crate::schema::pds::account::dsl as AccountSchema;
399400 let did_clone = did.to_owned();
401
···1+//! PDS implementation.
2+mod account_manager;
3+mod actor_endpoints;
4+mod actor_store;
5+mod auth;
6+mod config;
7+mod db;
8+mod did;
9+mod endpoints;
10+pub mod error;
11+mod firehose;
12+mod metrics;
13+mod mmap;
14+mod oauth;
15+mod plc;
16+mod schema;
17+mod service_proxy;
18+#[cfg(test)]
19+mod tests;
20+21+use anyhow::{Context as _, anyhow};
22+use atrium_api::types::string::Did;
23+use atrium_crypto::keypair::{Export as _, Secp256k1Keypair};
24+use auth::AuthenticatedUser;
25+use axum::{
26+ Router,
27+ body::Body,
28+ extract::{FromRef, Request, State},
29+ http::{self, HeaderMap, Response, StatusCode, Uri},
30+ response::IntoResponse,
31+ routing::get,
32+};
33+use azure_core::credentials::TokenCredential;
34+use clap::Parser;
35+use clap_verbosity_flag::{InfoLevel, Verbosity, log::LevelFilter};
36+use config::AppConfig;
37+use db::establish_pool;
38+use deadpool_diesel::sqlite::Pool;
39+use diesel::prelude::*;
40+use diesel_migrations::{EmbeddedMigrations, embed_migrations};
41+pub use error::Error;
42+use figment::{Figment, providers::Format as _};
43+use firehose::FirehoseProducer;
44+use http_cache_reqwest::{CacheMode, HttpCacheOptions, MokaManager};
45+use rand::Rng as _;
46+use serde::{Deserialize, Serialize};
47+use service_proxy::service_proxy;
48+use std::{
49+ net::{IpAddr, Ipv4Addr, SocketAddr},
50+ path::PathBuf,
51+ str::FromStr as _,
52+ sync::Arc,
53+};
54+use tokio::net::TcpListener;
55+use tower_http::{cors::CorsLayer, trace::TraceLayer};
56+use tracing::{info, warn};
57+use uuid::Uuid;
58+59+/// The application user agent. Concatenates the package name and version. e.g. `bluepds/0.0.0`.
60+pub const APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
61+62+/// Embedded migrations
63+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
64+65+/// The application-wide result type.
66+pub type Result<T> = std::result::Result<T, Error>;
67+/// The reqwest client type with middleware.
68+pub type Client = reqwest_middleware::ClientWithMiddleware;
69+/// The Azure credential type.
70+pub type Cred = Arc<dyn TokenCredential>;
71+72+#[expect(
73+ clippy::arbitrary_source_item_ordering,
74+ reason = "serialized data might be structured"
75+)]
76+#[derive(Serialize, Deserialize, Debug, Clone)]
77+/// The key data structure.
78+struct KeyData {
79+ /// Primary signing key for all repo operations.
80+ skey: Vec<u8>,
81+ /// Primary signing (rotation) key for all PLC operations.
82+ rkey: Vec<u8>,
83+}
84+85+// FIXME: We should use P256Keypair instead. SecP256K1 is primarily used for cryptocurrencies,
86+// and the implementations of this algorithm are much more limited as compared to P256.
87+//
88+// Reference: https://soatok.blog/2022/05/19/guidance-for-choosing-an-elliptic-curve-signature-algorithm-in-2022/
89+#[derive(Clone)]
90+/// The signing key for PLC/DID operations.
91+pub struct SigningKey(Arc<Secp256k1Keypair>);
92+#[derive(Clone)]
93+/// The rotation key for PLC operations.
94+pub struct RotationKey(Arc<Secp256k1Keypair>);
95+96+impl std::ops::Deref for SigningKey {
97+ type Target = Secp256k1Keypair;
98+99+ fn deref(&self) -> &Self::Target {
100+ &self.0
101+ }
102+}
103+104+impl SigningKey {
105+ /// Import from a private key.
106+ pub fn import(key: &[u8]) -> Result<Self> {
107+ let key = Secp256k1Keypair::import(key).context("failed to import signing key")?;
108+ Ok(Self(Arc::new(key)))
109+ }
110+}
111+112+impl std::ops::Deref for RotationKey {
113+ type Target = Secp256k1Keypair;
114+115+ fn deref(&self) -> &Self::Target {
116+ &self.0
117+ }
118+}
119+120+#[derive(Parser, Debug, Clone)]
121+/// Command line arguments.
122+pub struct Args {
123+ /// Path to the configuration file
124+ #[arg(short, long, default_value = "default.toml")]
125+ pub config: PathBuf,
126+ /// The verbosity level.
127+ #[command(flatten)]
128+ pub verbosity: Verbosity<InfoLevel>,
129+}
130+131+pub struct ActorPools {
132+ pub repo: Pool,
133+ pub blob: Pool,
134+}
135+136+impl Clone for ActorPools {
137+ fn clone(&self) -> Self {
138+ Self {
139+ repo: self.repo.clone(),
140+ blob: self.blob.clone(),
141+ }
142+ }
143+}
144+145+#[expect(clippy::arbitrary_source_item_ordering, reason = "arbitrary")]
146+#[derive(Clone, FromRef)]
147+pub struct AppState {
148+ /// The application configuration.
149+ pub config: AppConfig,
150+ /// The Azure credential.
151+ pub cred: Cred,
152+ /// The main database connection pool. Used for common PDS data, like invite codes.
153+ pub db: Pool,
154+ /// Actor-specific database connection pools. Hashed by DID.
155+ pub db_actors: std::collections::HashMap<String, ActorPools>,
156+157+ /// The HTTP client with middleware.
158+ pub client: Client,
159+ /// The simple HTTP client.
160+ pub simple_client: reqwest::Client,
161+ /// The firehose producer.
162+ pub firehose: FirehoseProducer,
163+164+ /// The signing key.
165+ pub signing_key: SigningKey,
166+ /// The rotation key.
167+ pub rotation_key: RotationKey,
168+}
169+170+/// The index (/) route.
171+async fn index() -> impl IntoResponse {
172+ r"
173+ __ __
174+ /\ \__ /\ \__
175+ __ \ \ ,_\ _____ _ __ ___\ \ ,_\ ___
176+ /'__'\ \ \ \/ /\ '__'\/\''__\/ __'\ \ \/ / __'\
177+ /\ \L\.\_\ \ \_\ \ \L\ \ \ \//\ \L\ \ \ \_/\ \L\ \
178+ \ \__/.\_\\ \__\\ \ ,__/\ \_\\ \____/\ \__\ \____/
179+ \/__/\/_/ \/__/ \ \ \/ \/_/ \/___/ \/__/\/___/
180+ \ \_\
181+ \/_/
182+183+184+This is an AT Protocol Personal Data Server (aka, an atproto PDS)
185+186+Most API routes are under /xrpc/
187+188+ Code: https://github.com/DrChat/bluepds
189+ Protocol: https://atproto.com
190+ "
191+}
192+193+/// The main application entry point.
194+#[expect(
195+ clippy::cognitive_complexity,
196+ clippy::too_many_lines,
197+ unused_qualifications,
198+ reason = "main function has high complexity"
199+)]
200+pub async fn run() -> anyhow::Result<()> {
201+ let args = Args::parse();
202+203+ // Set up trace logging to console and account for the user-provided verbosity flag.
204+ if args.verbosity.log_level_filter() != LevelFilter::Off {
205+ let lvl = match args.verbosity.log_level_filter() {
206+ LevelFilter::Error => tracing::Level::ERROR,
207+ LevelFilter::Warn => tracing::Level::WARN,
208+ LevelFilter::Info | LevelFilter::Off => tracing::Level::INFO,
209+ LevelFilter::Debug => tracing::Level::DEBUG,
210+ LevelFilter::Trace => tracing::Level::TRACE,
211+ };
212+ tracing_subscriber::fmt().with_max_level(lvl).init();
213+ }
214+215+ if !args.config.exists() {
216+ // Throw up a warning if the config file does not exist.
217+ //
218+ // This is not fatal because users can specify all configuration settings via
219+ // the environment, but the most likely scenario here is that a user accidentally
220+ // omitted the config file for some reason (e.g. forgot to mount it into Docker).
221+ warn!(
222+ "configuration file {} does not exist",
223+ args.config.display()
224+ );
225+ }
226+227+ // Read and parse the user-provided configuration.
228+ let config: AppConfig = Figment::new()
229+ .admerge(figment::providers::Toml::file(args.config))
230+ .admerge(figment::providers::Env::prefixed("BLUEPDS_"))
231+ .extract()
232+ .context("failed to load configuration")?;
233+234+ if config.test {
235+ warn!("BluePDS starting up in TEST mode.");
236+ warn!("This means the application will not federate with the rest of the network.");
237+ warn!(
238+ "If you want to turn this off, either set `test` to false in the config or define `BLUEPDS_TEST = false`"
239+ );
240+ }
241+242+ // Initialize metrics reporting.
243+ metrics::setup(config.metrics.as_ref()).context("failed to set up metrics exporter")?;
244+245+ // Create a reqwest client that will be used for all outbound requests.
246+ let simple_client = reqwest::Client::builder()
247+ .user_agent(APP_USER_AGENT)
248+ .build()
249+ .context("failed to build requester client")?;
250+ let client = reqwest_middleware::ClientBuilder::new(simple_client.clone())
251+ .with(http_cache_reqwest::Cache(http_cache_reqwest::HttpCache {
252+ mode: CacheMode::Default,
253+ manager: MokaManager::default(),
254+ options: HttpCacheOptions::default(),
255+ }))
256+ .build();
257+258+ tokio::fs::create_dir_all(&config.key.parent().context("should have parent")?)
259+ .await
260+ .context("failed to create key directory")?;
261+262+ // Check if crypto keys exist. If not, create new ones.
263+ let (skey, rkey) = if let Ok(f) = std::fs::File::open(&config.key) {
264+ let keys: KeyData = serde_ipld_dagcbor::from_reader(std::io::BufReader::new(f))
265+ .context("failed to deserialize crypto keys")?;
266+267+ let skey = Secp256k1Keypair::import(&keys.skey).context("failed to import signing key")?;
268+ let rkey = Secp256k1Keypair::import(&keys.rkey).context("failed to import rotation key")?;
269+270+ (SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
271+ } else {
272+ info!("signing keys not found, generating new ones");
273+274+ let skey = Secp256k1Keypair::create(&mut rand::thread_rng());
275+ let rkey = Secp256k1Keypair::create(&mut rand::thread_rng());
276+277+ let keys = KeyData {
278+ skey: skey.export(),
279+ rkey: rkey.export(),
280+ };
281+282+ let mut f = std::fs::File::create(&config.key).context("failed to create key file")?;
283+ serde_ipld_dagcbor::to_writer(&mut f, &keys).context("failed to serialize crypto keys")?;
284+285+ (SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
286+ };
287+288+ tokio::fs::create_dir_all(&config.repo.path).await?;
289+ tokio::fs::create_dir_all(&config.plc.path).await?;
290+ tokio::fs::create_dir_all(&config.blob.path).await?;
291+292+ let cred = azure_identity::DefaultAzureCredential::new()
293+ .context("failed to create Azure credential")?;
294+295+ // Create a database connection manager and pool for the main database.
296+ let pool =
297+ establish_pool(&config.db).context("failed to establish database connection pool")?;
298+ // Create a dictionary of database connection pools for each actor.
299+ let mut actor_pools = std::collections::HashMap::new();
300+ // let mut actor_blob_pools = std::collections::HashMap::new();
301+ // We'll determine actors by looking in the data/repo dir for .db files.
302+ let mut actor_dbs = tokio::fs::read_dir(&config.repo.path)
303+ .await
304+ .context("failed to read repo directory")?;
305+ while let Some(entry) = actor_dbs
306+ .next_entry()
307+ .await
308+ .context("failed to read repo dir")?
309+ {
310+ let path = entry.path();
311+ if path.extension().and_then(|s| s.to_str()) == Some("db") {
312+ let did_path = path
313+ .file_stem()
314+ .and_then(|s| s.to_str())
315+ .context("failed to get actor DID")?;
316+ let did = Did::from_str(&format!("did:plc:{}", did_path))
317+ .expect("should be able to parse actor DID");
318+319+ // Create a new database connection manager and pool for the actor.
320+ // The path for the SQLite connection needs to look like "sqlite://data/repo/<actor>.db"
321+ let path_repo = format!("sqlite://{}", did_path);
322+ let actor_repo_pool =
323+ establish_pool(&path_repo).context("failed to create database connection pool")?;
324+ // Create a new database connection manager and pool for the actor blobs.
325+ // The path for the SQLite connection needs to look like "sqlite://data/blob/<actor>.db"
326+ let path_blob = path_repo.replace("repo", "blob");
327+ let actor_blob_pool =
328+ establish_pool(&path_blob).context("failed to create database connection pool")?;
329+ drop(actor_pools.insert(
330+ did.to_string(),
331+ ActorPools {
332+ repo: actor_repo_pool,
333+ blob: actor_blob_pool,
334+ },
335+ ));
336+ }
337+ }
338+ // Apply pending migrations
339+ // let conn = pool.get().await?;
340+ // conn.run_pending_migrations(MIGRATIONS)
341+ // .expect("should be able to run migrations");
342+343+ let (_fh, fhp) = firehose::spawn(client.clone(), config.clone());
344+345+ let addr = config
346+ .listen_address
347+ .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8000));
348+349+ let app = Router::new()
350+ .route("/", get(index))
351+ .merge(oauth::routes())
352+ .nest(
353+ "/xrpc",
354+ endpoints::routes()
355+ .merge(actor_endpoints::routes())
356+ .fallback(service_proxy),
357+ )
358+ // .layer(RateLimitLayer::new(30, Duration::from_secs(30)))
359+ .layer(CorsLayer::permissive())
360+ .layer(TraceLayer::new_for_http())
361+ .with_state(AppState {
362+ cred,
363+ config: config.clone(),
364+ db: pool.clone(),
365+ db_actors: actor_pools.clone(),
366+ client: client.clone(),
367+ simple_client,
368+ firehose: fhp,
369+ signing_key: skey,
370+ rotation_key: rkey,
371+ });
372+373+ info!("listening on {addr}");
374+ info!("connect to: http://127.0.0.1:{}", addr.port());
375+376+ // Determine whether or not this was the first startup (i.e. no accounts exist and no invite codes were created).
377+ // If so, create an invite code and share it via the console.
378+ let conn = pool.get().await.context("failed to get db connection")?;
379+380+ #[derive(QueryableByName)]
381+ struct TotalCount {
382+ #[diesel(sql_type = diesel::sql_types::Integer)]
383+ total_count: i32,
384+ }
385+386+ let result = conn.interact(move |conn| {
387+ diesel::sql_query(
388+ "SELECT (SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites) AS total_count",
389+ )
390+ .get_result::<TotalCount>(conn)
391+ })
392+ .await
393+ .expect("should be able to query database")?;
394+395+ let c = result.total_count;
396+397+ #[expect(clippy::print_stdout)]
398+ if c == 0 {
399+ let uuid = Uuid::new_v4().to_string();
400+401+ let uuid_clone = uuid.clone();
402+ _ = conn
403+ .interact(move |conn| {
404+ diesel::sql_query(
405+ "INSERT INTO invites (id, did, count, created_at) VALUES (?, NULL, 1, datetime('now'))",
406+ )
407+ .bind::<diesel::sql_types::Text, _>(uuid_clone)
408+ .execute(conn)
409+ .context("failed to create new invite code")
410+ .expect("should be able to create invite code")
411+ })
412+ .await
413+ .expect("should be able to create invite code");
414+415+ // N.B: This is a sensitive message, so we're bypassing `tracing` here and
416+ // logging it directly to console.
417+ println!("=====================================");
418+ println!(" FIRST STARTUP ");
419+ println!("=====================================");
420+ println!("Use this code to create an account:");
421+ println!("{uuid}");
422+ println!("=====================================");
423+ }
424+425+ let listener = TcpListener::bind(&addr)
426+ .await
427+ .context("failed to bind address")?;
428+429+ // Serve the app, and request crawling from upstream relays.
430+ let serve = tokio::spawn(async move {
431+ axum::serve(listener, app.into_make_service())
432+ .await
433+ .context("failed to serve app")
434+ });
435+436+ // Now that the app is live, request a crawl from upstream relays.
437+ firehose::reconnect_relays(&client, &config).await;
438+439+ serve
440+ .await
441+ .map_err(Into::into)
442+ .and_then(|r| r)
443+ .context("failed to serve app")
444+}
445+446+/// Creates an app router with the provided AppState.
447+pub fn create_app(state: AppState) -> Router {
448+ Router::new()
449+ .route("/", get(index))
450+ .merge(oauth::routes())
451+ .nest(
452+ "/xrpc",
453+ endpoints::routes()
454+ .merge(actor_endpoints::routes())
455+ .fallback(service_proxy),
456+ )
457+ .layer(CorsLayer::permissive())
458+ .layer(TraceLayer::new_for_http())
459+ .with_state(state)
460+}
+5-444
src/main.rs
···1-//! PDS implementation.
2-mod account_manager;
3-mod actor_endpoints;
4-mod actor_store;
5-mod auth;
6-mod config;
7-mod db;
8-mod did;
9-mod endpoints;
10-mod error;
11-mod firehose;
12-mod metrics;
13-mod mmap;
14-mod oauth;
15-mod plc;
16-mod schema;
17-mod service_proxy;
18-#[cfg(test)]
19-mod tests;
2021-use anyhow::{Context as _, anyhow};
22-use atrium_api::types::string::Did;
23-use atrium_crypto::keypair::{Export as _, Secp256k1Keypair};
24-use auth::AuthenticatedUser;
25-use axum::{
26- Router,
27- body::Body,
28- extract::{FromRef, Request, State},
29- http::{self, HeaderMap, Response, StatusCode, Uri},
30- response::IntoResponse,
31- routing::get,
32-};
33-use azure_core::credentials::TokenCredential;
34use clap::Parser;
35-use clap_verbosity_flag::{InfoLevel, Verbosity, log::LevelFilter};
36-use config::AppConfig;
37-use db::establish_pool;
38-use deadpool_diesel::sqlite::Pool;
39-use diesel::prelude::*;
40-use diesel_migrations::{EmbeddedMigrations, embed_migrations};
41-#[expect(clippy::pub_use, clippy::useless_attribute)]
42-pub use error::Error;
43-use figment::{Figment, providers::Format as _};
44-use firehose::FirehoseProducer;
45-use http_cache_reqwest::{CacheMode, HttpCacheOptions, MokaManager};
46-use rand::Rng as _;
47-use serde::{Deserialize, Serialize};
48-use service_proxy::service_proxy;
49-use std::{
50- net::{IpAddr, Ipv4Addr, SocketAddr},
51- path::PathBuf,
52- str::FromStr as _,
53- sync::Arc,
54-};
55-use tokio::net::TcpListener;
56-use tower_http::{cors::CorsLayer, trace::TraceLayer};
57-use tracing::{info, warn};
58-use uuid::Uuid;
59-60-/// The application user agent. Concatenates the package name and version. e.g. `bluepds/0.0.0`.
61-pub const APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
62-63-/// Embedded migrations
64-pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
65-66-/// The application-wide result type.
67-pub type Result<T> = std::result::Result<T, Error>;
68-/// The reqwest client type with middleware.
69-pub type Client = reqwest_middleware::ClientWithMiddleware;
70-/// The Azure credential type.
71-pub type Cred = Arc<dyn TokenCredential>;
72-73-#[expect(
74- clippy::arbitrary_source_item_ordering,
75- reason = "serialized data might be structured"
76-)]
77-#[derive(Serialize, Deserialize, Debug, Clone)]
78-/// The key data structure.
79-struct KeyData {
80- /// Primary signing key for all repo operations.
81- skey: Vec<u8>,
82- /// Primary signing (rotation) key for all PLC operations.
83- rkey: Vec<u8>,
84-}
85-86-// FIXME: We should use P256Keypair instead. SecP256K1 is primarily used for cryptocurrencies,
87-// and the implementations of this algorithm are much more limited as compared to P256.
88-//
89-// Reference: https://soatok.blog/2022/05/19/guidance-for-choosing-an-elliptic-curve-signature-algorithm-in-2022/
90-#[derive(Clone)]
91-/// The signing key for PLC/DID operations.
92-pub struct SigningKey(Arc<Secp256k1Keypair>);
93-#[derive(Clone)]
94-/// The rotation key for PLC operations.
95-pub struct RotationKey(Arc<Secp256k1Keypair>);
96-97-impl std::ops::Deref for SigningKey {
98- type Target = Secp256k1Keypair;
99-100- fn deref(&self) -> &Self::Target {
101- &self.0
102- }
103-}
104-105-impl SigningKey {
106- /// Import from a private key.
107- pub fn import(key: &[u8]) -> Result<Self> {
108- let key = Secp256k1Keypair::import(key).context("failed to import signing key")?;
109- Ok(Self(Arc::new(key)))
110- }
111-}
112-113-impl std::ops::Deref for RotationKey {
114- type Target = Secp256k1Keypair;
115-116- fn deref(&self) -> &Self::Target {
117- &self.0
118- }
119-}
120-121-#[derive(Parser, Debug, Clone)]
122-/// Command line arguments.
123-struct Args {
124- /// Path to the configuration file
125- #[arg(short, long, default_value = "default.toml")]
126- config: PathBuf,
127- /// The verbosity level.
128- #[command(flatten)]
129- verbosity: Verbosity<InfoLevel>,
130-}
131-132-struct ActorPools {
133- repo: Pool,
134- blob: Pool,
135-}
136-impl Clone for ActorPools {
137- fn clone(&self) -> Self {
138- Self {
139- repo: self.repo.clone(),
140- blob: self.blob.clone(),
141- }
142- }
143-}
144-145-#[expect(clippy::arbitrary_source_item_ordering, reason = "arbitrary")]
146-#[derive(Clone, FromRef)]
147-struct AppState {
148- /// The application configuration.
149- config: AppConfig,
150- /// The Azure credential.
151- cred: Cred,
152- /// The main database connection pool. Used for common PDS data, like invite codes.
153- db: Pool,
154- /// Actor-specific database connection pools. Hashed by DID.
155- db_actors: std::collections::HashMap<String, ActorPools>,
156-157- /// The HTTP client with middleware.
158- client: Client,
159- /// The simple HTTP client.
160- simple_client: reqwest::Client,
161- /// The firehose producer.
162- firehose: FirehoseProducer,
163-164- /// The signing key.
165- signing_key: SigningKey,
166- /// The rotation key.
167- rotation_key: RotationKey,
168-}
169-170-/// The index (/) route.
171-async fn index() -> impl IntoResponse {
172- r"
173- __ __
174- /\ \__ /\ \__
175- __ \ \ ,_\ _____ _ __ ___\ \ ,_\ ___
176- /'__'\ \ \ \/ /\ '__'\/\''__\/ __'\ \ \/ / __'\
177- /\ \L\.\_\ \ \_\ \ \L\ \ \ \//\ \L\ \ \ \_/\ \L\ \
178- \ \__/.\_\\ \__\\ \ ,__/\ \_\\ \____/\ \__\ \____/
179- \/__/\/_/ \/__/ \ \ \/ \/_/ \/___/ \/__/\/___/
180- \ \_\
181- \/_/
182-183-184-This is an AT Protocol Personal Data Server (aka, an atproto PDS)
185-186-Most API routes are under /xrpc/
187-188- Code: https://github.com/DrChat/bluepds
189- Protocol: https://atproto.com
190- "
191-}
192-193-/// The main application entry point.
194-#[expect(
195- clippy::cognitive_complexity,
196- clippy::too_many_lines,
197- unused_qualifications,
198- reason = "main function has high complexity"
199-)]
200-async fn run() -> anyhow::Result<()> {
201- let args = Args::parse();
202-203- // Set up trace logging to console and account for the user-provided verbosity flag.
204- if args.verbosity.log_level_filter() != LevelFilter::Off {
205- let lvl = match args.verbosity.log_level_filter() {
206- LevelFilter::Error => tracing::Level::ERROR,
207- LevelFilter::Warn => tracing::Level::WARN,
208- LevelFilter::Info | LevelFilter::Off => tracing::Level::INFO,
209- LevelFilter::Debug => tracing::Level::DEBUG,
210- LevelFilter::Trace => tracing::Level::TRACE,
211- };
212- tracing_subscriber::fmt().with_max_level(lvl).init();
213- }
214-215- if !args.config.exists() {
216- // Throw up a warning if the config file does not exist.
217- //
218- // This is not fatal because users can specify all configuration settings via
219- // the environment, but the most likely scenario here is that a user accidentally
220- // omitted the config file for some reason (e.g. forgot to mount it into Docker).
221- warn!(
222- "configuration file {} does not exist",
223- args.config.display()
224- );
225- }
226-227- // Read and parse the user-provided configuration.
228- let config: AppConfig = Figment::new()
229- .admerge(figment::providers::Toml::file(args.config))
230- .admerge(figment::providers::Env::prefixed("BLUEPDS_"))
231- .extract()
232- .context("failed to load configuration")?;
233-234- if config.test {
235- warn!("BluePDS starting up in TEST mode.");
236- warn!("This means the application will not federate with the rest of the network.");
237- warn!(
238- "If you want to turn this off, either set `test` to false in the config or define `BLUEPDS_TEST = false`"
239- );
240- }
241-242- // Initialize metrics reporting.
243- metrics::setup(config.metrics.as_ref()).context("failed to set up metrics exporter")?;
244-245- // Create a reqwest client that will be used for all outbound requests.
246- let simple_client = reqwest::Client::builder()
247- .user_agent(APP_USER_AGENT)
248- .build()
249- .context("failed to build requester client")?;
250- let client = reqwest_middleware::ClientBuilder::new(simple_client.clone())
251- .with(http_cache_reqwest::Cache(http_cache_reqwest::HttpCache {
252- mode: CacheMode::Default,
253- manager: MokaManager::default(),
254- options: HttpCacheOptions::default(),
255- }))
256- .build();
257-258- tokio::fs::create_dir_all(&config.key.parent().context("should have parent")?)
259- .await
260- .context("failed to create key directory")?;
261-262- // Check if crypto keys exist. If not, create new ones.
263- let (skey, rkey) = if let Ok(f) = std::fs::File::open(&config.key) {
264- let keys: KeyData = serde_ipld_dagcbor::from_reader(std::io::BufReader::new(f))
265- .context("failed to deserialize crypto keys")?;
266-267- let skey = Secp256k1Keypair::import(&keys.skey).context("failed to import signing key")?;
268- let rkey = Secp256k1Keypair::import(&keys.rkey).context("failed to import rotation key")?;
269-270- (SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
271- } else {
272- info!("signing keys not found, generating new ones");
273-274- let skey = Secp256k1Keypair::create(&mut rand::thread_rng());
275- let rkey = Secp256k1Keypair::create(&mut rand::thread_rng());
276-277- let keys = KeyData {
278- skey: skey.export(),
279- rkey: rkey.export(),
280- };
281-282- let mut f = std::fs::File::create(&config.key).context("failed to create key file")?;
283- serde_ipld_dagcbor::to_writer(&mut f, &keys).context("failed to serialize crypto keys")?;
284-285- (SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
286- };
287-288- tokio::fs::create_dir_all(&config.repo.path).await?;
289- tokio::fs::create_dir_all(&config.plc.path).await?;
290- tokio::fs::create_dir_all(&config.blob.path).await?;
291-292- let cred = azure_identity::DefaultAzureCredential::new()
293- .context("failed to create Azure credential")?;
294-295- // Create a database connection manager and pool for the main database.
296- let pool =
297- establish_pool(&config.db).context("failed to establish database connection pool")?;
298- // Create a dictionary of database connection pools for each actor.
299- let mut actor_pools = std::collections::HashMap::new();
300- // let mut actor_blob_pools = std::collections::HashMap::new();
301- // We'll determine actors by looking in the data/repo dir for .db files.
302- let mut actor_dbs = tokio::fs::read_dir(&config.repo.path)
303- .await
304- .context("failed to read repo directory")?;
305- while let Some(entry) = actor_dbs
306- .next_entry()
307- .await
308- .context("failed to read repo dir")?
309- {
310- let path = entry.path();
311- if path.extension().and_then(|s| s.to_str()) == Some("db") {
312- let did = path
313- .file_stem()
314- .and_then(|s| s.to_str())
315- .context("failed to get actor DID")?;
316- let did = Did::from_str(did).expect("should be able to parse actor DID");
317-318- // Create a new database connection manager and pool for the actor.
319- // The path for the SQLite connection needs to look like "sqlite://data/repo/<actor>.db"
320- let path_repo = format!("sqlite://{}", path.display());
321- let actor_repo_pool =
322- establish_pool(&path_repo).context("failed to create database connection pool")?;
323- // Create a new database connection manager and pool for the actor blobs.
324- // The path for the SQLite connection needs to look like "sqlite://data/blob/<actor>.db"
325- let path_blob = path_repo.replace("repo", "blob");
326- let actor_blob_pool =
327- establish_pool(&path_blob).context("failed to create database connection pool")?;
328- drop(actor_pools.insert(
329- did.to_string(),
330- ActorPools {
331- repo: actor_repo_pool,
332- blob: actor_blob_pool,
333- },
334- ));
335- }
336- }
337- // Apply pending migrations
338- // let conn = pool.get().await?;
339- // conn.run_pending_migrations(MIGRATIONS)
340- // .expect("should be able to run migrations");
341-342- let (_fh, fhp) = firehose::spawn(client.clone(), config.clone());
343-344- let addr = config
345- .listen_address
346- .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8000));
347-348- let app = Router::new()
349- .route("/", get(index))
350- .merge(oauth::routes())
351- .nest(
352- "/xrpc",
353- endpoints::routes()
354- .merge(actor_endpoints::routes())
355- .fallback(service_proxy),
356- )
357- // .layer(RateLimitLayer::new(30, Duration::from_secs(30)))
358- .layer(CorsLayer::permissive())
359- .layer(TraceLayer::new_for_http())
360- .with_state(AppState {
361- cred,
362- config: config.clone(),
363- db: pool.clone(),
364- db_actors: actor_pools.clone(),
365- client: client.clone(),
366- simple_client,
367- firehose: fhp,
368- signing_key: skey,
369- rotation_key: rkey,
370- });
371-372- info!("listening on {addr}");
373- info!("connect to: http://127.0.0.1:{}", addr.port());
374-375- // Determine whether or not this was the first startup (i.e. no accounts exist and no invite codes were created).
376- // If so, create an invite code and share it via the console.
377- let conn = pool.get().await.context("failed to get db connection")?;
378-
379- #[derive(QueryableByName)]
380- struct TotalCount {
381- #[diesel(sql_type = diesel::sql_types::Integer)]
382- total_count: i32,
383- }
384-
385- let result = conn.interact(move |conn| {
386- diesel::sql_query(
387- "SELECT (SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites) AS total_count",
388- )
389- .get_result::<TotalCount>(conn)
390- })
391- .await
392- .expect("should be able to query database")?;
393-
394- let c = result.total_count;
395-
396- #[expect(clippy::print_stdout)]
397- if c == 0 {
398- let uuid = Uuid::new_v4().to_string();
399-
400- let uuid_clone = uuid.clone();
401- _ = conn
402- .interact(move |conn| {
403- diesel::sql_query(
404- "INSERT INTO invites (id, did, count, created_at) VALUES (?, NULL, 1, datetime('now'))",
405- )
406- .bind::<diesel::sql_types::Text, _>(uuid_clone)
407- .execute(conn)
408- .context("failed to create new invite code")
409- .expect("should be able to create invite code")
410- })
411- .await
412- .expect("should be able to create invite code");
413-
414- // N.B: This is a sensitive message, so we're bypassing `tracing` here and
415- // logging it directly to console.
416- println!("=====================================");
417- println!(" FIRST STARTUP ");
418- println!("=====================================");
419- println!("Use this code to create an account:");
420- println!("{uuid}");
421- println!("=====================================");
422- }
423-
424- let listener = TcpListener::bind(&addr)
425- .await
426- .context("failed to bind address")?;
427-
428- // Serve the app, and request crawling from upstream relays.
429- let serve = tokio::spawn(async move {
430- axum::serve(listener, app.into_make_service())
431- .await
432- .context("failed to serve app")
433- });
434-
435- // Now that the app is live, request a crawl from upstream relays.
436- firehose::reconnect_relays(&client, &config).await;
437-
438- serve
439- .await
440- .map_err(Into::into)
441- .and_then(|r| r)
442- .context("failed to serve app")
443-}
444
445 #[tokio::main(flavor = "multi_thread")]
446 async fn main() -> anyhow::Result<()> {
447- // Dispatch out to a separate function without a derive macro to help rust-analyzer along.
448- run().await
449-}
···1+//! BluePDS binary entry point.
2
3+use anyhow::Context as _;
4 use clap::Parser;
5
6 #[tokio::main(flavor = "multi_thread")]
7 async fn main() -> anyhow::Result<()> {
8+ // Parse command line arguments and call into the library's run function
9+ bluepds::run().await.context("failed to run application")
10+}
+2 -2
src/oauth.rs
···577 .expect("Failed to query PAR request");
578
579 // Authenticate the user
580- use rsky_pds::schema::pds::account::dsl as AccountSchema;
581- use rsky_pds::schema::pds::actor::dsl as ActorSchema;
582 let username_clone = username.to_owned();
583 let account = db
584 .get()
···577 .expect("Failed to query PAR request");
578
579 // Authenticate the user
580+ use crate::schema::pds::account::dsl as AccountSchema;
581+ use crate::schema::pds::actor::dsl as ActorSchema;
582 let username_clone = username.to_owned();
583 let account = db
584 .get()