···1414jacquard = "0.9.5"
1515jacquard-axum = "0.9.6"
1616jacquard-common = "0.9.5"
1717-jacquard-identity = "0.9.5"
1717+jacquard-identity = { version = "0.9.5", features = ["dns", "cache"] }
1818jacquard-api = "0.9.5"
1919apalis = { version = "0.7.3", features = ["retry", "limit", "layers", ] }
2020# Manually brought in migrations because I was fighting them
+84-26
shared/src/jobs/scheduled_back_up_start.rs
···11use crate::jobs::account_backup::AccountBackupJobContext;
22use crate::jobs::{account_backup, pds_backup, push_job_json};
33use apalis::prelude::{Data, Error};
44-use jacquard::client::{AgentSessionExt, BasicClient};
55-use jacquard::url;
66-use jacquard_api::app_bsky::contact::match_and_contact_index_state::members::r#match;
77-use jacquard_api::com_atproto::sync::get_repo_status::{
88- GetRepoStatus, GetRepoStatusError, GetRepoStatusOutput,
99-};
1010-use jacquard_common::CowStr;
1111-use jacquard_common::error::XrpcResult;
1212-use jacquard_common::xrpc::{XrpcClient, XrpcError};
1313-use lexicon_types_crate::com_pdsmoover::backup::get_repo_status::GetRepoStatusRequest;
44+use jacquard::{client::BasicClient, identity::PublicResolver, prelude::IdentityResolver, url};
55+use jacquard_api::com_atproto::sync::get_repo_status::GetRepoStatus;
66+use jacquard_common::{types::did::Did, types::tid::Tid, xrpc::XrpcClient};
147use serde::{Deserialize, Serialize};
158use sqlx::{Pool, Postgres};
169use std::sync::Arc;
2013#[derive(Debug, Deserialize, Serialize, Clone)]
/// Empty job-context marker for the scheduled backup-start job; carries no
/// data — it only identifies the job type when (de)serialized by the queue.
2114pub struct ScheduledBackUpStartJobContext;
22151616+/// Finds the new PDS and saves it in the database if the user has changed PDSs since signing up
1717+async fn find_new_pds_host(pool: &Pool<Postgres>, did: String) -> Option<String> {
1818+ let resolver = PublicResolver::default();
1919+ let did_string = did.clone();
2020+ let did = match Did::new(did.as_str()) {
2121+ Ok(did) => did,
2222+ Err(err) => {
2323+ log::error!("Failed to parse did {did} : {err}");
2424+ return None;
2525+ }
2626+ };
2727+2828+ let did_doc = match resolver.resolve_did_doc_owned(&did).await {
2929+ Ok(did_doc) => did_doc,
3030+ Err(err) => {
3131+ log::error!("Failed to resolve did doc for {did} : {err}");
3232+ return None;
3333+ }
3434+ };
3535+3636+ match did_doc.pds_endpoint() {
3737+ None => None,
3838+ Some(url) => match url.host_str() {
3939+ None => {
4040+ log::error!("Failed to parse pds_endpoint for {did} : {url}");
4141+ return None;
4242+ }
4343+ Some(host) => {
4444+ //I may of copied and pasted this to get it going <.< who needs dry in all this snow
4545+ let result = sqlx::query(
4646+ "UPDATE accounts SET pds_host = $2 WHERE did = $1 AND pds_host <> $2",
4747+ )
4848+ .bind(did_string)
4949+ .bind(host)
5050+ .execute(pool)
5151+ .await;
5252+ if let Err(e) = result {
5353+ log::error!("Failed to update pds_host for {did} : {e}");
5454+ }
5555+ // Ok(result.rows_affected())
5656+ Some(host.to_string())
5757+ }
5858+ },
5959+ }
6060+}
6161+2362/// This scheduled job finds:
2463/// - accounts that have not been backed up in the last 6 hours and have pds_sign_up = false,
2564/// and enqueues AccountBackup jobs for them;
···5392 //TODO maybe check here?
5493 let agent = BasicClient::unauthenticated();
5594 for (did, pds_host) in accounts {
9595+ //This is a holder we can change the PDS if we need to
9696+ let mut pds_host_to_call = pds_host.clone();
9797+5698 //Need to do matches here cause I don't want to fail the whole loop
5799 let pds_url = match url::Url::parse(&format!("https://{}", pds_host)) {
58100 Ok(url) => url,
···75117 })?,
76118 };
771197878- let rev = match agent.send(request).await {
120120+ let rev: Option<Tid> = match agent.send(request).await {
79121 Ok(response) => match response.parse() {
80122 Ok(output) => {
123123+ log::info!("{output:?}");
81124 if output.active {
8282- output.rev;
8383- }
8484- match output.status {
8585- None => {
8686- log::warn!(
8787- "{did} has no status and not active. Not sure if this happens"
8888- );
8989- continue;
9090- }
9191- Some(status) => {
9292- if status == "deactivated" {
9393- //TODO do that call to update the PDS for the user
9494- } else {
9595- log::warn!("{did} has the repo status {status}.");
125125+ output.rev
126126+ } else {
127127+ match output.status {
128128+ None => {
129129+ log::warn!(
130130+ "{did} has no status and not active. Not sure if this happens"
131131+ );
96132 continue;
97133 }
134134+ Some(status) => {
135135+ if status == "deactivated" {
136136+ log::info!(
137137+ "{did} has been deactivated on this PDS. Let's see if they moved."
138138+ );
139139+ match find_new_pds_host(&pool, did.clone()).await {
140140+ None => continue,
141141+ Some(new_host) => {
142142+ log::info!(
143143+ "{did} has moved to {new_host}. Database updated."
144144+ );
145145+ pds_host_to_call = new_host;
146146+ }
147147+ }
148148+ //As long as it's successful, we can just send along None for the sub job to pick up the new rev on the new PDS
149149+ None
150150+ } else {
151151+ //TODO will most likely set accounts to deactivated as i collect more data here
152152+ log::warn!("{did} has the repo status {status}.");
153153+ continue;
154154+ }
155155+ }
98156 }
99157 }
100158 }
···113171114172 let ctx = AccountBackupJobContext {
115173 did,
116116- pds_host,
174174+ pds_host: pds_host_to_call,
117175 rev: rev_string,
118176 };
119177