//! Crawler notification: debounced `com.atproto.sync.requestCrawl` POSTs to configured crawler/relay URLs.
1use crate::circuit_breaker::CircuitBreaker; 2use crate::sync::firehose::SequencedEvent; 3use reqwest::Client; 4use std::sync::Arc; 5use std::sync::atomic::{AtomicU64, Ordering}; 6use std::time::Duration; 7use tokio::sync::{broadcast, watch}; 8use tracing::{debug, error, info, warn}; 9 10const NOTIFY_THRESHOLD_SECS: u64 = 20 * 60; 11 12pub struct Crawlers { 13 hostname: String, 14 crawler_urls: Vec<String>, 15 http_client: Client, 16 last_notified: AtomicU64, 17 circuit_breaker: Option<Arc<CircuitBreaker>>, 18} 19 20impl Crawlers { 21 pub fn new(hostname: String, crawler_urls: Vec<String>) -> Self { 22 Self { 23 hostname, 24 crawler_urls, 25 http_client: Client::builder() 26 .timeout(Duration::from_secs(30)) 27 .build() 28 .unwrap_or_default(), 29 last_notified: AtomicU64::new(0), 30 circuit_breaker: None, 31 } 32 } 33 34 pub fn with_circuit_breaker(mut self, circuit_breaker: Arc<CircuitBreaker>) -> Self { 35 self.circuit_breaker = Some(circuit_breaker); 36 self 37 } 38 39 pub fn from_env() -> Option<Self> { 40 let hostname = std::env::var("PDS_HOSTNAME").ok()?; 41 42 let crawler_urls: Vec<String> = std::env::var("CRAWLERS") 43 .unwrap_or_default() 44 .split(',') 45 .filter(|s| !s.is_empty()) 46 .map(|s| s.trim().to_string()) 47 .collect(); 48 49 if crawler_urls.is_empty() { 50 return None; 51 } 52 53 Some(Self::new(hostname, crawler_urls)) 54 } 55 56 fn should_notify(&self) -> bool { 57 let now = std::time::SystemTime::now() 58 .duration_since(std::time::UNIX_EPOCH) 59 .unwrap_or_default() 60 .as_secs(); 61 62 let last = self.last_notified.load(Ordering::Relaxed); 63 now - last >= NOTIFY_THRESHOLD_SECS 64 } 65 66 fn mark_notified(&self) { 67 let now = std::time::SystemTime::now() 68 .duration_since(std::time::UNIX_EPOCH) 69 .unwrap_or_default() 70 .as_secs(); 71 72 self.last_notified.store(now, Ordering::Relaxed); 73 } 74 75 pub async fn notify_of_update(&self) { 76 if !self.should_notify() { 77 debug!("Skipping crawler notification due to debounce"); 78 return; 
79 } 80 81 if let Some(cb) = &self.circuit_breaker 82 && !cb.can_execute().await { 83 debug!("Skipping crawler notification due to circuit breaker open"); 84 return; 85 } 86 87 self.mark_notified(); 88 let circuit_breaker = self.circuit_breaker.clone(); 89 90 for crawler_url in &self.crawler_urls { 91 let url = format!( 92 "{}/xrpc/com.atproto.sync.requestCrawl", 93 crawler_url.trim_end_matches('/') 94 ); 95 let hostname = self.hostname.clone(); 96 let client = self.http_client.clone(); 97 let cb = circuit_breaker.clone(); 98 99 tokio::spawn(async move { 100 match client 101 .post(&url) 102 .json(&serde_json::json!({ "hostname": hostname })) 103 .send() 104 .await 105 { 106 Ok(response) => { 107 if response.status().is_success() { 108 debug!(crawler = %url, "Successfully notified crawler"); 109 if let Some(cb) = cb { 110 cb.record_success().await; 111 } 112 } else { 113 let status = response.status(); 114 let body = response.text().await.unwrap_or_default(); 115 warn!( 116 crawler = %url, 117 status = %status, 118 body = %body, 119 hostname = %hostname, 120 "Crawler notification returned non-success status" 121 ); 122 if let Some(cb) = cb { 123 cb.record_failure().await; 124 } 125 } 126 } 127 Err(e) => { 128 warn!(crawler = %url, error = %e, "Failed to notify crawler"); 129 if let Some(cb) = cb { 130 cb.record_failure().await; 131 } 132 } 133 } 134 }); 135 } 136 } 137} 138 139pub async fn start_crawlers_service( 140 crawlers: Arc<Crawlers>, 141 mut firehose_rx: broadcast::Receiver<SequencedEvent>, 142 mut shutdown: watch::Receiver<bool>, 143) { 144 info!( 145 hostname = %crawlers.hostname, 146 crawler_count = crawlers.crawler_urls.len(), 147 crawlers = ?crawlers.crawler_urls, 148 "Starting crawlers notification service" 149 ); 150 151 loop { 152 tokio::select! 
{ 153 result = firehose_rx.recv() => { 154 match result { 155 Ok(event) => { 156 if event.event_type == "commit" { 157 crawlers.notify_of_update().await; 158 } 159 } 160 Err(broadcast::error::RecvError::Lagged(n)) => { 161 warn!(skipped = n, "Crawlers service lagged behind firehose"); 162 crawlers.notify_of_update().await; 163 } 164 Err(broadcast::error::RecvError::Closed) => { 165 error!("Firehose channel closed, stopping crawlers service"); 166 break; 167 } 168 } 169 } 170 _ = shutdown.changed() => { 171 if *shutdown.borrow() { 172 info!("Crawlers service shutting down"); 173 break; 174 } 175 } 176 } 177 } 178}