Server tools to backfill, tail, mirror, and verify PLC logs

Replace bare `unwrap()` calls with `expect(...)` carrying descriptive failure messages

+90 -45
+61 -27
src/bin/allegedly.rs
··· 36 36 /// Bulk load into did-method-plc-compatible postgres instead of stdout 37 37 /// 38 38 /// Pass a postgres connection url like "postgresql://localhost:5432" 39 - #[arg(long)] 39 + #[arg(long, env = "ALLEGEDLY_TO_POSTGRES")] 40 40 to_postgres: Option<Url>, 41 41 /// Cert for postgres (if needed) 42 + #[arg(long)] 42 43 postgres_cert: Option<PathBuf>, 43 44 /// Delete all operations from the postgres db before starting 44 45 /// ··· 82 83 #[arg(long, env = "ALLEGEDLY_WRAP_PG")] 83 84 wrap_pg: Url, 84 85 /// path to tls cert for the wrapped postgres db, if needed 86 + #[arg(long, env = "ALLEGEDLY_WRAP_PG_CERT")] 85 87 wrap_pg_cert: Option<PathBuf>, 86 88 /// wrapping server listen address 87 89 #[arg(short, long, env = "ALLEGEDLY_BIND")] ··· 150 152 while let Some(page) = rx.recv().await 151 153 && page.ops.len() > 900 152 154 { 153 - tx.send(page).await.unwrap(); 155 + tx.send(page).await.expect("to be able to forward a page"); 154 156 } 155 157 }); 156 158 fwd ··· 181 183 log::info!("Reading weekly bundles from local folder {dir:?}"); 182 184 backfill(FolderSource(dir), tx, source_workers.unwrap_or(1), until) 183 185 .await 184 - .unwrap(); 186 + .expect("to source bundles from a folder"); 185 187 } else { 186 188 log::info!("Fetching weekly bundles from from {http}"); 187 189 backfill(HttpSource(http), tx, source_workers.unwrap_or(4), until) 188 190 .await 189 - .unwrap(); 191 + .expect("to source bundles from http"); 190 192 } 191 193 }); 192 194 ··· 203 205 let pg_cert = postgres_cert.clone(); 204 206 let bulk_out_write = tokio::task::spawn(async move { 205 207 if let Some(ref url) = to_postgres_url_bulk { 206 - let db = Db::new(url.as_str(), pg_cert).await.unwrap(); 208 + let db = Db::new(url.as_str(), pg_cert) 209 + .await 210 + .expect("to get db for bulk out write"); 207 211 backfill_to_pg(db, postgres_reset, rx, notify_last_at) 208 212 .await 209 - .unwrap(); 213 + .expect("to backfill to pg"); 210 214 } else { 211 - pages_to_stdout(rx, 
notify_last_at).await.unwrap(); 215 + pages_to_stdout(rx, notify_last_at) 216 + .await 217 + .expect("to backfill to stdout"); 212 218 } 213 219 }); 214 220 ··· 216 222 let mut upstream = args.upstream; 217 223 upstream.set_path("/export"); 218 224 // wait until the time for `after` is known 219 - let last_at = rx_last.await.unwrap(); 225 + let last_at = rx_last.await.expect("to get the last log's createdAt"); 220 226 log::info!("beginning catch-up from {last_at:?} while the writer finalizes stuff"); 221 227 let (tx, rx) = mpsc::channel(256); // these are small pages 222 - tokio::task::spawn( 223 - async move { poll_upstream(last_at, upstream, tx).await.unwrap() }, 224 - ); 225 - bulk_out_write.await.unwrap(); 228 + tokio::task::spawn(async move { 229 + poll_upstream(last_at, upstream, tx) 230 + .await 231 + .expect("polling upstream to work") 232 + }); 233 + bulk_out_write.await.expect("to wait for bulk_out_write"); 226 234 log::info!("writing catch-up pages"); 227 235 let full_pages = full_pages(rx); 228 236 if let Some(url) = to_postgres { 229 - let db = Db::new(url.as_str(), postgres_cert).await.unwrap(); 230 - pages_to_pg(db, full_pages).await.unwrap(); 237 + let db = Db::new(url.as_str(), postgres_cert) 238 + .await 239 + .expect("to connect pg for catchup"); 240 + pages_to_pg(db, full_pages) 241 + .await 242 + .expect("to write catch-up pages to pg"); 231 243 } else { 232 - pages_to_stdout(full_pages, None).await.unwrap(); 244 + pages_to_stdout(full_pages, None) 245 + .await 246 + .expect("to write catch-up pages to stdout"); 233 247 } 234 248 } 235 249 } ··· 241 255 let mut url = args.upstream; 242 256 url.set_path("/export"); 243 257 let (tx, rx) = mpsc::channel(32); // read ahead if gzip stalls for some reason 244 - tokio::task::spawn(async move { poll_upstream(Some(after), url, tx).await.unwrap() }); 258 + tokio::task::spawn(async move { 259 + poll_upstream(Some(after), url, tx) 260 + .await 261 + .expect("to poll upstream") 262 + }); 245 263 
log::trace!("ensuring output directory exists"); 246 - std::fs::create_dir_all(&dest).unwrap(); 247 - pages_to_weeks(rx, dest, clobber).await.unwrap(); 264 + std::fs::create_dir_all(&dest).expect("to ensure output dir exists"); 265 + pages_to_weeks(rx, dest, clobber) 266 + .await 267 + .expect("to write bundles to output files"); 248 268 } 249 269 Commands::Mirror { 250 270 wrap, ··· 255 275 acme_cache_path, 256 276 acme_directory_url, 257 277 } => { 258 - let db = Db::new(wrap_pg.as_str(), wrap_pg_cert).await.unwrap(); 278 + let db = Db::new(wrap_pg.as_str(), wrap_pg_cert) 279 + .await 280 + .expect("to connect to pg for mirroring"); 259 281 let latest = db 260 282 .get_latest() 261 283 .await 262 - .unwrap() 284 + .expect("to query for last createdAt") 263 285 .expect("there to be at least one op in the db. did you backfill?"); 264 286 265 287 let (tx, rx) = mpsc::channel(2); ··· 268 290 tokio::task::spawn(async move { 269 291 log::info!("starting poll reader..."); 270 292 url.set_path("/export"); 271 - tokio::task::spawn( 272 - async move { poll_upstream(Some(latest), url, tx).await.unwrap() }, 273 - ); 293 + tokio::task::spawn(async move { 294 + poll_upstream(Some(latest), url, tx) 295 + .await 296 + .expect("to poll upstream for mirror sync") 297 + }); 274 298 }); 275 299 // db writer 276 300 let poll_db = db.clone(); 277 301 tokio::task::spawn(async move { 278 302 log::info!("starting db writer..."); 279 - pages_to_pg(poll_db, rx).await.unwrap(); 303 + pages_to_pg(poll_db, rx) 304 + .await 305 + .expect("to write to pg for mirror"); 280 306 }); 281 307 282 308 let listen_conf = match (bind, acme_domain.is_empty(), acme_cache_path) { ··· 289 315 (_, _, _) => unreachable!(), 290 316 }; 291 317 292 - serve(&args.upstream, wrap, listen_conf).await.unwrap(); 318 + serve(&args.upstream, wrap, listen_conf) 319 + .await 320 + .expect("to be able to serve the mirror proxy app"); 293 321 } 294 322 Commands::Tail { after } => { 295 323 let mut url = args.upstream; 296 
324 url.set_path("/export"); 297 325 let start_at = after.or_else(|| Some(chrono::Utc::now())); 298 326 let (tx, rx) = mpsc::channel(1); 299 - tokio::task::spawn(async move { poll_upstream(start_at, url, tx).await.unwrap() }); 300 - pages_to_stdout(rx, None).await.unwrap(); 327 + tokio::task::spawn(async move { 328 + poll_upstream(start_at, url, tx) 329 + .await 330 + .expect("to poll upstream") 331 + }); 332 + pages_to_stdout(rx, None) 333 + .await 334 + .expect("to write pages to stdout"); 301 335 } 302 336 } 303 337 log::info!("whew, {:?}. goodbye!", t0.elapsed());
+4 -1
src/client.rs
··· 10 10 ); 11 11 12 12 pub static CLIENT: LazyLock<ClientWithMiddleware> = LazyLock::new(|| { 13 - let inner = Client::builder().user_agent(UA).build().unwrap(); 13 + let inner = Client::builder() 14 + .user_agent(UA) 15 + .build() 16 + .expect("reqwest client to build"); 14 17 15 18 let policy = ExponentialBackoff::builder().build_with_max_retries(12); 16 19
+2 -2
src/mirror.rs
··· 192 192 .user_agent(UA) 193 193 .timeout(Duration::from_secs(10)) // fallback 194 194 .build() 195 - .unwrap(); 195 + .expect("reqwest client to build"); 196 196 197 197 let state = State { 198 198 client, ··· 208 208 .with(Cors::new().allow_credentials(false)) 209 209 .with(Compression::new()) 210 210 .with(GovernorMiddleware::new(Quota::per_minute( 211 - 3000.try_into().unwrap(), 211 + 3000.try_into().expect("ratelimit middleware to build"), 212 212 ))) 213 213 .with(CatchPanic::new()) 214 214 .with(Tracing);
+7 -7
src/plc_pg.rs
··· 13 13 }; 14 14 15 15 fn get_tls(cert: PathBuf) -> MakeTlsConnector { 16 - let cert = std::fs::read(cert).unwrap(); 17 - let cert = Certificate::from_pem(&cert).unwrap(); 16 + let cert = std::fs::read(cert).expect("to read cert file"); 17 + let cert = Certificate::from_pem(&cert).expect("to build cert"); 18 18 let connector = TlsConnector::builder() 19 19 .add_root_certificate(cert) 20 20 .build() 21 - .unwrap(); 21 + .expect("to build tls connector"); 22 22 MakeTlsConnector::new(connector) 23 23 } 24 24 ··· 46 46 connection 47 47 .await 48 48 .inspect_err(|e| log::error!("connection ended with error: {e}")) 49 - .unwrap(); 49 + .expect("pg validation connection not to blow up"); 50 50 }); 51 51 (client, task) 52 52 } else { ··· 55 55 connection 56 56 .await 57 57 .inspect_err(|e| log::error!("connection ended with error: {e}")) 58 - .unwrap(); 58 + .expect("pg validation connection not to blow up"); 59 59 }); 60 60 (client, task) 61 61 }; ··· 97 97 connection 98 98 .await 99 99 .inspect_err(|e| log::error!("connection ended with error: {e}")) 100 - .unwrap(); 100 + .expect("pg connection not to blow up"); 101 101 }); 102 102 client 103 103 } else { ··· 109 109 connection 110 110 .await 111 111 .inspect_err(|e| log::error!("connection ended with error: {e}")) 112 - .unwrap(); 112 + .expect("pg connection not to blow up"); 113 113 }); 114 114 client 115 115 };
+1 -1
src/poll.rs
··· 276 276 let page = ExportPage { 277 277 ops: vec![valid_op().to_string()], 278 278 }; 279 - PageBoundaryState::new(&page).unwrap() 279 + PageBoundaryState::new(&page).expect("to have a base page boundary state") 280 280 } 281 281 282 282 #[test]
+15 -7
src/ratelimit.rs
··· 24 24 let period = quota.replenish_interval() / factor; 25 25 let burst = quota 26 26 .burst_size() 27 - .checked_mul(factor.try_into().unwrap()) 28 - .unwrap(); 27 + .checked_mul(factor.try_into().expect("factor to be non-zero")) 28 + .expect("burst to be able to multiply"); 29 29 Quota::with_period(period).map(|q| q.allow_burst(burst)) 30 30 } 31 31 ··· 40 40 pub fn new(quota: Quota) -> Self { 41 41 Self { 42 42 per_ip: RateLimiter::keyed(quota), 43 - ip6_56: RateLimiter::keyed(scale_quota(quota, 8).unwrap()), 44 - ip6_48: RateLimiter::keyed(scale_quota(quota, 256).unwrap()), 43 + ip6_56: RateLimiter::keyed(scale_quota(quota, 8).expect("to scale quota")), 44 + ip6_48: RateLimiter::keyed(scale_quota(quota, 256).expect("to scale quota")), 45 45 } 46 46 } 47 47 pub fn check_key(&self, ip: IpAddr) -> Result<(), Duration> { ··· 56 56 .map_err(asdf); 57 57 let check_56 = self 58 58 .ip6_56 59 - .check_key(a.octets()[..7].try_into().unwrap()) 59 + .check_key( 60 + a.octets()[..7] 61 + .try_into() 62 + .expect("to check ip6 /56 limiter"), 63 + ) 60 64 .map_err(asdf); 61 65 let check_48 = self 62 66 .ip6_48 63 - .check_key(a.octets()[..6].try_into().unwrap()) 67 + .check_key( 68 + a.octets()[..6] 69 + .try_into() 70 + .expect("to check ip6 /48 limiter"), 71 + ) 64 72 .map_err(asdf); 65 73 check_ip.and(check_56).and(check_48) 66 74 } ··· 135 143 let remote = req 136 144 .remote_addr() 137 145 .as_socket_addr() 138 - .unwrap_or_else(|| panic!("failed to get request's remote addr")) // TODO 146 + .expect("failed to get request's remote addr") // TODO 139 147 .ip(); 140 148 141 149 log::trace!("remote: {remote}");