Tracks lexicons and how many times they appeared on the Jetstream.

refactor(server): implement our own rate counter, which should be faster; use quanta for timekeeping

ptr.pet 6b78fe2d 32529fe8

verified
+225 -73
+35 -43
server/Cargo.lock
··· 18 18 checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" 19 19 20 20 [[package]] 21 - name = "ahash" 22 - version = "0.8.12" 23 - source = "registry+https://github.com/rust-lang/crates.io-index" 24 - checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" 25 - dependencies = [ 26 - "cfg-if", 27 - "getrandom 0.3.3", 28 - "once_cell", 29 - "version_check", 30 - "zerocopy", 31 - ] 32 - 33 - [[package]] 34 21 name = "aho-corasick" 35 22 version = "1.1.3" 36 23 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1062 1049 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 1063 1050 1064 1051 [[package]] 1065 - name = "pingora-limits" 1066 - version = "0.5.0" 1067 - source = "registry+https://github.com/rust-lang/crates.io-index" 1068 - checksum = "a719a8cb5558ca06bd6076c97b8905d500ea556da89e132ba53d4272844f95b9" 1069 - dependencies = [ 1070 - "ahash", 1071 - ] 1072 - 1073 - [[package]] 1074 1052 name = "pkg-config" 1075 1053 version = "0.3.32" 1076 1054 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1112 1090 ] 1113 1091 1114 1092 [[package]] 1093 + name = "quanta" 1094 + version = "0.12.6" 1095 + source = "registry+https://github.com/rust-lang/crates.io-index" 1096 + checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" 1097 + dependencies = [ 1098 + "crossbeam-utils", 1099 + "libc", 1100 + "once_cell", 1101 + "raw-cpuid", 1102 + "wasi 0.11.1+wasi-snapshot-preview1", 1103 + "web-sys", 1104 + "winapi", 1105 + ] 1106 + 1107 + [[package]] 1115 1108 name = "quick_cache" 1116 1109 version = "0.6.15" 1117 1110 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1146 1139 ] 1147 1140 1148 1141 [[package]] 1142 + name = "raw-cpuid" 1143 + version = "11.5.0" 1144 + source = "registry+https://github.com/rust-lang/crates.io-index" 1145 + checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" 1146 + 
dependencies = [ 1147 + "bitflags", 1148 + ] 1149 + 1150 + [[package]] 1149 1151 name = "redox_syscall" 1150 1152 version = "0.5.15" 1151 1153 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1496 1498 "fjall", 1497 1499 "futures-util", 1498 1500 "ordered-varint", 1499 - "pingora-limits", 1501 + "quanta", 1500 1502 "rkyv", 1501 1503 "rustls", 1502 1504 "scc", ··· 2033 2035 ] 2034 2036 2035 2037 [[package]] 2038 + name = "web-sys" 2039 + version = "0.3.77" 2040 + source = "registry+https://github.com/rust-lang/crates.io-index" 2041 + checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" 2042 + dependencies = [ 2043 + "js-sys", 2044 + "wasm-bindgen", 2045 + ] 2046 + 2047 + [[package]] 2036 2048 name = "webpki-root-certs" 2037 2049 version = "1.0.1" 2038 2050 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 2307 2319 version = "0.8.15" 2308 2320 source = "registry+https://github.com/rust-lang/crates.io-index" 2309 2321 checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" 2310 - 2311 - [[package]] 2312 - name = "zerocopy" 2313 - version = "0.8.26" 2314 - source = "registry+https://github.com/rust-lang/crates.io-index" 2315 - checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" 2316 - dependencies = [ 2317 - "zerocopy-derive", 2318 - ] 2319 - 2320 - [[package]] 2321 - name = "zerocopy-derive" 2322 - version = "0.8.26" 2323 - source = "registry+https://github.com/rust-lang/crates.io-index" 2324 - checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" 2325 - dependencies = [ 2326 - "proc-macro2", 2327 - "quote", 2328 - "syn", 2329 - ] 2330 2322 2331 2323 [[package]] 2332 2324 name = "zeroize"
+1 -1
server/Cargo.toml
··· 15 15 futures-util = "0.3" 16 16 axum = { version = "0.8", default-features = false, features = ["http1", "tokio", "tracing", "json", "query"] } 17 17 axum-tws = { git = "https://github.com/90-008/axum-tws.git", features = ["http2"] } 18 - pingora-limits = "0.5" 19 18 tower-http = {version = "0.6", features = ["request-id", "trace", "compression-full"]} 20 19 fjall = { version = "2", default-features = false, features = ["miniz", "lz4"] } 21 20 rkyv = {version = "0.8", features = ["unaligned"]} ··· 27 26 ordered-varint = "2.0.0" 28 27 threadpool = "1.8.1" 29 28 snmalloc-rs = "0.3.8" 29 + quanta = "0.12.6"
+21 -7
server/src/api.rs
··· 2 2 collections::HashMap, 3 3 fmt::Display, 4 4 net::SocketAddr, 5 - ops::Deref, 5 + ops::{Bound, Deref, RangeBounds}, 6 6 sync::Arc, 7 - time::{Duration, UNIX_EPOCH}, 7 + time::Duration, 8 8 }; 9 9 10 10 use anyhow::anyhow; ··· 30 30 use crate::{ 31 31 db::Db, 32 32 error::{AppError, AppResult}, 33 - utils::time_now, 34 33 }; 35 34 36 35 struct LatencyMillis(u128); ··· 155 154 156 155 const MAX_HITS: usize = 100_000; 157 156 157 + #[derive(Debug)] 158 + struct HitsRange { 159 + from: Bound<u64>, 160 + to: Bound<u64>, 161 + } 162 + 163 + impl RangeBounds<u64> for HitsRange { 164 + fn start_bound(&self) -> Bound<&u64> { 165 + self.from.as_ref() 166 + } 167 + 168 + fn end_bound(&self) -> Bound<&u64> { 169 + self.to.as_ref() 170 + } 171 + } 172 + 158 173 async fn hits( 159 174 State(db): State<Arc<Db>>, 160 175 Query(params): Query<HitsQuery>, 161 176 ) -> AppResult<Json<Vec<Hit>>> { 177 + let from = params.to.map(Bound::Included).unwrap_or(Bound::Unbounded); 178 + let to = params.from.map(Bound::Included).unwrap_or(Bound::Unbounded); 162 179 let maybe_hits = db 163 - .get_hits( 164 - &params.nsid, 165 - params.to.unwrap_or(0)..params.from.unwrap_or(time_now()), 166 - ) 180 + .get_hits(&params.nsid, HitsRange { from, to }) 167 181 .take(MAX_HITS); 168 182 let mut hits = Vec::with_capacity(maybe_hits.size_hint().0); 169 183
+10 -15
server/src/db/mod.rs
··· 21 21 db::block::{ReadVariableExt, WriteVariableExt}, 22 22 error::{AppError, AppResult}, 23 23 jetstream::JetstreamEvent, 24 - utils::time_now, 24 + utils::{DefaultRateTracker, RateTracker}, 25 25 }; 26 26 27 27 mod block; ··· 76 76 pub struct LexiconHandle { 77 77 tree: Partition, 78 78 buf: Arc<scc::Queue<EventRecord>>, 79 + // this is stored here since scc::Queue does not have O(1) length 79 80 buf_len: AtomicUsize, // relaxed 80 81 last_insert: AtomicInstant, // relaxed 81 - block_size: AtomicUsize, // relaxed 82 - eps: Rate, 82 + eps: DefaultRateTracker, 83 83 } 84 84 85 85 impl LexiconHandle { ··· 90 90 buf: Default::default(), 91 91 buf_len: AtomicUsize::new(0), 92 92 last_insert: AtomicInstant::now(), 93 - eps: Rate::new(Duration::from_secs(5)), 94 - block_size: AtomicUsize::new(1000), 93 + eps: RateTracker::new(Duration::from_secs(10)), 95 94 } 96 95 } 97 96 ··· 104 103 } 105 104 106 105 fn suggested_block_size(&self) -> usize { 107 - self.block_size.load(AtomicOrdering::Relaxed) 106 + self.eps.rate() as usize * 60 108 107 } 109 108 110 109 fn insert(&self, event: EventRecord) { ··· 112 111 self.buf_len.fetch_add(1, AtomicOrdering::Relaxed); 113 112 self.last_insert 114 113 .store(Instant::now(), AtomicOrdering::Relaxed); 115 - self.eps.observe(&(), 1); 116 - let rate = self.eps.rate(&()) as usize; 117 - if rate != 0 { 118 - self.block_size.store(rate * 60, AtomicOrdering::Relaxed); 119 - } 114 + self.eps.observe(); 120 115 } 121 116 122 117 fn sync(&self, max_block_size: usize) -> AppResult<usize> { ··· 165 160 hits: scc::HashIndex<SmolStr, Arc<LexiconHandle>>, 166 161 syncpool: threadpool::ThreadPool, 167 162 event_broadcaster: broadcast::Sender<(SmolStr, NsidCounts)>, 168 - eps: Rate, 163 + eps: RateTracker<100>, 169 164 shutting_down: AtomicBool, 170 165 min_block_size: usize, 171 166 max_block_size: usize, ··· 187 182 )?, 188 183 inner: ks, 189 184 event_broadcaster: broadcast::channel(1000).0, 190 - eps: Rate::new(Duration::from_secs(1)), 185 + 
eps: RateTracker::new(Duration::from_secs(1)), 191 186 shutting_down: AtomicBool::new(false), 192 187 min_block_size: 512, 193 188 max_block_size: 500_000, ··· 244 239 245 240 #[inline(always)] 246 241 pub fn eps(&self) -> usize { 247 - self.eps.rate(&()) as usize 242 + self.eps.rate() as usize 248 243 } 249 244 250 245 #[inline(always)] ··· 308 303 if self.event_broadcaster.receiver_count() > 0 { 309 304 let _ = self.event_broadcaster.send((SmolStr::new(&nsid), counts)); 310 305 } 311 - self.eps.observe(&(), 1); 306 + self.eps.observe(); 312 307 Ok(()) 313 308 } 314 309
+1 -1
server/src/main.rs
··· 92 92 } 93 93 }); 94 94 95 - std::thread::spawn({ 95 + let sync_thread = std::thread::spawn({ 96 96 let db = db.clone(); 97 97 move || { 98 98 loop {
+157 -6
server/src/utils.rs
··· 1 - use std::time::UNIX_EPOCH; 1 + use std::sync::atomic::{AtomicU64, Ordering}; 2 + use std::time::Duration; 3 + 4 + pub static CLOCK: std::sync::LazyLock<quanta::Clock> = 5 + std::sync::LazyLock::new(|| quanta::Clock::new()); 6 + 7 + /// simple thread-safe rate tracker using time buckets 8 + /// divides time into fixed buckets and rotates through them 9 + #[derive(Debug)] 10 + pub struct RateTracker<const BUCKET_WINDOW: u64> { 11 + buckets: Vec<AtomicU64>, 12 + bucket_duration_nanos: u64, 13 + window_duration: Duration, 14 + last_bucket_time: AtomicU64, 15 + start_time: u64, // raw time when tracker was created 16 + } 17 + 18 + pub type DefaultRateTracker = RateTracker<1000>; 19 + 20 + impl<const BUCKET_WINDOW: u64> RateTracker<BUCKET_WINDOW> { 21 + /// create a new rate tracker with the specified time window 22 + pub fn new(window_duration: Duration) -> Self { 23 + let bucket_duration_nanos = Duration::from_millis(BUCKET_WINDOW).as_nanos() as u64; 24 + let num_buckets = 25 + (window_duration.as_nanos() as u64 / bucket_duration_nanos).max(1) as usize; 26 + 27 + let mut buckets = Vec::with_capacity(num_buckets); 28 + for _ in 0..num_buckets { 29 + buckets.push(AtomicU64::new(0)); 30 + } 31 + 32 + let start_time = CLOCK.raw(); 33 + Self { 34 + buckets, 35 + bucket_duration_nanos, 36 + window_duration, 37 + last_bucket_time: AtomicU64::new(0), 38 + start_time, 39 + } 40 + } 41 + 42 + /// record an event 43 + pub fn observe(&self) { 44 + let now = CLOCK.raw(); 45 + self.maybe_advance_buckets(now); 46 + 47 + let bucket_index = self.get_current_bucket_index(now); 48 + self.buckets[bucket_index].fetch_add(1, Ordering::Relaxed); 49 + } 50 + 51 + /// get the current rate in events per second 52 + pub fn rate(&self) -> f64 { 53 + let now = CLOCK.raw(); 54 + self.maybe_advance_buckets(now); 55 + 56 + let total_events: u64 = self 57 + .buckets 58 + .iter() 59 + .map(|bucket| bucket.load(Ordering::Relaxed)) 60 + .sum(); 61 + 62 + total_events as f64 / 
self.window_duration.as_secs_f64() 63 + } 64 + 65 + fn get_current_bucket_index(&self, now: u64) -> usize { 66 + let elapsed_nanos = CLOCK.delta_as_nanos(self.start_time, now); 67 + let bucket_number = elapsed_nanos / self.bucket_duration_nanos; 68 + (bucket_number as usize) % self.buckets.len() 69 + } 70 + 71 + fn maybe_advance_buckets(&self, now: u64) { 72 + let elapsed_nanos = CLOCK.delta_as_nanos(self.start_time, now); 73 + let current_bucket_time = 74 + (elapsed_nanos / self.bucket_duration_nanos) * self.bucket_duration_nanos; 75 + let last_bucket_time = self.last_bucket_time.load(Ordering::Relaxed); 76 + 77 + if current_bucket_time > last_bucket_time { 78 + // try to update the last bucket time 79 + if self 80 + .last_bucket_time 81 + .compare_exchange_weak( 82 + last_bucket_time, 83 + current_bucket_time, 84 + Ordering::Relaxed, 85 + Ordering::Relaxed, 86 + ) 87 + .is_ok() 88 + { 89 + // clear buckets that are now too old 90 + let buckets_to_advance = ((current_bucket_time - last_bucket_time) 91 + / self.bucket_duration_nanos) 92 + .min(self.buckets.len() as u64); 93 + 94 + for i in 0..buckets_to_advance { 95 + let bucket_time = last_bucket_time + (i + 1) * self.bucket_duration_nanos; 96 + let bucket_index = 97 + (bucket_time / self.bucket_duration_nanos) as usize % self.buckets.len(); 98 + self.buckets[bucket_index].store(0, Ordering::Relaxed); 99 + } 100 + } 101 + } 102 + } 103 + } 104 + 105 + #[cfg(test)] 106 + mod tests { 107 + use super::*; 108 + use std::sync::Arc; 109 + use std::thread; 110 + 111 + #[test] 112 + fn test_rate_tracker_basic() { 113 + let tracker = DefaultRateTracker::new(Duration::from_secs(2)); 114 + 115 + // record some events 116 + tracker.observe(); 117 + tracker.observe(); 118 + tracker.observe(); 119 + 120 + let rate = tracker.rate(); 121 + assert_eq!(rate, 1.5); // 3 events over 2 seconds = 1.5 events/sec 122 + } 123 + 124 + #[test] 125 + fn test_rate_tracker_burst() { 126 + let tracker = 
DefaultRateTracker::new(Duration::from_secs(1)); 2 127 3 - pub fn time_now() -> u64 { 4 - std::time::SystemTime::now() 5 - .duration_since(UNIX_EPOCH) 6 - .expect("oops") 7 - .as_micros() as u64 128 + // record a lot of events 129 + for _ in 0..1000 { 130 + tracker.observe(); 131 + } 132 + 133 + let rate = tracker.rate(); 134 + assert_eq!(rate, 1000.0); // 1000 events in 1 second 135 + } 136 + 137 + #[test] 138 + fn test_rate_tracker_threading() { 139 + let tracker = Arc::new(DefaultRateTracker::new(Duration::from_secs(1))); 140 + let mut handles = vec![]; 141 + 142 + for _ in 0..4 { 143 + let tracker_clone = Arc::clone(&tracker); 144 + let handle = thread::spawn(move || { 145 + for _ in 0..10 { 146 + tracker_clone.observe(); 147 + } 148 + }); 149 + handles.push(handle); 150 + } 151 + 152 + for handle in handles { 153 + handle.join().unwrap(); 154 + } 155 + 156 + let rate = tracker.rate(); 157 + assert_eq!(rate, 40.0); // 40 events in 1 second 158 + } 8 159 }