QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
//! Lexicon resolution with caching support.
//!
//! This module provides implementations for resolving AT Protocol lexicons (NSIDs)
//! to their schemas with various caching strategies.

// The Redis-backed resolver is an implementation detail kept in a private
// submodule; only its factory functions are re-exported as the public API.
mod redis;

pub use redis::{create_redis_lexicon_resolver, create_redis_lexicon_resolver_with_ttl};
···1+//! Redis-backed caching lexicon resolver.
2+//!
3+//! This module provides a lexicon resolver that caches resolution results in Redis
4+//! with configurable expiration times. Redis caching provides persistence across
5+//! service restarts and allows sharing of cached results across multiple instances.
6+7+use crate::metrics::SharedMetricsPublisher;
8+use async_trait::async_trait;
9+use atproto_lexicon::resolve::LexiconResolver;
10+use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
11+use metrohash::MetroHash64;
12+use std::hash::Hasher as _;
13+use std::sync::Arc;
/// Redis-backed caching lexicon resolver.
///
/// This resolver caches lexicon resolution results in Redis with a configurable TTL.
/// Results are stored as JSON bytes to minimize storage overhead while maintaining
/// the schema structure.
///
/// # Features
///
/// - Persistent caching across service restarts
/// - Shared cache across multiple service instances
/// - Configurable TTL (default: 90 days)
/// - JSON storage format for lexicon schemas
/// - Graceful fallback if Redis is unavailable
///
/// # Example
///
/// ```no_run
/// use std::sync::Arc;
/// use deadpool_redis::Pool;
/// use atproto_lexicon::resolve::LexiconResolver;
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
/// use quickdid::metrics::NoOpMetricsPublisher;
///
/// # async fn example() {
/// # let inner_resolver: Arc<dyn LexiconResolver> = todo!();
/// # let redis_pool: Pool = todo!();
/// # let metrics = Arc::new(NoOpMetricsPublisher);
/// // Create with default 90-day TTL
/// let resolver = create_redis_lexicon_resolver(
///     inner_resolver,
///     redis_pool,
///     metrics
/// );
/// # }
/// ```
pub(super) struct RedisLexiconResolver {
    /// Base lexicon resolver that performs the actual (uncached) resolution on a cache miss.
    inner: Arc<dyn LexiconResolver>,
    /// Redis connection pool used for cache reads and writes.
    pool: RedisPool,
    /// Redis key prefix for lexicon resolution cache entries; the hashed NSID is appended.
    key_prefix: String,
    /// TTL for cache entries in seconds.
    ttl_seconds: u64,
    /// Metrics publisher for cache hit/miss/error telemetry.
    metrics: SharedMetricsPublisher,
}
62+63+impl RedisLexiconResolver {
64+ /// Create a new Redis-backed lexicon resolver with default 90-day TTL.
65+ fn new(
66+ inner: Arc<dyn LexiconResolver>,
67+ pool: RedisPool,
68+ metrics: SharedMetricsPublisher,
69+ ) -> Self {
70+ Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
71+ }
72+73+ /// Create a new Redis-backed lexicon resolver with custom TTL.
74+ fn with_ttl(
75+ inner: Arc<dyn LexiconResolver>,
76+ pool: RedisPool,
77+ ttl_seconds: u64,
78+ metrics: SharedMetricsPublisher,
79+ ) -> Self {
80+ Self::with_full_config(inner, pool, "lexicon:".to_string(), ttl_seconds, metrics)
81+ }
82+83+ /// Create a new Redis-backed lexicon resolver with full configuration.
84+ fn with_full_config(
85+ inner: Arc<dyn LexiconResolver>,
86+ pool: RedisPool,
87+ key_prefix: String,
88+ ttl_seconds: u64,
89+ metrics: SharedMetricsPublisher,
90+ ) -> Self {
91+ Self {
92+ inner,
93+ pool,
94+ key_prefix,
95+ ttl_seconds,
96+ metrics,
97+ }
98+ }
99+100+ /// Generate the Redis key for an NSID.
101+ ///
102+ /// Uses MetroHash64 to generate a consistent hash of the NSID
103+ /// for use as the Redis key. This provides better key distribution
104+ /// and avoids issues with special characters in NSIDs.
105+ fn make_key(&self, nsid: &str) -> String {
106+ let mut h = MetroHash64::default();
107+ h.write(nsid.as_bytes());
108+ format!("{}{}", self.key_prefix, h.finish())
109+ }
110+111+ /// Get the TTL in seconds.
112+ fn ttl_seconds(&self) -> u64 {
113+ self.ttl_seconds
114+ }
115+}
116+117+#[async_trait]
118+impl LexiconResolver for RedisLexiconResolver {
119+ async fn resolve(&self, nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
120+ let key = self.make_key(nsid);
121+122+ // Try to get from Redis cache first
123+ match self.pool.get().await {
124+ Ok(mut conn) => {
125+ // Check if the key exists in Redis (stored as JSON bytes)
126+ let cached: Option<Vec<u8>> = match conn.get(&key).await {
127+ Ok(value) => value,
128+ Err(e) => {
129+ self.metrics.incr("lexicon_resolver.redis.get_error").await;
130+ tracing::warn!("Failed to get NSID from Redis cache: {}", e);
131+ None
132+ }
133+ };
134+135+ if let Some(cached_bytes) = cached {
136+ // Deserialize the cached JSON
137+ match serde_json::from_slice::<serde_json::Value>(&cached_bytes) {
138+ Ok(cached_value) => {
139+ tracing::debug!("Cache hit for NSID {}", nsid);
140+ self.metrics.incr("lexicon_resolver.redis.cache_hit").await;
141+ return Ok(cached_value);
142+ }
143+ Err(e) => {
144+ tracing::warn!(
145+ "Failed to deserialize cached lexicon for NSID {}: {}",
146+ nsid,
147+ e
148+ );
149+ self.metrics
150+ .incr("lexicon_resolver.redis.deserialize_error")
151+ .await;
152+ // Fall through to re-resolve if deserialization fails
153+ }
154+ }
155+ }
156+157+ // Not in cache, resolve through inner resolver
158+ tracing::debug!("Cache miss for NSID {}, resolving...", nsid);
159+ self.metrics.incr("lexicon_resolver.redis.cache_miss").await;
160+ let result = self.inner.resolve(nsid).await;
161+162+ // Cache successful result
163+ if let Ok(ref schema) = result {
164+ // Serialize to JSON bytes
165+ match serde_json::to_vec(schema) {
166+ Ok(bytes) => {
167+ // Set with expiration (ignore errors to not fail the resolution)
168+ if let Err(e) = conn
169+ .set_ex::<_, _, ()>(&key, bytes, self.ttl_seconds())
170+ .await
171+ {
172+ tracing::warn!(
173+ "Failed to cache lexicon resolution in Redis: {}",
174+ e
175+ );
176+ self.metrics
177+ .incr("lexicon_resolver.redis.cache_set_error")
178+ .await;
179+ } else {
180+ tracing::debug!("Cached lexicon for NSID {}", nsid);
181+ self.metrics.incr("lexicon_resolver.redis.cache_set").await;
182+ }
183+ }
184+ Err(e) => {
185+ tracing::warn!(
186+ "Failed to serialize lexicon result for NSID {}: {}",
187+ nsid,
188+ e
189+ );
190+ self.metrics
191+ .incr("lexicon_resolver.redis.serialize_error")
192+ .await;
193+ }
194+ }
195+ }
196+197+ result
198+ }
199+ Err(e) => {
200+ // Redis connection failed, fall back to inner resolver
201+ tracing::warn!(
202+ "Failed to get Redis connection, falling back to uncached resolution: {}",
203+ e
204+ );
205+ self.metrics
206+ .incr("lexicon_resolver.redis.connection_error")
207+ .await;
208+ self.inner.resolve(nsid).await
209+ }
210+ }
211+ }
212+}
213+214+/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
215+///
216+/// # Arguments
217+///
218+/// * `inner` - The underlying resolver to use for actual resolution
219+/// * `pool` - Redis connection pool
220+/// * `metrics` - Metrics publisher for telemetry
221+///
222+/// # Example
223+///
224+/// ```no_run
225+/// use std::sync::Arc;
226+/// use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
227+/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
228+/// use quickdid::cache::create_redis_pool;
229+/// use quickdid::metrics::NoOpMetricsPublisher;
230+///
231+/// # async fn example() -> anyhow::Result<()> {
232+/// # use atproto_identity::resolve::HickoryDnsResolver;
233+/// # use reqwest::Client;
234+/// # let dns_resolver = HickoryDnsResolver::create_resolver(&[]);
235+/// # let http_client = Client::new();
236+/// # let metrics = Arc::new(NoOpMetricsPublisher);
237+/// let base: Arc<dyn LexiconResolver> = Arc::new(
238+/// DefaultLexiconResolver::new(http_client, dns_resolver)
239+/// );
240+///
241+/// let pool = create_redis_pool("redis://localhost:6379")?;
242+/// let resolver = create_redis_lexicon_resolver(base, pool, metrics);
243+/// let schema = resolver.resolve("app.bsky.feed.post").await.unwrap();
244+/// # Ok(())
245+/// # }
246+/// ```
247+pub fn create_redis_lexicon_resolver(
248+ inner: Arc<dyn LexiconResolver>,
249+ pool: RedisPool,
250+ metrics: SharedMetricsPublisher,
251+) -> Arc<dyn LexiconResolver> {
252+ Arc::new(RedisLexiconResolver::new(inner, pool, metrics))
253+}
254+255+/// Create a new Redis-backed lexicon resolver with custom TTL.
256+///
257+/// # Arguments
258+///
259+/// * `inner` - The underlying resolver to use for actual resolution
260+/// * `pool` - Redis connection pool
261+/// * `ttl_seconds` - TTL for cache entries in seconds
262+/// * `metrics` - Metrics publisher for telemetry
263+pub fn create_redis_lexicon_resolver_with_ttl(
264+ inner: Arc<dyn LexiconResolver>,
265+ pool: RedisPool,
266+ ttl_seconds: u64,
267+ metrics: SharedMetricsPublisher,
268+) -> Arc<dyn LexiconResolver> {
269+ Arc::new(RedisLexiconResolver::with_ttl(
270+ inner,
271+ pool,
272+ ttl_seconds,
273+ metrics,
274+ ))
275+}
#[cfg(test)]
mod tests {
    use super::*;

    /// Mock lexicon resolver that either returns a fixed schema or a fixed error.
    #[derive(Clone)]
    struct MockLexiconResolver {
        /// When true, `resolve` always fails.
        should_fail: bool,
        /// Schema returned on success.
        expected_schema: serde_json::Value,
    }

    #[async_trait]
    impl LexiconResolver for MockLexiconResolver {
        async fn resolve(&self, _nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
            if self.should_fail {
                Err(anyhow::anyhow!("Mock resolution failure"))
            } else {
                Ok(self.expected_schema.clone())
            }
        }
    }

    /// Build a unique key prefix per test so concurrent test runs (and runs
    /// against a shared Redis) never collide on keys.
    fn unique_prefix() -> String {
        format!(
            "test:lexicon:{}:",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_nanos()
        )
    }

    /// Mirror `RedisLexiconResolver::make_key` so tests can inspect and clean
    /// up the exact Redis keys the resolver writes.
    fn cache_key(prefix: &str, nsid: &str) -> String {
        let mut h = MetroHash64::default();
        h.write(nsid.as_bytes());
        format!("{}{}", prefix, h.finish())
    }

    #[tokio::test]
    async fn test_redis_lexicon_resolver_cache_hit() {
        // Skip silently when no test Redis is available.
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        // Sample schema the mock resolver will hand back.
        let schema = serde_json::json!({
            "lexicon": 1,
            "id": "app.bsky.feed.post",
            "defs": {
                "main": {
                    "type": "record",
                    "description": "A post record"
                }
            }
        });

        let mock_resolver = Arc::new(MockLexiconResolver {
            should_fail: false,
            expected_schema: schema.clone(),
        });

        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);

        let test_prefix = unique_prefix();
        let redis_resolver = RedisLexiconResolver::with_full_config(
            mock_resolver,
            pool.clone(),
            test_prefix.clone(),
            3600,
            metrics,
        );

        let test_nsid = "app.bsky.feed.post";

        // First resolution - should call the inner resolver.
        let result1 = redis_resolver.resolve(test_nsid).await.unwrap();
        assert_eq!(result1, schema);

        // Second resolution - should hit the cache and return the same value.
        let result2 = redis_resolver.resolve(test_nsid).await.unwrap();
        assert_eq!(result2, schema);

        // Clean up test data.
        if let Ok(mut conn) = pool.get().await {
            let _: Result<(), _> = conn.del(cache_key(&test_prefix, test_nsid)).await;
        }
    }

    #[tokio::test]
    async fn test_redis_lexicon_resolver_cache_miss() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        let schema = serde_json::json!({
            "lexicon": 1,
            "id": "com.example.test",
        });

        let mock_resolver = Arc::new(MockLexiconResolver {
            should_fail: false,
            expected_schema: schema.clone(),
        });

        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);

        let test_prefix = unique_prefix();
        let redis_resolver = RedisLexiconResolver::with_full_config(
            mock_resolver,
            pool.clone(),
            test_prefix.clone(),
            3600,
            metrics,
        );

        let test_nsid = "com.example.test";
        let key = cache_key(&test_prefix, test_nsid);

        // Ensure the key doesn't exist so the first resolve is a true miss.
        if let Ok(mut conn) = pool.get().await {
            let _: Result<(), _> = conn.del(&key).await;
        }

        // Resolution should succeed and cache the result.
        let result = redis_resolver.resolve(test_nsid).await.unwrap();
        assert_eq!(result, schema);

        // Verify the result was cached, then clean up.
        if let Ok(mut conn) = pool.get().await {
            let exists: bool = conn.exists(&key).await.unwrap();
            assert!(exists, "Result should be cached");

            let _: Result<(), _> = conn.del(key).await;
        }
    }

    #[tokio::test]
    async fn test_redis_lexicon_resolver_error_handling() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        // Mock resolver that always fails; the wrapper must propagate the error.
        let mock_resolver = Arc::new(MockLexiconResolver {
            should_fail: true,
            expected_schema: serde_json::Value::Null,
        });

        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);

        let redis_resolver = RedisLexiconResolver::with_full_config(
            mock_resolver,
            pool.clone(),
            unique_prefix(),
            3600,
            metrics,
        );

        // Resolution should fail (and, per the resolver's contract, failures
        // are never cached).
        let result = redis_resolver.resolve("com.example.nonexistent").await;
        assert!(result.is_err());
    }
}
+1
src/lib.rs
···3pub mod handle_resolver; // Only traits and factory functions exposed
4pub mod http; // Only create_router exposed
5pub mod jetstream_handler; // Jetstream event handler for AT Protocol events
067// Semi-public modules - needed by binary but with limited exposure
8pub mod cache; // Only create_redis_pool exposed
···3pub mod handle_resolver; // Only traits and factory functions exposed
4pub mod http; // Only create_router exposed
5pub mod jetstream_handler; // Jetstream event handler for AT Protocol events
6+pub mod lexicon_resolver; // Lexicon resolution with caching support
78// Semi-public modules - needed by binary but with limited exposure
9pub mod cache; // Only create_redis_pool exposed