QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
···1111//! - [`BaseHandleResolver`]: Core resolver that performs actual DNS/HTTP lookups
1212//! - [`CachingHandleResolver`]: In-memory caching wrapper with configurable TTL
1313//! - [`RedisHandleResolver`]: Redis-backed persistent caching with binary serialization
1414+//! - [`SqliteHandleResolver`]: SQLite-backed persistent caching for single-instance deployments
1415//!
1516//! # Example Usage
1617//!
···4344mod errors;
4445mod memory;
4546mod redis;
4747+mod sqlite;
4648mod traits;
47494850// Re-export public API
···5355pub use base::create_base_resolver;
5456pub use memory::create_caching_resolver;
5557pub use redis::{create_redis_resolver, create_redis_resolver_with_ttl};
5858+pub use sqlite::{create_sqlite_resolver, create_sqlite_resolver_with_ttl};
+468
src/handle_resolver/sqlite.rs
···11+//! SQLite-backed caching handle resolver.
22+//!
33+//! This module provides a handle resolver that caches resolution results in SQLite
44+//! with configurable expiration times. SQLite caching provides persistence across
55+//! service restarts while remaining lightweight for single-instance deployments.
66+77+use super::errors::HandleResolverError;
88+use super::traits::HandleResolver;
99+use crate::handle_resolution_result::HandleResolutionResult;
1010+use async_trait::async_trait;
1111+use metrohash::MetroHash64;
1212+use sqlx::{Row, SqlitePool};
1313+use std::hash::Hasher as _;
1414+use std::sync::Arc;
1515+use std::time::{SystemTime, UNIX_EPOCH};
1616+1717+/// SQLite-backed caching handle resolver.
1818+///
1919+/// This resolver caches handle resolution results in SQLite with a configurable TTL.
2020+/// Results are stored in a compact binary format using bincode serialization
2121+/// to minimize storage overhead.
2222+///
2323+/// # Features
2424+///
2525+/// - Persistent caching across service restarts
2626+/// - Lightweight single-file database
2727+/// - Configurable TTL (default: 90 days)
2828+/// - Compact binary storage format
2929+/// - Automatic schema management
3030+/// - Graceful fallback if SQLite is unavailable
3131+///
3232+/// # Example
3333+///
3434+/// ```no_run
3535+/// use std::sync::Arc;
3636+/// use sqlx::SqlitePool;
3737+/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
3838+///
3939+/// # async fn example() {
4040+/// # use atproto_identity::resolve::HickoryDnsResolver;
4141+/// # use reqwest::Client;
4242+/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
4343+/// # let http_client = Client::new();
4444+/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
4545+/// # let sqlite_pool: SqlitePool = todo!();
4646+/// // Create with default 90-day TTL
4747+/// let resolver = create_sqlite_resolver(
4848+/// base_resolver,
4949+/// sqlite_pool
5050+/// );
5151+/// # }
5252+/// ```
pub(super) struct SqliteHandleResolver {
    /// Base handle resolver that performs the actual resolution on cache miss.
    inner: Arc<dyn HandleResolver>,
    /// SQLite connection pool used for reading and writing cache rows.
    pool: SqlitePool,
    /// TTL for cache entries in seconds; rows older than this are re-resolved.
    ttl_seconds: u64,
}
6161+6262+impl SqliteHandleResolver {
6363+ /// Create a new SQLite-backed handle resolver with default 90-day TTL.
6464+ fn new(inner: Arc<dyn HandleResolver>, pool: SqlitePool) -> Self {
6565+ Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
6666+ }
6767+6868+ /// Create a new SQLite-backed handle resolver with custom TTL.
6969+ fn with_ttl(inner: Arc<dyn HandleResolver>, pool: SqlitePool, ttl_seconds: u64) -> Self {
7070+ Self {
7171+ inner,
7272+ pool,
7373+ ttl_seconds,
7474+ }
7575+ }
7676+7777+ /// Generate the cache key for a handle.
7878+ ///
7979+ /// Uses MetroHash64 to generate a consistent hash of the handle
8080+ /// for use as the primary key. This provides better key distribution
8181+ /// and avoids issues with special characters in handles.
8282+ fn make_key(&self, handle: &str) -> u64 {
8383+ let mut h = MetroHash64::default();
8484+ h.write(handle.as_bytes());
8585+ h.finish()
8686+ }
8787+8888+ /// Check if a cache entry is expired.
8989+ fn is_expired(&self, updated_timestamp: i64) -> bool {
9090+ let current_timestamp = SystemTime::now()
9191+ .duration_since(UNIX_EPOCH)
9292+ .unwrap_or_default()
9393+ .as_secs() as i64;
9494+9595+ (current_timestamp - updated_timestamp) > (self.ttl_seconds as i64)
9696+ }
9797+}
9898+9999+#[async_trait]
100100+impl HandleResolver for SqliteHandleResolver {
101101+ async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
102102+ let handle = s.to_string();
103103+ let key = self.make_key(&handle) as i64; // SQLite uses signed integers
104104+105105+ // Try to get from SQLite cache first
106106+ let cached_result = sqlx::query(
107107+ "SELECT result, updated FROM handle_resolution_cache WHERE key = ?1"
108108+ )
109109+ .bind(key)
110110+ .fetch_optional(&self.pool)
111111+ .await;
112112+113113+ match cached_result {
114114+ Ok(Some(row)) => {
115115+ let cached_bytes: Vec<u8> = row.get("result");
116116+ let updated_timestamp: i64 = row.get("updated");
117117+118118+ // Check if the entry is expired
119119+ if !self.is_expired(updated_timestamp) {
120120+ // Deserialize the cached result
121121+ match HandleResolutionResult::from_bytes(&cached_bytes) {
122122+ Ok(cached_result) => {
123123+ if let Some(did) = cached_result.to_did() {
124124+ tracing::debug!("Cache hit for handle {}: {}", handle, did);
125125+ return Ok(did);
126126+ } else {
127127+ tracing::debug!("Cache hit (not resolved) for handle {}", handle);
128128+ return Err(HandleResolverError::HandleNotFound);
129129+ }
130130+ }
131131+ Err(e) => {
132132+ tracing::warn!(
133133+ "Failed to deserialize cached result for handle {}: {}",
134134+ handle,
135135+ e
136136+ );
137137+ // Fall through to re-resolve if deserialization fails
138138+ }
139139+ }
140140+ } else {
141141+ tracing::debug!("Cache entry expired for handle {}", handle);
142142+ // Entry is expired, we'll re-resolve and update it
143143+ }
144144+ }
145145+ Ok(None) => {
146146+ tracing::debug!("Cache miss for handle {}, resolving...", handle);
147147+ }
148148+ Err(e) => {
149149+ tracing::warn!("Failed to query SQLite cache for handle {}: {}", handle, e);
150150+ // Fall through to resolve without caching on database error
151151+ }
152152+ }
153153+154154+ // Not in cache or expired, resolve through inner resolver
155155+ let result = self.inner.resolve(s).await;
156156+157157+ // Create and serialize resolution result
158158+ let resolution_result = match &result {
159159+ Ok(did) => {
160160+ tracing::debug!(
161161+ "Caching successful resolution for handle {}: {}",
162162+ handle,
163163+ did
164164+ );
165165+ match HandleResolutionResult::success(did) {
166166+ Ok(res) => res,
167167+ Err(e) => {
168168+ tracing::warn!("Failed to create resolution result: {}", e);
169169+ return result;
170170+ }
171171+ }
172172+ }
173173+ Err(e) => {
174174+ tracing::debug!("Caching failed resolution for handle {}: {}", handle, e);
175175+ match HandleResolutionResult::not_resolved() {
176176+ Ok(res) => res,
177177+ Err(err) => {
178178+ tracing::warn!("Failed to create not_resolved result: {}", err);
179179+ return result;
180180+ }
181181+ }
182182+ }
183183+ };
184184+185185+ // Serialize to bytes
186186+ match resolution_result.to_bytes() {
187187+ Ok(bytes) => {
188188+ let current_timestamp = SystemTime::now()
189189+ .duration_since(UNIX_EPOCH)
190190+ .unwrap_or_default()
191191+ .as_secs() as i64;
192192+193193+ // Insert or update the cache entry
194194+ let query_result = sqlx::query(
195195+ r#"
196196+ INSERT INTO handle_resolution_cache (key, result, created, updated)
197197+ VALUES (?1, ?2, ?3, ?4)
198198+ ON CONFLICT(key) DO UPDATE SET
199199+ result = excluded.result,
200200+ updated = excluded.updated
201201+ "#
202202+ )
203203+ .bind(key)
204204+ .bind(&bytes)
205205+ .bind(current_timestamp)
206206+ .bind(current_timestamp)
207207+ .execute(&self.pool)
208208+ .await;
209209+210210+ if let Err(e) = query_result {
211211+ tracing::warn!("Failed to cache handle resolution in SQLite: {}", e);
212212+ }
213213+ }
214214+ Err(e) => {
215215+ tracing::warn!(
216216+ "Failed to serialize resolution result for handle {}: {}",
217217+ handle,
218218+ e
219219+ );
220220+ }
221221+ }
222222+223223+ result
224224+ }
225225+}
226226+227227+/// Create a new SQLite-backed handle resolver with default 90-day TTL.
228228+///
229229+/// # Arguments
230230+///
231231+/// * `inner` - The underlying resolver to use for actual resolution
232232+/// * `pool` - SQLite connection pool
233233+///
234234+/// # Example
235235+///
236236+/// ```no_run
237237+/// use std::sync::Arc;
238238+/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
239239+/// use quickdid::sqlite_schema::create_sqlite_pool;
240240+///
241241+/// # async fn example() -> anyhow::Result<()> {
242242+/// # use atproto_identity::resolve::HickoryDnsResolver;
243243+/// # use reqwest::Client;
244244+/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
245245+/// # let http_client = Client::new();
246246+/// let base = create_base_resolver(
247247+/// dns_resolver,
248248+/// http_client,
249249+/// );
250250+///
251251+/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
252252+/// let resolver = create_sqlite_resolver(base, pool);
253253+/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
254254+/// # Ok(())
255255+/// # }
256256+/// ```
257257+pub fn create_sqlite_resolver(
258258+ inner: Arc<dyn HandleResolver>,
259259+ pool: SqlitePool,
260260+) -> Arc<dyn HandleResolver> {
261261+ Arc::new(SqliteHandleResolver::new(inner, pool))
262262+}
263263+264264+/// Create a new SQLite-backed handle resolver with custom TTL.
265265+///
266266+/// # Arguments
267267+///
268268+/// * `inner` - The underlying resolver to use for actual resolution
269269+/// * `pool` - SQLite connection pool
270270+/// * `ttl_seconds` - TTL for cache entries in seconds
271271+pub fn create_sqlite_resolver_with_ttl(
272272+ inner: Arc<dyn HandleResolver>,
273273+ pool: SqlitePool,
274274+ ttl_seconds: u64,
275275+) -> Arc<dyn HandleResolver> {
276276+ Arc::new(SqliteHandleResolver::with_ttl(inner, pool, ttl_seconds))
277277+}
/// Tests for [`SqliteHandleResolver`]: verify that successful and failed
/// resolutions are written to the cache exactly once and that subsequent
/// lookups are served from the cache without re-querying the inner resolver.
#[cfg(test)]
mod tests {
    use super::*;

    // Mock handle resolver for testing
    #[derive(Clone)]
    struct MockHandleResolver {
        should_fail: bool,
        expected_did: String,
    }

    #[async_trait]
    impl HandleResolver for MockHandleResolver {
        async fn resolve(&self, _handle: &str) -> Result<String, HandleResolverError> {
            if self.should_fail {
                Err(HandleResolverError::MockResolutionFailure)
            } else {
                Ok(self.expected_did.clone())
            }
        }
    }

    #[tokio::test]
    async fn test_sqlite_handle_resolver_cache_hit() {
        // Create in-memory SQLite database for testing
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // Create mock resolver
        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: false,
            expected_did: "did:plc:testuser123".to_string(),
        });

        // Create SQLite-backed resolver
        let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);

        let test_handle = "alice.bsky.social";
        let expected_key = sqlite_resolver.make_key(test_handle) as i64;

        // Verify database is empty initially
        let initial_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query initial count");
        assert_eq!(initial_count, 0);

        // First resolution - should call inner resolver and cache the result
        let result1 = sqlite_resolver.resolve(test_handle).await.unwrap();
        assert_eq!(result1, "did:plc:testuser123");

        // Verify record was inserted
        let count_after_first: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query count after first resolution");
        assert_eq!(count_after_first, 1);

        // Verify the cached record has correct key and non-empty result
        let cached_record = sqlx::query("SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1")
            .bind(expected_key)
            .fetch_one(&pool)
            .await
            .expect("Failed to fetch cached record");

        let cached_key: i64 = cached_record.get("key");
        let cached_result: Vec<u8> = cached_record.get("result");
        let cached_created: i64 = cached_record.get("created");
        let cached_updated: i64 = cached_record.get("updated");

        assert_eq!(cached_key, expected_key);
        assert!(!cached_result.is_empty(), "Cached result should not be empty");
        assert!(cached_created > 0, "Created timestamp should be positive");
        assert!(cached_updated > 0, "Updated timestamp should be positive");
        assert_eq!(cached_created, cached_updated, "Created and updated should be equal on first insert");

        // Verify we can deserialize the cached result
        let resolution_result = crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
            .expect("Failed to deserialize cached result");
        let cached_did = resolution_result.to_did().expect("Should have a DID");
        assert_eq!(cached_did, "did:plc:testuser123");

        // Second resolution - should hit cache (no additional database insert)
        let result2 = sqlite_resolver.resolve(test_handle).await.unwrap();
        assert_eq!(result2, "did:plc:testuser123");

        // Verify count hasn't changed (cache hit, no new insert)
        let count_after_second: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query count after second resolution");
        assert_eq!(count_after_second, 1);
    }

    #[tokio::test]
    async fn test_sqlite_handle_resolver_cache_error() {
        // Create in-memory SQLite database for testing
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // Create mock resolver that fails
        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: true,
            expected_did: String::new(),
        });

        // Create SQLite-backed resolver
        let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);

        let test_handle = "error.bsky.social";
        let expected_key = sqlite_resolver.make_key(test_handle) as i64;

        // Verify database is empty initially
        let initial_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query initial count");
        assert_eq!(initial_count, 0);

        // First resolution - should fail and cache the failure
        let result1 = sqlite_resolver.resolve(test_handle).await;
        assert!(result1.is_err());

        // Match the specific error type we expect
        match result1 {
            Err(HandleResolverError::MockResolutionFailure) => {},
            other => panic!("Expected MockResolutionFailure, got {:?}", other),
        }

        // Verify the failure was cached
        let count_after_first: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query count after first resolution");
        assert_eq!(count_after_first, 1);

        // Verify the cached error record
        let cached_record = sqlx::query("SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1")
            .bind(expected_key)
            .fetch_one(&pool)
            .await
            .expect("Failed to fetch cached error record");

        let cached_key: i64 = cached_record.get("key");
        let cached_result: Vec<u8> = cached_record.get("result");
        let cached_created: i64 = cached_record.get("created");
        let cached_updated: i64 = cached_record.get("updated");

        assert_eq!(cached_key, expected_key);
        assert!(!cached_result.is_empty(), "Cached error result should not be empty");
        assert!(cached_created > 0, "Created timestamp should be positive");
        assert!(cached_updated > 0, "Updated timestamp should be positive");
        assert_eq!(cached_created, cached_updated, "Created and updated should be equal on first insert");

        // Verify we can deserialize the cached error result
        let resolution_result = crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
            .expect("Failed to deserialize cached error result");
        let cached_did = resolution_result.to_did();
        assert!(cached_did.is_none(), "Error result should have no DID");

        // Second resolution - should hit cache with error (no additional database operations)
        // Note the deliberate asymmetry: the first failure surfaces the inner
        // resolver's error, while a cached failure surfaces HandleNotFound.
        let result2 = sqlite_resolver.resolve(test_handle).await;
        assert!(result2.is_err());

        // Match the specific error type we expect from cache
        match result2 {
            Err(HandleResolverError::HandleNotFound) => {}, // Cache returns HandleNotFound for "not resolved"
            other => panic!("Expected HandleNotFound from cache, got {:?}", other),
        }

        // Verify count hasn't changed (cache hit, no new operations)
        let count_after_second: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query count after second resolution");
        assert_eq!(count_after_second, 1);
    }
}
+1
src/lib.rs
···77pub mod cache; // Only create_redis_pool exposed
88pub mod handle_resolver_task; // Factory functions and TaskConfig exposed
99pub mod queue_adapter; // Trait and factory functions exposed
1010+pub mod sqlite_schema; // SQLite schema management functions exposed
1011pub mod task_manager; // Only spawn_cancellable_task exposed
11121213// Internal modules - crate visibility only
+285
src/sqlite_schema.rs
···11+//! SQLite schema management for QuickDID.
22+//!
33+//! This module provides functionality to create and manage the SQLite database
44+//! schema used by the SQLite-backed handle resolver cache.
55+66+use anyhow::Result;
77+use sqlx::{SqlitePool, migrate::MigrateDatabase, Sqlite};
88+use std::path::Path;
99+1010+/// SQL schema for the handle resolution cache table.
/// SQL schema for the handle resolution cache table.
///
/// Contains TWO statements: the table itself plus an index on `updated`
/// (which speeds up TTL-based cleanup scans in `cleanup_expired_entries`).
/// NOTE(review): prepared-query execution built on `sqlite3_prepare_v2`
/// compiles only the first statement of a multi-statement string — verify
/// that the `CREATE INDEX` actually executes, or run the statements
/// individually.
const CREATE_HANDLE_RESOLUTION_CACHE_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS handle_resolution_cache (
    key INTEGER PRIMARY KEY,
    result BLOB NOT NULL,
    created INTEGER NOT NULL,
    updated INTEGER NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_handle_resolution_cache_updated
ON handle_resolution_cache(updated);
"#;
2222+2323+/// Create or connect to a SQLite database and ensure schema is initialized.
2424+///
2525+/// # Arguments
2626+///
2727+/// * `database_url` - SQLite database URL (e.g., "sqlite:./quickdid.db" or "sqlite::memory:")
2828+///
2929+/// # Returns
3030+///
3131+/// Returns a SqlitePool connected to the database with schema initialized.
3232+///
3333+/// # Example
3434+///
3535+/// ```no_run
3636+/// use quickdid::sqlite_schema::create_sqlite_pool;
3737+///
3838+/// # async fn example() -> anyhow::Result<()> {
3939+/// // File-based database
4040+/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
4141+///
4242+/// // In-memory database (for testing)
4343+/// let pool = create_sqlite_pool("sqlite::memory:").await?;
4444+/// # Ok(())
4545+/// # }
4646+/// ```
4747+pub async fn create_sqlite_pool(database_url: &str) -> Result<SqlitePool> {
4848+ tracing::info!("Initializing SQLite database: {}", database_url);
4949+5050+ // Extract the database path from the URL for file-based databases
5151+ if let Some(path) = database_url.strip_prefix("sqlite:") {
5252+ if path != ":memory:" && !path.is_empty() {
5353+ // Create the database file if it doesn't exist
5454+ if !Sqlite::database_exists(database_url).await? {
5555+ tracing::info!("Creating SQLite database file: {}", path);
5656+ Sqlite::create_database(database_url).await?;
5757+ }
5858+5959+ // Ensure the parent directory exists
6060+ if let Some(parent) = Path::new(path).parent() {
6161+ if !parent.exists() {
6262+ tracing::info!("Creating directory: {}", parent.display());
6363+ std::fs::create_dir_all(parent)?;
6464+ }
6565+ }
6666+ }
6767+ }
6868+6969+ // Connect to the database
7070+ let pool = SqlitePool::connect(database_url).await?;
7171+ tracing::info!("Connected to SQLite database");
7272+7373+ // Create the schema
7474+ create_schema(&pool).await?;
7575+7676+ Ok(pool)
7777+}
7878+7979+/// Create the database schema if it doesn't exist.
8080+///
8181+/// # Arguments
8282+///
8383+/// * `pool` - SQLite connection pool
8484+///
8585+/// # Example
8686+///
8787+/// ```no_run
8888+/// use quickdid::sqlite_schema::create_schema;
8989+/// use sqlx::SqlitePool;
9090+///
9191+/// # async fn example() -> anyhow::Result<()> {
9292+/// let pool = SqlitePool::connect("sqlite::memory:").await?;
9393+/// create_schema(&pool).await?;
9494+/// # Ok(())
9595+/// # }
9696+/// ```
9797+pub async fn create_schema(pool: &SqlitePool) -> Result<()> {
9898+ tracing::debug!("Creating SQLite schema if not exists");
9999+100100+ // Execute the schema creation SQL
101101+ sqlx::query(CREATE_HANDLE_RESOLUTION_CACHE_TABLE)
102102+ .execute(pool)
103103+ .await?;
104104+105105+ tracing::info!("SQLite schema initialized");
106106+107107+ Ok(())
108108+}
109109+110110+/// Clean up expired entries from the handle resolution cache.
111111+///
112112+/// This function removes entries that are older than the specified TTL.
113113+/// It should be called periodically to prevent the database from growing indefinitely.
114114+///
115115+/// # Arguments
116116+///
117117+/// * `pool` - SQLite connection pool
118118+/// * `ttl_seconds` - TTL in seconds for cache entries
119119+///
120120+/// # Returns
121121+///
122122+/// Returns the number of entries deleted.
123123+///
124124+/// # Example
125125+///
126126+/// ```no_run
127127+/// use quickdid::sqlite_schema::cleanup_expired_entries;
128128+/// use sqlx::SqlitePool;
129129+///
130130+/// # async fn example() -> anyhow::Result<()> {
131131+/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
132132+/// let deleted_count = cleanup_expired_entries(&pool, 7776000).await?; // 90 days
133133+/// println!("Deleted {} expired entries", deleted_count);
134134+/// # Ok(())
135135+/// # }
136136+/// ```
137137+pub async fn cleanup_expired_entries(pool: &SqlitePool, ttl_seconds: u64) -> Result<u64> {
138138+ let current_timestamp = std::time::SystemTime::now()
139139+ .duration_since(std::time::UNIX_EPOCH)
140140+ .unwrap_or_default()
141141+ .as_secs() as i64;
142142+143143+ let cutoff_timestamp = current_timestamp - (ttl_seconds as i64);
144144+145145+ let result = sqlx::query("DELETE FROM handle_resolution_cache WHERE updated < ?1")
146146+ .bind(cutoff_timestamp)
147147+ .execute(pool)
148148+ .await?;
149149+150150+ let deleted_count = result.rows_affected();
151151+ if deleted_count > 0 {
152152+ tracing::info!("Cleaned up {} expired cache entries", deleted_count);
153153+ }
154154+155155+ Ok(deleted_count)
156156+}
157157+158158+/// Get statistics about the handle resolution cache.
159159+///
160160+/// # Arguments
161161+///
162162+/// * `pool` - SQLite connection pool
163163+///
164164+/// # Returns
165165+///
166166+/// Returns a tuple of (total_entries, database_size_bytes).
167167+///
168168+/// # Example
169169+///
170170+/// ```no_run
171171+/// use quickdid::sqlite_schema::get_cache_stats;
172172+/// use sqlx::SqlitePool;
173173+///
174174+/// # async fn example() -> anyhow::Result<()> {
175175+/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
176176+/// let (total_entries, size_bytes) = get_cache_stats(&pool).await?;
177177+/// println!("Cache has {} entries, {} bytes", total_entries, size_bytes);
178178+/// # Ok(())
179179+/// # }
180180+/// ```
181181+pub async fn get_cache_stats(pool: &SqlitePool) -> Result<(i64, i64)> {
182182+ // Get total entries
183183+ let total_entries: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
184184+ .fetch_one(pool)
185185+ .await?;
186186+187187+ // Get database page size and page count to calculate total size
188188+ let page_size: i64 = sqlx::query_scalar("PRAGMA page_size")
189189+ .fetch_one(pool)
190190+ .await?;
191191+192192+ let page_count: i64 = sqlx::query_scalar("PRAGMA page_count")
193193+ .fetch_one(pool)
194194+ .await?;
195195+196196+ let size_bytes = page_size * page_count;
197197+198198+ Ok((total_entries, size_bytes))
199199+}
/// Tests for schema creation, TTL-based cleanup, and cache statistics
/// against an in-memory SQLite database.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_create_sqlite_pool_memory() {
        let pool = create_sqlite_pool("sqlite::memory:")
            .await
            .expect("Failed to create in-memory SQLite pool");

        // Verify the table was created
        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query table");

        assert_eq!(count, 0);
    }

    #[tokio::test]
    async fn test_cleanup_expired_entries() {
        let pool = create_sqlite_pool("sqlite::memory:")
            .await
            .expect("Failed to create in-memory SQLite pool");

        // Insert a test entry that's already expired
        let old_timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs() as i64 - 3600; // 1 hour ago

        // ?2 is bound once and reused for both `created` and `updated`.
        sqlx::query(
            "INSERT INTO handle_resolution_cache (key, result, created, updated) VALUES (1, ?1, ?2, ?2)"
        )
        .bind(&b"test_data"[..])
        .bind(old_timestamp)
        .execute(&pool)
        .await
        .expect("Failed to insert test data");

        // Clean up entries older than 30 minutes (1800 seconds)
        let deleted = cleanup_expired_entries(&pool, 1800)
            .await
            .expect("Failed to cleanup expired entries");

        assert_eq!(deleted, 1);

        // Verify the entry was deleted
        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query table");

        assert_eq!(count, 0);
    }

    #[tokio::test]
    async fn test_get_cache_stats() {
        let pool = create_sqlite_pool("sqlite::memory:")
            .await
            .expect("Failed to create in-memory SQLite pool");

        // Insert a test entry
        let current_timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs() as i64;

        sqlx::query(
            "INSERT INTO handle_resolution_cache (key, result, created, updated) VALUES (1, ?1, ?2, ?2)"
        )
        .bind(&b"test_data"[..])
        .bind(current_timestamp)
        .execute(&pool)
        .await
        .expect("Failed to insert test data");

        let (total_entries, size_bytes) = get_cache_stats(&pool)
            .await
            .expect("Failed to get cache stats");

        assert_eq!(total_entries, 1);
        // Any non-empty SQLite database occupies at least one page.
        assert!(size_bytes > 0);
    }
}