QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
···66// Semi-public modules - needed by binary but with limited exposure
77pub mod cache; // Only create_redis_pool exposed
88pub mod handle_resolver_task; // Factory functions and TaskConfig exposed
99-pub mod queue_adapter; // Trait and factory functions exposed
99+pub mod queue; // Queue adapter system with trait and factory functions
1010pub mod sqlite_schema; // SQLite schema management functions exposed
1111pub mod task_manager; // Only spawn_cancellable_task exposed
1212
+189
src/queue/adapter.rs
···11+//! Queue adapter trait definition.
22+//!
33+//! This module defines the core `QueueAdapter` trait that provides a common
44+//! interface for different queue implementations (MPSC, Redis, SQLite, etc.).
55+66+use async_trait::async_trait;
77+use super::error::Result;
88+99+/// Generic trait for queue adapters that can work with any work type.
1010+///
1111+/// This trait provides a common interface for different queue implementations
1212+/// (MPSC, Redis, PostgreSQL, SQLite, etc.) allowing them to be used interchangeably.
1313+///
1414+/// # Type Parameters
1515+///
1616+/// * `T` - The type of work items that this queue processes. Must be `Send + Sync + 'static`.
1717+///
1818+/// # Implementation Notes
1919+///
2020+/// Implementors should ensure that:
2121+/// - `pull()` blocks until an item is available or the queue is closed
2222+/// - `push()` may block if the queue has a bounded capacity
2323+/// - `ack()` is used for reliable delivery semantics (can be no-op for simple queues)
2424+/// - `try_push()` never blocks and returns an error if the queue is full
2525+///
2626+/// # Examples
2727+///
2828+/// ```no_run
2929+/// use quickdid::queue::{QueueAdapter, MpscQueueAdapter};
3030+/// use std::sync::Arc;
3131+///
3232+/// # async fn example() -> anyhow::Result<()> {
3333+/// // Create a queue adapter for String work items
3434+/// let queue: Arc<dyn QueueAdapter<String>> = Arc::new(MpscQueueAdapter::new(100));
3535+///
3636+/// // Push work to the queue
3737+/// queue.push("process-this".to_string()).await?;
3838+///
3939+/// // Pull work from the queue
4040+/// if let Some(work) = queue.pull().await {
4141+/// println!("Processing: {}", work);
4242+/// // Acknowledge completion
4343+/// queue.ack(&work).await?;
4444+/// }
4545+/// # Ok(())
4646+/// # }
4747+/// ```
4848+#[async_trait]
4949+pub trait QueueAdapter<T>: Send + Sync
5050+where
5151+ T: Send + Sync + 'static,
5252+{
5353+ /// Pull the next work item from the queue.
5454+ ///
5555+ /// This method blocks until an item is available or the queue is closed.
5656+ /// Returns `None` if the queue is closed or empty (depending on implementation).
5757+ ///
5858+ /// # Returns
5959+ ///
6060+ /// * `Some(T)` - The next work item from the queue
6161+ /// * `None` - The queue is closed or empty
6262+ async fn pull(&self) -> Option<T>;
6363+6464+ /// Push a work item to the queue.
6565+ ///
6666+ /// This method may block if the queue has bounded capacity and is full.
6767+ ///
6868+ /// # Arguments
6969+ ///
7070+ /// * `work` - The work item to add to the queue
7171+ ///
7272+ /// # Errors
7373+ ///
7474+ /// Returns an error if:
7575+ /// - The queue is full (for bounded queues)
7676+ /// - The queue is closed
7777+ /// - Serialization fails (for persistent queues)
7878+ /// - Backend connection fails (for Redis/SQLite)
7979+ async fn push(&self, work: T) -> Result<()>;
8080+8181+ /// Acknowledge that a work item has been successfully processed.
8282+ ///
8383+ /// This is used by reliable queue implementations to remove the item
8484+ /// from a temporary processing queue. Implementations that don't require
8585+ /// acknowledgment (like MPSC) can use the default no-op implementation.
8686+ ///
8787+ /// # Arguments
8888+ ///
8989+ /// * `item` - The work item to acknowledge
9090+ ///
9191+ /// # Errors
9292+ ///
9393+ /// Returns an error if acknowledgment fails (backend-specific).
9494+ async fn ack(&self, _item: &T) -> Result<()> {
9595+ // Default no-op implementation for queues that don't need acknowledgment
9696+ Ok(())
9797+ }
9898+9999+ /// Try to push a work item without blocking.
100100+ ///
101101+ /// This method returns immediately with an error if the queue is full.
102102+ ///
103103+ /// # Arguments
104104+ ///
105105+ /// * `work` - The work item to add to the queue
106106+ ///
107107+ /// # Errors
108108+ ///
109109+ /// Returns an error if:
110110+ /// - The queue is full
111111+ /// - The queue is closed
112112+ /// - Other backend-specific errors occur
113113+ async fn try_push(&self, work: T) -> Result<()> {
114114+ // Default implementation uses regular push
115115+ self.push(work).await
116116+ }
117117+118118+ /// Get the current queue depth if available.
119119+ ///
120120+ /// # Returns
121121+ ///
122122+ /// * `Some(usize)` - The number of items currently in the queue
123123+ /// * `None` - Queue depth is not available or cannot be determined
124124+ async fn depth(&self) -> Option<usize> {
125125+ None
126126+ }
127127+128128+ /// Check if the queue is healthy.
129129+ ///
130130+ /// Used for health checks and monitoring. Implementations should verify
131131+ /// backend connectivity and basic functionality.
132132+ ///
133133+ /// # Returns
134134+ ///
135135+ /// * `true` - The queue is operational
136136+ /// * `false` - The queue has issues or is disconnected
137137+ async fn is_healthy(&self) -> bool {
138138+ true
139139+ }
140140+}
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal adapter implementing only the required methods, so the
    // trait's default implementations can be exercised in isolation.
    struct MockQueue<T> {
        _phantom: std::marker::PhantomData<T>,
    }

    impl<T> MockQueue<T> {
        fn new() -> Self {
            Self {
                _phantom: std::marker::PhantomData,
            }
        }
    }

    #[async_trait]
    impl<T> QueueAdapter<T> for MockQueue<T>
    where
        T: Send + Sync + 'static,
    {
        async fn pull(&self) -> Option<T> {
            None
        }

        async fn push(&self, _work: T) -> Result<()> {
            Ok(())
        }
    }

    #[tokio::test]
    async fn test_default_trait_methods() {
        let queue = MockQueue::<String>::new();
        let item = "test".to_string();

        // ack: the default no-op always succeeds
        assert!(queue.ack(&item).await.is_ok());
        // try_push: the default delegates to push, which succeeds here
        assert!(queue.try_push(item).await.is_ok());
        // depth: unknown by default
        assert!(queue.depth().await.is_none());
        // is_healthy: optimistic default
        assert!(queue.is_healthy().await);
    }
}
+76
src/queue/error.rs
···11+//! Queue operation error types.
22+//!
33+//! This module defines the error types that can occur during queue operations,
44+//! including push failures, serialization issues, and backend-specific errors.
55+66+use thiserror::Error;
77+88+/// Queue operation errors.
99+///
1010+/// These errors represent various failure modes that can occur when working
1111+/// with queue adapters, from connection issues to serialization problems.
1212+#[derive(Error, Debug)]
1313+pub enum QueueError {
1414+ /// Failed to push an item to the queue.
1515+ #[error("error-quickdid-queue-1 Failed to push to queue: {0}")]
1616+ PushFailed(String),
1717+1818+ /// The queue is full and cannot accept new items.
1919+ #[error("error-quickdid-queue-2 Queue is full")]
2020+ QueueFull,
2121+2222+ /// The queue has been closed and is no longer accepting items.
2323+ #[error("error-quickdid-queue-3 Queue is closed")]
2424+ QueueClosed,
2525+2626+ /// Redis connection failed.
2727+ #[error("error-quickdid-queue-4 Redis connection failed: {0}")]
2828+ RedisConnectionFailed(String),
2929+3030+ /// Redis operation failed.
3131+ #[error("error-quickdid-queue-5 Redis operation failed: {operation}: {details}")]
3232+ RedisOperationFailed {
3333+ /// The Redis operation that failed
3434+ operation: String,
3535+ /// Details about the failure
3636+ details: String
3737+ },
3838+3939+ /// Failed to serialize an item for storage.
4040+ #[error("error-quickdid-queue-6 Serialization failed: {0}")]
4141+ SerializationFailed(String),
4242+4343+ /// Failed to deserialize an item from storage.
4444+ #[error("error-quickdid-queue-7 Deserialization failed: {0}")]
4545+ DeserializationFailed(String),
4646+4747+ /// Item not found in worker queue during acknowledgment.
4848+ #[error("error-quickdid-queue-8 Item not found in worker queue during acknowledgment")]
4949+ AckItemNotFound,
5050+}
5151+5252+/// Result type alias for queue operations.
5353+pub type Result<T> = std::result::Result<T, QueueError>;
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_error_messages() {
        // Each variant's Display output carries its stable error code.
        let msg = QueueError::PushFailed("test failure".to_string()).to_string();
        assert!(msg.contains("error-quickdid-queue-1"));
        assert!(msg.contains("test failure"));

        assert_eq!(
            QueueError::QueueFull.to_string(),
            "error-quickdid-queue-2 Queue is full"
        );

        let msg = QueueError::RedisOperationFailed {
            operation: "LPUSH".to_string(),
            details: "connection timeout".to_string(),
        }
        .to_string();
        assert!(msg.contains("error-quickdid-queue-5"));
        assert!(msg.contains("LPUSH"));
        assert!(msg.contains("connection timeout"));
    }
}
+330
src/queue/factory.rs
···11+//! Factory functions for creating queue adapters.
22+//!
33+//! This module provides convenient factory functions for creating different
44+//! types of queue adapters with appropriate configurations.
55+66+use deadpool_redis::Pool as RedisPool;
77+use serde::{Deserialize, Serialize};
88+use std::sync::Arc;
99+use tokio::sync::mpsc;
1010+1111+use super::{
1212+ adapter::QueueAdapter,
1313+ mpsc::MpscQueueAdapter,
1414+ noop::NoopQueueAdapter,
1515+ redis::RedisQueueAdapter,
1616+ sqlite::SqliteQueueAdapter,
1717+};
1818+1919+// ========= MPSC Queue Factories =========
2020+2121+/// Create a new MPSC queue adapter with the specified buffer size.
2222+///
2323+/// This creates an in-memory queue suitable for single-instance deployments.
2424+///
2525+/// # Arguments
2626+///
2727+/// * `buffer` - The buffer size for the channel
2828+///
2929+/// # Examples
3030+///
3131+/// ```
3232+/// use quickdid::queue::create_mpsc_queue;
3333+///
3434+/// let queue = create_mpsc_queue::<String>(100);
3535+/// ```
3636+pub fn create_mpsc_queue<T>(buffer: usize) -> Arc<dyn QueueAdapter<T>>
3737+where
3838+ T: Send + Sync + 'static,
3939+{
4040+ Arc::new(MpscQueueAdapter::new(buffer))
4141+}
4242+4343+/// Create an MPSC queue adapter from existing channels.
4444+///
4545+/// This allows integration with existing channel-based architectures.
4646+///
4747+/// # Arguments
4848+///
4949+/// * `sender` - The sender half of the channel
5050+/// * `receiver` - The receiver half of the channel
5151+///
5252+/// # Examples
5353+///
5454+/// ```
5555+/// use tokio::sync::mpsc;
5656+/// use quickdid::queue::create_mpsc_queue_from_channel;
5757+///
5858+/// let (sender, receiver) = mpsc::channel::<String>(50);
5959+/// let queue = create_mpsc_queue_from_channel(sender, receiver);
6060+/// ```
6161+pub fn create_mpsc_queue_from_channel<T>(
6262+ sender: mpsc::Sender<T>,
6363+ receiver: mpsc::Receiver<T>,
6464+) -> Arc<dyn QueueAdapter<T>>
6565+where
6666+ T: Send + Sync + 'static,
6767+{
6868+ Arc::new(MpscQueueAdapter::from_channel(sender, receiver))
6969+}
7070+7171+// ========= Redis Queue Factories =========
7272+7373+/// Create a new Redis-backed queue adapter.
7474+///
7575+/// This creates a distributed queue suitable for multi-instance deployments.
7676+///
7777+/// # Arguments
7878+///
7979+/// * `pool` - Redis connection pool
8080+/// * `worker_id` - Worker identifier for this queue instance
8181+/// * `key_prefix` - Redis key prefix for queue operations
8282+/// * `timeout_seconds` - Timeout for blocking operations
8383+///
8484+/// # Examples
8585+///
8686+/// ```no_run
8787+/// use quickdid::queue::create_redis_queue;
8888+/// use deadpool_redis::Config;
8989+///
9090+/// # async fn example() -> anyhow::Result<()> {
9191+/// let cfg = Config::from_url("redis://localhost:6379");
9292+/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
9393+///
9494+/// let queue = create_redis_queue::<String>(
9595+/// pool,
9696+/// "worker-1".to_string(),
9797+/// "queue:myapp:".to_string(),
9898+/// 5,
9999+/// );
100100+/// # Ok(())
101101+/// # }
102102+/// ```
103103+pub fn create_redis_queue<T>(
104104+ pool: RedisPool,
105105+ worker_id: String,
106106+ key_prefix: String,
107107+ timeout_seconds: u64,
108108+) -> Arc<dyn QueueAdapter<T>>
109109+where
110110+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
111111+{
112112+ Arc::new(RedisQueueAdapter::new(
113113+ pool,
114114+ worker_id,
115115+ key_prefix,
116116+ timeout_seconds,
117117+ ))
118118+}
119119+120120+// ========= SQLite Queue Factories =========
121121+122122+/// Create a new SQLite queue adapter with unlimited queue size.
123123+///
124124+/// This creates a persistent queue backed by SQLite database suitable
125125+/// for single-instance deployments that need persistence across restarts.
126126+/// The queue has no size limit and may grow unbounded.
127127+///
128128+/// # Arguments
129129+///
130130+/// * `pool` - SQLite connection pool
131131+///
132132+/// # Examples
133133+///
134134+/// ```no_run
135135+/// use quickdid::queue::{create_sqlite_queue, HandleResolutionWork};
136136+/// use quickdid::sqlite_schema::create_sqlite_pool;
137137+///
138138+/// # async fn example() -> anyhow::Result<()> {
139139+/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
140140+/// let queue = create_sqlite_queue::<HandleResolutionWork>(pool);
141141+/// # Ok(())
142142+/// # }
143143+/// ```
144144+pub fn create_sqlite_queue<T>(pool: sqlx::SqlitePool) -> Arc<dyn QueueAdapter<T>>
145145+where
146146+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
147147+{
148148+ Arc::new(SqliteQueueAdapter::new(pool))
149149+}
150150+151151+/// Create a new SQLite queue adapter with work shedding.
152152+///
153153+/// This creates a persistent queue with configurable maximum size.
154154+/// When the queue exceeds `max_size`, the oldest entries are automatically
155155+/// deleted to maintain the limit, preserving the most recent work items.
156156+///
157157+/// # Arguments
158158+///
159159+/// * `pool` - SQLite connection pool
160160+/// * `max_size` - Maximum number of entries (0 = unlimited)
161161+///
162162+/// # Work Shedding Behavior
163163+///
164164+/// - New work items are always accepted
165165+/// - When queue size exceeds `max_size`, oldest entries are deleted
166166+/// - Deletion happens atomically with insertion in a single transaction
167167+/// - Essential for long-running deployments to prevent disk space issues
168168+///
169169+/// # Examples
170170+///
171171+/// ```no_run
172172+/// use quickdid::queue::{create_sqlite_queue_with_max_size, HandleResolutionWork};
173173+/// use quickdid::sqlite_schema::create_sqlite_pool;
174174+///
175175+/// # async fn example() -> anyhow::Result<()> {
176176+/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
177177+/// // Limit queue to 10,000 entries with automatic work shedding
178178+/// let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 10000);
179179+/// # Ok(())
180180+/// # }
181181+/// ```
182182+pub fn create_sqlite_queue_with_max_size<T>(
183183+ pool: sqlx::SqlitePool,
184184+ max_size: u64,
185185+) -> Arc<dyn QueueAdapter<T>>
186186+where
187187+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
188188+{
189189+ Arc::new(SqliteQueueAdapter::with_max_size(pool, max_size))
190190+}
191191+192192+// ========= No-op Queue Factory =========
193193+194194+/// Create a no-operation queue adapter.
195195+///
196196+/// This creates a queue that discards all work items, useful for testing
197197+/// or when queue processing is disabled.
198198+///
199199+/// # Examples
200200+///
201201+/// ```
202202+/// use quickdid::queue::create_noop_queue;
203203+///
204204+/// let queue = create_noop_queue::<String>();
205205+/// ```
206206+pub fn create_noop_queue<T>() -> Arc<dyn QueueAdapter<T>>
207207+where
208208+ T: Send + Sync + 'static,
209209+{
210210+ Arc::new(NoopQueueAdapter::new())
211211+}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::queue::HandleResolutionWork;

    // Build a fresh in-memory SQLite pool with the queue schema applied.
    async fn sqlite_test_pool() -> sqlx::SqlitePool {
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");
        pool
    }

    #[tokio::test]
    async fn test_create_mpsc_queue() {
        let queue = create_mpsc_queue::<String>(10);

        queue.push("test".to_string()).await.unwrap();
        assert_eq!(queue.pull().await, Some("test".to_string()));
    }

    #[tokio::test]
    async fn test_create_mpsc_queue_from_channel() {
        let (tx, rx) = mpsc::channel(5);
        let queue = create_mpsc_queue_from_channel(tx.clone(), rx);

        // Items sent through the original sender are visible via the adapter.
        tx.send("external".to_string()).await.unwrap();
        assert_eq!(queue.pull().await, Some("external".to_string()));
    }

    #[tokio::test]
    async fn test_create_noop_queue() {
        let queue = create_noop_queue::<String>();

        // Pushes are accepted (and silently discarded).
        queue.push("ignored".to_string()).await.unwrap();

        // The no-op queue is always healthy and always empty.
        assert!(queue.is_healthy().await);
        assert_eq!(queue.depth().await, Some(0));
    }

    #[tokio::test]
    async fn test_create_sqlite_queue() {
        let queue = create_sqlite_queue::<HandleResolutionWork>(sqlite_test_pool().await);

        let work = HandleResolutionWork::new("test.example.com".to_string());
        queue.push(work.clone()).await.unwrap();
        assert_eq!(queue.pull().await, Some(work));
    }

    #[tokio::test]
    async fn test_create_sqlite_queue_with_max_size() {
        // Cap the queue at five entries to exercise work shedding.
        let queue =
            create_sqlite_queue_with_max_size::<HandleResolutionWork>(sqlite_test_pool().await, 5);

        for i in 0..10 {
            let work = HandleResolutionWork::new(format!("test-{}.example.com", i));
            queue.push(work).await.unwrap();
        }

        let depth = queue.depth().await.unwrap();
        assert!(depth <= 5, "Queue should have at most 5 items after work shedding");
    }

    #[tokio::test]
    async fn test_create_redis_queue() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        // Unique key prefix so concurrent test runs cannot collide.
        let test_prefix = format!(
            "test:factory:{}:",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_nanos()
        );

        let queue = create_redis_queue::<String>(pool, "test-worker".to_string(), test_prefix, 1);

        queue.push("test-item".to_string()).await.unwrap();
        assert_eq!(queue.pull().await, Some("test-item".to_string()));
    }
}
+83
src/queue/mod.rs
···11+//! Queue adapter system for work queue abstraction.
22+//!
33+//! This module provides a generic trait and implementations for queue adapters
44+//! that can be used with any work type for handle resolution and other tasks.
55+//!
66+//! # Architecture
77+//!
88+//! The queue system is designed with the following components:
99+//!
1010+//! - **Trait**: `QueueAdapter` - Common interface for all queue implementations
1111+//! - **Implementations**:
1212+//! - `MpscQueueAdapter` - In-memory MPSC channel-based queue
1313+//! - `RedisQueueAdapter` - Distributed Redis-backed queue
1414+//! - `SqliteQueueAdapter` - Persistent SQLite-backed queue
1515+//! - `NoopQueueAdapter` - No-operation queue for testing
1616+//! - **Work Types**: `HandleResolutionWork` - Work items for handle resolution
1717+//! - **Factory Functions**: Convenient functions for creating queue adapters
1818+//!
1919+//! # Examples
2020+//!
2121+//! ## Simple In-Memory Queue
2222+//!
2323+//! ```
2424+//! use quickdid::queue::{create_mpsc_queue, QueueAdapter};
2525+//!
2626+//! # async fn example() -> anyhow::Result<()> {
2727+//! let queue = create_mpsc_queue::<String>(100);
2828+//!
2929+//! queue.push("work-item".to_string()).await?;
3030+//! if let Some(item) = queue.pull().await {
3131+//! println!("Processing: {}", item);
3232+//! }
3333+//! # Ok(())
3434+//! # }
3535+//! ```
3636+//!
3737+//! ## Persistent Queue with Work Shedding
3838+//!
3939+//! ```no_run
4040+//! use quickdid::queue::{create_sqlite_queue_with_max_size, HandleResolutionWork};
4141+//! use quickdid::sqlite_schema::create_sqlite_pool;
4242+//!
4343+//! # async fn example() -> anyhow::Result<()> {
4444+//! let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
4545+//! let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 10000);
4646+//!
4747+//! let work = HandleResolutionWork::new("alice.bsky.social".to_string());
4848+//! queue.push(work).await?;
4949+//! # Ok(())
5050+//! # }
5151+//! ```
5252+5353+// Internal modules
5454+mod adapter;
5555+mod error;
5656+mod factory;
5757+mod mpsc;
5858+mod noop;
5959+mod redis;
6060+mod sqlite;
6161+mod work;
6262+6363+// Re-export core types
6464+pub use adapter::QueueAdapter;
6565+pub use error::{QueueError, Result};
6666+pub use work::HandleResolutionWork;
6767+6868+// Re-export implementations (with limited visibility)
6969+pub use mpsc::MpscQueueAdapter;
7070+pub use noop::NoopQueueAdapter;
7171+pub use redis::RedisQueueAdapter;
7272+pub use sqlite::SqliteQueueAdapter;
7373+7474+// Re-export factory functions
7575+pub use factory::{
7676+ create_mpsc_queue,
7777+ create_mpsc_queue_from_channel,
7878+ create_noop_queue,
7979+ create_redis_queue,
8080+ create_sqlite_queue,
8181+ create_sqlite_queue_with_max_size,
8282+};
8383+
+286
src/queue/mpsc.rs
···11+//! MPSC channel-based queue adapter implementation.
22+//!
33+//! This module provides an in-memory queue implementation using Tokio's
44+//! multi-producer, single-consumer (MPSC) channels. It's suitable for
55+//! single-instance deployments with moderate throughput requirements.
66+77+use async_trait::async_trait;
88+use std::sync::Arc;
99+use tokio::sync::{mpsc, Mutex};
1010+1111+use super::adapter::QueueAdapter;
1212+use super::error::{QueueError, Result};
1313+1414+/// MPSC channel-based queue adapter implementation.
1515+///
1616+/// This adapter uses tokio's multi-producer, single-consumer channel
1717+/// for in-memory queuing of work items. It provides fast, lock-free
1818+/// operation for single-instance deployments.
1919+///
2020+/// # Features
2121+///
2222+/// - In-memory operation (no persistence)
2323+/// - Bounded capacity with backpressure
2424+/// - Fast push/pull operations
2525+/// - No acknowledgment needed (fire-and-forget)
2626+///
2727+/// # Limitations
2828+///
2929+/// - No persistence across restarts
3030+/// - Single consumer only
3131+/// - No distributed operation
3232+///
3333+/// # Examples
3434+///
3535+/// ```
3636+/// use quickdid::queue::MpscQueueAdapter;
3737+/// use quickdid::queue::QueueAdapter;
3838+///
3939+/// # async fn example() -> anyhow::Result<()> {
4040+/// // Create a queue with buffer size of 100
4141+/// let queue = MpscQueueAdapter::<String>::new(100);
4242+///
4343+/// // Push items
4444+/// queue.push("item1".to_string()).await?;
4545+/// queue.push("item2".to_string()).await?;
4646+///
4747+/// // Pull items
4848+/// while let Some(item) = queue.pull().await {
4949+/// println!("Processing: {}", item);
5050+/// }
5151+/// # Ok(())
5252+/// # }
5353+/// ```
5454+pub struct MpscQueueAdapter<T>
5555+where
5656+ T: Send + Sync + 'static,
5757+{
5858+ receiver: Arc<Mutex<mpsc::Receiver<T>>>,
5959+ sender: mpsc::Sender<T>,
6060+}
6161+6262+impl<T> MpscQueueAdapter<T>
6363+where
6464+ T: Send + Sync + 'static,
6565+{
6666+ /// Create a new MPSC queue adapter with the specified buffer size.
6767+ ///
6868+ /// # Arguments
6969+ ///
7070+ /// * `buffer` - The maximum number of items that can be buffered
7171+ ///
7272+ /// # Examples
7373+ ///
7474+ /// ```
7575+ /// use quickdid::queue::MpscQueueAdapter;
7676+ ///
7777+ /// let queue = MpscQueueAdapter::<String>::new(100);
7878+ /// ```
7979+ pub fn new(buffer: usize) -> Self {
8080+ let (sender, receiver) = mpsc::channel(buffer);
8181+ Self {
8282+ receiver: Arc::new(Mutex::new(receiver)),
8383+ sender,
8484+ }
8585+ }
8686+8787+ /// Create an adapter from existing MPSC channels.
8888+ ///
8989+ /// This constructor is useful for integrating with existing channel-based
9090+ /// architectures or when you need custom channel configuration.
9191+ ///
9292+ /// # Arguments
9393+ ///
9494+ /// * `sender` - The sender half of the channel
9595+ /// * `receiver` - The receiver half of the channel
9696+ ///
9797+ /// # Examples
9898+ ///
9999+ /// ```
100100+ /// use tokio::sync::mpsc;
101101+ /// use quickdid::queue::MpscQueueAdapter;
102102+ ///
103103+ /// let (sender, receiver) = mpsc::channel::<String>(50);
104104+ /// let queue = MpscQueueAdapter::from_channel(sender, receiver);
105105+ /// ```
106106+ pub fn from_channel(sender: mpsc::Sender<T>, receiver: mpsc::Receiver<T>) -> Self {
107107+ Self {
108108+ receiver: Arc::new(Mutex::new(receiver)),
109109+ sender,
110110+ }
111111+ }
112112+}
113113+114114+#[async_trait]
115115+impl<T> QueueAdapter<T> for MpscQueueAdapter<T>
116116+where
117117+ T: Send + Sync + 'static,
118118+{
119119+ async fn pull(&self) -> Option<T> {
120120+ let mut receiver = self.receiver.lock().await;
121121+ receiver.recv().await
122122+ }
123123+124124+ async fn push(&self, work: T) -> Result<()> {
125125+ self.sender
126126+ .send(work)
127127+ .await
128128+ .map_err(|e| QueueError::PushFailed(e.to_string()))
129129+ }
130130+131131+ async fn try_push(&self, work: T) -> Result<()> {
132132+ self.sender.try_send(work).map_err(|e| match e {
133133+ mpsc::error::TrySendError::Full(_) => QueueError::QueueFull,
134134+ mpsc::error::TrySendError::Closed(_) => QueueError::QueueClosed,
135135+ })
136136+ }
137137+138138+ async fn depth(&self) -> Option<usize> {
139139+ // Note: This is an approximation as mpsc doesn't provide exact depth
140140+ Some(self.sender.max_capacity() - self.sender.capacity())
141141+ }
142142+143143+ async fn is_healthy(&self) -> bool {
144144+ !self.sender.is_closed()
145145+ }
146146+}
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_mpsc_queue_push_pull() {
        let queue = MpscQueueAdapter::<String>::new(10);

        queue.push("test1".to_string()).await.unwrap();
        queue.push("test2".to_string()).await.unwrap();

        // Items come back in FIFO order.
        assert_eq!(queue.pull().await, Some("test1".to_string()));
        assert_eq!(queue.pull().await, Some("test2".to_string()));
    }

    #[tokio::test]
    async fn test_mpsc_queue_try_push() {
        // Small queue so the full condition is easy to hit.
        let queue = MpscQueueAdapter::<i32>::new(2);

        queue.push(1).await.unwrap();
        queue.push(2).await.unwrap();

        // try_push on a full queue must fail fast with QueueFull.
        let result = queue.try_push(3).await;
        assert!(matches!(result, Err(QueueError::QueueFull)));

        // Freeing one slot makes try_push succeed again.
        let _ = queue.pull().await;
        queue.try_push(3).await.unwrap();
    }

    #[tokio::test]
    async fn test_mpsc_queue_from_channel() {
        let (sender, receiver) = mpsc::channel(5);
        let queue = MpscQueueAdapter::from_channel(sender.clone(), receiver);

        // Both the original sender and the adapter can enqueue.
        sender.send("external".to_string()).await.unwrap();
        queue.push("internal".to_string()).await.unwrap();

        assert_eq!(queue.pull().await, Some("external".to_string()));
        assert_eq!(queue.pull().await, Some("internal".to_string()));
    }

    #[tokio::test]
    async fn test_mpsc_queue_health() {
        let queue = MpscQueueAdapter::<String>::new(10);

        // Queue should be healthy initially.
        assert!(queue.is_healthy().await);

        // Build an adapter whose sender is closed: drop the matching
        // receiver, then supply a throwaway receiver to satisfy the
        // constructor (it is never read in this test).
        let (sender, receiver) = mpsc::channel::<String>(10);
        drop(receiver);
        let closed_queue = MpscQueueAdapter::from_channel(sender, mpsc::channel(1).1);

        // A closed sender must report unhealthy and reject pushes.
        // (The original test never asserted the health flag here.)
        assert!(!closed_queue.is_healthy().await);
        let result = closed_queue.push("test".to_string()).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_mpsc_queue_depth() {
        let queue = MpscQueueAdapter::<i32>::new(10);

        // Initially empty.
        assert_eq!(queue.depth().await, Some(0));

        queue.push(1).await.unwrap();
        queue.push(2).await.unwrap();
        queue.push(3).await.unwrap();
        assert_eq!(queue.depth().await, Some(3));

        // Pulling one item reduces the reported depth.
        let _ = queue.pull().await;
        assert_eq!(queue.depth().await, Some(2));
    }

    #[tokio::test]
    async fn test_mpsc_queue_concurrent_operations() {
        use std::collections::HashSet;
        use std::sync::Arc;
        use std::time::Duration;

        let queue = Arc::new(MpscQueueAdapter::<i32>::new(100));

        // Ten producers push ten distinct values each.
        let mut handles = vec![];
        for i in 0..10 {
            let q = queue.clone();
            handles.push(tokio::spawn(async move {
                for j in 0..10 {
                    q.push(i * 10 + j).await.unwrap();
                }
            }));
        }

        // Wait for all producers before draining.
        for handle in handles {
            handle.await.unwrap();
        }

        // Drain exactly 100 items under a timeout. The adapter itself holds
        // a live sender, so `pull` would block forever if an item had been
        // lost; the timeout turns that hang into a test failure. We also
        // verify each produced value arrives exactly once, not just a count.
        let mut seen = HashSet::new();
        for _ in 0..100 {
            let item = tokio::time::timeout(Duration::from_secs(1), queue.pull())
                .await
                .expect("timed out waiting for a pushed item")
                .expect("queue unexpectedly closed");
            assert!(seen.insert(item), "duplicate item pulled");
        }
        assert!((0..100).all(|v| seen.contains(&v)));
    }

    #[tokio::test]
    async fn test_mpsc_queue_no_ack_needed() {
        let queue = MpscQueueAdapter::<String>::new(10);

        queue.push("test".to_string()).await.unwrap();
        let item = queue.pull().await.unwrap();

        // ack is a no-op for MPSC and must always succeed.
        queue.ack(&item).await.unwrap();
    }
}
+222
src/queue/noop.rs
···11+//! No-operation queue adapter implementation.
22+//!
33+//! This module provides a queue adapter that discards all work items,
44+//! useful for testing or when queue processing is disabled.
55+66+use async_trait::async_trait;
77+use std::time::Duration;
88+use tokio::time::sleep;
99+1010+use super::adapter::QueueAdapter;
1111+use super::error::Result;
/// Queue adapter that accepts and silently drops every work item.
///
/// Useful when queue processing is disabled, or as a safe fallback when a
/// real queue backend fails to initialize. Nothing is ever stored: `push`
/// succeeds and discards, `pull` never yields an item, `depth` is always
/// zero, and health checks always pass.
///
/// # Features
///
/// - Zero resource usage
/// - Always reports healthy
/// - Discards all pushed work
/// - Never produces items from `pull`
///
/// # Use Cases
///
/// - Test environments that do not exercise queue processing
/// - Graceful degradation when queue backends are unavailable
/// - Configurations where queue processing is explicitly disabled
///
/// # Examples
///
/// ```
/// use quickdid::queue::NoopQueueAdapter;
/// use quickdid::queue::QueueAdapter;
///
/// # async fn example() -> anyhow::Result<()> {
/// let queue = NoopQueueAdapter::<String>::new();
///
/// // Accepted, then silently discarded.
/// queue.push("ignored".to_string()).await?;
///
/// // `pull` sleeps between polls and never yields an item.
/// // let item = queue.pull().await; // Would not produce work
///
/// // Health and depth are constant.
/// assert!(queue.is_healthy().await);
/// assert_eq!(queue.depth().await, Some(0));
/// # Ok(())
/// # }
/// ```
pub struct NoopQueueAdapter<T>
where
    T: Send + Sync + 'static,
{
    /// Marks the work-item type without storing any data.
    _phantom: std::marker::PhantomData<T>,
}
6161+6262+impl<T> NoopQueueAdapter<T>
6363+where
6464+ T: Send + Sync + 'static,
6565+{
6666+ /// Create a new no-op queue adapter.
6767+ ///
6868+ /// # Examples
6969+ ///
7070+ /// ```
7171+ /// use quickdid::queue::NoopQueueAdapter;
7272+ ///
7373+ /// let queue = NoopQueueAdapter::<String>::new();
7474+ /// ```
7575+ pub fn new() -> Self {
7676+ Self {
7777+ _phantom: std::marker::PhantomData,
7878+ }
7979+ }
8080+}
8181+8282+impl<T> Default for NoopQueueAdapter<T>
8383+where
8484+ T: Send + Sync + 'static,
8585+{
8686+ fn default() -> Self {
8787+ Self::new()
8888+ }
8989+}
9090+9191+#[async_trait]
9292+impl<T> QueueAdapter<T> for NoopQueueAdapter<T>
9393+where
9494+ T: Send + Sync + 'static,
9595+{
9696+ async fn pull(&self) -> Option<T> {
9797+ // Never returns any work - sleeps to avoid busy-waiting
9898+ sleep(Duration::from_secs(60)).await;
9999+ None
100100+ }
101101+102102+ async fn push(&self, _work: T) -> Result<()> {
103103+ // Silently discard the work
104104+ Ok(())
105105+ }
106106+107107+ async fn ack(&self, _item: &T) -> Result<()> {
108108+ // No-op
109109+ Ok(())
110110+ }
111111+112112+ async fn try_push(&self, _work: T) -> Result<()> {
113113+ // Silently discard the work
114114+ Ok(())
115115+ }
116116+117117+ async fn depth(&self) -> Option<usize> {
118118+ // Always empty
119119+ Some(0)
120120+ }
121121+122122+ async fn is_healthy(&self) -> bool {
123123+ // Always healthy
124124+ true
125125+ }
126126+}
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_noop_queue_push() {
        let queue = NoopQueueAdapter::<String>::new();

        // Every push is accepted and discarded.
        for msg in ["test1", "test2", "test3"] {
            queue.push(msg.to_string()).await.unwrap();
        }
    }

    #[tokio::test]
    async fn test_noop_queue_try_push() {
        let queue = NoopQueueAdapter::<i32>::new();

        // Non-blocking pushes are accepted as well.
        for n in 1..=3 {
            queue.try_push(n).await.unwrap();
        }
    }

    #[tokio::test]
    async fn test_noop_queue_ack() {
        let queue = NoopQueueAdapter::<String>::new();

        // Acknowledgment is a no-op and cannot fail.
        queue.ack(&"any".to_string()).await.unwrap();
    }

    #[tokio::test]
    async fn test_noop_queue_depth() {
        let queue = NoopQueueAdapter::<String>::new();

        // Depth is zero before...
        assert_eq!(queue.depth().await, Some(0));

        // ...and after pushing, since items are discarded.
        queue.push("item".to_string()).await.unwrap();
        assert_eq!(queue.depth().await, Some(0));
    }

    #[tokio::test]
    async fn test_noop_queue_health() {
        // The adapter has no backend that could fail.
        let queue = NoopQueueAdapter::<String>::new();
        assert!(queue.is_healthy().await);
    }

    #[tokio::test]
    async fn test_noop_queue_default() {
        let queue: NoopQueueAdapter<String> = Default::default();

        // A default-constructed instance behaves like `new()`.
        queue.push("test".to_string()).await.unwrap();
        assert!(queue.is_healthy().await);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_noop_queue_pull_blocks() {
        use tokio::time::timeout;

        let queue = NoopQueueAdapter::<String>::new();

        // Pull must still be pending after a short wait.
        let outcome = timeout(Duration::from_millis(100), queue.pull()).await;
        assert!(outcome.is_err(), "Pull should have timed out");
    }

    #[tokio::test]
    async fn test_noop_queue_with_custom_type() {
        use serde::{Deserialize, Serialize};

        #[derive(Debug, Clone, Serialize, Deserialize)]
        struct CustomWork {
            id: u64,
            data: Vec<String>,
        }

        let queue = NoopQueueAdapter::<CustomWork>::new();
        let work = CustomWork {
            id: 123,
            data: vec!["test".to_string()],
        };

        // Arbitrary serializable types are handled uniformly.
        queue.push(work.clone()).await.unwrap();
        queue.ack(&work).await.unwrap();
        assert_eq!(queue.depth().await, Some(0));
    }
}
+474
src/queue/redis.rs
···11+//! Redis-backed queue adapter implementation.
22+//!
33+//! This module provides a distributed queue implementation using Redis lists
44+//! with a reliable queue pattern for at-least-once delivery semantics.
55+66+use async_trait::async_trait;
77+use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
88+use serde::{Deserialize, Serialize};
99+use tracing::{debug, error, warn};
1010+1111+use super::adapter::QueueAdapter;
1212+use super::error::{QueueError, Result};
1313+1414+/// Redis-backed queue adapter implementation.
1515+///
1616+/// This adapter uses Redis lists with a reliable queue pattern:
1717+/// - LPUSH to push items to the primary queue
1818+/// - BRPOPLPUSH to atomically move items from primary to worker queue
1919+/// - LREM to acknowledge processed items from worker queue
2020+///
2121+/// This ensures at-least-once delivery semantics and allows for recovery
2222+/// of in-flight items if a worker crashes.
2323+///
2424+/// # Features
2525+///
2626+/// - Distributed operation across multiple instances
2727+/// - Persistent storage with Redis
2828+/// - At-least-once delivery guarantees
2929+/// - Automatic recovery of failed items
3030+/// - Configurable timeouts
3131+///
3232+/// # Architecture
3333+///
3434+/// ```text
3535+/// Producer -> [Primary Queue] -> BRPOPLPUSH -> [Worker Queue] -> Consumer
3636+/// |
3737+/// LREM (on ack)
3838+/// ```
3939+///
4040+/// # Examples
4141+///
4242+/// ```no_run
4343+/// use quickdid::queue::RedisQueueAdapter;
4444+/// use quickdid::queue::QueueAdapter;
4545+/// use deadpool_redis::Config;
4646+///
4747+/// # async fn example() -> anyhow::Result<()> {
4848+/// // Create Redis pool
4949+/// let cfg = Config::from_url("redis://localhost:6379");
5050+/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
5151+///
5252+/// // Create queue adapter
5353+/// let queue = RedisQueueAdapter::<String>::new(
5454+/// pool,
5555+/// "worker-1".to_string(),
5656+/// "queue:myapp:".to_string(),
5757+/// 5, // 5 second timeout
5858+/// );
5959+///
6060+/// // Use the queue
6161+/// queue.push("work-item".to_string()).await?;
6262+/// if let Some(item) = queue.pull().await {
6363+/// // Process item
6464+/// queue.ack(&item).await?;
6565+/// }
6666+/// # Ok(())
6767+/// # }
6868+/// ```
6969+pub struct RedisQueueAdapter<T>
7070+where
7171+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
7272+{
7373+ /// Redis connection pool
7474+ pool: RedisPool,
7575+ /// Unique worker ID for this adapter instance
7676+ worker_id: String,
7777+ /// Key prefix for all queues (default: "queue:handleresolver:")
7878+ key_prefix: String,
7979+ /// Timeout for blocking RPOPLPUSH operations (in seconds)
8080+ timeout_seconds: u64,
8181+ /// Type marker for generic parameter
8282+ _phantom: std::marker::PhantomData<T>,
8383+}
8484+8585+impl<T> RedisQueueAdapter<T>
8686+where
8787+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
8888+{
8989+ /// Create a new Redis queue adapter.
9090+ ///
9191+ /// # Arguments
9292+ ///
9393+ /// * `pool` - Redis connection pool
9494+ /// * `worker_id` - Unique identifier for this worker instance
9595+ /// * `key_prefix` - Redis key prefix for queue operations
9696+ /// * `timeout_seconds` - Timeout for blocking pull operations
9797+ ///
9898+ /// # Examples
9999+ ///
100100+ /// ```no_run
101101+ /// use quickdid::queue::RedisQueueAdapter;
102102+ /// use deadpool_redis::Config;
103103+ ///
104104+ /// # async fn example() -> anyhow::Result<()> {
105105+ /// let cfg = Config::from_url("redis://localhost:6379");
106106+ /// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
107107+ ///
108108+ /// let queue = RedisQueueAdapter::<String>::new(
109109+ /// pool,
110110+ /// "worker-1".to_string(),
111111+ /// "queue:myapp:".to_string(),
112112+ /// 5,
113113+ /// );
114114+ /// # Ok(())
115115+ /// # }
116116+ /// ```
117117+ pub fn new(
118118+ pool: RedisPool,
119119+ worker_id: String,
120120+ key_prefix: String,
121121+ timeout_seconds: u64,
122122+ ) -> Self {
123123+ Self {
124124+ pool,
125125+ worker_id,
126126+ key_prefix,
127127+ timeout_seconds,
128128+ _phantom: std::marker::PhantomData,
129129+ }
130130+ }
131131+132132+ /// Get the primary queue key.
133133+ fn primary_queue_key(&self) -> String {
134134+ format!("{}primary", self.key_prefix)
135135+ }
136136+137137+ /// Get the worker-specific temporary queue key.
138138+ fn worker_queue_key(&self) -> String {
139139+ format!("{}{}", self.key_prefix, self.worker_id)
140140+ }
141141+}
#[async_trait]
impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
where
    T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
{
    /// Block up to `timeout_seconds` waiting for an item, atomically moving it
    /// into this worker's in-flight queue so it can be recovered if the worker
    /// dies before `ack`. Returns `None` on timeout or on any Redis/decoding
    /// error (errors are logged, never propagated).
    async fn pull(&self) -> Option<T> {
        match self.pool.get().await {
            Ok(mut conn) => {
                let primary_key = self.primary_queue_key();
                let worker_key = self.worker_queue_key();

                // Use blocking RPOPLPUSH to atomically move item from primary to worker queue
                let data: Option<Vec<u8>> = match conn
                    .brpoplpush(&primary_key, &worker_key, self.timeout_seconds as f64)
                    .await
                {
                    Ok(data) => data,
                    Err(e) => {
                        error!("Failed to pull from queue: {}", e);
                        return None;
                    }
                };

                if let Some(data) = data {
                    // Deserialize the item
                    match serde_json::from_slice(&data) {
                        Ok(item) => {
                            debug!(
                                worker_id = %self.worker_id,
                                "Pulled item from queue"
                            );
                            Some(item)
                        }
                        Err(e) => {
                            error!("Failed to deserialize item: {}", e);
                            // Remove the corrupted item from worker queue
                            // (best-effort: the LREM result is deliberately ignored)
                            let _: std::result::Result<(), _> =
                                conn.lrem(&worker_key, 1, &data).await;
                            None
                        }
                    }
                } else {
                    // Timed out with no item available
                    None
                }
            }
            Err(e) => {
                error!("Failed to get Redis connection: {}", e);
                None
            }
        }
    }

    /// Serialize the item as JSON and LPUSH it onto the shared primary queue.
    async fn push(&self, work: T) -> Result<()> {
        let mut conn = self
            .pool
            .get()
            .await
            .map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;

        let data = serde_json::to_vec(&work)
            .map_err(|e| QueueError::SerializationFailed(e.to_string()))?;

        let primary_key = self.primary_queue_key();

        conn.lpush::<_, _, ()>(&primary_key, data)
            .await
            .map_err(|e| QueueError::RedisOperationFailed {
                operation: "LPUSH".to_string(),
                details: e.to_string(),
            })?;

        debug!("Pushed item to queue");
        Ok(())
    }

    /// Remove one matching copy of `item` from this worker's in-flight queue.
    /// Matching is by exact serialized bytes, so the item must re-serialize
    /// identically to how it was pushed. A miss is logged but not an error.
    async fn ack(&self, item: &T) -> Result<()> {
        let mut conn = self
            .pool
            .get()
            .await
            .map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;

        let data =
            serde_json::to_vec(item).map_err(|e| QueueError::SerializationFailed(e.to_string()))?;

        let worker_key = self.worker_queue_key();

        // Remove exactly one occurrence of this item from the worker queue
        let removed: i32 = conn.lrem(&worker_key, 1, &data).await.map_err(|e| {
            QueueError::RedisOperationFailed {
                operation: "LREM".to_string(),
                details: e.to_string(),
            }
        })?;

        if removed == 0 {
            warn!(
                worker_id = %self.worker_id,
                "Item not found in worker queue during acknowledgment"
            );
        } else {
            debug!(
                worker_id = %self.worker_id,
                "Acknowledged item"
            );
        }

        Ok(())
    }

    /// Report the length of the primary queue only; items sitting in worker
    /// in-flight queues are not counted. `None` on any Redis error.
    async fn depth(&self) -> Option<usize> {
        match self.pool.get().await {
            Ok(mut conn) => {
                let primary_key = self.primary_queue_key();
                match conn.llen::<_, usize>(&primary_key).await {
                    Ok(len) => Some(len),
                    Err(e) => {
                        error!("Failed to get queue depth: {}", e);
                        None
                    }
                }
            }
            Err(e) => {
                error!("Failed to get Redis connection: {}", e);
                None
            }
        }
    }

    /// Healthy iff a connection can be checked out and PING answers PONG.
    async fn is_healthy(&self) -> bool {
        match self.pool.get().await {
            Ok(mut conn) => {
                // Ping Redis to check health
                match deadpool_redis::redis::cmd("PING")
                    .query_async::<String>(&mut conn)
                    .await
                {
                    Ok(response) => response == "PONG",
                    Err(_) => false,
                }
            }
            Err(_) => false,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a key prefix unique to this test invocation so concurrently
    /// running tests never observe each other's Redis keys.
    ///
    /// (Previously this nanosecond-timestamp expression was duplicated
    /// verbatim in four tests; centralizing it removes the repetition.)
    fn unique_test_prefix() -> String {
        format!(
            "test:queue:{}:",
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_nanos()
        )
    }

    #[tokio::test]
    async fn test_redis_queue_push_pull() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        // Create adapter with unique prefix for testing
        let adapter = RedisQueueAdapter::<String>::new(
            pool.clone(),
            "test-worker".to_string(),
            unique_test_prefix(),
            1, // 1 second timeout for tests
        );

        // Test push
        adapter.push("test-item".to_string()).await.unwrap();

        // Test pull
        let pulled = adapter.pull().await;
        assert_eq!(pulled, Some("test-item".to_string()));

        // Test ack
        adapter
            .ack(&"test-item".to_string())
            .await
            .expect("Ack should succeed");
    }

    #[tokio::test]
    async fn test_redis_queue_reliable_delivery() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        let worker_id = "test-worker-reliable";

        // Create adapter
        let adapter1 = RedisQueueAdapter::<String>::new(
            pool.clone(),
            worker_id.to_string(),
            unique_test_prefix(),
            1,
        );

        // Push multiple items
        adapter1.push("item1".to_string()).await.unwrap();
        adapter1.push("item2".to_string()).await.unwrap();
        adapter1.push("item3".to_string()).await.unwrap();

        // Pull but don't ack (simulating worker crash)
        let item1 = adapter1.pull().await;
        assert_eq!(item1, Some("item1".to_string()));

        // Item should be in worker queue
        // In production, a recovery process would handle unacked items
        // For this test, we verify the item is in the worker queue
        let item2 = adapter1.pull().await;
        assert_eq!(item2, Some("item2".to_string()));

        // Ack the second item
        adapter1.ack(&"item2".to_string()).await.unwrap();
    }

    #[tokio::test]
    async fn test_redis_queue_depth() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        let adapter = RedisQueueAdapter::<String>::new(
            pool,
            "test-worker-depth".to_string(),
            unique_test_prefix(),
            1,
        );

        // Initially empty
        assert_eq!(adapter.depth().await, Some(0));

        // Push items and check depth
        adapter.push("item1".to_string()).await.unwrap();
        assert_eq!(adapter.depth().await, Some(1));

        adapter.push("item2".to_string()).await.unwrap();
        assert_eq!(adapter.depth().await, Some(2));

        // Pull and check depth (note: depth checks primary queue)
        let _ = adapter.pull().await;
        assert_eq!(adapter.depth().await, Some(1));
    }

    #[tokio::test]
    async fn test_redis_queue_health() {
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        let adapter = RedisQueueAdapter::<String>::new(
            pool,
            "test-worker-health".to_string(),
            "test:queue:health:".to_string(),
            1,
        );

        // Should be healthy if Redis is running
        assert!(adapter.is_healthy().await);
    }

    #[tokio::test]
    async fn test_redis_queue_serialization() {
        use crate::queue::HandleResolutionWork;

        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => {
                eprintln!("Skipping Redis test - no Redis connection available");
                return;
            }
        };

        let adapter = RedisQueueAdapter::<HandleResolutionWork>::new(
            pool,
            "test-worker-ser".to_string(),
            unique_test_prefix(),
            1,
        );

        let work = HandleResolutionWork::new("alice.example.com".to_string());

        // Push and pull
        adapter.push(work.clone()).await.unwrap();
        let pulled = adapter.pull().await;
        assert_eq!(pulled, Some(work.clone()));

        // Ack
        adapter.ack(&work).await.unwrap();
    }
}
+502
src/queue/sqlite.rs
···11+//! SQLite-backed queue adapter implementation.
22+//!
33+//! This module provides a persistent queue implementation using SQLite
44+//! with optional work shedding to prevent unbounded growth.
55+66+use async_trait::async_trait;
77+use serde::{Deserialize, Serialize};
88+use sqlx::{self, Row};
99+use tracing::{debug, error, info, warn};
1010+1111+use super::adapter::QueueAdapter;
1212+use super::error::{QueueError, Result};
1313+1414+/// SQLite-backed queue adapter implementation.
1515+///
1616+/// This adapter uses SQLite database for persistent queuing of work items.
1717+/// It's suitable for single-instance deployments that need persistence
1818+/// across service restarts while remaining lightweight.
1919+///
2020+/// # Features
2121+///
2222+/// - Persistent queuing across service restarts
2323+/// - Simple FIFO ordering based on insertion time
2424+/// - Single consumer design (no complex locking needed)
2525+/// - Simple pull-and-delete semantics
2626+/// - Optional work shedding to prevent unbounded queue growth
2727+///
2828+/// # Work Shedding
2929+///
3030+/// When `max_size` is configured (> 0), the adapter implements work shedding:
3131+/// - New work items are always accepted
3232+/// - When the queue exceeds `max_size`, oldest entries are automatically deleted
3333+/// - This maintains the most recent work items while preventing unbounded growth
3434+/// - Essential for long-running deployments to avoid disk space issues
3535+///
3636+/// # Database Schema
3737+///
3838+/// The adapter expects the following table structure:
3939+/// ```sql
4040+/// CREATE TABLE handle_resolution_queue (
4141+/// id INTEGER PRIMARY KEY AUTOINCREMENT,
4242+/// work TEXT NOT NULL,
4343+/// queued_at INTEGER NOT NULL
4444+/// );
4545+/// CREATE INDEX idx_queue_timestamp ON handle_resolution_queue(queued_at);
4646+/// ```
4747+///
4848+/// # Examples
4949+///
5050+/// ```no_run
5151+/// use quickdid::queue::SqliteQueueAdapter;
5252+/// use quickdid::queue::QueueAdapter;
5353+/// use quickdid::sqlite_schema::create_sqlite_pool;
5454+///
5555+/// # async fn example() -> anyhow::Result<()> {
5656+/// // Create SQLite pool
5757+/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
5858+///
5959+/// // Create queue with unlimited size
6060+/// let queue = SqliteQueueAdapter::<String>::new(pool.clone());
6161+///
6262+/// // Or create queue with work shedding (max 10,000 items)
6363+/// let bounded_queue = SqliteQueueAdapter::<String>::with_max_size(pool, 10000);
6464+///
6565+/// // Use the queue
6666+/// queue.push("work-item".to_string()).await?;
6767+/// if let Some(item) = queue.pull().await {
6868+/// // Process item (automatically deleted from queue)
6969+/// println!("Processing: {}", item);
7070+/// }
7171+/// # Ok(())
7272+/// # }
7373+/// ```
7474+pub struct SqliteQueueAdapter<T>
7575+where
7676+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
7777+{
7878+ /// SQLite connection pool
7979+ pool: sqlx::SqlitePool,
8080+ /// Maximum queue size (0 = unlimited)
8181+ /// When exceeded, oldest entries are deleted to maintain this limit
8282+ max_size: u64,
8383+ /// Type marker for generic parameter
8484+ _phantom: std::marker::PhantomData<T>,
8585+}
8686+8787+impl<T> SqliteQueueAdapter<T>
8888+where
8989+ T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
9090+{
9191+ /// Create a new SQLite queue adapter with unlimited queue size.
9292+ ///
9393+ /// # Arguments
9494+ ///
9595+ /// * `pool` - SQLite connection pool
9696+ ///
9797+ /// # Examples
9898+ ///
9999+ /// ```no_run
100100+ /// use quickdid::queue::SqliteQueueAdapter;
101101+ /// use quickdid::sqlite_schema::create_sqlite_pool;
102102+ ///
103103+ /// # async fn example() -> anyhow::Result<()> {
104104+ /// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
105105+ /// let queue = SqliteQueueAdapter::<String>::new(pool);
106106+ /// # Ok(())
107107+ /// # }
108108+ /// ```
109109+ pub fn new(pool: sqlx::SqlitePool) -> Self {
110110+ Self::with_max_size(pool, 0)
111111+ }
112112+113113+ /// Create a new SQLite queue adapter with specified maximum queue size.
114114+ ///
115115+ /// # Arguments
116116+ ///
117117+ /// * `pool` - SQLite connection pool
118118+ /// * `max_size` - Maximum number of entries in queue (0 = unlimited)
119119+ ///
120120+ /// # Work Shedding Behavior
121121+ ///
122122+ /// When `max_size` > 0:
123123+ /// - New work items are always accepted
124124+ /// - If queue size exceeds `max_size` after insertion, oldest entries are deleted
125125+ /// - This preserves the most recent work while preventing unbounded growth
126126+ ///
127127+ /// # Examples
128128+ ///
129129+ /// ```no_run
130130+ /// use quickdid::queue::SqliteQueueAdapter;
131131+ /// use quickdid::sqlite_schema::create_sqlite_pool;
132132+ ///
133133+ /// # async fn example() -> anyhow::Result<()> {
134134+ /// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
135135+ /// // Limit queue to 10,000 entries with automatic work shedding
136136+ /// let queue = SqliteQueueAdapter::<String>::with_max_size(pool, 10000);
137137+ /// # Ok(())
138138+ /// # }
139139+ /// ```
140140+ pub fn with_max_size(pool: sqlx::SqlitePool, max_size: u64) -> Self {
141141+ Self {
142142+ pool,
143143+ max_size,
144144+ _phantom: std::marker::PhantomData,
145145+ }
146146+ }
147147+}
#[async_trait]
impl<T> QueueAdapter<T> for SqliteQueueAdapter<T>
where
    T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
{
    /// Pop the oldest row, reading and deleting it inside one transaction so
    /// an interrupted pull is rolled back rather than double-delivered.
    /// Returns `None` when the queue is empty or on any database error
    /// (errors are logged, never propagated). Note: this does not block;
    /// an empty queue returns immediately.
    async fn pull(&self) -> Option<T> {
        // Get the oldest queued item and delete it in a transaction
        let mut transaction = match self.pool.begin().await {
            Ok(tx) => tx,
            Err(e) => {
                error!("Failed to start SQLite transaction: {}", e);
                return None;
            }
        };

        // Select the oldest queued item
        let record = match sqlx::query(
            "SELECT id, work FROM handle_resolution_queue
             ORDER BY queued_at ASC
             LIMIT 1"
        )
        .fetch_optional(&mut *transaction)
        .await
        {
            Ok(Some(row)) => row,
            Ok(None) => {
                // No queued items available (dropping `transaction` rolls back)
                debug!("No queued items available in SQLite queue");
                return None;
            }
            Err(e) => {
                error!("Failed to query SQLite queue: {}", e);
                return None;
            }
        };

        let item_id: i64 = record.get("id");
        let work_json: String = record.get("work");

        // Delete the item from the queue
        if let Err(e) = sqlx::query("DELETE FROM handle_resolution_queue WHERE id = ?1")
            .bind(item_id)
            .execute(&mut *transaction)
            .await
        {
            error!("Failed to delete item from queue: {}", e);
            return None;
        }

        // Commit the transaction
        if let Err(e) = transaction.commit().await {
            error!("Failed to commit SQLite transaction: {}", e);
            return None;
        }

        // Deserialize the work item from JSON
        match serde_json::from_str(&work_json) {
            Ok(work) => {
                debug!("Pulled work item from SQLite queue");
                Some(work)
            }
            Err(e) => {
                // NOTE(review): the row is already deleted and committed at
                // this point, so a corrupt payload is dropped, not retried.
                error!("Failed to deserialize work item: {}", e);
                None
            }
        }
    }

    /// Insert the item as a JSON row stamped with the current Unix time,
    /// then (if `max_size` > 0) shed the oldest rows when the table has
    /// grown roughly 10% past the limit, trimming back down to ~80% of
    /// `max_size` so shedding runs infrequently.
    async fn push(&self, work: T) -> Result<()> {
        // Serialize the entire work item as JSON
        let work_json = serde_json::to_string(&work)
            .map_err(|e| QueueError::SerializationFailed(e.to_string()))?;

        // Unix seconds; a pre-epoch clock degrades to 0 rather than panicking
        let current_timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() as i64;

        // Optimized approach: Insert first, then check if cleanup needed
        // This avoids counting on every insert
        sqlx::query(
            "INSERT INTO handle_resolution_queue (work, queued_at) VALUES (?1, ?2)"
        )
        .bind(&work_json)
        .bind(current_timestamp)
        .execute(&self.pool)
        .await
        .map_err(|e| QueueError::PushFailed(format!("Failed to insert work item: {}", e)))?;

        // Implement optimized work shedding if max_size is configured
        if self.max_size > 0 {
            // Optimized approach: Only check and clean periodically or when likely over limit
            // Use a limited count to avoid full table scan
            let check_limit = self.max_size as i64 + (self.max_size as i64 / 10).max(1); // Check 10% over limit
            // The LIMIT inside the subquery caps the scan, so `count` saturates
            // at `check_limit` instead of walking the whole table
            let approx_count: Option<i64> = sqlx::query_scalar(
                "SELECT COUNT(*) FROM (
                    SELECT 1 FROM handle_resolution_queue LIMIT ?1
                ) AS limited_count"
            )
            .bind(check_limit)
            .fetch_one(&self.pool)
            .await
            .map_err(|e| QueueError::PushFailed(format!("Failed to check queue size: {}", e)))?;

            // Only perform cleanup if we're definitely over the limit
            if let Some(count) = approx_count && count >= check_limit {
                // Perform batch cleanup - delete more than just the excess to reduce frequency
                // Delete 20% more than needed to avoid frequent shedding
                let target_size = (self.max_size as f64 * 0.8) as i64; // Keep 80% of max_size
                let to_delete = count - target_size;

                if to_delete > 0 {
                    // Optimized deletion: First get the cutoff id and timestamp
                    // This avoids the expensive subquery in the DELETE statement
                    let cutoff: Option<(i64, i64)> = sqlx::query_as(
                        "SELECT id, queued_at FROM handle_resolution_queue
                         ORDER BY queued_at ASC, id ASC
                         LIMIT 1 OFFSET ?1"
                    )
                    .bind(to_delete - 1)
                    .fetch_optional(&self.pool)
                    .await
                    .map_err(|e| QueueError::PushFailed(format!("Failed to find cutoff: {}", e)))?;

                    if let Some((cutoff_id, cutoff_timestamp)) = cutoff {
                        // Delete entries older than cutoff, or equal timestamp with lower id
                        // This handles the case where multiple entries have the same timestamp
                        let deleted_result = sqlx::query(
                            "DELETE FROM handle_resolution_queue
                             WHERE queued_at < ?1
                                OR (queued_at = ?1 AND id <= ?2)"
                        )
                        .bind(cutoff_timestamp)
                        .bind(cutoff_id)
                        .execute(&self.pool)
                        .await
                        .map_err(|e| QueueError::PushFailed(format!("Failed to delete excess entries: {}", e)))?;

                        let deleted_count = deleted_result.rows_affected();
                        if deleted_count > 0 {
                            info!(
                                "Work shedding: deleted {} oldest entries (target size: {}, max: {})",
                                deleted_count,
                                target_size,
                                self.max_size
                            );
                        }
                    }
                }
            }
        }

        debug!("Pushed work item to SQLite queue (max_size: {})", self.max_size);
        Ok(())
    }

    /// Acknowledgment is a no-op: `pull` already deleted the row.
    async fn ack(&self, _item: &T) -> Result<()> {
        // With the simplified SQLite queue design, items are deleted when pulled,
        // so acknowledgment is a no-op (item is already processed and removed)
        debug!("Acknowledged work item in SQLite queue (no-op)");
        Ok(())
    }

    /// Full row count of the queue table; `None` on database error.
    async fn depth(&self) -> Option<usize> {
        match sqlx::query_scalar::<_, i64>(
            "SELECT COUNT(*) FROM handle_resolution_queue"
        )
        .fetch_one(&self.pool)
        .await
        {
            Ok(count) => Some(count as usize),
            Err(e) => {
                warn!("Failed to get SQLite queue depth: {}", e);
                None
            }
        }
    }

    /// Healthy iff a trivial query round-trips through the pool.
    async fn is_healthy(&self) -> bool {
        // Test the connection by running a simple query
        sqlx::query_scalar::<_, i64>("SELECT 1")
            .fetch_one(&self.pool)
            .await
            .map(|_| true)
            .unwrap_or(false)
    }
}
336336+337337+#[cfg(test)]
338338+mod tests {
339339+ use super::*;
340340+ use crate::queue::HandleResolutionWork;
341341+342342+ async fn create_test_pool() -> sqlx::SqlitePool {
343343+ let pool = sqlx::SqlitePool::connect("sqlite::memory:")
344344+ .await
345345+ .expect("Failed to connect to in-memory SQLite");
346346+347347+ // Create the queue schema
348348+ crate::sqlite_schema::create_schema(&pool)
349349+ .await
350350+ .expect("Failed to create schema");
351351+352352+ pool
353353+ }
354354+355355+ #[tokio::test]
356356+ async fn test_sqlite_queue_push_pull() {
357357+ let pool = create_test_pool().await;
358358+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool.clone());
359359+360360+ let work = HandleResolutionWork::new("alice.example.com".to_string());
361361+362362+ // Test push
363363+ adapter.push(work.clone()).await.unwrap();
364364+365365+ // Verify depth
366366+ assert_eq!(adapter.depth().await, Some(1));
367367+368368+ // Test pull
369369+ let pulled = adapter.pull().await;
370370+ assert_eq!(pulled, Some(work));
371371+372372+ // Verify queue is empty after pull
373373+ assert_eq!(adapter.depth().await, Some(0));
374374+ assert!(adapter.pull().await.is_none());
375375+ }
376376+377377+ #[tokio::test]
378378+ async fn test_sqlite_queue_fifo_ordering() {
379379+ let pool = create_test_pool().await;
380380+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
381381+382382+ // Push multiple items
383383+ let handles = vec!["alice.example.com", "bob.example.com", "charlie.example.com"];
384384+ for handle in &handles {
385385+ let work = HandleResolutionWork::new(handle.to_string());
386386+ adapter.push(work).await.unwrap();
387387+ }
388388+389389+ // Pull items in FIFO order
390390+ for expected_handle in handles {
391391+ let pulled = adapter.pull().await;
392392+ assert!(pulled.is_some());
393393+ assert_eq!(pulled.unwrap().handle, expected_handle);
394394+ }
395395+396396+ // Queue should be empty
397397+ assert!(adapter.pull().await.is_none());
398398+ }
399399+400400+ #[tokio::test]
401401+ async fn test_sqlite_queue_ack_noop() {
402402+ let pool = create_test_pool().await;
403403+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
404404+405405+ // Ack should always succeed as it's a no-op
406406+ let work = HandleResolutionWork::new("any.example.com".to_string());
407407+ adapter.ack(&work).await.unwrap();
408408+ }
409409+410410+ #[tokio::test]
411411+ async fn test_sqlite_queue_health() {
412412+ let pool = create_test_pool().await;
413413+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
414414+415415+ // Should be healthy if SQLite is working
416416+ assert!(adapter.is_healthy().await);
417417+ }
418418+419419+ #[tokio::test]
420420+ async fn test_sqlite_queue_work_shedding() {
421421+ let pool = create_test_pool().await;
422422+423423+ // Create adapter with small max_size for testing
424424+ let max_size = 10;
425425+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
426426+ pool.clone(),
427427+ max_size
428428+ );
429429+430430+ // Push items up to the limit (should not trigger shedding)
431431+ for i in 0..max_size {
432432+ let work = HandleResolutionWork::new(format!("test-{:03}", i));
433433+ adapter.push(work).await.expect("Push should succeed");
434434+ }
435435+436436+ // Verify all items are present
437437+ assert_eq!(adapter.depth().await, Some(max_size as usize));
438438+439439+ // Push beyond 110% of max_size to trigger batch shedding
440440+ let trigger_point = max_size + (max_size / 10) + 1;
441441+ for i in max_size..trigger_point {
442442+ let work = HandleResolutionWork::new(format!("test-{:03}", i));
443443+ adapter.push(work).await.expect("Push should succeed");
444444+ }
445445+446446+ // After triggering shedding, queue should be around 80% of max_size
447447+ let depth_after_shedding = adapter.depth().await.unwrap();
448448+ let expected_size = (max_size as f64 * 0.8) as usize;
449449+450450+ // Allow some variance due to batch deletion
451451+ assert!(
452452+ depth_after_shedding <= expected_size + 1,
453453+ "Queue size {} should be around 80% of max_size ({})",
454454+ depth_after_shedding,
455455+ expected_size
456456+ );
457457+ }
458458+459459+ #[tokio::test]
460460+ async fn test_sqlite_queue_work_shedding_disabled() {
461461+ let pool = create_test_pool().await;
462462+463463+ // Create adapter with max_size = 0 (disabled work shedding)
464464+ let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
465465+ pool,
466466+ 0
467467+ );
468468+469469+ // Push many items (should not trigger any shedding)
470470+ for i in 0..100 {
471471+ let work = HandleResolutionWork::new(format!("test-{:03}", i));
472472+ adapter.push(work).await.expect("Push should succeed");
473473+ }
474474+475475+ // Verify all items are present (no shedding occurred)
476476+ assert_eq!(adapter.depth().await, Some(100));
477477+ }
478478+479479+ #[tokio::test]
480480+ async fn test_sqlite_queue_generic_work_type() {
481481+ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
482482+ struct CustomWork {
483483+ id: u64,
484484+ name: String,
485485+ data: Vec<i32>,
486486+ }
487487+488488+ let pool = create_test_pool().await;
489489+ let adapter = SqliteQueueAdapter::<CustomWork>::new(pool);
490490+491491+ let work = CustomWork {
492492+ id: 123,
493493+ name: "test_work".to_string(),
494494+ data: vec![1, 2, 3, 4, 5],
495495+ };
496496+497497+ // Test push and pull
498498+ adapter.push(work.clone()).await.unwrap();
499499+ let pulled = adapter.pull().await;
500500+ assert_eq!(pulled, Some(work));
501501+ }
502502+}
+95
src/queue/work.rs
···11+//! Work item types for queue processing.
22+//!
33+//! This module defines the various work item types that can be processed
44+//! through the queue system, such as handle resolution requests.
55+66+use serde::{Deserialize, Serialize};
77+88+/// Work item for handle resolution tasks.
99+///
1010+/// This structure represents a request to resolve an AT Protocol handle
1111+/// to its corresponding DID. It's the primary work type processed by
1212+/// the QuickDID service's background queue workers.
1313+///
1414+/// # Examples
1515+///
1616+/// ```
1717+/// use quickdid::queue::HandleResolutionWork;
1818+///
1919+/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
2020+/// assert_eq!(work.handle, "alice.bsky.social");
2121+/// ```
2222+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
2323+pub struct HandleResolutionWork {
2424+ /// The AT Protocol handle to resolve (e.g., "alice.bsky.social")
2525+ pub handle: String,
2626+}
2727+2828+impl HandleResolutionWork {
2929+ /// Create a new handle resolution work item.
3030+ ///
3131+ /// # Arguments
3232+ ///
3333+ /// * `handle` - The AT Protocol handle to resolve
3434+ ///
3535+ /// # Examples
3636+ ///
3737+ /// ```
3838+ /// use quickdid::queue::HandleResolutionWork;
3939+ ///
4040+ /// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
4141+ /// ```
4242+ pub fn new(handle: String) -> Self {
4343+ Self { handle }
4444+ }
4545+}
4646+4747+impl std::fmt::Display for HandleResolutionWork {
4848+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
4949+ write!(f, "HandleResolution({})", self.handle)
5050+ }
5151+}
5252+5353+#[cfg(test)]
5454+mod tests {
5555+ use super::*;
5656+5757+ #[test]
5858+ fn test_handle_resolution_work_creation() {
5959+ let handle = "alice.example.com";
6060+ let work = HandleResolutionWork::new(handle.to_string());
6161+ assert_eq!(work.handle, handle);
6262+ }
6363+6464+ #[test]
6565+ fn test_handle_resolution_work_serialization() {
6666+ let work = HandleResolutionWork::new("bob.example.com".to_string());
6767+6868+ // Test JSON serialization (which is what we actually use in the queue adapters)
6969+ let json = serde_json::to_string(&work).expect("Failed to serialize to JSON");
7070+ let deserialized: HandleResolutionWork =
7171+ serde_json::from_str(&json).expect("Failed to deserialize from JSON");
7272+ assert_eq!(work, deserialized);
7373+7474+ // Verify the JSON structure
7575+ let json_value: serde_json::Value = serde_json::from_str(&json).unwrap();
7676+ assert_eq!(json_value["handle"], "bob.example.com");
7777+ }
7878+7979+ #[test]
8080+ fn test_handle_resolution_work_display() {
8181+ let work = HandleResolutionWork::new("charlie.example.com".to_string());
8282+ let display = format!("{}", work);
8383+ assert_eq!(display, "HandleResolution(charlie.example.com)");
8484+ }
8585+8686+ #[test]
8787+ fn test_handle_resolution_work_equality() {
8888+ let work1 = HandleResolutionWork::new("alice.example.com".to_string());
8989+ let work2 = HandleResolutionWork::new("alice.example.com".to_string());
9090+ let work3 = HandleResolutionWork::new("bob.example.com".to_string());
9191+9292+ assert_eq!(work1, work2);
9393+ assert_ne!(work1, work3);
9494+ }
9595+}
-1626
src/queue_adapter.rs
···11-//! Generic queue adapter system for work queue abstraction.
22-//!
33-//! This module provides a generic trait and implementations for queue adapters
44-//! that can be used with any work type for handle resolution and other tasks.
55-66-use async_trait::async_trait;
77-use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
88-use serde::{Deserialize, Serialize};
99-use sqlx::{self, Row};
1010-use std::sync::Arc;
1111-use thiserror::Error;
1212-use tokio::sync::{Mutex, mpsc};
1313-use tracing::{debug, error, warn};
1414-1515-/// Queue operation errors
1616-#[derive(Error, Debug)]
1717-pub enum QueueError {
1818- #[error("error-quickdid-queue-1 Failed to push to queue: {0}")]
1919- PushFailed(String),
2020-2121- #[error("error-quickdid-queue-2 Queue is full")]
2222- QueueFull,
2323-2424- #[error("error-quickdid-queue-3 Queue is closed")]
2525- QueueClosed,
2626-2727- #[error("error-quickdid-queue-4 Redis connection failed: {0}")]
2828- RedisConnectionFailed(String),
2929-3030- #[error("error-quickdid-queue-5 Redis operation failed: {operation}: {details}")]
3131- RedisOperationFailed { operation: String, details: String },
3232-3333- #[error("error-quickdid-queue-6 Serialization failed: {0}")]
3434- SerializationFailed(String),
3535-3636- #[error("error-quickdid-queue-7 Deserialization failed: {0}")]
3737- DeserializationFailed(String),
3838-3939- #[error("error-quickdid-queue-8 Item not found in worker queue during acknowledgment")]
4040- AckItemNotFound,
4141-}
4242-4343-type Result<T> = std::result::Result<T, QueueError>;
4444-4545-/// Generic trait for queue adapters that can work with any work type.
4646-///
4747-/// This trait provides a common interface for different queue implementations
4848-/// (MPSC, Redis, PostgreSQL, etc.) allowing them to be used interchangeably.
4949-#[async_trait]
5050-pub trait QueueAdapter<T>: Send + Sync
5151-where
5252- T: Send + Sync + 'static,
5353-{
5454- /// Pull the next work item from the queue.
5555- ///
5656- /// Returns None if the queue is closed or empty (depending on implementation).
5757- async fn pull(&self) -> Option<T>;
5858-5959- /// Push a work item to the queue.
6060- ///
6161- /// Returns an error if the queue is full or closed.
6262- async fn push(&self, work: T) -> Result<()>;
6363-6464- /// Acknowledge that a work item has been successfully processed.
6565- ///
6666- /// This is used by reliable queue implementations to remove the item
6767- /// from a temporary processing queue. Implementations that don't require
6868- /// acknowledgment (like MPSC) can use the default no-op implementation.
6969- async fn ack(&self, _item: &T) -> Result<()> {
7070- // Default no-op implementation for queues that don't need acknowledgment
7171- Ok(())
7272- }
7373-7474- /// Try to push a work item without blocking.
7575- ///
7676- /// Returns an error if the queue is full or closed.
7777- async fn try_push(&self, work: T) -> Result<()> {
7878- // Default implementation uses regular push
7979- self.push(work).await
8080- }
8181-8282- /// Get the current queue depth if available.
8383- ///
8484- /// Returns None if the implementation doesn't support queue depth.
8585- async fn depth(&self) -> Option<usize> {
8686- None
8787- }
8888-8989- /// Check if the queue is healthy.
9090- ///
9191- /// Used for health checks and monitoring.
9292- async fn is_healthy(&self) -> bool {
9393- true
9494- }
9595-}
9696-9797-/// MPSC channel-based queue adapter implementation.
9898-///
9999-/// This adapter uses tokio's multi-producer, single-consumer channel
100100-/// for in-memory queuing of work items. It's suitable for single-instance
101101-/// deployments with moderate throughput requirements.
102102-pub(crate) struct MpscQueueAdapter<T>
103103-where
104104- T: Send + Sync + 'static,
105105-{
106106- receiver: Arc<Mutex<mpsc::Receiver<T>>>,
107107- sender: mpsc::Sender<T>,
108108-}
109109-110110-impl<T> MpscQueueAdapter<T>
111111-where
112112- T: Send + Sync + 'static,
113113-{
114114- /// Create a new MPSC queue adapter with the specified buffer size.
115115- pub(crate) fn new(buffer: usize) -> Self {
116116- let (sender, receiver) = mpsc::channel(buffer);
117117- Self {
118118- receiver: Arc::new(Mutex::new(receiver)),
119119- sender,
120120- }
121121- }
122122-123123- /// Create an adapter from existing MPSC channels (for backward compatibility).
124124- pub(crate) fn from_channel(sender: mpsc::Sender<T>, receiver: mpsc::Receiver<T>) -> Self {
125125- Self {
126126- receiver: Arc::new(Mutex::new(receiver)),
127127- sender,
128128- }
129129- }
130130-}
131131-132132-#[async_trait]
133133-impl<T> QueueAdapter<T> for MpscQueueAdapter<T>
134134-where
135135- T: Send + Sync + 'static,
136136-{
137137- async fn pull(&self) -> Option<T> {
138138- let mut receiver = self.receiver.lock().await;
139139- receiver.recv().await
140140- }
141141-142142- async fn push(&self, work: T) -> Result<()> {
143143- self.sender
144144- .send(work)
145145- .await
146146- .map_err(|e| QueueError::PushFailed(e.to_string()))
147147- }
148148-149149- async fn try_push(&self, work: T) -> Result<()> {
150150- self.sender.try_send(work).map_err(|e| match e {
151151- mpsc::error::TrySendError::Full(_) => QueueError::QueueFull,
152152- mpsc::error::TrySendError::Closed(_) => QueueError::QueueClosed,
153153- })
154154- }
155155-156156- async fn depth(&self) -> Option<usize> {
157157- // Note: This is an approximation as mpsc doesn't provide exact depth
158158- Some(self.sender.max_capacity() - self.sender.capacity())
159159- }
160160-161161- async fn is_healthy(&self) -> bool {
162162- !self.sender.is_closed()
163163- }
164164-}
165165-166166-/// Work item for handle resolution tasks
167167-#[derive(Debug, Clone, Serialize, Deserialize)]
168168-pub struct HandleResolutionWork {
169169- /// The handle to resolve
170170- pub handle: String,
171171-}
172172-173173-impl HandleResolutionWork {
174174- /// Create a new handle resolution work item
175175- pub fn new(handle: String) -> Self {
176176- Self { handle }
177177- }
178178-}
179179-180180-/// Redis-backed queue adapter implementation.
181181-///
182182-/// This adapter uses Redis lists with a reliable queue pattern:
183183-/// - LPUSH to push items to the primary queue
184184-/// - RPOPLPUSH to atomically move items from primary to worker queue
185185-/// - LREM to acknowledge processed items from worker queue
186186-///
187187-/// This ensures at-least-once delivery semantics and allows for recovery
188188-/// of in-flight items if a worker crashes.
189189-pub(crate) struct RedisQueueAdapter<T>
190190-where
191191- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
192192-{
193193- /// Redis connection pool
194194- pool: RedisPool,
195195- /// Unique worker ID for this adapter instance
196196- worker_id: String,
197197- /// Key prefix for all queues (default: "queue:handleresolver:")
198198- key_prefix: String,
199199- /// Timeout for blocking RPOPLPUSH operations
200200- timeout_seconds: u64,
201201- /// Type marker for generic parameter
202202- _phantom: std::marker::PhantomData<T>,
203203-}
204204-205205-impl<T> RedisQueueAdapter<T>
206206-where
207207- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
208208-{
209209- /// Create a new Redis queue adapter with custom configuration
210210- fn with_config(
211211- pool: RedisPool,
212212- worker_id: String,
213213- key_prefix: String,
214214- timeout_seconds: u64,
215215- ) -> Self {
216216- Self {
217217- pool,
218218- worker_id,
219219- key_prefix,
220220- timeout_seconds,
221221- _phantom: std::marker::PhantomData,
222222- }
223223- }
224224-225225- /// Get the primary queue key
226226- fn primary_queue_key(&self) -> String {
227227- format!("{}primary", self.key_prefix)
228228- }
229229-230230- /// Get the worker-specific temporary queue key
231231- fn worker_queue_key(&self) -> String {
232232- format!("{}{}", self.key_prefix, self.worker_id)
233233- }
234234-}
235235-236236-#[async_trait]
237237-impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
238238-where
239239- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
240240-{
241241- async fn pull(&self) -> Option<T> {
242242- match self.pool.get().await {
243243- Ok(mut conn) => {
244244- let primary_key = self.primary_queue_key();
245245- let worker_key = self.worker_queue_key();
246246-247247- // Use blocking RPOPLPUSH to atomically move item from primary to worker queue
248248- let data: Option<Vec<u8>> = match conn
249249- .brpoplpush(&primary_key, &worker_key, self.timeout_seconds as f64)
250250- .await
251251- {
252252- Ok(data) => data,
253253- Err(e) => {
254254- error!("Failed to pull from queue: {}", e);
255255- return None;
256256- }
257257- };
258258-259259- if let Some(data) = data {
260260- // Deserialize the item
261261- match serde_json::from_slice(&data) {
262262- Ok(item) => {
263263- debug!(
264264- worker_id = %self.worker_id,
265265- "Pulled item from queue"
266266- );
267267- Some(item)
268268- }
269269- Err(e) => {
270270- error!("Failed to deserialize item: {}", e);
271271- // Remove the corrupted item from worker queue
272272- let _: std::result::Result<(), _> =
273273- conn.lrem(&worker_key, 1, &data).await;
274274- None
275275- }
276276- }
277277- } else {
278278- None
279279- }
280280- }
281281- Err(e) => {
282282- error!("Failed to get Redis connection: {}", e);
283283- None
284284- }
285285- }
286286- }
287287-288288- async fn push(&self, work: T) -> Result<()> {
289289- let mut conn = self
290290- .pool
291291- .get()
292292- .await
293293- .map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
294294-295295- let data = serde_json::to_vec(&work)
296296- .map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
297297-298298- let primary_key = self.primary_queue_key();
299299-300300- conn.lpush::<_, _, ()>(&primary_key, data)
301301- .await
302302- .map_err(|e| QueueError::RedisOperationFailed {
303303- operation: "LPUSH".to_string(),
304304- details: e.to_string(),
305305- })?;
306306-307307- debug!("Pushed item to queue");
308308- Ok(())
309309- }
310310-311311- async fn ack(&self, item: &T) -> Result<()> {
312312- let mut conn = self
313313- .pool
314314- .get()
315315- .await
316316- .map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
317317-318318- let data =
319319- serde_json::to_vec(item).map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
320320-321321- let worker_key = self.worker_queue_key();
322322-323323- // Remove exactly one occurrence of this item from the worker queue
324324- let removed: i32 = conn.lrem(&worker_key, 1, &data).await.map_err(|e| {
325325- QueueError::RedisOperationFailed {
326326- operation: "LREM".to_string(),
327327- details: e.to_string(),
328328- }
329329- })?;
330330-331331- if removed == 0 {
332332- warn!(
333333- worker_id = %self.worker_id,
334334- "Item not found in worker queue during acknowledgment"
335335- );
336336- } else {
337337- debug!(
338338- worker_id = %self.worker_id,
339339- "Acknowledged item"
340340- );
341341- }
342342-343343- Ok(())
344344- }
345345-346346- async fn depth(&self) -> Option<usize> {
347347- match self.pool.get().await {
348348- Ok(mut conn) => {
349349- let primary_key = self.primary_queue_key();
350350- match conn.llen::<_, usize>(&primary_key).await {
351351- Ok(len) => Some(len),
352352- Err(e) => {
353353- error!("Failed to get queue depth: {}", e);
354354- None
355355- }
356356- }
357357- }
358358- Err(e) => {
359359- error!("Failed to get Redis connection: {}", e);
360360- None
361361- }
362362- }
363363- }
364364-365365- async fn is_healthy(&self) -> bool {
366366- match self.pool.get().await {
367367- Ok(mut conn) => {
368368- // Ping Redis to check health
369369- match deadpool_redis::redis::cmd("PING")
370370- .query_async::<String>(&mut conn)
371371- .await
372372- {
373373- Ok(response) => response == "PONG",
374374- Err(_) => false,
375375- }
376376- }
377377- Err(_) => false,
378378- }
379379- }
380380-}
381381-382382-/// No-operation queue adapter that discards all work items.
383383-///
384384-/// This adapter is useful for configurations where queuing is disabled
385385-/// or as a fallback when other queue adapters fail to initialize.
386386-pub(crate) struct NoopQueueAdapter<T>
387387-where
388388- T: Send + Sync + 'static,
389389-{
390390- _phantom: std::marker::PhantomData<T>,
391391-}
392392-393393-impl<T> NoopQueueAdapter<T>
394394-where
395395- T: Send + Sync + 'static,
396396-{
397397- /// Create a new no-op queue adapter
398398- pub(crate) fn new() -> Self {
399399- Self {
400400- _phantom: std::marker::PhantomData,
401401- }
402402- }
403403-}
404404-405405-impl<T> Default for NoopQueueAdapter<T>
406406-where
407407- T: Send + Sync + 'static,
408408-{
409409- fn default() -> Self {
410410- Self::new()
411411- }
412412-}
413413-414414-#[async_trait]
415415-impl<T> QueueAdapter<T> for NoopQueueAdapter<T>
416416-where
417417- T: Send + Sync + 'static,
418418-{
419419- async fn pull(&self) -> Option<T> {
420420- // Never returns any work
421421- tokio::time::sleep(std::time::Duration::from_secs(60)).await;
422422- None
423423- }
424424-425425- async fn push(&self, _work: T) -> Result<()> {
426426- // Silently discard the work
427427- Ok(())
428428- }
429429-430430- async fn ack(&self, _item: &T) -> Result<()> {
431431- // No-op
432432- Ok(())
433433- }
434434-435435- async fn try_push(&self, _work: T) -> Result<()> {
436436- // Silently discard the work
437437- Ok(())
438438- }
439439-440440- async fn depth(&self) -> Option<usize> {
441441- // Always empty
442442- Some(0)
443443- }
444444-445445- async fn is_healthy(&self) -> bool {
446446- // Always healthy
447447- true
448448- }
449449-}
450450-451451-/// SQLite-backed queue adapter implementation.
452452-///
453453-/// This adapter uses SQLite database for persistent queuing of work items.
454454-/// It's suitable for single-instance deployments that need persistence
455455-/// across service restarts while remaining lightweight.
456456-///
457457-/// # Features
458458-///
459459-/// - Persistent queuing across service restarts
460460-/// - Simple FIFO ordering based on insertion time
461461-/// - Single consumer design (no complex locking needed)
462462-/// - Simple pull-and-delete semantics
463463-/// - Work shedding to prevent unbounded queue growth
464464-///
465465-/// # Work Shedding
466466-///
467467-/// When `max_size` is configured (> 0), the adapter implements work shedding:
468468-/// - New work items are always accepted
469469-/// - When the queue exceeds `max_size`, oldest entries are automatically deleted
470470-/// - This maintains the most recent work items while preventing unbounded growth
471471-/// - Essential for long-running deployments to avoid disk space issues
472472-pub(crate) struct SqliteQueueAdapter<T>
473473-where
474474- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
475475-{
476476- /// SQLite connection pool
477477- pool: sqlx::SqlitePool,
478478- /// Maximum queue size (0 = unlimited)
479479- /// When exceeded, oldest entries are deleted to maintain this limit
480480- max_size: u64,
481481- /// Type marker for generic parameter
482482- _phantom: std::marker::PhantomData<T>,
483483-}
484484-485485-impl<T> SqliteQueueAdapter<T>
486486-where
487487- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
488488-{
489489- /// Create a new SQLite queue adapter with unlimited queue size.
490490- pub(crate) fn new(pool: sqlx::SqlitePool) -> Self {
491491- Self::with_max_size(pool, 0)
492492- }
493493-494494- /// Create a new SQLite queue adapter with specified maximum queue size.
495495- ///
496496- /// # Arguments
497497- ///
498498- /// * `pool` - SQLite connection pool
499499- /// * `max_size` - Maximum number of entries in queue (0 = unlimited)
500500- ///
501501- /// # Work Shedding Behavior
502502- ///
503503- /// When `max_size` > 0:
504504- /// - New work items are always accepted
505505- /// - If queue size exceeds `max_size` after insertion, oldest entries are deleted
506506- /// - This preserves the most recent work while preventing unbounded growth
507507- pub(crate) fn with_max_size(pool: sqlx::SqlitePool, max_size: u64) -> Self {
508508- Self {
509509- pool,
510510- max_size,
511511- _phantom: std::marker::PhantomData,
512512- }
513513- }
514514-}
515515-516516-#[async_trait]
517517-impl<T> QueueAdapter<T> for SqliteQueueAdapter<T>
518518-where
519519- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
520520-{
521521- async fn pull(&self) -> Option<T> {
522522- // Get the oldest queued item and delete it in a transaction
523523- let mut transaction = match self.pool.begin().await {
524524- Ok(tx) => tx,
525525- Err(e) => {
526526- error!("Failed to start SQLite transaction: {}", e);
527527- return None;
528528- }
529529- };
530530-531531- // Select the oldest queued item
532532- let record = match sqlx::query(
533533- "SELECT id, work FROM handle_resolution_queue
534534- ORDER BY queued_at ASC
535535- LIMIT 1"
536536- )
537537- .fetch_optional(&mut *transaction)
538538- .await
539539- {
540540- Ok(Some(row)) => row,
541541- Ok(None) => {
542542- // No queued items available
543543- debug!("No queued items available in SQLite queue");
544544- return None;
545545- }
546546- Err(e) => {
547547- error!("Failed to query SQLite queue: {}", e);
548548- return None;
549549- }
550550- };
551551-552552- let item_id: i64 = record.get("id");
553553- let work_json: String = record.get("work");
554554-555555- // Delete the item from the queue
556556- if let Err(e) = sqlx::query("DELETE FROM handle_resolution_queue WHERE id = ?1")
557557- .bind(item_id)
558558- .execute(&mut *transaction)
559559- .await
560560- {
561561- error!("Failed to delete item from queue: {}", e);
562562- return None;
563563- }
564564-565565- // Commit the transaction
566566- if let Err(e) = transaction.commit().await {
567567- error!("Failed to commit SQLite transaction: {}", e);
568568- return None;
569569- }
570570-571571- // Deserialize the work item from JSON
572572- match serde_json::from_str(&work_json) {
573573- Ok(work) => {
574574- debug!("Pulled work item from SQLite queue");
575575- Some(work)
576576- }
577577- Err(e) => {
578578- error!("Failed to deserialize work item: {}", e);
579579- None
580580- }
581581- }
582582- }
583583-584584- async fn push(&self, work: T) -> Result<()> {
585585- // Serialize the entire work item as JSON
586586- let work_json = serde_json::to_string(&work)
587587- .map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
588588-589589- let current_timestamp = std::time::SystemTime::now()
590590- .duration_since(std::time::UNIX_EPOCH)
591591- .unwrap_or_default()
592592- .as_secs() as i64;
593593-594594- // Optimized approach: Insert first, then check if cleanup needed
595595- // This avoids counting on every insert
596596- sqlx::query(
597597- "INSERT INTO handle_resolution_queue (work, queued_at) VALUES (?1, ?2)"
598598- )
599599- .bind(&work_json)
600600- .bind(current_timestamp)
601601- .execute(&self.pool)
602602- .await
603603- .map_err(|e| QueueError::PushFailed(format!("Failed to insert work item: {}", e)))?;
604604-605605- // Implement optimized work shedding if max_size is configured
606606- if self.max_size > 0 {
607607- // Optimized approach: Only check and clean periodically or when likely over limit
608608- // Use a limited count to avoid full table scan
609609- let check_limit = self.max_size as i64 + (self.max_size as i64 / 10).max(1); // Check 10% over limit
610610- let approx_count: Option<i64> = sqlx::query_scalar(
611611- "SELECT COUNT(*) FROM (
612612- SELECT 1 FROM handle_resolution_queue LIMIT ?1
613613- ) AS limited_count"
614614- )
615615- .bind(check_limit)
616616- .fetch_one(&self.pool)
617617- .await
618618- .map_err(|e| QueueError::PushFailed(format!("Failed to check queue size: {}", e)))?;
619619-620620- // Only perform cleanup if we're definitely over the limit
621621- if let Some(count) = approx_count && count >= check_limit {
622622- // Perform batch cleanup - delete more than just the excess to reduce frequency
623623- // Delete 20% more than needed to avoid frequent shedding
624624- let target_size = (self.max_size as f64 * 0.8) as i64; // Keep 80% of max_size
625625- let to_delete = count - target_size;
626626-627627- if to_delete > 0 {
628628- // Optimized deletion: First get the cutoff id and timestamp
629629- // This avoids the expensive subquery in the DELETE statement
630630- let cutoff: Option<(i64, i64)> = sqlx::query_as(
631631- "SELECT id, queued_at FROM handle_resolution_queue
632632- ORDER BY queued_at ASC, id ASC
633633- LIMIT 1 OFFSET ?1"
634634- )
635635- .bind(to_delete - 1)
636636- .fetch_optional(&self.pool)
637637- .await
638638- .map_err(|e| QueueError::PushFailed(format!("Failed to find cutoff: {}", e)))?;
639639-640640- if let Some((cutoff_id, cutoff_timestamp)) = cutoff {
641641- // Delete entries older than cutoff, or equal timestamp with lower id
642642- // This handles the case where multiple entries have the same timestamp
643643- let deleted_result = sqlx::query(
644644- "DELETE FROM handle_resolution_queue
645645- WHERE queued_at < ?1
646646- OR (queued_at = ?1 AND id <= ?2)"
647647- )
648648- .bind(cutoff_timestamp)
649649- .bind(cutoff_id)
650650- .execute(&self.pool)
651651- .await
652652- .map_err(|e| QueueError::PushFailed(format!("Failed to delete excess entries: {}", e)))?;
653653-654654- let deleted_count = deleted_result.rows_affected();
655655- if deleted_count > 0 {
656656- tracing::info!(
657657- "Work shedding: deleted {} oldest entries (target size: {}, max: {})",
658658- deleted_count,
659659- target_size,
660660- self.max_size
661661- );
662662- }
663663- }
664664- }
665665- }
666666- }
667667-668668- debug!("Pushed work item to SQLite queue (max_size: {})", self.max_size);
669669- Ok(())
670670- }
671671-672672- async fn ack(&self, _item: &T) -> Result<()> {
673673- // With the simplified SQLite queue design, items are deleted when pulled,
674674- // so acknowledgment is a no-op (item is already processed and removed)
675675- debug!("Acknowledged work item in SQLite queue (no-op)");
676676- Ok(())
677677- }
678678-679679- async fn depth(&self) -> Option<usize> {
680680- match sqlx::query_scalar::<_, i64>(
681681- "SELECT COUNT(*) FROM handle_resolution_queue"
682682- )
683683- .fetch_one(&self.pool)
684684- .await
685685- {
686686- Ok(count) => Some(count as usize),
687687- Err(e) => {
688688- warn!("Failed to get SQLite queue depth: {}", e);
689689- None
690690- }
691691- }
692692- }
693693-694694- async fn is_healthy(&self) -> bool {
695695- // Test the connection by running a simple query
696696- sqlx::query_scalar::<_, i64>("SELECT 1")
697697- .fetch_one(&self.pool)
698698- .await
699699- .map(|_| true)
700700- .unwrap_or(false)
701701- }
702702-}
703703-704704-// ========= Factory Functions for Queue Adapters =========
705705-706706-/// Create a new MPSC queue adapter with the specified buffer size.
707707-///
708708-/// This creates an in-memory queue suitable for single-instance deployments.
709709-///
710710-/// # Arguments
711711-///
712712-/// * `buffer` - The buffer size for the channel
713713-pub fn create_mpsc_queue<T>(buffer: usize) -> Arc<dyn QueueAdapter<T>>
714714-where
715715- T: Send + Sync + 'static,
716716-{
717717- Arc::new(MpscQueueAdapter::new(buffer))
718718-}
719719-720720-/// Create an MPSC queue adapter from existing channels.
721721-///
722722-/// This allows integration with existing channel-based architectures.
723723-///
724724-/// # Arguments
725725-///
726726-/// * `sender` - The sender half of the channel
727727-/// * `receiver` - The receiver half of the channel
728728-pub fn create_mpsc_queue_from_channel<T>(
729729- sender: mpsc::Sender<T>,
730730- receiver: mpsc::Receiver<T>,
731731-) -> Arc<dyn QueueAdapter<T>>
732732-where
733733- T: Send + Sync + 'static,
734734-{
735735- Arc::new(MpscQueueAdapter::from_channel(sender, receiver))
736736-}
737737-738738-/// Create a new Redis-backed queue adapter.
739739-///
740740-/// This creates a distributed queue suitable for multi-instance deployments.
741741-///
742742-/// # Arguments
743743-///
744744-/// * `pool` - Redis connection pool
745745-/// * `worker_id` - Worker identifier for this queue instance
746746-/// * `key_prefix` - Redis key prefix for queue operations
747747-/// * `timeout_seconds` - Timeout for blocking operations
748748-pub fn create_redis_queue<T>(
749749- pool: RedisPool,
750750- worker_id: String,
751751- key_prefix: String,
752752- timeout_seconds: u64,
753753-) -> Arc<dyn QueueAdapter<T>>
754754-where
755755- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
756756-{
757757- Arc::new(RedisQueueAdapter::with_config(
758758- pool,
759759- worker_id,
760760- key_prefix,
761761- timeout_seconds,
762762- ))
763763-}
764764-765765-/// Create a no-operation queue adapter.
766766-///
767767-/// This creates a queue that discards all work items, useful for testing
768768-/// or when queue processing is disabled.
769769-pub fn create_noop_queue<T>() -> Arc<dyn QueueAdapter<T>>
770770-where
771771- T: Send + Sync + 'static,
772772-{
773773- Arc::new(NoopQueueAdapter::new())
774774-}
775775-776776-/// Create a new SQLite queue adapter with unlimited queue size.
777777-///
778778-/// This creates a persistent queue backed by SQLite database suitable
779779-/// for single-instance deployments that need persistence across restarts.
780780-/// The queue has no size limit and may grow unbounded.
781781-///
782782-/// # Arguments
783783-///
784784-/// * `pool` - SQLite connection pool
785785-///
786786-/// # Example
787787-///
788788-/// ```no_run
789789-/// use quickdid::queue_adapter::{create_sqlite_queue, HandleResolutionWork};
790790-/// use quickdid::sqlite_schema::create_sqlite_pool;
791791-/// use std::sync::Arc;
792792-///
793793-/// # async fn example() -> anyhow::Result<()> {
794794-/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
795795-/// let queue = create_sqlite_queue::<HandleResolutionWork>(pool);
796796-/// # Ok(())
797797-/// # }
798798-/// ```
799799-pub fn create_sqlite_queue<T>(pool: sqlx::SqlitePool) -> Arc<dyn QueueAdapter<T>>
800800-where
801801- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
802802-{
803803- Arc::new(SqliteQueueAdapter::new(pool))
804804-}
805805-806806-/// Create a new SQLite queue adapter with work shedding.
807807-///
808808-/// This creates a persistent queue with configurable maximum size.
809809-/// When the queue exceeds `max_size`, the oldest entries are automatically
810810-/// deleted to maintain the limit, preserving the most recent work items.
811811-///
812812-/// # Arguments
813813-///
814814-/// * `pool` - SQLite connection pool
815815-/// * `max_size` - Maximum number of entries (0 = unlimited)
816816-///
817817-/// # Work Shedding Behavior
818818-///
819819-/// - New work items are always accepted
820820-/// - When queue size exceeds `max_size`, oldest entries are deleted
821821-/// - Deletion happens atomically with insertion in a single transaction
822822-/// - Essential for long-running deployments to prevent disk space issues
823823-///
824824-/// # Example
825825-///
826826-/// ```no_run
827827-/// use quickdid::queue_adapter::{create_sqlite_queue_with_max_size, HandleResolutionWork};
828828-/// use quickdid::sqlite_schema::create_sqlite_pool;
829829-/// use std::sync::Arc;
830830-///
831831-/// # async fn example() -> anyhow::Result<()> {
832832-/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
833833-/// // Limit queue to 10,000 entries with automatic work shedding
834834-/// let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 10000);
835835-/// # Ok(())
836836-/// # }
837837-/// ```
838838-pub fn create_sqlite_queue_with_max_size<T>(
839839- pool: sqlx::SqlitePool,
840840- max_size: u64,
841841-) -> Arc<dyn QueueAdapter<T>>
842842-where
843843- T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
844844-{
845845- Arc::new(SqliteQueueAdapter::with_max_size(pool, max_size))
846846-}
847847-848848-#[cfg(test)]
849849-mod tests {
850850- use super::*;
851851-852852- #[tokio::test]
853853- async fn test_mpsc_queue_adapter_push_pull() {
854854- let adapter = Arc::new(MpscQueueAdapter::<String>::new(10));
855855-856856- // Test push
857857- adapter.push("test".to_string()).await.unwrap();
858858-859859- // Test pull
860860- let pulled = adapter.pull().await;
861861- assert!(pulled.is_some());
862862- assert_eq!(pulled.unwrap(), "test");
863863- }
864864-865865- #[tokio::test]
866866- async fn test_handle_resolution_work() {
867867- let work = HandleResolutionWork::new("alice.example.com".to_string());
868868-869869- assert_eq!(work.handle, "alice.example.com");
870870- }
    /// Push/pull/ack round trip against a live Redis instance.
    /// Skipped silently when no test Redis pool is configured.
    #[tokio::test]
    async fn test_redis_queue_adapter_push_pull() {
        // Bail out (test passes vacuously) if the environment has no Redis.
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        // Create adapter with unique prefix for testing
        // (nanosecond timestamp keeps concurrent test runs from colliding on keys)
        let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
        let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
            pool.clone(),
            "test-worker".to_string(),
            test_prefix.clone(),
            1, // 1 second timeout for tests
        ));

        // Test push
        adapter.push("test-item".to_string()).await.unwrap();

        // Test pull
        let pulled = adapter.pull().await;
        assert!(pulled.is_some());
        assert_eq!(pulled.unwrap(), "test-item");

        // Test ack
        adapter
            .ack(&"test-item".to_string())
            .await
            .expect("Ack should succeed");

        // Clean up test data - manually clean worker queue since cleanup was removed
        // In production, items would timeout or be processed
    }
    /// Reliable-queue semantics: an item pulled but never acked must be
    /// recoverable by a new adapter using the same `worker_id` (simulating a
    /// worker crash and restart). Skipped when no test Redis is available.
    #[tokio::test]
    async fn test_redis_queue_adapter_reliable_queue() {
        // Bail out (test passes vacuously) if the environment has no Redis.
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        // Unique prefix per run so parallel test runs don't share keys.
        let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
        let worker_id = "test-worker-reliable";

        // Create first adapter
        let adapter1 = Arc::new(RedisQueueAdapter::<String>::with_config(
            pool.clone(),
            worker_id.to_string(),
            test_prefix.clone(),
            1,
        ));

        // Push multiple items
        adapter1.push("item1".to_string()).await.unwrap();
        adapter1.push("item2".to_string()).await.unwrap();
        adapter1.push("item3".to_string()).await.unwrap();

        // Pull but don't ack (simulating worker crash)
        let item1 = adapter1.pull().await;
        assert!(item1.is_some());
        assert_eq!(item1.unwrap(), "item1");

        // Create second adapter with same worker_id (simulating restart)
        let adapter2 = Arc::new(RedisQueueAdapter::<String>::with_config(
            pool.clone(),
            worker_id.to_string(),
            test_prefix.clone(),
            1,
        ));

        // In a real scenario, unacked items would be handled by timeout or manual recovery
        // For this test, we just verify the item is in the worker queue
        let recovered = adapter2.pull().await;
        assert!(recovered.is_some());
    }
    /// `depth()` should track the primary queue length as items are pushed
    /// and pulled. Skipped when no test Redis is available.
    #[tokio::test]
    async fn test_redis_queue_adapter_depth() {
        // Bail out (test passes vacuously) if the environment has no Redis.
        let pool = match crate::test_helpers::get_test_redis_pool() {
            Some(p) => p,
            None => return,
        };

        // Unique prefix per run so parallel test runs don't share keys.
        let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
        let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
            pool.clone(),
            "test-worker-depth".to_string(),
            test_prefix.clone(),
            1,
        ));

        // Initially empty
        let depth = adapter.depth().await;
        assert_eq!(depth, Some(0));

        // Push items and check depth
        adapter.push("item1".to_string()).await.unwrap();
        assert_eq!(adapter.depth().await, Some(1));

        adapter.push("item2".to_string()).await.unwrap();
        assert_eq!(adapter.depth().await, Some(2));

        // Pull and check depth decreases
        let _ = adapter.pull().await;
        // Note: depth checks primary queue, not worker queue
        assert_eq!(adapter.depth().await, Some(1));

        // Test cleanup is automatic when adapter is dropped
    }
981981-982982- #[tokio::test]
983983- async fn test_redis_queue_adapter_health() {
984984- let pool = match crate::test_helpers::get_test_redis_pool() {
985985- Some(p) => p,
986986- None => return,
987987- };
988988-989989- let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
990990- pool,
991991- "test-worker-health".to_string(),
992992- "test:queue:health:".to_string(),
993993- 1,
994994- ));
995995-996996- // Should be healthy if Redis is running
997997- assert!(adapter.is_healthy().await);
998998- }
    /// Full push/pull lifecycle against an in-memory SQLite database,
    /// asserting on the raw table contents at every step: a push inserts a
    /// JSON row, a pull returns the item AND deletes the row, and ack is a
    /// no-op afterwards.
    #[tokio::test]
    async fn test_sqlite_queue_adapter_push_pull() {
        // Create in-memory SQLite database for testing
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the queue schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::new(pool.clone()));

        let test_handle = "alice.example.com";
        let work = HandleResolutionWork {
            handle: test_handle.to_string(),
        };

        // Test push
        adapter.push(work.clone()).await.unwrap();

        // Verify the record is actually in the database
        let records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");

        assert_eq!(records.len(), 1);
        let (db_id, db_work_json, db_queued_at) = &records[0];
        assert!(*db_id > 0);
        assert!(*db_queued_at > 0);

        // Verify the JSON content
        let stored_work: HandleResolutionWork = serde_json::from_str(db_work_json)
            .expect("Failed to deserialize stored work");
        assert_eq!(stored_work.handle, test_handle);

        // Verify depth
        assert_eq!(adapter.depth().await, Some(1));

        // Test pull
        let pulled = adapter.pull().await;
        assert!(pulled.is_some());
        let pulled_work = pulled.unwrap();
        assert_eq!(pulled_work.handle, test_handle);

        // Verify the record was deleted from database after pull
        let records_after_pull: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database after pull");

        assert_eq!(records_after_pull.len(), 0);

        // Test ack - should be no-op since item already deleted
        adapter.ack(&pulled_work).await.expect("Ack should succeed");

        // Verify depth after ack
        assert_eq!(adapter.depth().await, Some(0));

        // Verify no more items to pull
        let empty_pull = adapter.pull().await;
        assert!(empty_pull.is_none());
    }
    /// FIFO ordering with several items: pushes must land in timestamp order,
    /// pulls must return items oldest-first, and each pull must remove exactly
    /// one row from the backing table.
    #[tokio::test]
    async fn test_sqlite_queue_adapter_multiple_items() {
        // Create in-memory SQLite database for testing
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the queue schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::new(pool.clone()));

        // Push multiple items
        let handles = vec!["alice.example.com", "bob.example.com", "charlie.example.com"];
        for handle in &handles {
            let work = HandleResolutionWork {
                handle: handle.to_string(),
            };
            adapter.push(work).await.unwrap();
        }

        // Verify all records are in database with correct order
        let records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");

        assert_eq!(records.len(), 3);

        // Verify FIFO ordering by timestamp
        assert!(records[0].2 <= records[1].2); // queued_at timestamps should be in order
        assert!(records[1].2 <= records[2].2);

        // Verify JSON content matches expected handles
        for (i, (db_id, db_work_json, _)) in records.iter().enumerate() {
            assert!(*db_id > 0);
            let stored_work: HandleResolutionWork = serde_json::from_str(db_work_json)
                .expect("Failed to deserialize stored work");
            assert_eq!(stored_work.handle, handles[i]);
        }

        // Verify depth
        assert_eq!(adapter.depth().await, Some(3));

        // Pull items in FIFO order and verify database state changes
        for (i, expected_handle) in handles.iter().enumerate() {
            let pulled = adapter.pull().await;
            assert!(pulled.is_some());
            let pulled_work = pulled.unwrap();
            assert_eq!(pulled_work.handle, *expected_handle);

            // Verify database now has one fewer record
            let remaining_count: i64 = sqlx::query_scalar(
                "SELECT COUNT(*) FROM handle_resolution_queue"
            )
            .fetch_one(&pool)
            .await
            .expect("Failed to count remaining records");
            assert_eq!(remaining_count, (handles.len() - i - 1) as i64);

            // Ack the item (no-op)
            adapter.ack(&pulled_work).await.expect("Ack should succeed");
        }

        // Verify queue is empty in both adapter and database
        assert_eq!(adapter.depth().await, Some(0));

        let final_records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");
        assert_eq!(final_records.len(), 0);

        let empty_pull = adapter.pull().await;
        assert!(empty_pull.is_none());
    }
    /// Pull-deletes-immediately contract: a pulled row is removed from the
    /// table at pull time (verified both via raw SQL and via
    /// `sqlite_schema::get_queue_stats`), and a later ack changes nothing.
    #[tokio::test]
    async fn test_sqlite_queue_adapter_simple_pull_delete() {
        // Create in-memory SQLite database for testing
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the queue schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::new(pool.clone()));

        let test_handle = "simple.example.com";
        let work = HandleResolutionWork {
            handle: test_handle.to_string(),
        };

        // Push item
        adapter.push(work.clone()).await.unwrap();

        // Verify the record exists in database with correct JSON
        let records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");

        assert_eq!(records.len(), 1);
        let (db_id, db_work_json, db_queued_at) = &records[0];
        assert!(*db_id > 0);
        assert!(*db_queued_at > 0);

        // Verify JSON content
        let stored_work: HandleResolutionWork = serde_json::from_str(db_work_json)
            .expect("Failed to deserialize stored work");
        assert_eq!(stored_work.handle, test_handle);

        // Verify item is in queue using schema stats
        let total_before = crate::sqlite_schema::get_queue_stats(&pool)
            .await
            .expect("Failed to get queue stats");
        assert_eq!(total_before, 1);

        // Pull item (should delete it immediately)
        let pulled = adapter.pull().await;
        assert!(pulled.is_some());
        let pulled_work = pulled.unwrap();
        assert_eq!(pulled_work.handle, test_handle);

        // Verify the record was deleted from database
        let records_after_pull: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database after pull");
        assert_eq!(records_after_pull.len(), 0);

        // Verify that pulling again returns None (no records left)
        let empty_pull = adapter.pull().await;
        assert!(empty_pull.is_none());

        // Verify queue is now empty after pull using schema stats
        let total_after = crate::sqlite_schema::get_queue_stats(&pool)
            .await
            .expect("Failed to get queue stats");
        assert_eq!(total_after, 0);

        // Ack the item (should be no-op)
        adapter.ack(&pulled_work).await.expect("Ack should succeed");

        // Verify queue is still empty using schema stats
        let total_final = crate::sqlite_schema::get_queue_stats(&pool)
            .await
            .expect("Failed to get queue stats after ack");
        assert_eq!(total_final, 0);
    }
12331233-12341234- #[tokio::test]
12351235- async fn test_sqlite_queue_adapter_health() {
12361236- // Create in-memory SQLite database for testing
12371237- let pool = sqlx::SqlitePool::connect("sqlite::memory:")
12381238- .await
12391239- .expect("Failed to connect to in-memory SQLite");
12401240-12411241- // Create the queue schema
12421242- crate::sqlite_schema::create_schema(&pool)
12431243- .await
12441244- .expect("Failed to create schema");
12451245-12461246- let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::new(pool));
12471247-12481248- // Should be healthy if SQLite is working
12491249- assert!(adapter.is_healthy().await);
12501250- }
12511251-12521252- #[tokio::test]
12531253- async fn test_sqlite_queue_adapter_ack_no_op() {
12541254- // Create in-memory SQLite database for testing
12551255- let pool = sqlx::SqlitePool::connect("sqlite::memory:")
12561256- .await
12571257- .expect("Failed to connect to in-memory SQLite");
12581258-12591259- // Create the queue schema
12601260- crate::sqlite_schema::create_schema(&pool)
12611261- .await
12621262- .expect("Failed to create schema");
12631263-12641264- let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::new(pool));
12651265-12661266- // Ack should always succeed as it's a no-op
12671267- let any_work = HandleResolutionWork {
12681268- handle: "any.example.com".to_string(),
12691269- };
12701270-12711271- let result = adapter.ack(&any_work).await;
12721272- assert!(result.is_ok());
12731273- }
    /// The adapter is generic over the work type: a locally defined
    /// `CustomWork` struct must serialize to JSON on push, round-trip through
    /// pull in FIFO order, and leave the table empty afterwards.
    #[tokio::test]
    async fn test_sqlite_queue_adapter_generic_work_type() {
        // Test with a different work type to demonstrate genericity
        #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
        struct CustomWork {
            id: u64,
            name: String,
            data: Vec<i32>,
        }

        // Create in-memory SQLite database for testing
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the queue schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        let adapter = Arc::new(SqliteQueueAdapter::<CustomWork>::new(pool.clone()));

        // Create custom work items
        let work1 = CustomWork {
            id: 123,
            name: "test_work".to_string(),
            data: vec![1, 2, 3, 4, 5],
        };

        let work2 = CustomWork {
            id: 456,
            name: "another_work".to_string(),
            data: vec![10, 20],
        };

        // Test push for both items
        adapter.push(work1.clone()).await.unwrap();
        adapter.push(work2.clone()).await.unwrap();

        // Verify the records are in database with correct JSON serialization
        let records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");

        assert_eq!(records.len(), 2);

        // Verify first work item JSON
        let stored_work1: CustomWork = serde_json::from_str(&records[0].1)
            .expect("Failed to deserialize first work item");
        assert_eq!(stored_work1, work1);

        // Verify the JSON contains all expected fields
        let json_value1: serde_json::Value = serde_json::from_str(&records[0].1)
            .expect("Failed to parse JSON");
        assert_eq!(json_value1["id"], 123);
        assert_eq!(json_value1["name"], "test_work");
        assert_eq!(json_value1["data"], serde_json::json!([1, 2, 3, 4, 5]));

        // Verify second work item JSON
        let stored_work2: CustomWork = serde_json::from_str(&records[1].1)
            .expect("Failed to deserialize second work item");
        assert_eq!(stored_work2, work2);

        let json_value2: serde_json::Value = serde_json::from_str(&records[1].1)
            .expect("Failed to parse JSON");
        assert_eq!(json_value2["id"], 456);
        assert_eq!(json_value2["name"], "another_work");
        assert_eq!(json_value2["data"], serde_json::json!([10, 20]));

        // Verify depth
        assert_eq!(adapter.depth().await, Some(2));

        // Test pull - should get items in FIFO order
        let pulled1 = adapter.pull().await;
        assert!(pulled1.is_some());
        let pulled_work1 = pulled1.unwrap();
        assert_eq!(pulled_work1, work1);

        // Verify database now has one record
        let count_after_first_pull: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM handle_resolution_queue"
        )
        .fetch_one(&pool)
        .await
        .expect("Failed to count records");
        assert_eq!(count_after_first_pull, 1);

        let pulled2 = adapter.pull().await;
        assert!(pulled2.is_some());
        let pulled_work2 = pulled2.unwrap();
        assert_eq!(pulled_work2, work2);

        // Verify database is now empty
        let count_after_second_pull: i64 = sqlx::query_scalar(
            "SELECT COUNT(*) FROM handle_resolution_queue"
        )
        .fetch_one(&pool)
        .await
        .expect("Failed to count records");
        assert_eq!(count_after_second_pull, 0);

        // Verify queue is empty
        assert_eq!(adapter.depth().await, Some(0));
        let empty_pull = adapter.pull().await;
        assert!(empty_pull.is_none());

        // Test ack (should be no-op)
        adapter.ack(&pulled_work1).await.expect("Ack should succeed");
        adapter.ack(&pulled_work2).await.expect("Ack should succeed");
    }
13881388-13891389- #[tokio::test]
13901390- async fn test_sqlite_queue_adapter_work_shedding() {
13911391- // Create in-memory SQLite database for testing
13921392- let pool = sqlx::SqlitePool::connect("sqlite::memory:")
13931393- .await
13941394- .expect("Failed to connect to in-memory SQLite");
13951395-13961396- // Create the queue schema
13971397- crate::sqlite_schema::create_schema(&pool)
13981398- .await
13991399- .expect("Failed to create schema");
14001400-14011401- // Create adapter with small max_size for testing work shedding
14021402- let max_size = 10; // Use larger size to properly test batch deletion
14031403- let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
14041404- pool.clone(),
14051405- max_size
14061406- ));
14071407-14081408- // Verify initial empty state
14091409- assert_eq!(adapter.depth().await, Some(0));
14101410-14111411- // Push items up to the limit (should not trigger shedding)
14121412- let mut handles = Vec::new();
14131413- for i in 0..max_size {
14141414- let handle = format!("test-{:03}", i);
14151415- handles.push(handle.clone());
14161416- let work = HandleResolutionWork { handle };
14171417- adapter.push(work).await.expect("Push should succeed");
14181418- }
14191419-14201420- // Verify all items are present
14211421- assert_eq!(adapter.depth().await, Some(max_size as usize));
14221422-14231423- // Push beyond 110% of max_size to trigger batch shedding
14241424- // The implementation checks at 110% and deletes down to 80%
14251425- let trigger_point = max_size + (max_size / 10) + 1;
14261426- for i in max_size..trigger_point {
14271427- let handle = format!("test-{:03}", i);
14281428- handles.push(handle);
14291429- let work = HandleResolutionWork { handle: handles[i as usize].clone() };
14301430- adapter.push(work).await.expect("Push should succeed");
14311431- }
14321432-14331433- // After triggering shedding, queue should be around 80% of max_size
14341434- let depth_after_shedding = adapter.depth().await.unwrap();
14351435- let expected_size = (max_size as f64 * 0.8) as usize;
14361436-14371437- // Allow some variance due to batch deletion
14381438- assert!(
14391439- depth_after_shedding <= expected_size + 1,
14401440- "Queue size {} should be around 80% of max_size ({})",
14411441- depth_after_shedding,
14421442- expected_size
14431443- );
14441444-14451445- // Verify oldest items were deleted and newest items remain
14461446- let records: Vec<(i64, String, i64)> = sqlx::query_as(
14471447- "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
14481448- )
14491449- .fetch_all(&pool)
14501450- .await
14511451- .expect("Failed to query database after shedding");
14521452-14531453- // Some of the oldest items should be gone (but not necessarily all the first ones)
14541454- // With batch deletion to 80%, we keep recent items
14551455- let last_item: HandleResolutionWork = serde_json::from_str(&records[records.len() - 1].1)
14561456- .expect("Failed to deserialize last work");
14571457- // Should have the most recent items
14581458- assert!(last_item.handle.starts_with("test-01"), "Should have recent items");
14591459-14601460- // Verify FIFO order is maintained for remaining items
14611461- let mut prev_id = 0;
14621462- for record in &records {
14631463- let id: i64 = record.0;
14641464- assert!(id > prev_id, "IDs should be in ascending order");
14651465- prev_id = id;
14661466- }
14671467- }
    /// `max_size = 0` disables shedding entirely: all 100 pushed items must
    /// survive, in order, with nothing deleted.
    #[tokio::test]
    async fn test_sqlite_queue_adapter_work_shedding_disabled() {
        // Create in-memory SQLite database for testing
        let pool = sqlx::SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the queue schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // Create adapter with max_size = 0 (disabled work shedding)
        let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
            pool.clone(),
            0
        ));

        // Push many items (should not trigger any shedding)
        let mut expected_handles = Vec::new();
        for i in 0..100 {
            let handle = format!("test-{:03}", i);
            expected_handles.push(handle.clone());
            let work = HandleResolutionWork { handle };
            adapter.push(work).await.expect("Push should succeed");
        }

        // Verify all items are present (no shedding occurred)
        assert_eq!(adapter.depth().await, Some(100));

        // Verify all items are in database
        let records: Vec<(i64, String, i64)> = sqlx::query_as(
            "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
        )
        .fetch_all(&pool)
        .await
        .expect("Failed to query database");

        assert_eq!(records.len(), 100);

        // Verify all items are present in correct order
        for (i, expected_handle) in expected_handles.iter().enumerate() {
            let stored_work: HandleResolutionWork = serde_json::from_str(&records[i].1)
                .expect("Failed to deserialize stored work");
            assert_eq!(stored_work.handle, *expected_handle);
        }
    }
15161516-15171517- #[tokio::test]
15181518- async fn test_sqlite_queue_adapter_performance_optimizations() {
15191519- // Create in-memory SQLite database for testing
15201520- let pool = sqlx::SqlitePool::connect("sqlite::memory:")
15211521- .await
15221522- .expect("Failed to connect to in-memory SQLite");
15231523-15241524- // Create the queue schema
15251525- crate::sqlite_schema::create_schema(&pool)
15261526- .await
15271527- .expect("Failed to create schema");
15281528-15291529- // Create adapter with reasonable max_size
15301530- let max_size = 100;
15311531- let adapter = Arc::new(SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
15321532- pool.clone(),
15331533- max_size
15341534- ));
15351535-15361536- // Test 1: Verify inserts don't trigger checks when well under limit
15371537- // Push 50 items (50% of max_size) - should not trigger any cleanup checks
15381538- for i in 0..50 {
15391539- let work = HandleResolutionWork {
15401540- handle: format!("handle-{:04}", i),
15411541- };
15421542- adapter.push(work).await.expect("Push should succeed");
15431543- }
15441544- assert_eq!(adapter.depth().await, Some(50));
15451545-15461546- // Test 2: Verify batch deletion efficiency
15471547- // Push to 110% to trigger batch cleanup
15481548- for i in 50..111 {
15491549- let work = HandleResolutionWork {
15501550- handle: format!("handle-{:04}", i),
15511551- };
15521552- adapter.push(work).await.expect("Push should succeed");
15531553- // Add tiny delay to ensure different timestamps for proper ordering
15541554- if i % 10 == 0 {
15551555- tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
15561556- }
15571557- }
15581558-15591559- // Should have deleted down to ~80% of max_size
15601560- let depth_after_batch = adapter.depth().await.unwrap();
15611561- assert!(
15621562- (79..=81).contains(&depth_after_batch),
15631563- "After batch deletion, size should be ~80 (got {})",
15641564- depth_after_batch
15651565- );
15661566-15671567- // Test 3: Verify cleanup doesn't happen again immediately
15681568- // Push a few more items - should not trigger another cleanup
15691569- for i in 111..115 {
15701570- let work = HandleResolutionWork {
15711571- handle: format!("handle-{:04}", i),
15721572- };
15731573- adapter.push(work).await.expect("Push should succeed");
15741574- }
15751575-15761576- let depth_no_cleanup = adapter.depth().await.unwrap();
15771577- assert!(
15781578- depth_no_cleanup > 80 && depth_no_cleanup < 90,
15791579- "Should not have triggered cleanup yet (got {})",
15801580- depth_no_cleanup
15811581- );
15821582-15831583- // Test 4: Verify timestamp-based deletion is working correctly
15841584- // The oldest items should be gone after batch deletion
15851585- let records: Vec<(i64, String, i64)> = sqlx::query_as(
15861586- "SELECT id, work, queued_at FROM handle_resolution_queue
15871587- ORDER BY queued_at ASC LIMIT 5"
15881588- )
15891589- .fetch_all(&pool)
15901590- .await
15911591- .expect("Failed to query database");
15921592-15931593- // Verify we have recent items (not necessarily the oldest)
15941594- let oldest_work: HandleResolutionWork = serde_json::from_str(&records[0].1)
15951595- .expect("Failed to deserialize work");
15961596- let oldest_num: i32 = oldest_work.handle
15971597- .trim_start_matches("handle-")
15981598- .parse()
15991599- .unwrap_or(0);
16001600-16011601- // After batch deletion to 80%, we should have deleted approximately the first 31 items
16021602- // But allow some variance due to timing
16031603- assert!(
16041604- oldest_num >= 20,
16051605- "Should have deleted old items, oldest is now: {}",
16061606- oldest_work.handle
16071607- );
16081608-16091609- // Test 5: Verify FIFO order is maintained after batch operations
16101610- let mut prev_timestamp = 0i64;
16111611- let all_records: Vec<(i64, String, i64)> = sqlx::query_as(
16121612- "SELECT id, work, queued_at FROM handle_resolution_queue ORDER BY queued_at ASC"
16131613- )
16141614- .fetch_all(&pool)
16151615- .await
16161616- .expect("Failed to query database");
16171617-16181618- for record in &all_records {
16191619- assert!(
16201620- record.2 >= prev_timestamp,
16211621- "Timestamps should be in ascending order"
16221622- );
16231623- prev_timestamp = record.2;
16241624- }
16251625- }
16261626-}