···7070async-graphql = { version = "7.0", features = ["dynamic-schema", "dataloader"] }
7171async-graphql-axum = "7.0"
7272lazy_static = "1.5"
7373+async-stream = "0.3"
+67-2
api/src/graphql/handler.rs
···11//! GraphQL HTTP handler for Axum
2233use async_graphql::dynamic::Schema;
44+use async_graphql::http::{WebSocket as GraphQLWebSocket, WebSocketProtocols, WsMessage};
45use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
56use axum::{
66- extract::{Query, State},
77+ extract::{ws::{WebSocket, Message}, Query, State, WebSocketUpgrade},
78 http::{HeaderMap, StatusCode},
88- response::Html,
99+ response::{Html, Response},
910};
1111+use futures_util::{StreamExt, SinkExt};
1012use serde::Deserialize;
1113use std::sync::Arc;
1214use tokio::sync::RwLock;
···167169 );
168170169171 Ok(Html(graphiql_html))
172172+}
173173+174174+/// GraphQL WebSocket handler for subscriptions
175175+/// Accepts slice URI from query parameter (?slice=...)
176176+pub async fn graphql_subscription_handler(
177177+ State(state): State<AppState>,
178178+ Query(params): Query<GraphQLParams>,
179179+ ws: WebSocketUpgrade,
180180+) -> Result<Response, (StatusCode, String)> {
181181+ let slice_uri = params.slice.ok_or_else(|| {
182182+ (
183183+ StatusCode::BAD_REQUEST,
184184+ "Missing slice parameter. Provide ?slice=... query parameter".to_string(),
185185+ )
186186+ })?;
187187+188188+ let schema = match get_or_build_schema(&state, &slice_uri).await {
189189+ Ok(s) => s,
190190+ Err(e) => {
191191+ tracing::error!("Failed to get GraphQL schema: {:?}", e);
192192+ return Err((
193193+ StatusCode::INTERNAL_SERVER_ERROR,
194194+ format!("Schema error: {:?}", e),
195195+ ));
196196+ }
197197+ };
198198+199199+ // Upgrade to WebSocket and handle GraphQL subscriptions manually
200200+ Ok(ws
201201+ .protocols(["graphql-transport-ws", "graphql-ws"])
202202+ .on_upgrade(move |socket| handle_graphql_ws(socket, schema)))
203203+}
204204+205205+/// Handle GraphQL WebSocket connection
206206+async fn handle_graphql_ws(socket: WebSocket, schema: Schema) {
207207+ let (ws_sender, ws_receiver) = socket.split();
208208+209209+ // Convert axum WebSocket messages to strings for async-graphql
210210+ let input = ws_receiver.filter_map(|msg| {
211211+ futures_util::future::ready(match msg {
212212+ Ok(Message::Text(text)) => Some(text.to_string()),
213213+ _ => None, // Ignore other message types
214214+ })
215215+ });
216216+217217+ // Create GraphQL WebSocket handler
218218+ let mut stream = GraphQLWebSocket::new(schema, input, WebSocketProtocols::GraphQLWS);
219219+220220+ // Send GraphQL messages back through WebSocket
221221+ let mut ws_sender = ws_sender;
222222+ while let Some(msg) = stream.next().await {
223223+ let axum_msg = match msg {
224224+ WsMessage::Text(text) => Message::Text(text.into()),
225225+ WsMessage::Close(code, reason) => Message::Close(Some(axum::extract::ws::CloseFrame {
226226+ code,
227227+ reason: reason.into(),
228228+ })),
229229+ };
230230+231231+ if ws_sender.send(axum_msg).await.is_err() {
232232+ break;
233233+ }
234234+ }
170235}
171236172237/// Gets schema from cache or builds it if not cached
+3-1
api/src/graphql/mod.rs
···77mod dataloaders;
88mod types;
99pub mod handler;
1010+pub mod pubsub;
10111112pub use schema_builder::build_graphql_schema;
1212-pub use handler::{graphql_handler, graphql_playground};
1313+pub use handler::{graphql_handler, graphql_playground, graphql_subscription_handler};
1414+pub use pubsub::{RecordUpdateEvent, RecordOperation, PUBSUB};
+246
api/src/graphql/pubsub.rs
···11+//! PubSub infrastructure for broadcasting GraphQL subscription events
22+//!
33+//! This module provides a publish-subscribe mechanism for broadcasting record
44+//! updates from the Jetstream consumer to active GraphQL subscriptions.
55+66+use serde::{Deserialize, Serialize};
77+use std::collections::HashMap;
88+use std::sync::Arc;
99+use tokio::sync::{broadcast, RwLock};
1010+use tracing::{debug, info};
/// Event broadcast when a record is created or updated
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RecordUpdateEvent {
    /// AT-URI of the record (e.g. `at://did:plc:.../app.test/123`)
    pub uri: String,
    /// CID of the record version this event describes
    pub cid: String,
    /// DID of the repository the record lives in
    pub did: String,
    /// NSID of the record collection (e.g. `app.test`)
    pub collection: String,
    /// Raw record body as JSON
    pub value: serde_json::Value,
    /// Slice the record belongs to; used to route the event to the
    /// per-slice broadcast channel in `GraphQLPubSub::publish`
    pub slice_uri: String,
    /// Indexing timestamp as a string (RFC 3339 in the tests — confirm producer format)
    pub indexed_at: String,
    /// Whether the record was created, updated, or deleted
    pub operation: RecordOperation,
}
2424+2525+/// Type of record operation
2626+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
2727+pub enum RecordOperation {
2828+ Create,
2929+ Update,
3030+ Delete,
3131+}
/// PubSub manager for broadcasting events to subscribers
///
/// Each slice has its own broadcast channel to avoid cross-slice event leaking.
/// Channels are created lazily when the first subscriber for a slice connects.
pub struct GraphQLPubSub {
    /// Map of slice_uri -> broadcast sender
    /// Using broadcast channel allows multiple subscribers per slice
    // NOTE(review): the inner Arc looks redundant — the struct is only
    // accessed via the global `PUBSUB` static and every method takes
    // `&self`; confirm no clone of `channels` escapes before removing.
    channels: Arc<RwLock<HashMap<String, broadcast::Sender<RecordUpdateEvent>>>>,
    /// Channel capacity (number of events buffered per slice before lagging
    /// receivers start missing events)
    capacity: usize,
}
4545+4646+impl GraphQLPubSub {
4747+ /// Create a new PubSub manager
4848+ ///
4949+ /// # Arguments
5050+ /// * `capacity` - Number of events to buffer per slice (default: 1000)
5151+ pub fn new(capacity: usize) -> Self {
5252+ info!("Initializing GraphQL PubSub with capacity {}", capacity);
5353+ Self {
5454+ channels: Arc::new(RwLock::new(HashMap::new())),
5555+ capacity,
5656+ }
5757+ }
5858+5959+ /// Publish an event to all subscribers of a slice
6060+ ///
6161+ /// If no subscribers exist, the event is dropped silently.
6262+ pub async fn publish(&self, event: RecordUpdateEvent) {
6363+ let slice_uri = event.slice_uri.clone();
6464+6565+ let channels = self.channels.read().await;
6666+ if let Some(sender) = channels.get(&slice_uri) {
6767+ // Try to send, ignore if no active receivers
6868+ match sender.send(event.clone()) {
6969+ Ok(receiver_count) => {
7070+ debug!(
7171+ "Published {} event for {} to {} subscriber(s)",
7272+ match event.operation {
7373+ RecordOperation::Create => "CREATE",
7474+ RecordOperation::Update => "UPDATE",
7575+ RecordOperation::Delete => "DELETE",
7676+ },
7777+ event.collection,
7878+ receiver_count
7979+ );
8080+ }
8181+ Err(_) => {
8282+ // No receivers, which is fine
8383+ debug!("No active subscribers for slice {}", slice_uri);
8484+ }
8585+ }
8686+ }
8787+ }
8888+8989+ /// Subscribe to events for a specific slice
9090+ ///
9191+ /// Returns a receiver that will receive all future events for the slice.
9292+ /// Creates a new broadcast channel if one doesn't exist yet.
9393+ pub async fn subscribe(&self, slice_uri: &str) -> broadcast::Receiver<RecordUpdateEvent> {
9494+ let mut channels = self.channels.write().await;
9595+9696+ let sender = channels.entry(slice_uri.to_string()).or_insert_with(|| {
9797+ info!("Creating new broadcast channel for slice: {}", slice_uri);
9898+ let (tx, _) = broadcast::channel(self.capacity);
9999+ tx
100100+ });
101101+102102+ sender.subscribe()
103103+ }
104104+105105+ /// Get statistics about active channels and subscribers
106106+ pub async fn stats(&self) -> PubSubStats {
107107+ let channels = self.channels.read().await;
108108+ PubSubStats {
109109+ active_channels: channels.len(),
110110+ total_subscribers: channels.values().map(|s| s.receiver_count()).sum(),
111111+ }
112112+ }
113113+114114+ /// Clean up channels with no subscribers
115115+ ///
116116+ /// Should be called periodically to prevent memory leaks
117117+ pub async fn cleanup_empty_channels(&self) {
118118+ let mut channels = self.channels.write().await;
119119+ let before_count = channels.len();
120120+121121+ channels.retain(|slice_uri, sender| {
122122+ let has_subscribers = sender.receiver_count() > 0;
123123+ if !has_subscribers {
124124+ debug!("Removing empty broadcast channel for slice: {}", slice_uri);
125125+ }
126126+ has_subscribers
127127+ });
128128+129129+ let removed = before_count - channels.len();
130130+ if removed > 0 {
131131+ info!("Cleaned up {} empty broadcast channel(s)", removed);
132132+ }
133133+ }
134134+}
/// Statistics about the PubSub system
#[derive(Debug, Clone)]
pub struct PubSubStats {
    /// Number of broadcast channels currently in the map (a channel may
    /// momentarily have zero receivers until cleanup runs)
    pub active_channels: usize,
    /// Sum of active receivers across all channels
    pub total_subscribers: usize,
}
impl Default for GraphQLPubSub {
    /// Defaults to buffering up to 1000 events per slice channel.
    fn default() -> Self {
        Self::new(1000)
    }
}
// Global PubSub instance
// Initialized lazily on first access and shared across the Jetstream
// consumer (publisher side) and the GraphQL subscription handlers
// (subscriber side).
// NOTE(review): `std::sync::LazyLock` could replace `lazy_static` on
// toolchains >= 1.80 — confirm MSRV before switching.
lazy_static::lazy_static! {
    pub static ref PUBSUB: GraphQLPubSub = GraphQLPubSub::default();
}
155155+156156+/// Start periodic cleanup task for empty channels
157157+pub fn start_cleanup_task() {
158158+ tokio::spawn(async {
159159+ let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(300)); // Every 5 minutes
160160+ loop {
161161+ interval.tick().await;
162162+ PUBSUB.cleanup_empty_channels().await;
163163+164164+ let stats = PUBSUB.stats().await;
165165+ info!(
166166+ "PubSub stats: {} active channels, {} total subscribers",
167167+ stats.active_channels, stats.total_subscribers
168168+ );
169169+ }
170170+ });
171171+}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds the event fixture shared by the broadcast tests
    /// (previously duplicated verbatim in two tests).
    fn sample_event() -> RecordUpdateEvent {
        RecordUpdateEvent {
            uri: "at://did:plc:test/app.test/123".to_string(),
            cid: "bafytest".to_string(),
            did: "did:plc:test".to_string(),
            collection: "app.test".to_string(),
            value: serde_json::json!({"text": "Hello"}),
            slice_uri: "test://slice".to_string(),
            indexed_at: "2024-01-01T00:00:00Z".to_string(),
            operation: RecordOperation::Create,
        }
    }

    #[tokio::test]
    async fn test_pubsub_broadcast() {
        let pubsub = GraphQLPubSub::new(100);

        // Subscribe, publish, and verify the event round-trips.
        let mut rx = pubsub.subscribe("test://slice").await;
        let event = sample_event();
        pubsub.publish(event.clone()).await;

        let received = rx.recv().await.unwrap();
        assert_eq!(received.uri, event.uri);
        assert_eq!(received.collection, event.collection);
    }

    #[tokio::test]
    async fn test_multiple_subscribers() {
        let pubsub = GraphQLPubSub::new(100);

        let mut rx1 = pubsub.subscribe("test://slice").await;
        let mut rx2 = pubsub.subscribe("test://slice").await;

        let event = sample_event();
        pubsub.publish(event.clone()).await;

        // Both subscribers should receive the event
        assert_eq!(rx1.recv().await.unwrap().uri, event.uri);
        assert_eq!(rx2.recv().await.unwrap().uri, event.uri);
    }

    #[tokio::test]
    async fn test_cleanup_empty_channels() {
        let pubsub = GraphQLPubSub::new(100);

        // Create a subscriber and drop it at the end of the scope.
        {
            let _rx = pubsub.subscribe("test://slice").await;
            assert_eq!(pubsub.stats().await.active_channels, 1);
        }

        // Cleanup should remove the now-empty channel
        pubsub.cleanup_empty_channels().await;
        assert_eq!(pubsub.stats().await.active_channels, 0);
    }
}
+272-3
api/src/graphql/schema_builder.rs
···33//! This module generates GraphQL schemas at runtime based on lexicon definitions
44//! stored in the database, enabling flexible querying of slice records.
5566-use async_graphql::dynamic::{Field, FieldFuture, FieldValue, Object, Schema, Scalar, TypeRef, InputObject, InputValue, Enum, EnumItem};
66+use async_graphql::dynamic::{Field, FieldFuture, FieldValue, Object, Schema, Scalar, TypeRef, InputObject, InputValue, Enum, EnumItem, Subscription, SubscriptionField, SubscriptionFieldFuture};
77use async_graphql::{Error, Value as GraphQLValue};
88use base64::engine::general_purpose;
99use base64::Engine;
···14141515use crate::database::Database;
1616use crate::graphql::types::{extract_collection_fields, extract_record_key, GraphQLField, GraphQLType};
1717+use crate::graphql::PUBSUB;
17181819/// Metadata about a collection for cross-referencing
1920#[derive(Clone)]
···567568 // Build Mutation type
568569 let mutation = create_mutation_type(database.clone(), slice_uri.clone());
569570571571+ // Build Subscription type with collection-specific subscriptions
572572+ let subscription = create_subscription_type(slice_uri.clone(), &lexicons);
573573+570574 // Build and return the schema
571571- let mut schema_builder = Schema::build(query.type_name(), Some(mutation.type_name()), None)
575575+ let mut schema_builder = Schema::build(query.type_name(), Some(mutation.type_name()), Some(subscription.type_name()))
572576 .register(query)
573573- .register(mutation);
577577+ .register(mutation)
578578+ .register(subscription);
574579575580 // Register JSON scalar type for complex fields
576581 let json_scalar = Scalar::new("JSON");
···606611 // Register PageInfo type
607612 let page_info_type = create_page_info_type();
608613 schema_builder = schema_builder.register(page_info_type);
614614+615615+ // Register RecordUpdate type for subscriptions
616616+ let record_update_type = create_record_update_type();
617617+ schema_builder = schema_builder.register(record_update_type);
609618610619 // Register all object types
611620 for obj in objects_to_register {
···17361745 }
17371746 }
17381747}
17481748+17491749+/// Creates the RecordUpdate type for subscription events
17501750+fn create_record_update_type() -> Object {
17511751+ let mut record_update = Object::new("RecordUpdate");
17521752+17531753+ record_update = record_update.field(Field::new("uri", TypeRef::named_nn(TypeRef::STRING), |ctx| {
17541754+ FieldFuture::new(async move {
17551755+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
17561756+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
17571757+ if let GraphQLValue::Object(obj) = value {
17581758+ if let Some(uri) = obj.get("uri") {
17591759+ return Ok(Some(uri.clone()));
17601760+ }
17611761+ }
17621762+ Ok(None)
17631763+ })
17641764+ }));
17651765+17661766+ record_update = record_update.field(Field::new("cid", TypeRef::named_nn(TypeRef::STRING), |ctx| {
17671767+ FieldFuture::new(async move {
17681768+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
17691769+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
17701770+ if let GraphQLValue::Object(obj) = value {
17711771+ if let Some(cid) = obj.get("cid") {
17721772+ return Ok(Some(cid.clone()));
17731773+ }
17741774+ }
17751775+ Ok(None)
17761776+ })
17771777+ }));
17781778+17791779+ record_update = record_update.field(Field::new("did", TypeRef::named_nn(TypeRef::STRING), |ctx| {
17801780+ FieldFuture::new(async move {
17811781+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
17821782+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
17831783+ if let GraphQLValue::Object(obj) = value {
17841784+ if let Some(did) = obj.get("did") {
17851785+ return Ok(Some(did.clone()));
17861786+ }
17871787+ }
17881788+ Ok(None)
17891789+ })
17901790+ }));
17911791+17921792+ record_update = record_update.field(Field::new("collection", TypeRef::named_nn(TypeRef::STRING), |ctx| {
17931793+ FieldFuture::new(async move {
17941794+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
17951795+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
17961796+ if let GraphQLValue::Object(obj) = value {
17971797+ if let Some(collection) = obj.get("collection") {
17981798+ return Ok(Some(collection.clone()));
17991799+ }
18001800+ }
18011801+ Ok(None)
18021802+ })
18031803+ }));
18041804+18051805+ record_update = record_update.field(Field::new("indexedAt", TypeRef::named_nn(TypeRef::STRING), |ctx| {
18061806+ FieldFuture::new(async move {
18071807+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
18081808+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
18091809+ if let GraphQLValue::Object(obj) = value {
18101810+ if let Some(indexed_at) = obj.get("indexedAt") {
18111811+ return Ok(Some(indexed_at.clone()));
18121812+ }
18131813+ }
18141814+ Ok(None)
18151815+ })
18161816+ }));
18171817+18181818+ record_update = record_update.field(Field::new("operation", TypeRef::named_nn(TypeRef::STRING), |ctx| {
18191819+ FieldFuture::new(async move {
18201820+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
18211821+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
18221822+ if let GraphQLValue::Object(obj) = value {
18231823+ if let Some(operation) = obj.get("operation") {
18241824+ return Ok(Some(operation.clone()));
18251825+ }
18261826+ }
18271827+ Ok(None)
18281828+ })
18291829+ }));
18301830+18311831+ record_update = record_update.field(Field::new("value", TypeRef::named_nn("JSON"), |ctx| {
18321832+ FieldFuture::new(async move {
18331833+ let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
18341834+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
18351835+ if let GraphQLValue::Object(obj) = value {
18361836+ if let Some(val) = obj.get("value") {
18371837+ return Ok(Some(val.clone()));
18381838+ }
18391839+ }
18401840+ Ok(None)
18411841+ })
18421842+ }));
18431843+18441844+ record_update
18451845+}
18461846+18471847+/// Creates the Subscription root type with collection-specific subscriptions
18481848+fn create_subscription_type(slice_uri: String, lexicons: &[serde_json::Value]) -> Subscription {
18491849+ let mut subscription = Subscription::new("Subscription");
18501850+18511851+ // For each record collection, create {collection}Created, {collection}Updated, {collection}Deleted subscriptions
18521852+ for lexicon in lexicons {
18531853+ let nsid = match lexicon.get("id").and_then(|n| n.as_str()) {
18541854+ Some(n) => n,
18551855+ None => continue,
18561856+ };
18571857+18581858+ let defs = match lexicon.get("defs") {
18591859+ Some(d) => d,
18601860+ None => continue,
18611861+ };
18621862+18631863+ // Only process record types (skip queries, procedures, etc.)
18641864+ let is_record = defs
18651865+ .get("main")
18661866+ .and_then(|m| m.get("type"))
18671867+ .and_then(|t| t.as_str())
18681868+ == Some("record");
18691869+18701870+ if !is_record {
18711871+ continue;
18721872+ }
18731873+18741874+ let fields = extract_collection_fields(defs);
18751875+ if fields.is_empty() {
18761876+ continue;
18771877+ }
18781878+18791879+ let type_name = nsid_to_type_name(nsid);
18801880+ let field_base_name = nsid_to_join_field_name(nsid);
18811881+18821882+ // {collection}Created subscription
18831883+ let created_field_name = format!("{}Created", field_base_name);
18841884+ let slice_for_created = slice_uri.clone();
18851885+ let nsid_for_created = nsid.to_string();
18861886+ let type_name_for_created = type_name.clone();
18871887+18881888+ subscription = subscription.field(SubscriptionField::new(
18891889+ &created_field_name,
18901890+ TypeRef::named_nn(&type_name_for_created),
18911891+ move |_ctx| {
18921892+ let slice_uri = slice_for_created.clone();
18931893+ let collection = nsid_for_created.clone();
18941894+18951895+ SubscriptionFieldFuture::new(async move {
18961896+ let mut receiver = PUBSUB.subscribe(&slice_uri).await;
18971897+18981898+ let stream = async_stream::stream! {
18991899+ while let Ok(event) = receiver.recv().await {
19001900+ // Filter by collection and operation
19011901+ if event.collection != collection || event.operation != crate::graphql::RecordOperation::Create {
19021902+ continue;
19031903+ }
19041904+19051905+ // Convert to RecordContainer and yield
19061906+ let indexed_record = crate::models::IndexedRecord {
19071907+ uri: event.uri,
19081908+ cid: event.cid,
19091909+ did: event.did,
19101910+ collection: event.collection,
19111911+ value: event.value,
19121912+ indexed_at: event.indexed_at,
19131913+ };
19141914+ let container = RecordContainer {
19151915+ record: indexed_record,
19161916+ };
19171917+ yield Ok(FieldValue::owned_any(container));
19181918+ }
19191919+ };
19201920+19211921+ Ok(stream)
19221922+ })
19231923+ },
19241924+ )
19251925+ .description(format!("Subscribe to {} record creation events", nsid)));
19261926+19271927+ // {collection}Updated subscription
19281928+ let updated_field_name = format!("{}Updated", field_base_name);
19291929+ let slice_for_updated = slice_uri.clone();
19301930+ let nsid_for_updated = nsid.to_string();
19311931+ let type_name_for_updated = type_name.clone();
19321932+19331933+ subscription = subscription.field(SubscriptionField::new(
19341934+ &updated_field_name,
19351935+ TypeRef::named_nn(&type_name_for_updated),
19361936+ move |_ctx| {
19371937+ let slice_uri = slice_for_updated.clone();
19381938+ let collection = nsid_for_updated.clone();
19391939+19401940+ SubscriptionFieldFuture::new(async move {
19411941+ let mut receiver = PUBSUB.subscribe(&slice_uri).await;
19421942+19431943+ let stream = async_stream::stream! {
19441944+ while let Ok(event) = receiver.recv().await {
19451945+ // Filter by collection and operation
19461946+ if event.collection != collection || event.operation != crate::graphql::RecordOperation::Update {
19471947+ continue;
19481948+ }
19491949+19501950+ // Convert to RecordContainer and yield
19511951+ let indexed_record = crate::models::IndexedRecord {
19521952+ uri: event.uri,
19531953+ cid: event.cid,
19541954+ did: event.did,
19551955+ collection: event.collection,
19561956+ value: event.value,
19571957+ indexed_at: event.indexed_at,
19581958+ };
19591959+ let container = RecordContainer {
19601960+ record: indexed_record,
19611961+ };
19621962+ yield Ok(FieldValue::owned_any(container));
19631963+ }
19641964+ };
19651965+19661966+ Ok(stream)
19671967+ })
19681968+ },
19691969+ )
19701970+ .description(format!("Subscribe to {} record update events", nsid)));
19711971+19721972+ // {collection}Deleted subscription - returns just the URI string
19731973+ let deleted_field_name = format!("{}Deleted", field_base_name);
19741974+ let slice_for_deleted = slice_uri.clone();
19751975+ let nsid_for_deleted = nsid.to_string();
19761976+19771977+ subscription = subscription.field(SubscriptionField::new(
19781978+ &deleted_field_name,
19791979+ TypeRef::named_nn(TypeRef::STRING),
19801980+ move |_ctx| {
19811981+ let slice_uri = slice_for_deleted.clone();
19821982+ let collection = nsid_for_deleted.clone();
19831983+19841984+ SubscriptionFieldFuture::new(async move {
19851985+ let mut receiver = PUBSUB.subscribe(&slice_uri).await;
19861986+19871987+ let stream = async_stream::stream! {
19881988+ while let Ok(event) = receiver.recv().await {
19891989+ // Filter by collection and operation
19901990+ if event.collection != collection || event.operation != crate::graphql::RecordOperation::Delete {
19911991+ continue;
19921992+ }
19931993+19941994+ // For deletes, just return the URI
19951995+ yield Ok(FieldValue::value(GraphQLValue::String(event.uri)));
19961996+ }
19971997+ };
19981998+19991999+ Ok(stream)
20002000+ })
20012001+ },
20022002+ )
20032003+ .description(format!("Subscribe to {} record deletion events. Returns the URI of deleted records.", nsid)));
20042004+ }
20052005+20062006+ subscription
20072007+}