···33 type Value = Vec<IndexedRecord>;
34 type Error = Arc<String>;
3536- async fn load(&self, keys: &[CollectionDidKey]) -> Result<HashMap<CollectionDidKey, Self::Value>, Self::Error> {
00037 // Group keys by slice_uri and collection for optimal batching
38 let mut grouped: HashMap<(String, String), Vec<String>> = HashMap::new();
39···59 where_clause.conditions.insert(
60 "collection".to_string(),
61 WhereCondition {
62- gt: None,
63- gte: None,
64- lt: None,
65- lte: None,
66 eq: Some(serde_json::Value::String(collection.clone())),
67 in_values: None,
68 contains: None,
0000069 },
70 );
71···73 where_clause.conditions.insert(
74 "did".to_string(),
75 WhereCondition {
76- gt: None,
77- gte: None,
78- lt: None,
79- lte: None,
80 eq: None,
81 in_values: Some(
82 dids.iter()
83 .map(|did| serde_json::Value::String(did.clone()))
84- .collect()
85 ),
86 contains: None,
0000087 },
88 );
8990 // Query database with no limit - load all records for batched filtering
91- match self.db.get_slice_collections_records(
92- &slice_uri,
93- None, // No limit - load all records for this DID
94- None, // cursor
95- None, // sort
96- Some(&where_clause),
97- ).await {
000098 Ok((records, _cursor)) => {
99 // Group results by DID
100 for record in records {
···121 }
122 }
123 Err(e) => {
124- tracing::error!("DataLoader batch query failed for {}/{}: {}", slice_uri, collection, e);
00000125 // Return empty results for failed queries rather than failing the entire batch
126 }
127 }
···161 type Value = Vec<IndexedRecord>;
162 type Error = Arc<String>;
163164- async fn load(&self, keys: &[CollectionUriKey]) -> Result<HashMap<CollectionUriKey, Self::Value>, Self::Error> {
000165 // Group keys by (slice_uri, collection, reference_field) for optimal batching
166 let mut grouped: HashMap<(String, String, String), Vec<String>> = HashMap::new();
167168 for key in keys {
169 grouped
170- .entry((key.slice_uri.clone(), key.collection.clone(), key.reference_field.clone()))
0000171 .or_insert_with(Vec::new)
172 .push(key.parent_uri.clone());
173 }
···187 where_clause.conditions.insert(
188 "collection".to_string(),
189 WhereCondition {
190- gt: None,
191- gte: None,
192- lt: None,
193- lte: None,
194 eq: Some(serde_json::Value::String(collection.clone())),
195 in_values: None,
196 contains: None,
00000197 },
198 );
199···202 where_clause.conditions.insert(
203 reference_field.clone(),
204 WhereCondition {
205- gt: None,
206- gte: None,
207- lt: None,
208- lte: None,
209 eq: None,
210 in_values: Some(
211- parent_uris.iter()
0212 .map(|uri| serde_json::Value::String(uri.clone()))
213- .collect()
214 ),
215 contains: None,
00000216 },
217 );
218219 // Query database with no limit - load all records for batched filtering
220- match self.db.get_slice_collections_records(
221- &slice_uri,
222- None, // No limit - load all records matching parent URIs
223- None, // cursor
224- None, // sort
225- Some(&where_clause),
226- ).await {
0000227 Ok((records, _cursor)) => {
228 // Group results by parent URI (extract from the reference field)
229 for record in records {
···263 }
264 }
265 Err(e) => {
266- tracing::error!("CollectionUriLoader batch query failed for {}/{}: {}", slice_uri, collection, e);
00000267 // Return empty results for failed queries rather than failing the entire batch
268 }
269 }
···291 Self {
292 collection_did_loader: Arc::new(AsyncGraphQLDataLoader::new(
293 CollectionDidLoader::new(db.clone()),
294- tokio::spawn
295 )),
296 collection_uri_loader: Arc::new(AsyncGraphQLDataLoader::new(
297 CollectionUriLoader::new(db),
298- tokio::spawn
299 )),
300 }
301 }
···33 type Value = Vec<IndexedRecord>;
34 type Error = Arc<String>;
3536+ async fn load(
37+ &self,
38+ keys: &[CollectionDidKey],
39+ ) -> Result<HashMap<CollectionDidKey, Self::Value>, Self::Error> {
40 // Group keys by slice_uri and collection for optimal batching
41 let mut grouped: HashMap<(String, String), Vec<String>> = HashMap::new();
42···62 where_clause.conditions.insert(
63 "collection".to_string(),
64 WhereCondition {
000065 eq: Some(serde_json::Value::String(collection.clone())),
66 in_values: None,
67 contains: None,
68+ fuzzy: None,
69+ gt: None,
70+ gte: None,
71+ lt: None,
72+ lte: None,
73 },
74 );
75···77 where_clause.conditions.insert(
78 "did".to_string(),
79 WhereCondition {
000080 eq: None,
81 in_values: Some(
82 dids.iter()
83 .map(|did| serde_json::Value::String(did.clone()))
84+ .collect(),
85 ),
86 contains: None,
87+ fuzzy: None,
88+ gt: None,
89+ gte: None,
90+ lt: None,
91+ lte: None,
92 },
93 );
9495 // Query database with no limit - load all records for batched filtering
96+ match self
97+ .db
98+ .get_slice_collections_records(
99+ &slice_uri,
100+ None, // No limit - load all records for this DID
101+ None, // cursor
102+ None, // sort
103+ Some(&where_clause),
104+ )
105+ .await
106+ {
107 Ok((records, _cursor)) => {
108 // Group results by DID
109 for record in records {
···130 }
131 }
132 Err(e) => {
133+ tracing::error!(
134+ "DataLoader batch query failed for {}/{}: {}",
135+ slice_uri,
136+ collection,
137+ e
138+ );
139 // Return empty results for failed queries rather than failing the entire batch
140 }
141 }
···175 type Value = Vec<IndexedRecord>;
176 type Error = Arc<String>;
177178+ async fn load(
179+ &self,
180+ keys: &[CollectionUriKey],
181+ ) -> Result<HashMap<CollectionUriKey, Self::Value>, Self::Error> {
182 // Group keys by (slice_uri, collection, reference_field) for optimal batching
183 let mut grouped: HashMap<(String, String, String), Vec<String>> = HashMap::new();
184185 for key in keys {
186 grouped
187+ .entry((
188+ key.slice_uri.clone(),
189+ key.collection.clone(),
190+ key.reference_field.clone(),
191+ ))
192 .or_insert_with(Vec::new)
193 .push(key.parent_uri.clone());
194 }
···208 where_clause.conditions.insert(
209 "collection".to_string(),
210 WhereCondition {
0000211 eq: Some(serde_json::Value::String(collection.clone())),
212 in_values: None,
213 contains: None,
214+ fuzzy: None,
215+ gt: None,
216+ gte: None,
217+ lt: None,
218+ lte: None,
219 },
220 );
221···224 where_clause.conditions.insert(
225 reference_field.clone(),
226 WhereCondition {
0000227 eq: None,
228 in_values: Some(
229+ parent_uris
230+ .iter()
231 .map(|uri| serde_json::Value::String(uri.clone()))
232+ .collect(),
233 ),
234 contains: None,
235+ fuzzy: None,
236+ gt: None,
237+ gte: None,
238+ lt: None,
239+ lte: None,
240 },
241 );
242243 // Query database with no limit - load all records for batched filtering
244+ match self
245+ .db
246+ .get_slice_collections_records(
247+ &slice_uri,
248+ None, // No limit - load all records matching parent URIs
249+ None, // cursor
250+ None, // sort
251+ Some(&where_clause),
252+ )
253+ .await
254+ {
255 Ok((records, _cursor)) => {
256 // Group results by parent URI (extract from the reference field)
257 for record in records {
···291 }
292 }
293 Err(e) => {
294+ tracing::error!(
295+ "CollectionUriLoader batch query failed for {}/{}: {}",
296+ slice_uri,
297+ collection,
298+ e
299+ );
300 // Return empty results for failed queries rather than failing the entire batch
301 }
302 }
···324 Self {
325 collection_did_loader: Arc::new(AsyncGraphQLDataLoader::new(
326 CollectionDidLoader::new(db.clone()),
327+ tokio::spawn,
328 )),
329 collection_uri_loader: Arc::new(AsyncGraphQLDataLoader::new(
330 CollectionUriLoader::new(db),
331+ tokio::spawn,
332 )),
333 }
334 }
+4-1
api/src/graphql/dataloaders.rs
···9 // Check if this is a strongRef
10 if let Some(type_val) = obj.get("$type") {
11 if type_val.as_str() == Some("com.atproto.repo.strongRef") {
12- return obj.get("uri").and_then(|u| u.as_str()).map(|s| s.to_string());
00013 }
14 }
15
···9 // Check if this is a strongRef
10 if let Some(type_val) = obj.get("$type") {
11 if type_val.as_str() == Some("com.atproto.repo.strongRef") {
12+ return obj
13+ .get("uri")
14+ .and_then(|u| u.as_str())
15+ .map(|s| s.to_string());
16 }
17 }
18
···4use async_graphql::http::{WebSocket as GraphQLWebSocket, WebSocketProtocols, WsMessage};
5use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
6use axum::{
7+ extract::{
8+ Query, State, WebSocketUpgrade,
9+ ws::{Message, WebSocket},
10+ },
11 http::{HeaderMap, StatusCode},
12 response::{Html, Response},
13};
14+use futures_util::{SinkExt, StreamExt};
15use serde::Deserialize;
16use std::sync::Arc;
17use tokio::sync::RwLock;
18019use crate::AppState;
20+use crate::errors::AppError;
21use crate::graphql::GraphQLContext;
2223/// Global schema cache (one schema per slice)
···61 Ok(s) => s,
62 Err(e) => {
63 tracing::error!("Failed to get GraphQL schema: {:?}", e);
64+ return Ok(async_graphql::Response::from_errors(vec![
65+ async_graphql::ServerError::new(format!("Schema error: {:?}", e), None),
66+ ])
067 .into());
68 }
69 };
···258}
259260/// Gets schema from cache or builds it if not cached
261+async fn get_or_build_schema(state: &AppState, slice_uri: &str) -> Result<Schema, AppError> {
000262 // Check cache first
263 {
264 let cache = SCHEMA_CACHE.read().await;
···268 }
269270 // Build schema
271+ let schema =
272+ crate::graphql::build_graphql_schema(state.database.clone(), slice_uri.to_string())
273+ .await
274+ .map_err(|e| AppError::Internal(format!("Failed to build GraphQL schema: {}", e)))?;
00275276 // Cache it
277 {
+6-6
api/src/graphql/mod.rs
···3//! This module provides a GraphQL interface to query slice records with support
4//! for joining linked records through AT Protocol strongRef references.
56-mod schema_builder;
7-mod dataloaders;
8mod dataloader;
9-mod types;
10pub mod handler;
11pub mod pubsub;
001213-pub use schema_builder::build_graphql_schema;
14-pub use handler::{graphql_handler, graphql_playground, graphql_subscription_handler};
15-pub use pubsub::{RecordUpdateEvent, RecordOperation, PUBSUB};
16pub use dataloader::GraphQLContext;
000
···3//! This module provides a GraphQL interface to query slice records with support
4//! for joining linked records through AT Protocol strongRef references.
5006mod dataloader;
7+mod dataloaders;
8pub mod handler;
9pub mod pubsub;
10+mod schema_builder;
11+mod types;
1200013pub use dataloader::GraphQLContext;
14+pub use handler::{graphql_handler, graphql_playground, graphql_subscription_handler};
15+pub use pubsub::{PUBSUB, RecordOperation, RecordUpdateEvent};
16+pub use schema_builder::build_graphql_schema;
+1-2
api/src/graphql/pubsub.rs
···6use serde::{Deserialize, Serialize};
7use std::collections::HashMap;
8use std::sync::Arc;
9-use tokio::sync::{broadcast, RwLock};
10use tracing::{debug, info};
1112/// Event broadcast when a record is created or updated
···29 Update,
30 Delete,
31}
32-3334/// PubSub manager for broadcasting events to subscribers
35///
···6use serde::{Deserialize, Serialize};
7use std::collections::HashMap;
8use std::sync::Arc;
9+use tokio::sync::{RwLock, broadcast};
10use tracing::{debug, info};
1112/// Event broadcast when a record is created or updated
···29 Update,
30 Delete,
31}
03233/// PubSub manager for broadcasting events to subscribers
34///
+725-479
api/src/graphql/schema_builder.rs
···3//! This module generates GraphQL schemas at runtime based on lexicon definitions
4//! stored in the database, enabling flexible querying of slice records.
56-use async_graphql::dynamic::{Field, FieldFuture, FieldValue, Object, Schema, Scalar, TypeRef, InputObject, InputValue, Enum, EnumItem, Subscription, SubscriptionField, SubscriptionFieldFuture};
0007use async_graphql::{Error, Value as GraphQLValue};
8-use base64::engine::general_purpose;
9use base64::Engine;
010use serde_json;
11use std::collections::HashMap;
12use std::sync::Arc;
13use tokio::sync::Mutex;
1415use crate::database::Database;
16-use crate::graphql::types::{extract_collection_fields, extract_record_key, GraphQLField, GraphQLType};
17use crate::graphql::PUBSUB;
18use crate::graphql::dataloader::GraphQLContext;
0001920/// Metadata about a collection for cross-referencing
21#[derive(Clone)]
22struct CollectionMeta {
23 nsid: String,
24- key_type: String, // "tid", "literal:self", or "any"
25- type_name: String, // GraphQL type name for this collection
26 at_uri_fields: Vec<String>, // Fields with format "at-uri" for reverse joins
27}
2829/// Builds a dynamic GraphQL schema from lexicons for a given slice
30-pub async fn build_graphql_schema(
31- database: Database,
32- slice_uri: String,
33-) -> Result<Schema, String> {
34 // Fetch all lexicons for this slice
35 let all_lexicons = database
36 .get_lexicons_by_slice(&slice_uri)
···73 if !fields.is_empty() {
74 if let Some(key_type) = extract_record_key(defs) {
75 // Extract at-uri field names for reverse joins
76- let at_uri_fields: Vec<String> = fields.iter()
077 .filter(|f| f.format.as_deref() == Some("at-uri"))
78 .map(|f| f.name.clone())
79 .collect();
8081 if !at_uri_fields.is_empty() {
82- tracing::debug!(
83- "Collection {} has at-uri fields: {:?}",
84- nsid,
85- at_uri_fields
86- );
87 }
8889 all_collections.push(CollectionMeta {
···115 if !fields.is_empty() {
116 // Create a GraphQL type for this collection
117 let type_name = nsid_to_type_name(nsid);
118- let record_type = create_record_type(&type_name, &fields, database.clone(), slice_uri.clone(), &all_collections);
000000119120 // Create edge and connection types for this collection (Relay standard)
121 let edge_type = create_edge_type(&type_name);
···140141 for (field_name, filter_type) in system_fields {
142 if !lexicon_field_names.contains(field_name) {
143- where_input = where_input.field(InputValue::new(field_name, TypeRef::named(filter_type)));
0144 }
145 }
146···150 GraphQLType::Int => "IntFilter",
151 _ => "StringFilter", // Default to StringFilter for strings and other types
152 };
153- where_input = where_input.field(InputValue::new(&field.name, TypeRef::named(filter_type)));
0154 }
155156 // Add nested and/or support
157 where_input = where_input
158- .field(InputValue::new("and", TypeRef::named_list(format!("{}WhereInput", type_name))))
159- .field(InputValue::new("or", TypeRef::named_list(format!("{}WhereInput", type_name))));
000000160161 // Create GroupByField enum for this collection
162 let mut group_by_enum = Enum::new(format!("{}GroupByField", type_name));
···168169 // Create collection-specific GroupByFieldInput
170 let group_by_input = InputObject::new(format!("{}GroupByFieldInput", type_name))
171- .field(InputValue::new("field", TypeRef::named_nn(format!("{}GroupByField", type_name))))
000172 .field(InputValue::new("interval", TypeRef::named("DateInterval")));
173174 // Create collection-specific SortFieldInput
175 let sort_field_input = InputObject::new(format!("{}SortFieldInput", type_name))
176- .field(InputValue::new("field", TypeRef::named_nn(format!("{}GroupByField", type_name))))
177- .field(InputValue::new("direction", TypeRef::named("SortDirection")));
000000178179 // Collect the types to register with schema later
180 objects_to_register.push(record_type);
···214 };
215216 // Parse sortBy argument
217- let sort_by: Option<Vec<crate::models::SortField>> = match ctx.args.get("sortBy") {
000218 Some(val) => {
219 if let Ok(list) = val.list() {
220 let mut sort_fields = Vec::new();
221 for item in list.iter() {
222 if let Ok(obj) = item.object() {
223- let field = obj.get("field")
224- .and_then(|v| v.enum_name().ok().map(|s| s.to_string()))
000225 .unwrap_or_else(|| "indexedAt".to_string());
226- let direction = obj.get("direction")
227- .and_then(|v| v.enum_name().ok().map(|s| s.to_string()))
000228 .unwrap_or_else(|| "desc".to_string());
229- sort_fields.push(crate::models::SortField { field, direction });
000230 }
231 }
232 Some(sort_fields)
233 } else {
234 None
235 }
236- },
237 None => None,
238 };
239···249 where_clause.conditions.insert(
250 "collection".to_string(),
251 crate::models::WhereCondition {
252- gt: None,
253- gte: None,
254- lt: None,
255- lte: None,
256 eq: Some(serde_json::Value::String(collection.clone())),
257 in_values: None,
258 contains: None,
0259 },
260 );
261···272 }
273274 // Resolve actorHandle to did if present
275- if let Some(actor_handle_condition) = where_clause.conditions.remove("actorHandle") {
00276 // Collect handles to resolve
277 let mut handles = Vec::new();
278 if let Some(eq_value) = &actor_handle_condition.eq {
···296 // Replace actorHandle condition with did condition
297 let did_condition = if dids.len() == 1 {
298 crate::models::WhereCondition {
299- gt: None,
300- gte: None,
301- lt: None,
302- lte: None,
303- eq: Some(serde_json::Value::String(dids[0].clone())),
00304 in_values: None,
305 contains: None,
0306 }
307 } else {
308 crate::models::WhereCondition {
309- gt: None,
310- gte: None,
311- lt: None,
312- lte: None,
313 eq: None,
314- in_values: Some(dids.into_iter().map(|d| serde_json::Value::String(d)).collect()),
000000315 contains: None,
0316 }
317 };
318- where_clause.conditions.insert("did".to_string(), did_condition);
00319 }
320 // If no DIDs found, the query will return 0 results naturally
321 }
···336 Some(&where_clause),
337 )
338 .await
339- .map_err(|e| {
340- Error::new(format!("Database query failed: {}", e))
341- })?;
342343 // Query database for total count
344 let total_count = db
345 .count_slice_collections_records(&slice, Some(&where_clause))
346 .await
347- .map_err(|e| {
348- Error::new(format!("Count query failed: {}", e))
349- })? as i32;
350351 // Convert records to RecordContainers
352 let record_containers: Vec<RecordContainer> = records
···521 eq: Some(serde_json::Value::String(collection.clone())),
522 in_values: None,
523 contains: None,
0524 },
525 );
526···565 eq: Some(serde_json::Value::String(dids[0].clone())),
566 in_values: None,
567 contains: None,
0568 }
569 } else {
570 crate::models::WhereCondition {
···575 eq: None,
576 in_values: Some(dids.into_iter().map(|d| serde_json::Value::String(d)).collect()),
577 contains: None,
0578 }
579 };
580 where_clause.conditions.insert("did".to_string(), did_condition);
···639 let subscription = create_subscription_type(slice_uri.clone(), &lexicons);
640641 // Build and return the schema with complexity limits
642- let mut schema_builder = Schema::build(query.type_name(), Some(mutation.type_name()), Some(subscription.type_name()))
643- .register(query)
644- .register(mutation)
645- .register(subscription)
646- .limit_depth(50) // Higher limit to support GraphiQL introspection with reverse joins
647- .limit_complexity(5000); // Prevent expensive deeply nested queries
0000648649 // Register JSON scalar type for complex fields
650 let json_scalar = Scalar::new("JSON");
···655 .field(InputValue::new("eq", TypeRef::named(TypeRef::STRING)))
656 .field(InputValue::new("in", TypeRef::named_list(TypeRef::STRING)))
657 .field(InputValue::new("contains", TypeRef::named(TypeRef::STRING)))
0658 .field(InputValue::new("gt", TypeRef::named(TypeRef::STRING)))
659 .field(InputValue::new("gte", TypeRef::named(TypeRef::STRING)))
660 .field(InputValue::new("lt", TypeRef::named(TypeRef::STRING)))
···746/// Container to hold blob data and DID for URL generation
747#[derive(Clone)]
748struct BlobContainer {
749- blob_ref: String, // CID reference
750- mime_type: String, // MIME type
751- size: i64, // Size in bytes
752- did: String, // DID for CDN URL generation
753}
754755/// Creates a GraphQL Object type for a record collection
···768769 // Add standard AT Protocol fields only if they don't conflict with lexicon fields
770 if !lexicon_field_names.contains("uri") {
771- object = object.field(Field::new("uri", TypeRef::named_nn(TypeRef::STRING), |ctx| {
772- FieldFuture::new(async move {
773- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
774- Ok(Some(GraphQLValue::from(container.record.uri.clone())))
775- })
776- }));
0000777 }
778779 if !lexicon_field_names.contains("cid") {
780- object = object.field(Field::new("cid", TypeRef::named_nn(TypeRef::STRING), |ctx| {
781- FieldFuture::new(async move {
782- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
783- Ok(Some(GraphQLValue::from(container.record.cid.clone())))
784- })
785- }));
0000786 }
787788 if !lexicon_field_names.contains("did") {
789- object = object.field(Field::new("did", TypeRef::named_nn(TypeRef::STRING), |ctx| {
790- FieldFuture::new(async move {
791- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
792- Ok(Some(GraphQLValue::from(container.record.did.clone())))
793- })
794- }));
0000795 }
796797 if !lexicon_field_names.contains("indexedAt") {
···817 "actorHandle",
818 TypeRef::named(TypeRef::STRING),
819 move |ctx| {
820- let db = db_for_actor.clone();
821- let slice = slice_for_actor.clone();
822- FieldFuture::new(async move {
823- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
824- let did = &container.record.did;
825826- // Build where clause to find actor by DID
827- let mut where_clause = crate::models::WhereClause {
828- conditions: std::collections::HashMap::new(),
829- or_conditions: None,
830- and: None,
831- or: None,
832- };
833- where_clause.conditions.insert(
834- "did".to_string(),
835- crate::models::WhereCondition {
836- gt: None,
837- gte: None,
838- lt: None,
839- lte: None,
840- eq: Some(serde_json::Value::String(did.clone())),
841- in_values: None,
842- contains: None,
843- },
844- );
0845846- match db.get_slice_actors(&slice, Some(1), None, Some(&where_clause)).await {
847- Ok((actors, _cursor)) => {
848- if let Some(actor) = actors.first() {
849- if let Some(handle) = &actor.handle {
850- Ok(Some(GraphQLValue::from(handle.clone())))
851- } else {
852- Ok(None)
853- }
854 } else {
855 Ok(None)
856 }
857- }
858- Err(e) => {
859- tracing::debug!("Actor not found for {}: {}", did, e);
860 Ok(None)
861 }
862 }
863- })
864- },
865- ));
00000866867 // Add fields from lexicon
868 for field in fields {
···910 .unwrap_or("image/jpeg")
911 .to_string();
912913- let size = obj
914- .get("size")
915- .and_then(|s| s.as_i64())
916- .unwrap_or(0);
917918 let blob_container = BlobContainer {
919 blob_ref,
···952 .unwrap_or("image/jpeg")
953 .to_string();
954955- let size = obj
956- .get("size")
957- .and_then(|s| s.as_i64())
958- .unwrap_or(0);
959960 let blob_container = BlobContainer {
961 blob_ref,
···980 match db.get_record(&uri).await {
981 Ok(Some(linked_record)) => {
982 // Convert the linked record to a JSON value
983- let record_json = serde_json::to_value(linked_record)
984- .map_err(|e| {
985 Error::new(format!("Serialization error: {}", e))
986 })?;
987···10211022 // Collect all string fields with format "at-uri" that might reference this collection
1023 // We'll check each one at runtime to see if it contains a URI to this collection
1024- let uri_ref_fields: Vec<_> = fields.iter()
01025 .filter(|f| matches!(f.format.as_deref(), Some("at-uri")))
1026 .collect();
1027···10311032 // If we found at-uri fields, create a resolver that checks each one at runtime
1033 if !uri_ref_fields.is_empty() {
1034- let ref_field_names: Vec<String> = uri_ref_fields.iter().map(|f| f.name.clone()).collect();
01035 let db_for_uri_join = database.clone();
1036 let target_collection = collection_nsid.clone();
1037···1055 match db.get_record(uri).await {
1056 Ok(Some(record)) => {
1057 let new_container = RecordContainer { record };
1058- return Ok(Some(FieldValue::owned_any(new_container)));
001059 }
1060 Ok(None) => continue, // Try next field
1061- Err(_) => continue, // Try next field
1062 }
1063 }
1064 }
···1083 let db = db_for_join.clone();
1084 let nsid = collection_nsid.clone();
1085 FieldFuture::new(async move {
1086- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
01087 let uri = format!("at://{}/{}/self", container.record.did, nsid);
10881089 match db.get_record(&uri).await {
1090 Ok(Some(record)) => {
1091- let new_container = RecordContainer {
1092- record,
1093- };
1094 Ok(Some(FieldValue::owned_any(new_container)))
1095 }
1096 Ok(None) => Ok(None),
···1179 eq: Some(serde_json::Value::String(nsid.clone())),
1180 in_values: None,
1181 contains: None,
01182 },
1183 );
1184 where_clause.conditions.insert(
···1191 eq: Some(serde_json::Value::String(did.clone())),
1192 in_values: None,
1193 contains: None,
01194 },
1195 );
1196···1354 let collection_for_count = collection.nsid.clone();
1355 let at_uri_fields_for_count = collection.at_uri_fields.clone();
13561357- object = object.field(
1358- Field::new(
1359- &count_field_name,
1360- TypeRef::named_nn(TypeRef::INT),
1361- move |ctx| {
1362- let slice = slice_for_count.clone();
1363- let nsid = collection_for_count.clone();
1364- let db = db_for_count.clone();
1365- let ref_fields = at_uri_fields_for_count.clone();
1366- FieldFuture::new(async move {
1367- let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
1368- let parent_uri = &container.record.uri;
13691370- // Build where clause to count records referencing this URI
1371- for ref_field in &ref_fields {
1372- let mut where_clause = crate::models::WhereClause {
1373- conditions: HashMap::new(),
1374- or_conditions: None,
1375- and: None,
1376- or: None,
1377- };
13781379- where_clause.conditions.insert(
1380- "collection".to_string(),
1381- crate::models::WhereCondition {
1382- gt: None,
1383- gte: None,
1384- lt: None,
1385- lte: None,
1386- eq: Some(serde_json::Value::String(nsid.clone())),
1387- in_values: None,
1388- contains: None,
1389- },
1390- );
013911392- where_clause.conditions.insert(
1393- ref_field.clone(),
1394- crate::models::WhereCondition {
1395- gt: None,
1396- gte: None,
1397- lt: None,
1398- lte: None,
1399- eq: Some(serde_json::Value::String(parent_uri.clone())),
1400- in_values: None,
1401- contains: None,
1402- },
1403- );
014041405- match db.count_slice_collections_records(&slice, Some(&where_clause)).await {
1406- Ok(count) if count > 0 => {
1407- return Ok(Some(FieldValue::value(count as i32)));
1408- }
1409- Ok(_) => continue,
1410- Err(e) => {
1411- tracing::debug!("Count error for {}: {}", nsid, e);
1412- continue;
1413- }
001414 }
1415 }
014161417- // No matching field found, return 0
1418- Ok(Some(FieldValue::value(0)))
1419- })
1420- },
1421- )
1422- );
1423 }
14241425 object
···1504 // For arrays of primitives, use typed arrays
1505 // For arrays of complex types, use JSON scalar
1506 match inner.as_ref() {
1507- GraphQLType::String | GraphQLType::Int | GraphQLType::Boolean | GraphQLType::Float => {
0001508 let inner_ref = match inner.as_ref() {
1509 GraphQLType::String => TypeRef::STRING,
1510 GraphQLType::Int => TypeRef::INT,
···1545 let mut blob = Object::new("Blob");
15461547 // ref field - CID reference
1548- blob = blob.field(Field::new("ref", TypeRef::named_nn(TypeRef::STRING), |ctx| {
1549- FieldFuture::new(async move {
1550- let container = ctx.parent_value.try_downcast_ref::<BlobContainer>()?;
1551- Ok(Some(GraphQLValue::from(container.blob_ref.clone())))
1552- })
1553- }));
000015541555 // mimeType field
1556- blob = blob.field(Field::new("mimeType", TypeRef::named_nn(TypeRef::STRING), |ctx| {
1557- FieldFuture::new(async move {
1558- let container = ctx.parent_value.try_downcast_ref::<BlobContainer>()?;
1559- Ok(Some(GraphQLValue::from(container.mime_type.clone())))
1560- })
1561- }));
000015621563 // size field
1564 blob = blob.field(Field::new("size", TypeRef::named_nn(TypeRef::INT), |ctx| {
···1607fn create_sync_result_type() -> Object {
1608 let mut sync_result = Object::new("SyncResult");
16091610- sync_result = sync_result.field(Field::new("success", TypeRef::named_nn(TypeRef::BOOLEAN), |ctx| {
1611- FieldFuture::new(async move {
1612- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1613- .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1614- if let GraphQLValue::Object(obj) = value {
1615- if let Some(success) = obj.get("success") {
1616- return Ok(Some(success.clone()));
0000001617 }
1618- }
1619- Ok(None)
1620- })
1621- }));
16221623- sync_result = sync_result.field(Field::new("reposProcessed", TypeRef::named_nn(TypeRef::INT), |ctx| {
1624- FieldFuture::new(async move {
1625- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1626- .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1627- if let GraphQLValue::Object(obj) = value {
1628- if let Some(repos) = obj.get("reposProcessed") {
1629- return Ok(Some(repos.clone()));
0000001630 }
1631- }
1632- Ok(None)
1633- })
1634- }));
16351636- sync_result = sync_result.field(Field::new("recordsSynced", TypeRef::named_nn(TypeRef::INT), |ctx| {
1637- FieldFuture::new(async move {
1638- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1639- .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1640- if let GraphQLValue::Object(obj) = value {
1641- if let Some(records) = obj.get("recordsSynced") {
1642- return Ok(Some(records.clone()));
0000001643 }
1644- }
1645- Ok(None)
1646- })
1647- }));
16481649- sync_result = sync_result.field(Field::new("timedOut", TypeRef::named_nn(TypeRef::BOOLEAN), |ctx| {
1650- FieldFuture::new(async move {
1651- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1652- .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1653- if let GraphQLValue::Object(obj) = value {
1654- if let Some(timed_out) = obj.get("timedOut") {
1655- return Ok(Some(timed_out.clone()));
0000001656 }
1657- }
1658- Ok(None)
1659- })
1660- }));
16611662- sync_result = sync_result.field(Field::new("message", TypeRef::named_nn(TypeRef::STRING), |ctx| {
1663- FieldFuture::new(async move {
1664- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1665- .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1666- if let GraphQLValue::Object(obj) = value {
1667- if let Some(message) = obj.get("message") {
1668- return Ok(Some(message.clone()));
0000001669 }
1670- }
1671- Ok(None)
1672- })
1673- }));
16741675 sync_result
1676}
···1698 .field(InputValue::new("eq", TypeRef::named(TypeRef::STRING)))
1699 .field(InputValue::new("in", TypeRef::named_list(TypeRef::STRING)))
1700 .field(InputValue::new("contains", TypeRef::named(TypeRef::STRING)))
01701}
17021703/// Creates the IntCondition input type for int field filtering
···1711fn create_page_info_type() -> Object {
1712 let mut page_info = Object::new("PageInfo");
17131714- page_info = page_info.field(Field::new("hasNextPage", TypeRef::named_nn(TypeRef::BOOLEAN), |ctx| {
1715- FieldFuture::new(async move {
1716- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1717- .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1718- if let GraphQLValue::Object(obj) = value {
1719- if let Some(has_next) = obj.get("hasNextPage") {
1720- return Ok(Some(has_next.clone()));
0000001721 }
1722- }
1723- Ok(Some(GraphQLValue::from(false)))
1724- })
1725- }));
17261727- page_info = page_info.field(Field::new("hasPreviousPage", TypeRef::named_nn(TypeRef::BOOLEAN), |ctx| {
1728- FieldFuture::new(async move {
1729- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1730- .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1731- if let GraphQLValue::Object(obj) = value {
1732- if let Some(has_prev) = obj.get("hasPreviousPage") {
1733- return Ok(Some(has_prev.clone()));
0000001734 }
1735- }
1736- Ok(Some(GraphQLValue::from(false)))
1737- })
1738- }));
17391740- page_info = page_info.field(Field::new("startCursor", TypeRef::named(TypeRef::STRING), |ctx| {
1741- FieldFuture::new(async move {
1742- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1743- .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1744- if let GraphQLValue::Object(obj) = value {
1745- if let Some(cursor) = obj.get("startCursor") {
1746- return Ok(Some(cursor.clone()));
0000001747 }
1748- }
1749- Ok(None)
1750- })
1751- }));
17521753- page_info = page_info.field(Field::new("endCursor", TypeRef::named(TypeRef::STRING), |ctx| {
1754- FieldFuture::new(async move {
1755- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
1756- .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1757- if let GraphQLValue::Object(obj) = value {
1758- if let Some(cursor) = obj.get("endCursor") {
1759- return Ok(Some(cursor.clone()));
0000001760 }
1761- }
1762- Ok(None)
1763- })
1764- }));
17651766 page_info
1767}
···1798 }));
17991800 // Add cursor field
1801- edge = edge.field(Field::new("cursor", TypeRef::named_nn(TypeRef::STRING), |ctx| {
1802- FieldFuture::new(async move {
1803- let edge_data = ctx.parent_value.try_downcast_ref::<EdgeData>()?;
1804- Ok(Some(GraphQLValue::from(edge_data.cursor.clone())))
1805- })
1806- }));
000018071808 edge
1809}
···1815 let mut connection = Object::new(&connection_name);
18161817 // Add totalCount field
1818- connection = connection.field(Field::new("totalCount", TypeRef::named_nn(TypeRef::INT), |ctx| {
1819- FieldFuture::new(async move {
1820- let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
1821- Ok(Some(GraphQLValue::from(data.total_count)))
1822- })
1823- }));
000018241825 // Add pageInfo field
1826- connection = connection.field(Field::new("pageInfo", TypeRef::named_nn("PageInfo"), |ctx| {
1827- FieldFuture::new(async move {
1828- let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
00018291830- let mut page_info = async_graphql::indexmap::IndexMap::new();
1831- page_info.insert(
1832- async_graphql::Name::new("hasNextPage"),
1833- GraphQLValue::from(data.has_next_page)
1834- );
1835- // For forward pagination only, hasPreviousPage is always false
1836- page_info.insert(
1837- async_graphql::Name::new("hasPreviousPage"),
1838- GraphQLValue::from(false)
1839- );
18401841- // Add startCursor (first node's cid if available)
1842- if !data.nodes.is_empty() {
1843- if let Some(first_record) = data.nodes.first() {
1844- let start_cursor = general_purpose::URL_SAFE_NO_PAD.encode(first_record.record.cid.clone());
00000000001845 page_info.insert(
1846- async_graphql::Name::new("startCursor"),
1847- GraphQLValue::from(start_cursor)
1848 );
1849 }
1850- }
18511852- // Add endCursor
1853- if let Some(ref cursor) = data.end_cursor {
1854- page_info.insert(
1855- async_graphql::Name::new("endCursor"),
1856- GraphQLValue::from(cursor.clone())
1857- );
1858- }
1859-1860- Ok(Some(FieldValue::owned_any(GraphQLValue::Object(page_info))))
1861- })
1862- }));
18631864 // Add edges field (Relay standard)
1865 let edge_type = format!("{}Edge", record_type_name);
1866- connection = connection.field(Field::new("edges", TypeRef::named_nn_list_nn(&edge_type), |ctx| {
1867- FieldFuture::new(async move {
1868- let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
00018691870- let field_values: Vec<FieldValue<'_>> = data.nodes.iter()
1871- .map(|node| {
1872- // Use base64-encoded CID as cursor
1873- let cursor = general_purpose::URL_SAFE_NO_PAD.encode(node.record.cid.clone());
1874- let edge = EdgeData {
1875- node: node.clone(),
1876- cursor,
1877- };
1878- FieldValue::owned_any(edge)
1879- })
1880- .collect();
00018811882- Ok(Some(FieldValue::list(field_values)))
1883- })
1884- }));
018851886 // Add nodes field (convenience, direct access to records without edges wrapper)
1887- connection = connection.field(Field::new("nodes", TypeRef::named_nn_list_nn(record_type_name), |ctx| {
1888- FieldFuture::new(async move {
1889- let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
00018901891- let field_values: Vec<FieldValue<'_>> = data.nodes.iter()
1892- .map(|node| FieldValue::owned_any(node.clone()))
1893- .collect();
0018941895- Ok(Some(FieldValue::list(field_values)))
1896- })
1897- }));
018981899 connection
1900}
···1916 let slice = slice_clone.clone();
19171918 FieldFuture::new(async move {
1919- let did = ctx.args.get("did")
001920 .and_then(|v| v.string().ok())
1921 .ok_or_else(|| Error::new("did argument is required"))?;
19221923 // Create sync service and call sync_user_collections
1924 let cache_backend = crate::cache::CacheFactory::create_cache(
1925- crate::cache::CacheBackend::InMemory { ttl_seconds: None }
1926- ).await.map_err(|e| Error::new(format!("Failed to create cache: {}", e)))?;
001927 let cache = Arc::new(Mutex::new(crate::cache::SliceCache::new(cache_backend)));
1928 let sync_service = crate::sync::SyncService::with_cache(
1929 db.clone(),
···19391940 // Convert result to GraphQL object
1941 let mut obj = async_graphql::indexmap::IndexMap::new();
1942- obj.insert(async_graphql::Name::new("success"), GraphQLValue::from(result.success));
1943- obj.insert(async_graphql::Name::new("reposProcessed"), GraphQLValue::from(result.repos_processed));
1944- obj.insert(async_graphql::Name::new("recordsSynced"), GraphQLValue::from(result.records_synced));
1945- obj.insert(async_graphql::Name::new("timedOut"), GraphQLValue::from(result.timed_out));
1946- obj.insert(async_graphql::Name::new("message"), GraphQLValue::from(result.message));
00000000000000019471948 Ok(Some(FieldValue::owned_any(GraphQLValue::Object(obj))))
1949 })
···1953 "did",
1954 TypeRef::named_nn(TypeRef::STRING),
1955 ))
1956- .description("Sync user collections for a given DID")
1957 );
19581959 mutation
···1982 let camel_case = nsid_to_join_field_name(nsid);
19831984 // Then pluralize the end
1985- if camel_case.ends_with("s") || camel_case.ends_with("x") || camel_case.ends_with("ch") || camel_case.ends_with("sh") {
00001986 format!("{}es", camel_case) // status -> statuses, box -> boxes
1987 } else if camel_case.ends_with("y") && camel_case.len() > 1 {
1988 let chars: Vec<char> = camel_case.chars().collect();
···2027 for field in fields {
2028 let field_name = field.name.clone();
2029 let field_name_clone = field_name.clone();
2030- aggregated = aggregated.field(Field::new(&field_name, TypeRef::named("JSON"), move |ctx| {
2031- let field_name = field_name_clone.clone();
000000000000000000000002032 FieldFuture::new(async move {
2033 let json_value = ctx.parent_value.try_downcast_ref::<serde_json::Value>()?;
2034 if let Some(obj) = json_value.as_object() {
2035- if let Some(value) = obj.get(&field_name) {
2036- // Convert serde_json::Value to async_graphql::Value
2037- let graphql_value = serde_json_to_graphql_value(value);
2038- return Ok(Some(graphql_value));
2039 }
2040 }
2041- Ok(None)
2042 })
2043- }));
2044- }
2045-2046- // Add count field
2047- aggregated = aggregated.field(Field::new("count", TypeRef::named_nn(TypeRef::INT), |ctx| {
2048- FieldFuture::new(async move {
2049- let json_value = ctx.parent_value.try_downcast_ref::<serde_json::Value>()?;
2050- if let Some(obj) = json_value.as_object() {
2051- if let Some(count) = obj.get("count") {
2052- if let Some(count_i64) = count.as_i64() {
2053- return Ok(Some(GraphQLValue::from(count_i64 as i32)));
2054- }
2055- }
2056- }
2057- Ok(Some(GraphQLValue::from(0)))
2058- })
2059- }));
20602061 aggregated
2062}
···2113fn create_record_update_type() -> Object {
2114 let mut record_update = Object::new("RecordUpdate");
21152116- record_update = record_update.field(Field::new("uri", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2117- FieldFuture::new(async move {
2118- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2119- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2120- if let GraphQLValue::Object(obj) = value {
2121- if let Some(uri) = obj.get("uri") {
2122- return Ok(Some(uri.clone()));
0000002123 }
2124- }
2125- Ok(None)
2126- })
2127- }));
21282129- record_update = record_update.field(Field::new("cid", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2130- FieldFuture::new(async move {
2131- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2132- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2133- if let GraphQLValue::Object(obj) = value {
2134- if let Some(cid) = obj.get("cid") {
2135- return Ok(Some(cid.clone()));
0000002136 }
2137- }
2138- Ok(None)
2139- })
2140- }));
21412142- record_update = record_update.field(Field::new("did", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2143- FieldFuture::new(async move {
2144- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2145- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2146- if let GraphQLValue::Object(obj) = value {
2147- if let Some(did) = obj.get("did") {
2148- return Ok(Some(did.clone()));
0000002149 }
2150- }
2151- Ok(None)
2152- })
2153- }));
21542155- record_update = record_update.field(Field::new("collection", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2156- FieldFuture::new(async move {
2157- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2158- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2159- if let GraphQLValue::Object(obj) = value {
2160- if let Some(collection) = obj.get("collection") {
2161- return Ok(Some(collection.clone()));
0000002162 }
2163- }
2164- Ok(None)
2165- })
2166- }));
21672168- record_update = record_update.field(Field::new("indexedAt", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2169- FieldFuture::new(async move {
2170- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2171- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2172- if let GraphQLValue::Object(obj) = value {
2173- if let Some(indexed_at) = obj.get("indexedAt") {
2174- return Ok(Some(indexed_at.clone()));
0000002175 }
2176- }
2177- Ok(None)
2178- })
2179- }));
21802181- record_update = record_update.field(Field::new("operation", TypeRef::named_nn(TypeRef::STRING), |ctx| {
2182- FieldFuture::new(async move {
2183- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
2184- .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2185- if let GraphQLValue::Object(obj) = value {
2186- if let Some(operation) = obj.get("operation") {
2187- return Ok(Some(operation.clone()));
0000002188 }
2189- }
2190- Ok(None)
2191- })
2192- }));
21932194 record_update = record_update.field(Field::new("value", TypeRef::named_nn("JSON"), |ctx| {
2195 FieldFuture::new(async move {
2196- let value = ctx.parent_value.downcast_ref::<GraphQLValue>()
002197 .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2198 if let GraphQLValue::Object(obj) = value {
2199 if let Some(val) = obj.get("value") {
···2370}
23712372/// Helper function to parse GraphQL where clause recursively
2373-fn parse_where_clause(where_obj: async_graphql::dynamic::ObjectAccessor) -> crate::models::WhereClause {
002374 let mut where_clause = crate::models::WhereClause {
2375 conditions: HashMap::new(),
2376 or_conditions: None,
···2419 eq: None,
2420 in_values: None,
2421 contains: None,
02422 gt: None,
2423 gte: None,
2424 lt: None,
···2456 }
2457 }
245800000002459 // Parse gt condition
2460 if let Some(gt_val) = condition_obj.get("gt") {
2461 if let Ok(gt_str) = gt_val.string() {
···2499 field_str.to_string()
2500 };
25012502- where_clause.conditions.insert(db_field_name, where_condition);
002503 }
2504 }
2505
···3//! This module generates GraphQL schemas at runtime based on lexicon definitions
4//! stored in the database, enabling flexible querying of slice records.
56+use async_graphql::dynamic::{
7+ Enum, EnumItem, Field, FieldFuture, FieldValue, InputObject, InputValue, Object, Scalar,
8+ Schema, Subscription, SubscriptionField, SubscriptionFieldFuture, TypeRef,
9+};
10use async_graphql::{Error, Value as GraphQLValue};
011use base64::Engine;
12+use base64::engine::general_purpose;
13use serde_json;
14use std::collections::HashMap;
15use std::sync::Arc;
16use tokio::sync::Mutex;
1718use crate::database::Database;
019use crate::graphql::PUBSUB;
20use crate::graphql::dataloader::GraphQLContext;
21+use crate::graphql::types::{
22+ GraphQLField, GraphQLType, extract_collection_fields, extract_record_key,
23+};
/// Metadata about a collection for cross-referencing.
///
/// One entry is gathered per lexicon of the slice before the GraphQL
/// object types are built, so the type-building pass can wire joins
/// between collections.
#[derive(Clone)]
struct CollectionMeta {
    // Collection NSID this metadata describes; used elsewhere in this
    // module to build record URIs and derive GraphQL type names.
    nsid: String,
    key_type: String,           // "tid", "literal:self", or "any"
    type_name: String,          // GraphQL type name for this collection
    at_uri_fields: Vec<String>, // Fields with format "at-uri" for reverse joins
}
3334/// Builds a dynamic GraphQL schema from lexicons for a given slice
35+pub async fn build_graphql_schema(database: Database, slice_uri: String) -> Result<Schema, String> {
00036 // Fetch all lexicons for this slice
37 let all_lexicons = database
38 .get_lexicons_by_slice(&slice_uri)
···75 if !fields.is_empty() {
76 if let Some(key_type) = extract_record_key(defs) {
77 // Extract at-uri field names for reverse joins
78+ let at_uri_fields: Vec<String> = fields
79+ .iter()
80 .filter(|f| f.format.as_deref() == Some("at-uri"))
81 .map(|f| f.name.clone())
82 .collect();
8384 if !at_uri_fields.is_empty() {
85+ tracing::debug!("Collection {} has at-uri fields: {:?}", nsid, at_uri_fields);
000086 }
8788 all_collections.push(CollectionMeta {
···114 if !fields.is_empty() {
115 // Create a GraphQL type for this collection
116 let type_name = nsid_to_type_name(nsid);
117+ let record_type = create_record_type(
118+ &type_name,
119+ &fields,
120+ database.clone(),
121+ slice_uri.clone(),
122+ &all_collections,
123+ );
124125 // Create edge and connection types for this collection (Relay standard)
126 let edge_type = create_edge_type(&type_name);
···145146 for (field_name, filter_type) in system_fields {
147 if !lexicon_field_names.contains(field_name) {
148+ where_input =
149+ where_input.field(InputValue::new(field_name, TypeRef::named(filter_type)));
150 }
151 }
152···156 GraphQLType::Int => "IntFilter",
157 _ => "StringFilter", // Default to StringFilter for strings and other types
158 };
159+ where_input =
160+ where_input.field(InputValue::new(&field.name, TypeRef::named(filter_type)));
161 }
162163 // Add nested and/or support
164 where_input = where_input
165+ .field(InputValue::new(
166+ "and",
167+ TypeRef::named_list(format!("{}WhereInput", type_name)),
168+ ))
169+ .field(InputValue::new(
170+ "or",
171+ TypeRef::named_list(format!("{}WhereInput", type_name)),
172+ ));
173174 // Create GroupByField enum for this collection
175 let mut group_by_enum = Enum::new(format!("{}GroupByField", type_name));
···181182 // Create collection-specific GroupByFieldInput
183 let group_by_input = InputObject::new(format!("{}GroupByFieldInput", type_name))
184+ .field(InputValue::new(
185+ "field",
186+ TypeRef::named_nn(format!("{}GroupByField", type_name)),
187+ ))
188 .field(InputValue::new("interval", TypeRef::named("DateInterval")));
189190 // Create collection-specific SortFieldInput
191 let sort_field_input = InputObject::new(format!("{}SortFieldInput", type_name))
192+ .field(InputValue::new(
193+ "field",
194+ TypeRef::named_nn(format!("{}GroupByField", type_name)),
195+ ))
196+ .field(InputValue::new(
197+ "direction",
198+ TypeRef::named("SortDirection"),
199+ ));
200201 // Collect the types to register with schema later
202 objects_to_register.push(record_type);
···236 };
237238 // Parse sortBy argument
239+ let sort_by: Option<Vec<crate::models::SortField>> = match ctx
240+ .args
241+ .get("sortBy")
242+ {
243 Some(val) => {
244 if let Ok(list) = val.list() {
245 let mut sort_fields = Vec::new();
246 for item in list.iter() {
247 if let Ok(obj) = item.object() {
248+ let field = obj
249+ .get("field")
250+ .and_then(|v| {
251+ v.enum_name().ok().map(|s| s.to_string())
252+ })
253 .unwrap_or_else(|| "indexedAt".to_string());
254+ let direction = obj
255+ .get("direction")
256+ .and_then(|v| {
257+ v.enum_name().ok().map(|s| s.to_string())
258+ })
259 .unwrap_or_else(|| "desc".to_string());
260+ sort_fields.push(crate::models::SortField {
261+ field,
262+ direction,
263+ });
264 }
265 }
266 Some(sort_fields)
267 } else {
268 None
269 }
270+ }
271 None => None,
272 };
273···283 where_clause.conditions.insert(
284 "collection".to_string(),
285 crate::models::WhereCondition {
286+ gt: None,
287+ gte: None,
288+ lt: None,
289+ lte: None,
290 eq: Some(serde_json::Value::String(collection.clone())),
291 in_values: None,
292 contains: None,
293+ fuzzy: None,
294 },
295 );
296···307 }
308309 // Resolve actorHandle to did if present
310+ if let Some(actor_handle_condition) =
311+ where_clause.conditions.remove("actorHandle")
312+ {
313 // Collect handles to resolve
314 let mut handles = Vec::new();
315 if let Some(eq_value) = &actor_handle_condition.eq {
···333 // Replace actorHandle condition with did condition
334 let did_condition = if dids.len() == 1 {
335 crate::models::WhereCondition {
336+ gt: None,
337+ gte: None,
338+ lt: None,
339+ lte: None,
340+ eq: Some(serde_json::Value::String(
341+ dids[0].clone(),
342+ )),
343 in_values: None,
344 contains: None,
345+ fuzzy: None,
346 }
347 } else {
348 crate::models::WhereCondition {
349+ gt: None,
350+ gte: None,
351+ lt: None,
352+ lte: None,
353 eq: None,
354+ in_values: Some(
355+ dids.into_iter()
356+ .map(|d| {
357+ serde_json::Value::String(d)
358+ })
359+ .collect(),
360+ ),
361 contains: None,
362+ fuzzy: None,
363 }
364 };
365+ where_clause
366+ .conditions
367+ .insert("did".to_string(), did_condition);
368 }
369 // If no DIDs found, the query will return 0 results naturally
370 }
···385 Some(&where_clause),
386 )
387 .await
388+ .map_err(|e| Error::new(format!("Database query failed: {}", e)))?;
00389390 // Query database for total count
391 let total_count = db
392 .count_slice_collections_records(&slice, Some(&where_clause))
393 .await
394+ .map_err(|e| Error::new(format!("Count query failed: {}", e)))?
395+ as i32;
0396397 // Convert records to RecordContainers
398 let record_containers: Vec<RecordContainer> = records
···567 eq: Some(serde_json::Value::String(collection.clone())),
568 in_values: None,
569 contains: None,
570+ fuzzy: None,
571 },
572 );
573···612 eq: Some(serde_json::Value::String(dids[0].clone())),
613 in_values: None,
614 contains: None,
615+ fuzzy: None,
616 }
617 } else {
618 crate::models::WhereCondition {
···623 eq: None,
624 in_values: Some(dids.into_iter().map(|d| serde_json::Value::String(d)).collect()),
625 contains: None,
626+ fuzzy: None,
627 }
628 };
629 where_clause.conditions.insert("did".to_string(), did_condition);
···688 let subscription = create_subscription_type(slice_uri.clone(), &lexicons);
689690 // Build and return the schema with complexity limits
691+ let mut schema_builder = Schema::build(
692+ query.type_name(),
693+ Some(mutation.type_name()),
694+ Some(subscription.type_name()),
695+ )
696+ .register(query)
697+ .register(mutation)
698+ .register(subscription)
699+ .limit_depth(50) // Higher limit to support GraphiQL introspection with reverse joins
700+ .limit_complexity(5000); // Prevent expensive deeply nested queries
701702 // Register JSON scalar type for complex fields
703 let json_scalar = Scalar::new("JSON");
···708 .field(InputValue::new("eq", TypeRef::named(TypeRef::STRING)))
709 .field(InputValue::new("in", TypeRef::named_list(TypeRef::STRING)))
710 .field(InputValue::new("contains", TypeRef::named(TypeRef::STRING)))
711+ .field(InputValue::new("fuzzy", TypeRef::named(TypeRef::STRING)))
712 .field(InputValue::new("gt", TypeRef::named(TypeRef::STRING)))
713 .field(InputValue::new("gte", TypeRef::named(TypeRef::STRING)))
714 .field(InputValue::new("lt", TypeRef::named(TypeRef::STRING)))
/// Container to hold blob data and DID for URL generation.
///
/// Built while resolving blob-valued lexicon fields and handed to the
/// `Blob` GraphQL object's resolvers (`ref`, `mimeType`, `size`), which
/// downcast the parent value back to this struct.
#[derive(Clone)]
struct BlobContainer {
    blob_ref: String,  // CID reference
    mime_type: String, // MIME type
    size: i64,         // Size in bytes
    did: String,       // DID for CDN URL generation
}
808809/// Creates a GraphQL Object type for a record collection
···822823 // Add standard AT Protocol fields only if they don't conflict with lexicon fields
824 if !lexicon_field_names.contains("uri") {
825+ object = object.field(Field::new(
826+ "uri",
827+ TypeRef::named_nn(TypeRef::STRING),
828+ |ctx| {
829+ FieldFuture::new(async move {
830+ let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
831+ Ok(Some(GraphQLValue::from(container.record.uri.clone())))
832+ })
833+ },
834+ ));
835 }
836837 if !lexicon_field_names.contains("cid") {
838+ object = object.field(Field::new(
839+ "cid",
840+ TypeRef::named_nn(TypeRef::STRING),
841+ |ctx| {
842+ FieldFuture::new(async move {
843+ let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
844+ Ok(Some(GraphQLValue::from(container.record.cid.clone())))
845+ })
846+ },
847+ ));
848 }
849850 if !lexicon_field_names.contains("did") {
851+ object = object.field(Field::new(
852+ "did",
853+ TypeRef::named_nn(TypeRef::STRING),
854+ |ctx| {
855+ FieldFuture::new(async move {
856+ let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
857+ Ok(Some(GraphQLValue::from(container.record.did.clone())))
858+ })
859+ },
860+ ));
861 }
862863 if !lexicon_field_names.contains("indexedAt") {
···883 "actorHandle",
884 TypeRef::named(TypeRef::STRING),
885 move |ctx| {
886+ let db = db_for_actor.clone();
887+ let slice = slice_for_actor.clone();
888+ FieldFuture::new(async move {
889+ let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
890+ let did = &container.record.did;
891892+ // Build where clause to find actor by DID
893+ let mut where_clause = crate::models::WhereClause {
894+ conditions: std::collections::HashMap::new(),
895+ or_conditions: None,
896+ and: None,
897+ or: None,
898+ };
899+ where_clause.conditions.insert(
900+ "did".to_string(),
901+ crate::models::WhereCondition {
902+ gt: None,
903+ gte: None,
904+ lt: None,
905+ lte: None,
906+ eq: Some(serde_json::Value::String(did.clone())),
907+ in_values: None,
908+ contains: None,
909+ fuzzy: None,
910+ },
911+ );
912913+ match db
914+ .get_slice_actors(&slice, Some(1), None, Some(&where_clause))
915+ .await
916+ {
917+ Ok((actors, _cursor)) => {
918+ if let Some(actor) = actors.first() {
919+ if let Some(handle) = &actor.handle {
920+ Ok(Some(GraphQLValue::from(handle.clone())))
921 } else {
922 Ok(None)
923 }
924+ } else {
00925 Ok(None)
926 }
927 }
928+ Err(e) => {
929+ tracing::debug!("Actor not found for {}: {}", did, e);
930+ Ok(None)
931+ }
932+ }
933+ })
934+ },
935+ ));
936937 // Add fields from lexicon
938 for field in fields {
···980 .unwrap_or("image/jpeg")
981 .to_string();
982983+ let size =
984+ obj.get("size").and_then(|s| s.as_i64()).unwrap_or(0);
00985986 let blob_container = BlobContainer {
987 blob_ref,
···1020 .unwrap_or("image/jpeg")
1021 .to_string();
10221023+ let size = obj.get("size").and_then(|s| s.as_i64()).unwrap_or(0);
00010241025 let blob_container = BlobContainer {
1026 blob_ref,
···1045 match db.get_record(&uri).await {
1046 Ok(Some(linked_record)) => {
1047 // Convert the linked record to a JSON value
1048+ let record_json =
1049+ serde_json::to_value(linked_record).map_err(|e| {
1050 Error::new(format!("Serialization error: {}", e))
1051 })?;
1052···10861087 // Collect all string fields with format "at-uri" that might reference this collection
1088 // We'll check each one at runtime to see if it contains a URI to this collection
1089+ let uri_ref_fields: Vec<_> = fields
1090+ .iter()
1091 .filter(|f| matches!(f.format.as_deref(), Some("at-uri")))
1092 .collect();
1093···10971098 // If we found at-uri fields, create a resolver that checks each one at runtime
1099 if !uri_ref_fields.is_empty() {
1100+ let ref_field_names: Vec<String> =
1101+ uri_ref_fields.iter().map(|f| f.name.clone()).collect();
1102 let db_for_uri_join = database.clone();
1103 let target_collection = collection_nsid.clone();
1104···1122 match db.get_record(uri).await {
1123 Ok(Some(record)) => {
1124 let new_container = RecordContainer { record };
1125+ return Ok(Some(FieldValue::owned_any(
1126+ new_container,
1127+ )));
1128 }
1129 Ok(None) => continue, // Try next field
1130+ Err(_) => continue, // Try next field
1131 }
1132 }
1133 }
···1152 let db = db_for_join.clone();
1153 let nsid = collection_nsid.clone();
1154 FieldFuture::new(async move {
1155+ let container =
1156+ ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
1157 let uri = format!("at://{}/{}/self", container.record.did, nsid);
11581159 match db.get_record(&uri).await {
1160 Ok(Some(record)) => {
1161+ let new_container = RecordContainer { record };
001162 Ok(Some(FieldValue::owned_any(new_container)))
1163 }
1164 Ok(None) => Ok(None),
···1247 eq: Some(serde_json::Value::String(nsid.clone())),
1248 in_values: None,
1249 contains: None,
1250+ fuzzy: None,
1251 },
1252 );
1253 where_clause.conditions.insert(
···1260 eq: Some(serde_json::Value::String(did.clone())),
1261 in_values: None,
1262 contains: None,
1263+ fuzzy: None,
1264 },
1265 );
1266···1424 let collection_for_count = collection.nsid.clone();
1425 let at_uri_fields_for_count = collection.at_uri_fields.clone();
14261427+ object = object.field(Field::new(
1428+ &count_field_name,
1429+ TypeRef::named_nn(TypeRef::INT),
1430+ move |ctx| {
1431+ let slice = slice_for_count.clone();
1432+ let nsid = collection_for_count.clone();
1433+ let db = db_for_count.clone();
1434+ let ref_fields = at_uri_fields_for_count.clone();
1435+ FieldFuture::new(async move {
1436+ let container = ctx.parent_value.try_downcast_ref::<RecordContainer>()?;
1437+ let parent_uri = &container.record.uri;
014381439+ // Build where clause to count records referencing this URI
1440+ for ref_field in &ref_fields {
1441+ let mut where_clause = crate::models::WhereClause {
1442+ conditions: HashMap::new(),
1443+ or_conditions: None,
1444+ and: None,
1445+ or: None,
1446+ };
14471448+ where_clause.conditions.insert(
1449+ "collection".to_string(),
1450+ crate::models::WhereCondition {
1451+ gt: None,
1452+ gte: None,
1453+ lt: None,
1454+ lte: None,
1455+ eq: Some(serde_json::Value::String(nsid.clone())),
1456+ in_values: None,
1457+ contains: None,
1458+ fuzzy: None,
1459+ },
1460+ );
14611462+ where_clause.conditions.insert(
1463+ ref_field.clone(),
1464+ crate::models::WhereCondition {
1465+ gt: None,
1466+ gte: None,
1467+ lt: None,
1468+ lte: None,
1469+ eq: Some(serde_json::Value::String(parent_uri.clone())),
1470+ in_values: None,
1471+ contains: None,
1472+ fuzzy: None,
1473+ },
1474+ );
14751476+ match db
1477+ .count_slice_collections_records(&slice, Some(&where_clause))
1478+ .await
1479+ {
1480+ Ok(count) if count > 0 => {
1481+ return Ok(Some(FieldValue::value(count as i32)));
1482+ }
1483+ Ok(_) => continue,
1484+ Err(e) => {
1485+ tracing::debug!("Count error for {}: {}", nsid, e);
1486+ continue;
1487 }
1488 }
1489+ }
14901491+ // No matching field found, return 0
1492+ Ok(Some(FieldValue::value(0)))
1493+ })
1494+ },
1495+ ));
01496 }
14971498 object
···1577 // For arrays of primitives, use typed arrays
1578 // For arrays of complex types, use JSON scalar
1579 match inner.as_ref() {
1580+ GraphQLType::String
1581+ | GraphQLType::Int
1582+ | GraphQLType::Boolean
1583+ | GraphQLType::Float => {
1584 let inner_ref = match inner.as_ref() {
1585 GraphQLType::String => TypeRef::STRING,
1586 GraphQLType::Int => TypeRef::INT,
···1621 let mut blob = Object::new("Blob");
16221623 // ref field - CID reference
1624+ blob = blob.field(Field::new(
1625+ "ref",
1626+ TypeRef::named_nn(TypeRef::STRING),
1627+ |ctx| {
1628+ FieldFuture::new(async move {
1629+ let container = ctx.parent_value.try_downcast_ref::<BlobContainer>()?;
1630+ Ok(Some(GraphQLValue::from(container.blob_ref.clone())))
1631+ })
1632+ },
1633+ ));
16341635 // mimeType field
1636+ blob = blob.field(Field::new(
1637+ "mimeType",
1638+ TypeRef::named_nn(TypeRef::STRING),
1639+ |ctx| {
1640+ FieldFuture::new(async move {
1641+ let container = ctx.parent_value.try_downcast_ref::<BlobContainer>()?;
1642+ Ok(Some(GraphQLValue::from(container.mime_type.clone())))
1643+ })
1644+ },
1645+ ));
16461647 // size field
1648 blob = blob.field(Field::new("size", TypeRef::named_nn(TypeRef::INT), |ctx| {
···1691fn create_sync_result_type() -> Object {
1692 let mut sync_result = Object::new("SyncResult");
16931694+ sync_result = sync_result.field(Field::new(
1695+ "success",
1696+ TypeRef::named_nn(TypeRef::BOOLEAN),
1697+ |ctx| {
1698+ FieldFuture::new(async move {
1699+ let value = ctx
1700+ .parent_value
1701+ .downcast_ref::<GraphQLValue>()
1702+ .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1703+ if let GraphQLValue::Object(obj) = value {
1704+ if let Some(success) = obj.get("success") {
1705+ return Ok(Some(success.clone()));
1706+ }
1707 }
1708+ Ok(None)
1709+ })
1710+ },
1711+ ));
17121713+ sync_result = sync_result.field(Field::new(
1714+ "reposProcessed",
1715+ TypeRef::named_nn(TypeRef::INT),
1716+ |ctx| {
1717+ FieldFuture::new(async move {
1718+ let value = ctx
1719+ .parent_value
1720+ .downcast_ref::<GraphQLValue>()
1721+ .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1722+ if let GraphQLValue::Object(obj) = value {
1723+ if let Some(repos) = obj.get("reposProcessed") {
1724+ return Ok(Some(repos.clone()));
1725+ }
1726 }
1727+ Ok(None)
1728+ })
1729+ },
1730+ ));
17311732+ sync_result = sync_result.field(Field::new(
1733+ "recordsSynced",
1734+ TypeRef::named_nn(TypeRef::INT),
1735+ |ctx| {
1736+ FieldFuture::new(async move {
1737+ let value = ctx
1738+ .parent_value
1739+ .downcast_ref::<GraphQLValue>()
1740+ .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1741+ if let GraphQLValue::Object(obj) = value {
1742+ if let Some(records) = obj.get("recordsSynced") {
1743+ return Ok(Some(records.clone()));
1744+ }
1745 }
1746+ Ok(None)
1747+ })
1748+ },
1749+ ));
17501751+ sync_result = sync_result.field(Field::new(
1752+ "timedOut",
1753+ TypeRef::named_nn(TypeRef::BOOLEAN),
1754+ |ctx| {
1755+ FieldFuture::new(async move {
1756+ let value = ctx
1757+ .parent_value
1758+ .downcast_ref::<GraphQLValue>()
1759+ .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1760+ if let GraphQLValue::Object(obj) = value {
1761+ if let Some(timed_out) = obj.get("timedOut") {
1762+ return Ok(Some(timed_out.clone()));
1763+ }
1764 }
1765+ Ok(None)
1766+ })
1767+ },
1768+ ));
17691770+ sync_result = sync_result.field(Field::new(
1771+ "message",
1772+ TypeRef::named_nn(TypeRef::STRING),
1773+ |ctx| {
1774+ FieldFuture::new(async move {
1775+ let value = ctx
1776+ .parent_value
1777+ .downcast_ref::<GraphQLValue>()
1778+ .ok_or_else(|| Error::new("Failed to downcast sync result"))?;
1779+ if let GraphQLValue::Object(obj) = value {
1780+ if let Some(message) = obj.get("message") {
1781+ return Ok(Some(message.clone()));
1782+ }
1783 }
1784+ Ok(None)
1785+ })
1786+ },
1787+ ));
17881789 sync_result
1790}
···1812 .field(InputValue::new("eq", TypeRef::named(TypeRef::STRING)))
1813 .field(InputValue::new("in", TypeRef::named_list(TypeRef::STRING)))
1814 .field(InputValue::new("contains", TypeRef::named(TypeRef::STRING)))
1815+ .field(InputValue::new("fuzzy", TypeRef::named(TypeRef::STRING)))
1816}
18171818/// Creates the IntCondition input type for int field filtering
···1826fn create_page_info_type() -> Object {
1827 let mut page_info = Object::new("PageInfo");
18281829+ page_info = page_info.field(Field::new(
1830+ "hasNextPage",
1831+ TypeRef::named_nn(TypeRef::BOOLEAN),
1832+ |ctx| {
1833+ FieldFuture::new(async move {
1834+ let value = ctx
1835+ .parent_value
1836+ .downcast_ref::<GraphQLValue>()
1837+ .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1838+ if let GraphQLValue::Object(obj) = value {
1839+ if let Some(has_next) = obj.get("hasNextPage") {
1840+ return Ok(Some(has_next.clone()));
1841+ }
1842 }
1843+ Ok(Some(GraphQLValue::from(false)))
1844+ })
1845+ },
1846+ ));
18471848+ page_info = page_info.field(Field::new(
1849+ "hasPreviousPage",
1850+ TypeRef::named_nn(TypeRef::BOOLEAN),
1851+ |ctx| {
1852+ FieldFuture::new(async move {
1853+ let value = ctx
1854+ .parent_value
1855+ .downcast_ref::<GraphQLValue>()
1856+ .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1857+ if let GraphQLValue::Object(obj) = value {
1858+ if let Some(has_prev) = obj.get("hasPreviousPage") {
1859+ return Ok(Some(has_prev.clone()));
1860+ }
1861 }
1862+ Ok(Some(GraphQLValue::from(false)))
1863+ })
1864+ },
1865+ ));
18661867+ page_info = page_info.field(Field::new(
1868+ "startCursor",
1869+ TypeRef::named(TypeRef::STRING),
1870+ |ctx| {
1871+ FieldFuture::new(async move {
1872+ let value = ctx
1873+ .parent_value
1874+ .downcast_ref::<GraphQLValue>()
1875+ .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1876+ if let GraphQLValue::Object(obj) = value {
1877+ if let Some(cursor) = obj.get("startCursor") {
1878+ return Ok(Some(cursor.clone()));
1879+ }
1880 }
1881+ Ok(None)
1882+ })
1883+ },
1884+ ));
18851886+ page_info = page_info.field(Field::new(
1887+ "endCursor",
1888+ TypeRef::named(TypeRef::STRING),
1889+ |ctx| {
1890+ FieldFuture::new(async move {
1891+ let value = ctx
1892+ .parent_value
1893+ .downcast_ref::<GraphQLValue>()
1894+ .ok_or_else(|| Error::new("Failed to downcast PageInfo"))?;
1895+ if let GraphQLValue::Object(obj) = value {
1896+ if let Some(cursor) = obj.get("endCursor") {
1897+ return Ok(Some(cursor.clone()));
1898+ }
1899 }
1900+ Ok(None)
1901+ })
1902+ },
1903+ ));
19041905 page_info
1906}
···1937 }));
19381939 // Add cursor field
1940+ edge = edge.field(Field::new(
1941+ "cursor",
1942+ TypeRef::named_nn(TypeRef::STRING),
1943+ |ctx| {
1944+ FieldFuture::new(async move {
1945+ let edge_data = ctx.parent_value.try_downcast_ref::<EdgeData>()?;
1946+ Ok(Some(GraphQLValue::from(edge_data.cursor.clone())))
1947+ })
1948+ },
1949+ ));
19501951 edge
1952}
···1958 let mut connection = Object::new(&connection_name);
19591960 // Add totalCount field
1961+ connection = connection.field(Field::new(
1962+ "totalCount",
1963+ TypeRef::named_nn(TypeRef::INT),
1964+ |ctx| {
1965+ FieldFuture::new(async move {
1966+ let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
1967+ Ok(Some(GraphQLValue::from(data.total_count)))
1968+ })
1969+ },
1970+ ));
19711972 // Add pageInfo field
1973+ connection = connection.field(Field::new(
1974+ "pageInfo",
1975+ TypeRef::named_nn("PageInfo"),
1976+ |ctx| {
1977+ FieldFuture::new(async move {
1978+ let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
19791980+ let mut page_info = async_graphql::indexmap::IndexMap::new();
1981+ page_info.insert(
1982+ async_graphql::Name::new("hasNextPage"),
1983+ GraphQLValue::from(data.has_next_page),
1984+ );
1985+ // For forward pagination only, hasPreviousPage is always false
1986+ page_info.insert(
1987+ async_graphql::Name::new("hasPreviousPage"),
1988+ GraphQLValue::from(false),
1989+ );
19901991+ // Add startCursor (first node's cid if available)
1992+ if !data.nodes.is_empty() {
1993+ if let Some(first_record) = data.nodes.first() {
1994+ let start_cursor = general_purpose::URL_SAFE_NO_PAD
1995+ .encode(first_record.record.cid.clone());
1996+ page_info.insert(
1997+ async_graphql::Name::new("startCursor"),
1998+ GraphQLValue::from(start_cursor),
1999+ );
2000+ }
2001+ }
2002+2003+ // Add endCursor
2004+ if let Some(ref cursor) = data.end_cursor {
2005 page_info.insert(
2006+ async_graphql::Name::new("endCursor"),
2007+ GraphQLValue::from(cursor.clone()),
2008 );
2009 }
020102011+ Ok(Some(FieldValue::owned_any(GraphQLValue::Object(page_info))))
2012+ })
2013+ },
2014+ ));
000000020152016 // Add edges field (Relay standard)
2017 let edge_type = format!("{}Edge", record_type_name);
2018+ connection = connection.field(Field::new(
2019+ "edges",
2020+ TypeRef::named_nn_list_nn(&edge_type),
2021+ |ctx| {
2022+ FieldFuture::new(async move {
2023+ let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
20242025+ let field_values: Vec<FieldValue<'_>> = data
2026+ .nodes
2027+ .iter()
2028+ .map(|node| {
2029+ // Use base64-encoded CID as cursor
2030+ let cursor =
2031+ general_purpose::URL_SAFE_NO_PAD.encode(node.record.cid.clone());
2032+ let edge = EdgeData {
2033+ node: node.clone(),
2034+ cursor,
2035+ };
2036+ FieldValue::owned_any(edge)
2037+ })
2038+ .collect();
20392040+ Ok(Some(FieldValue::list(field_values)))
2041+ })
2042+ },
2043+ ));
20442045 // Add nodes field (convenience, direct access to records without edges wrapper)
2046+ connection = connection.field(Field::new(
2047+ "nodes",
2048+ TypeRef::named_nn_list_nn(record_type_name),
2049+ |ctx| {
2050+ FieldFuture::new(async move {
2051+ let data = ctx.parent_value.try_downcast_ref::<ConnectionData>()?;
20522053+ let field_values: Vec<FieldValue<'_>> = data
2054+ .nodes
2055+ .iter()
2056+ .map(|node| FieldValue::owned_any(node.clone()))
2057+ .collect();
20582059+ Ok(Some(FieldValue::list(field_values)))
2060+ })
2061+ },
2062+ ));
20632064 connection
2065}
···2081 let slice = slice_clone.clone();
20822083 FieldFuture::new(async move {
2084+ let did = ctx
2085+ .args
2086+ .get("did")
2087 .and_then(|v| v.string().ok())
2088 .ok_or_else(|| Error::new("did argument is required"))?;
20892090 // Create sync service and call sync_user_collections
2091 let cache_backend = crate::cache::CacheFactory::create_cache(
2092+ crate::cache::CacheBackend::InMemory { ttl_seconds: None },
2093+ )
2094+ .await
2095+ .map_err(|e| Error::new(format!("Failed to create cache: {}", e)))?;
2096 let cache = Arc::new(Mutex::new(crate::cache::SliceCache::new(cache_backend)));
2097 let sync_service = crate::sync::SyncService::with_cache(
2098 db.clone(),
···21082109 // Convert result to GraphQL object
2110 let mut obj = async_graphql::indexmap::IndexMap::new();
2111+ obj.insert(
2112+ async_graphql::Name::new("success"),
2113+ GraphQLValue::from(result.success),
2114+ );
2115+ obj.insert(
2116+ async_graphql::Name::new("reposProcessed"),
2117+ GraphQLValue::from(result.repos_processed),
2118+ );
2119+ obj.insert(
2120+ async_graphql::Name::new("recordsSynced"),
2121+ GraphQLValue::from(result.records_synced),
2122+ );
2123+ obj.insert(
2124+ async_graphql::Name::new("timedOut"),
2125+ GraphQLValue::from(result.timed_out),
2126+ );
2127+ obj.insert(
2128+ async_graphql::Name::new("message"),
2129+ GraphQLValue::from(result.message),
2130+ );
21312132 Ok(Some(FieldValue::owned_any(GraphQLValue::Object(obj))))
2133 })
···2137 "did",
2138 TypeRef::named_nn(TypeRef::STRING),
2139 ))
2140+ .description("Sync user collections for a given DID"),
2141 );
21422143 mutation
···2166 let camel_case = nsid_to_join_field_name(nsid);
21672168 // Then pluralize the end
2169+ if camel_case.ends_with("s")
2170+ || camel_case.ends_with("x")
2171+ || camel_case.ends_with("ch")
2172+ || camel_case.ends_with("sh")
2173+ {
2174 format!("{}es", camel_case) // status -> statuses, box -> boxes
2175 } else if camel_case.ends_with("y") && camel_case.len() > 1 {
2176 let chars: Vec<char> = camel_case.chars().collect();
···2215 for field in fields {
2216 let field_name = field.name.clone();
2217 let field_name_clone = field_name.clone();
2218+ aggregated = aggregated.field(Field::new(
2219+ &field_name,
2220+ TypeRef::named("JSON"),
2221+ move |ctx| {
2222+ let field_name = field_name_clone.clone();
2223+ FieldFuture::new(async move {
2224+ let json_value = ctx.parent_value.try_downcast_ref::<serde_json::Value>()?;
2225+ if let Some(obj) = json_value.as_object() {
2226+ if let Some(value) = obj.get(&field_name) {
2227+ // Convert serde_json::Value to async_graphql::Value
2228+ let graphql_value = serde_json_to_graphql_value(value);
2229+ return Ok(Some(graphql_value));
2230+ }
2231+ }
2232+ Ok(None)
2233+ })
2234+ },
2235+ ));
2236+ }
2237+2238+ // Add count field
2239+ aggregated = aggregated.field(Field::new(
2240+ "count",
2241+ TypeRef::named_nn(TypeRef::INT),
2242+ |ctx| {
2243 FieldFuture::new(async move {
2244 let json_value = ctx.parent_value.try_downcast_ref::<serde_json::Value>()?;
2245 if let Some(obj) = json_value.as_object() {
2246+ if let Some(count) = obj.get("count") {
2247+ if let Some(count_i64) = count.as_i64() {
2248+ return Ok(Some(GraphQLValue::from(count_i64 as i32)));
2249+ }
2250 }
2251 }
2252+ Ok(Some(GraphQLValue::from(0)))
2253 })
2254+ },
2255+ ));
00000000000000022562257 aggregated
2258}
···2309fn create_record_update_type() -> Object {
2310 let mut record_update = Object::new("RecordUpdate");
23112312+ record_update = record_update.field(Field::new(
2313+ "uri",
2314+ TypeRef::named_nn(TypeRef::STRING),
2315+ |ctx| {
2316+ FieldFuture::new(async move {
2317+ let value = ctx
2318+ .parent_value
2319+ .downcast_ref::<GraphQLValue>()
2320+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2321+ if let GraphQLValue::Object(obj) = value {
2322+ if let Some(uri) = obj.get("uri") {
2323+ return Ok(Some(uri.clone()));
2324+ }
2325 }
2326+ Ok(None)
2327+ })
2328+ },
2329+ ));
23302331+ record_update = record_update.field(Field::new(
2332+ "cid",
2333+ TypeRef::named_nn(TypeRef::STRING),
2334+ |ctx| {
2335+ FieldFuture::new(async move {
2336+ let value = ctx
2337+ .parent_value
2338+ .downcast_ref::<GraphQLValue>()
2339+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2340+ if let GraphQLValue::Object(obj) = value {
2341+ if let Some(cid) = obj.get("cid") {
2342+ return Ok(Some(cid.clone()));
2343+ }
2344 }
2345+ Ok(None)
2346+ })
2347+ },
2348+ ));
23492350+ record_update = record_update.field(Field::new(
2351+ "did",
2352+ TypeRef::named_nn(TypeRef::STRING),
2353+ |ctx| {
2354+ FieldFuture::new(async move {
2355+ let value = ctx
2356+ .parent_value
2357+ .downcast_ref::<GraphQLValue>()
2358+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2359+ if let GraphQLValue::Object(obj) = value {
2360+ if let Some(did) = obj.get("did") {
2361+ return Ok(Some(did.clone()));
2362+ }
2363 }
2364+ Ok(None)
2365+ })
2366+ },
2367+ ));
23682369+ record_update = record_update.field(Field::new(
2370+ "collection",
2371+ TypeRef::named_nn(TypeRef::STRING),
2372+ |ctx| {
2373+ FieldFuture::new(async move {
2374+ let value = ctx
2375+ .parent_value
2376+ .downcast_ref::<GraphQLValue>()
2377+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2378+ if let GraphQLValue::Object(obj) = value {
2379+ if let Some(collection) = obj.get("collection") {
2380+ return Ok(Some(collection.clone()));
2381+ }
2382 }
2383+ Ok(None)
2384+ })
2385+ },
2386+ ));
23872388+ record_update = record_update.field(Field::new(
2389+ "indexedAt",
2390+ TypeRef::named_nn(TypeRef::STRING),
2391+ |ctx| {
2392+ FieldFuture::new(async move {
2393+ let value = ctx
2394+ .parent_value
2395+ .downcast_ref::<GraphQLValue>()
2396+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2397+ if let GraphQLValue::Object(obj) = value {
2398+ if let Some(indexed_at) = obj.get("indexedAt") {
2399+ return Ok(Some(indexed_at.clone()));
2400+ }
2401 }
2402+ Ok(None)
2403+ })
2404+ },
2405+ ));
24062407+ record_update = record_update.field(Field::new(
2408+ "operation",
2409+ TypeRef::named_nn(TypeRef::STRING),
2410+ |ctx| {
2411+ FieldFuture::new(async move {
2412+ let value = ctx
2413+ .parent_value
2414+ .downcast_ref::<GraphQLValue>()
2415+ .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2416+ if let GraphQLValue::Object(obj) = value {
2417+ if let Some(operation) = obj.get("operation") {
2418+ return Ok(Some(operation.clone()));
2419+ }
2420 }
2421+ Ok(None)
2422+ })
2423+ },
2424+ ));
24252426 record_update = record_update.field(Field::new("value", TypeRef::named_nn("JSON"), |ctx| {
2427 FieldFuture::new(async move {
2428+ let value = ctx
2429+ .parent_value
2430+ .downcast_ref::<GraphQLValue>()
2431 .ok_or_else(|| Error::new("Failed to downcast RecordUpdate"))?;
2432 if let GraphQLValue::Object(obj) = value {
2433 if let Some(val) = obj.get("value") {
···2604}
26052606/// Helper function to parse GraphQL where clause recursively
2607+fn parse_where_clause(
2608+ where_obj: async_graphql::dynamic::ObjectAccessor,
2609+) -> crate::models::WhereClause {
2610 let mut where_clause = crate::models::WhereClause {
2611 conditions: HashMap::new(),
2612 or_conditions: None,
···2655 eq: None,
2656 in_values: None,
2657 contains: None,
2658+ fuzzy: None,
2659 gt: None,
2660 gte: None,
2661 lt: None,
···2693 }
2694 }
26952696+ // Parse fuzzy condition
2697+ if let Some(fuzzy_val) = condition_obj.get("fuzzy") {
2698+ if let Ok(fuzzy_str) = fuzzy_val.string() {
2699+ where_condition.fuzzy = Some(fuzzy_str.to_string());
2700+ }
2701+ }
2702+2703 // Parse gt condition
2704 if let Some(gt_val) = condition_obj.get("gt") {
2705 if let Ok(gt_str) = gt_val.string() {
···2743 field_str.to_string()
2744 };
27452746+ where_clause
2747+ .conditions
2748+ .insert(db_field_name, where_condition);
2749 }
2750 }
2751
···14use crate::cache::{CacheBackend, CacheFactory, SliceCache};
15use crate::database::Database;
16use crate::errors::JetstreamError;
17-use crate::graphql::{RecordOperation, RecordUpdateEvent, PUBSUB};
18use crate::jetstream_cursor::PostgresCursorHandler;
19use crate::logging::{LogLevel, Logger};
20use crate::models::{Actor, Record};
···324325 // Check if this is a primary collection (starts with slice domain)
326 // Lexicon records for this slice are always treated as primary
327- let is_primary_collection = commit.collection.starts_with(&domain) || is_lexicon_for_this_slice;
0328329 // For external collections, check actor status BEFORE expensive validation
330 if !is_primary_collection {
···428 };
429430 // Insert into database
431- if let Err(e) = self.database.batch_insert_actors(&[actor]).await {
00432 error!("Failed to create actor {}: {}", did, e);
433 } else {
434 // Add to cache after successful database insert
···14use crate::cache::{CacheBackend, CacheFactory, SliceCache};
15use crate::database::Database;
16use crate::errors::JetstreamError;
17+use crate::graphql::{PUBSUB, RecordOperation, RecordUpdateEvent};
18use crate::jetstream_cursor::PostgresCursorHandler;
19use crate::logging::{LogLevel, Logger};
20use crate::models::{Actor, Record};
···324325 // Check if this is a primary collection (starts with slice domain)
326 // Lexicon records for this slice are always treated as primary
327+ let is_primary_collection =
328+ commit.collection.starts_with(&domain) || is_lexicon_for_this_slice;
329330 // For external collections, check actor status BEFORE expensive validation
331 if !is_primary_collection {
···429 };
430431 // Insert into database
432+ if let Err(e) =
433+ self.database.batch_insert_actors(&[actor]).await
434+ {
435 error!("Failed to create actor {}: {}", did, e);
436 } else {
437 // Add to cache after successful database insert
···253 // First, get all repos from primary collections
254 let mut primary_repos = std::collections::HashSet::new();
255 for collection in &primary_collections {
256- match self.get_repos_for_collection(collection, slice_uri, max_repos).await {
000257 Ok(repos) => {
258 info!(
259 "Found {} repositories for primary collection \"{}\"",
···465 match database.batch_insert_records(&batch).await {
466 Ok(_) => {
467 write_count += batch_size;
468- info!("Database writer: Inserted batch of {} records (total: {})", batch_size, write_count);
000469 }
470 Err(e) => {
471 error!("Database writer: Failed to insert batch: {}", e);
···611612 // Send batch to writer when buffer is full
613 if batch_buffer.len() >= BATCH_SIZE {
614- let batch_to_send = std::mem::replace(&mut batch_buffer, Vec::with_capacity(BATCH_SIZE));
0615 let batch_count = batch_to_send.len() as i64;
616- info!("Sending batch of {} records to database writer", batch_count);
000617618 // Send to writer channel (non-blocking)
619 if let Err(e) = tx.send(batch_to_send).await {
620 error!("Failed to send batch to writer: {}", e);
621- return Err(SyncError::Generic(format!("Failed to send batch to writer: {}", e)));
000622 }
623624 let mut total = total_indexed_records.lock().await;
···629 // Flush any remaining records in the buffer
630 if !batch_buffer.is_empty() {
631 let batch_count = batch_buffer.len() as i64;
632- info!("Sending final batch of {} records to database writer", batch_count);
000633634 if let Err(e) = tx.send(batch_buffer).await {
635 error!("Failed to send final batch to writer: {}", e);
636- return Err(SyncError::Generic(format!("Failed to send final batch to writer: {}", e)));
000637 }
638639 let mut total = total_indexed_records.lock().await;
···642643 // Close the channel and wait for writer to finish
644 drop(tx);
645- let write_result = writer_task.await
0646 .map_err(|e| SyncError::Generic(format!("Writer task panicked: {}", e)))?;
647648 let final_count = match write_result {
···655 successful_tasks, failed_tasks
656 );
657658- info!(
659- "Indexed {} new/changed records in batches",
660- final_count
661- );
662663 info!("Backfill complete!");
664···699 if page_count > max_pages {
700 warn!(
701 "Reached maximum page limit ({}) for collection {} (based on repo limit {:?}, estimated max {} repos at {} per page)",
702- max_pages, collection, max_repos, max_pages * REPOS_PER_PAGE, REPOS_PER_PAGE
0000703 );
704 break;
705 }
···979 const CHUNK_SIZE: usize = 50; // Process DIDs in chunks
980 const MAX_CONCURRENT: usize = 10; // Limit concurrent resolutions
981982- info!("Resolving ATP data for {} repositories in chunks", repos.len());
000983984 for (chunk_idx, chunk) in repos.chunks(CHUNK_SIZE).enumerate() {
985 let chunk_start = chunk_idx * CHUNK_SIZE;
···1031 }
1032 }
10331034- info!("Successfully resolved ATP data for {}/{} repositories", atp_map.len(), repos.len());
00001035 Ok(atp_map)
1036 }
1037···1193 Some(&external_collections),
1194 Some(&[user_did.to_string()]), // Only sync this user's repos
1195 false, // Always validate user collections
1196- None, // No limit for user-specific sync
1197 )
1198 .await
1199 };
···253 // First, get all repos from primary collections
254 let mut primary_repos = std::collections::HashSet::new();
255 for collection in &primary_collections {
256+ match self
257+ .get_repos_for_collection(collection, slice_uri, max_repos)
258+ .await
259+ {
260 Ok(repos) => {
261 info!(
262 "Found {} repositories for primary collection \"{}\"",
···468 match database.batch_insert_records(&batch).await {
469 Ok(_) => {
470 write_count += batch_size;
471+ info!(
472+ "Database writer: Inserted batch of {} records (total: {})",
473+ batch_size, write_count
474+ );
475 }
476 Err(e) => {
477 error!("Database writer: Failed to insert batch: {}", e);
···617618 // Send batch to writer when buffer is full
619 if batch_buffer.len() >= BATCH_SIZE {
620+ let batch_to_send =
621+ std::mem::replace(&mut batch_buffer, Vec::with_capacity(BATCH_SIZE));
622 let batch_count = batch_to_send.len() as i64;
623+ info!(
624+ "Sending batch of {} records to database writer",
625+ batch_count
626+ );
627628 // Send to writer channel (non-blocking)
629 if let Err(e) = tx.send(batch_to_send).await {
630 error!("Failed to send batch to writer: {}", e);
631+ return Err(SyncError::Generic(format!(
632+ "Failed to send batch to writer: {}",
633+ e
634+ )));
635 }
636637 let mut total = total_indexed_records.lock().await;
···642 // Flush any remaining records in the buffer
643 if !batch_buffer.is_empty() {
644 let batch_count = batch_buffer.len() as i64;
645+ info!(
646+ "Sending final batch of {} records to database writer",
647+ batch_count
648+ );
649650 if let Err(e) = tx.send(batch_buffer).await {
651 error!("Failed to send final batch to writer: {}", e);
652+ return Err(SyncError::Generic(format!(
653+ "Failed to send final batch to writer: {}",
654+ e
655+ )));
656 }
657658 let mut total = total_indexed_records.lock().await;
···661662 // Close the channel and wait for writer to finish
663 drop(tx);
664+ let write_result = writer_task
665+ .await
666 .map_err(|e| SyncError::Generic(format!("Writer task panicked: {}", e)))?;
667668 let final_count = match write_result {
···675 successful_tasks, failed_tasks
676 );
677678+ info!("Indexed {} new/changed records in batches", final_count);
000679680 info!("Backfill complete!");
681···716 if page_count > max_pages {
717 warn!(
718 "Reached maximum page limit ({}) for collection {} (based on repo limit {:?}, estimated max {} repos at {} per page)",
719+ max_pages,
720+ collection,
721+ max_repos,
722+ max_pages * REPOS_PER_PAGE,
723+ REPOS_PER_PAGE
724 );
725 break;
726 }
···1000 const CHUNK_SIZE: usize = 50; // Process DIDs in chunks
1001 const MAX_CONCURRENT: usize = 10; // Limit concurrent resolutions
10021003+ info!(
1004+ "Resolving ATP data for {} repositories in chunks",
1005+ repos.len()
1006+ );
10071008 for (chunk_idx, chunk) in repos.chunks(CHUNK_SIZE).enumerate() {
1009 let chunk_start = chunk_idx * CHUNK_SIZE;
···1055 }
1056 }
10571058+ info!(
1059+ "Successfully resolved ATP data for {}/{} repositories",
1060+ atp_map.len(),
1061+ repos.len()
1062+ );
1063 Ok(atp_map)
1064 }
1065···1221 Some(&external_collections),
1222 Some(&[user_did.to_string()]), // Only sync this user's repos
1223 false, // Always validate user collections
1224+ None, // No limit for user-specific sync
1225 )
1226 .await
1227 };
···1use crate::{AppState, auth, errors::AppError, sync::SyncService};
2-use axum::{extract::{Query, State}, http::HeaderMap, response::Json};
00003use serde::{Deserialize, Serialize};
4use std::collections::HashMap;
5···144145 // First, get repos ONLY from primary collections
146 for collection in &primary_collections {
147- match sync_service.get_repos_for_collection(collection, slice_uri, Some(applied_limit)).await {
000148 Ok(repos) => {
149 counts.insert(collection.clone(), repos.len() as i64);
150 discovered_repos.extend(repos);
···1use crate::{AppState, auth, errors::AppError, sync::SyncService};
2+use axum::{
3+ extract::{Query, State},
4+ http::HeaderMap,
5+ response::Json,
6+};
7use serde::{Deserialize, Serialize};
8use std::collections::HashMap;
9···148149 // First, get repos ONLY from primary collections
150 for collection in &primary_collections {
151+ match sync_service
152+ .get_repos_for_collection(collection, slice_uri, Some(applied_limit))
153+ .await
154+ {
155 Ok(repos) => {
156 counts.insert(collection.clone(), repos.len() as i64);
157 discovered_repos.extend(repos);
+37
docs/graphql-api.md
···79- `eq`: Exact match
80- `in`: Match any value in array
81- `contains`: Substring match (case-insensitive)
082- `gt`: Greater than (lexicographic)
83- `gte`: Greater than or equal to
84- `lt`: Less than
···100- `gte`: At or after datetime
101- `lt`: Before datetime
102- `lte`: At or before datetime
000000000000000000000000000000000000103104#### Date Range Example
105
···79- `eq`: Exact match
80- `in`: Match any value in array
81- `contains`: Substring match (case-insensitive)
82+- `fuzzy`: Fuzzy/similarity match (typo-tolerant)
83- `gt`: Greater than (lexicographic)
84- `gte`: Greater than or equal to
85- `lt`: Less than
···101- `gte`: At or after datetime
102- `lt`: Before datetime
103- `lte`: At or before datetime
104+105+#### Fuzzy Matching Example
106+107+The `fuzzy` filter uses PostgreSQL's trigram similarity for typo-tolerant search:
108+109+```graphql
110+query FuzzySearch {
111+ fmTealAlphaFeedPlays(
112+ where: {
113+ trackName: { fuzzy: "love" }
114+ }
115+ ) {
116+ edges {
117+ node {
118+ trackName
119+ artists
120+ }
121+ }
122+ }
123+}
124+```
125+126+This will match track names like:
127+- "Love" (exact)
128+- "Love Song"
129+- "Lovely"
130+- "I Love You"
131+- "Lover"
132+- "Loveless"
133+134+The fuzzy filter is great for:
135+- Handling typos and misspellings
136+- Finding similar variations of text
137+- Flexible search without exact matching
138+139+**Note**: Fuzzy matching works on the similarity between strings (using trigrams), so it's more flexible than `contains` but may return unexpected matches if the similarity threshold is met.
140141#### Date Range Example
142