-- Composite index for per-event RSVP lookups: equality filters on
-- (event_aturi, status) followed by updated_at DESC ordering.
-- Matches the window-function query that pages and counts RSVPs per
-- status, so those reads avoid full table scans.
CREATE INDEX IF NOT EXISTS idx_rsvps_event_status_updated
ON rsvps (event_aturi, status, updated_at DESC);
+59-21
src/http/handle_view_event.rs
···2222};
2323use crate::storage::event::event_exists;
2424use crate::storage::event::event_get;
2525-use crate::storage::event::get_event_rsvps_with_validation;
2625use crate::storage::event::get_going_rsvp_count;
2626+use crate::storage::event::get_grouped_event_rsvps;
2727use crate::storage::event::get_recent_going_rsvps;
2828use crate::storage::event::get_user_rsvp_status_and_validation;
2929use crate::storage::event::rsvp_get_by_event_and_did;
···660660 // Extract event details for display
661661 let details = crate::storage::event::extract_event_details(&event);
662662663663- // Fetch up to 500 recent RSVPs with handles (all statuses)
664664- let rsvps = get_event_rsvps_with_validation(&ctx.web_context.pool, &aturi, None)
665665- .await
666666- .unwrap_or_default();
663663+ // Fetch all RSVPs grouped by status in a single optimized query
664664+ // This replaces 6 separate queries (3 for data + 3 for counts) with 1 query
665665+ let grouped_data = match get_grouped_event_rsvps(&ctx.web_context.pool, &aturi).await {
666666+ Ok(data) => data,
667667+ Err(err) => {
668668+ tracing::error!("Error fetching grouped RSVPs: {:?}", err);
669669+ return contextual_error!(
670670+ ctx.web_context,
671671+ ctx.language,
672672+ error_template,
673673+ default_context,
674674+ ViewEventError::EventNotFound(err.to_string()),
675675+ StatusCode::INTERNAL_SERVER_ERROR
676676+ );
677677+ }
678678+ };
667679668668- // Extract DIDs (limited to 500)
669669- let dids: Vec<String> = rsvps.iter().take(500).map(|(did, _, _)| did.clone()).collect();
680680+ // Extract DIDs from all groups for batch lookup
681681+ let mut all_dids: Vec<String> = Vec::new();
682682+ all_dids.extend(grouped_data.going.iter().map(|(did, _, _)| did.clone()));
683683+ all_dids.extend(grouped_data.interested.iter().map(|(did, _, _)| did.clone()));
684684+ all_dids.extend(grouped_data.notgoing.iter().map(|(did, _, _)| did.clone()));
670685671686 // Perform batch lookup for handles
672672- let handle_profiles = handles_by_did(&ctx.web_context.pool, dids.clone())
687687+ let handle_profiles = handles_by_did(&ctx.web_context.pool, all_dids)
673688 .await
674689 .unwrap_or_default();
675690676676- // Create RsvpDisplay objects with handles
677677- let attendees: Vec<RsvpDisplay> = rsvps
678678- .into_iter()
679679- .filter_map(|(did, status, validated_at)| {
680680- handle_profiles.get(&did).map(|profile| RsvpDisplay {
681681- did: did.clone(),
682682- handle: profile.handle.clone(),
683683- status,
684684- verified: validated_at.is_some(),
691691+ // Helper closure to create RsvpDisplay objects
692692+ let create_attendees = |rsvps: Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>| -> Vec<RsvpDisplay> {
693693+ rsvps
694694+ .into_iter()
695695+ .filter_map(|(did, status, validated_at)| {
696696+ handle_profiles.get(&did).map(|profile| RsvpDisplay {
697697+ did: did.clone(),
698698+ handle: profile.handle.clone(),
699699+ status,
700700+ verified: validated_at.is_some(),
701701+ })
685702 })
686686- })
687687- .collect();
703703+ .collect()
704704+ };
705705+706706+ // Create separate attendee lists
707707+ let going_attendees = create_attendees(grouped_data.going);
708708+ let interested_attendees = create_attendees(grouped_data.interested);
709709+ let notgoing_attendees = create_attendees(grouped_data.notgoing);
710710+711711+ // Extract counts from grouped data
712712+ let going_total = grouped_data.going_total;
713713+ let interested_total = grouped_data.interested_total;
714714+ let notgoing_total = grouped_data.notgoing_total;
715715+716716+ // Calculate total count from database totals
717717+ let total_count = going_total + interested_total + notgoing_total;
688718689719 Ok((
690720 StatusCode::OK,
···698728 handle_slug,
699729 event_rkey,
700730 collection => collection.clone(),
701701- attendees,
702702- attendee_count => attendees.len(),
731731+ going_attendees,
732732+ interested_attendees,
733733+ notgoing_attendees,
734734+ going_count => going_total,
735735+ interested_count => interested_total,
736736+ notgoing_count => notgoing_total,
737737+ going_displayed => going_attendees.len(),
738738+ interested_displayed => interested_attendees.len(),
739739+ notgoing_displayed => notgoing_attendees.len(),
740740+ total_count,
703741 },
704742 ),
705743 )
+121
src/storage/event.rs
···980980 )))
981981}
982982983983+/// Grouped RSVP data with limited results per status and total counts
984984+pub struct GroupedRsvpData {
985985+ pub going: Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>,
986986+ pub interested: Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>,
987987+ pub notgoing: Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>,
988988+ pub going_total: u32,
989989+ pub interested_total: u32,
990990+ pub notgoing_total: u32,
991991+}
992992+993993+/// Fetch RSVPs grouped by status with limits and total counts in a single optimized query
994994+///
995995+/// This function uses a window function to efficiently:
996996+/// - Limit results per status (going: 500, interested: 200, notgoing: 100)
997997+/// - Order by updated_at DESC for each status
998998+/// - Return total counts for each status
999999+/// - Execute as a single database query with one table scan
10001000+///
10011001+/// This is significantly more efficient than making 6 separate queries (3 for data + 3 for counts)
10021002+pub async fn get_grouped_event_rsvps(
10031003+ pool: &StoragePool,
10041004+ event_aturi: &str,
10051005+) -> Result<GroupedRsvpData, StorageError> {
10061006+ // Validate event_aturi is not empty
10071007+ if event_aturi.trim().is_empty() {
10081008+ return Err(StorageError::UnableToExecuteQuery(sqlx::Error::Protocol(
10091009+ "Event URI cannot be empty".into(),
10101010+ )));
10111011+ }
10121012+10131013+ let mut tx = pool
10141014+ .begin()
10151015+ .await
10161016+ .map_err(StorageError::CannotBeginDatabaseTransaction)?;
10171017+10181018+ // Single query with window functions for efficient grouped pagination
10191019+ // ROW_NUMBER partitions by status and orders by updated_at DESC
10201020+ // COUNT(*) OVER gives us the total count per status
10211021+ let rows = sqlx::query_as::<_, (String, String, Option<chrono::DateTime<chrono::Utc>>, i64)>(
10221022+ r#"
10231023+ WITH ranked_rsvps AS (
10241024+ SELECT
10251025+ did,
10261026+ status,
10271027+ validated_at,
10281028+ ROW_NUMBER() OVER (PARTITION BY status ORDER BY updated_at DESC) as rn,
10291029+ COUNT(*) OVER (PARTITION BY status) as status_count
10301030+ FROM rsvps
10311031+ WHERE event_aturi = $1
10321032+ AND status IN ('going', 'interested', 'notgoing')
10331033+ )
10341034+ SELECT
10351035+ did,
10361036+ status,
10371037+ validated_at,
10381038+ status_count
10391039+ FROM ranked_rsvps
10401040+ WHERE (status = 'going' AND rn <= 500)
10411041+ OR (status = 'interested' AND rn <= 200)
10421042+ OR (status = 'notgoing' AND rn <= 100)
10431043+ ORDER BY
10441044+ CASE status
10451045+ WHEN 'going' THEN 1
10461046+ WHEN 'interested' THEN 2
10471047+ WHEN 'notgoing' THEN 3
10481048+ END,
10491049+ rn
10501050+ "#,
10511051+ )
10521052+ .bind(event_aturi)
10531053+ .fetch_all(tx.as_mut())
10541054+ .await
10551055+ .map_err(StorageError::UnableToExecuteQuery)?;
10561056+10571057+ tx.commit()
10581058+ .await
10591059+ .map_err(StorageError::CannotCommitDatabaseTransaction)?;
10601060+10611061+ // Separate rows by status and extract counts
10621062+ let mut going = Vec::new();
10631063+ let mut interested = Vec::new();
10641064+ let mut notgoing = Vec::new();
10651065+ let mut going_total: u32 = 0;
10661066+ let mut interested_total: u32 = 0;
10671067+ let mut notgoing_total: u32 = 0;
10681068+10691069+ for (did, status, validated_at, count) in rows {
10701070+ let count_u32 = count as u32;
10711071+ match status.as_str() {
10721072+ "going" => {
10731073+ if going_total == 0 {
10741074+ going_total = count_u32;
10751075+ }
10761076+ going.push((did, status, validated_at));
10771077+ }
10781078+ "interested" => {
10791079+ if interested_total == 0 {
10801080+ interested_total = count_u32;
10811081+ }
10821082+ interested.push((did, status, validated_at));
10831083+ }
10841084+ "notgoing" => {
10851085+ if notgoing_total == 0 {
10861086+ notgoing_total = count_u32;
10871087+ }
10881088+ notgoing.push((did, status, validated_at));
10891089+ }
10901090+ _ => {} // Ignore unknown statuses
10911091+ }
10921092+ }
10931093+10941094+ Ok(GroupedRsvpData {
10951095+ going,
10961096+ interested,
10971097+ notgoing,
10981098+ going_total,
10991099+ interested_total,
11001100+ notgoing_total,
11011101+ })
11021102+}
11031103+9831104pub(crate) async fn event_list(
9841105 pool: &StoragePool,
9851106 page: i64,