Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

Convert existing REST /links/count endpoint to XRPC equivalent #8

closed opened by seoul.systems targeting main from seoul.systems/microcosm-rs: xrpc_backlinks_count

Add getCounts XRPC equivalent to REST /links/count

Simple conversion of the existing endpoint from REST to XRPC. We have also marked the pre-existing REST endpoint as deprecated.

In addition, we now ignore rocks.test.

Labels

None yet.

Participants 2
AT URI
at://did:plc:53wellrw53o7sw4zlpfenvuh/sh.tangled.repo.pull/3mcybnzocig22
+240 -390
Interdiff #2 #3
.gitignore

This file has not been changed.

.prettierrc

This file has not been changed.

+9 -17
constellation/src/server/mod.rs
··· 17 use tokio::task::spawn_blocking; 18 use tokio_util::sync::CancellationToken; 19 20 - use crate::storage::{LinkReader, Order, StorageStats}; 21 use crate::{CountsByCount, Did, RecordId}; 22 23 mod acceptable; ··· 66 } 67 }), 68 ) 69 .route( 70 "/links/count", 71 get({ ··· 78 }), 79 ) 80 .route( 81 - "/xrpc/blue.microcosm.links.getCounts", 82 get({ 83 let store = store.clone(); 84 move |accept, query| async { 85 - spawn_blocking(|| get_counts(accept, query, store)) 86 .await 87 .map_err(to500)? 88 } ··· 378 source: String, 379 } 380 #[derive(Template, Serialize)] 381 - #[template(path = "get-counts.html.j2")] 382 struct GetItemsCountResponse { 383 total: u64, 384 #[serde(skip_serializing)] 385 query: GetItemsCountQuery, 386 } 387 - fn get_counts( 388 accept: ExtractAccept, 389 query: axum_extra::extract::Query<GetItemsCountQuery>, 390 store: impl LinkReader, ··· 455 /// Set the max number of links to return per page of results 456 #[serde(default = "get_default_cursor_limit")] 457 limit: u64, 458 - /// Allow returning links in reverse order (default: false) 459 - #[serde(default)] 460 - reverse: bool, 461 } 462 #[derive(Template, Serialize)] 463 #[template(path = "get-backlinks.html.j2")] ··· 503 }; 504 let path = format!(".{path}"); 505 506 - let order = if query.reverse { 507 - Order::OldestToNewest 508 - } else { 509 - Order::NewestToOldest 510 - }; 511 - 512 let paged = store 513 .get_links( 514 &query.subject, 515 collection, 516 &path, 517 - order, 518 limit, 519 until, 520 &filter_dids, ··· 563 from_dids: Option<String>, // comma separated: gross 564 #[serde(default = "get_default_cursor_limit")] 565 limit: u64, 566 } 567 #[derive(Template, Serialize)] 568 #[template(path = "links.html.j2")] ··· 616 &query.target, 617 &query.collection, 618 &query.path, 619 - Order::NewestToOldest, 620 limit, 621 until, 622 &filter_dids, ··· 705 #[serde(skip_serializing)] 706 query: GetAllLinksQuery, 707 } 708 - #[deprecated] 709 fn count_all_links( 710 accept: 
ExtractAccept, 711 query: Query<GetAllLinksQuery>, ··· 715 #[serde(skip_serializing)] 716 query: GetAllLinksQuery, 717 } 718 fn count_all_links( 719 accept: ExtractAccept, 720 query: Query<GetAllLinksQuery>,
··· 17 use tokio::task::spawn_blocking; 18 use tokio_util::sync::CancellationToken; 19 20 + use crate::storage::{LinkReader, StorageStats}; 21 use crate::{CountsByCount, Did, RecordId}; 22 23 mod acceptable; ··· 66 } 67 }), 68 ) 69 + // deprecated 70 .route( 71 "/links/count", 72 get({ ··· 79 }), 80 ) 81 .route( 82 + "/xrpc/blue.microcosm.links.getBacklinksCount", 83 get({ 84 let store = store.clone(); 85 move |accept, query| async { 86 + spawn_blocking(|| get_backlink_counts(accept, query, store)) 87 .await 88 .map_err(to500)? 89 } ··· 379 source: String, 380 } 381 #[derive(Template, Serialize)] 382 + #[template(path = "get-backlinks-count.html.j2")] 383 struct GetItemsCountResponse { 384 total: u64, 385 #[serde(skip_serializing)] 386 query: GetItemsCountQuery, 387 } 388 + fn get_backlink_counts( 389 accept: ExtractAccept, 390 query: axum_extra::extract::Query<GetItemsCountQuery>, 391 store: impl LinkReader, ··· 456 /// Set the max number of links to return per page of results 457 #[serde(default = "get_default_cursor_limit")] 458 limit: u64, 459 + // TODO: allow reverse (er, forward) order as well 460 } 461 #[derive(Template, Serialize)] 462 #[template(path = "get-backlinks.html.j2")] ··· 502 }; 503 let path = format!(".{path}"); 504 505 let paged = store 506 .get_links( 507 &query.subject, 508 collection, 509 &path, 510 limit, 511 until, 512 &filter_dids, ··· 555 from_dids: Option<String>, // comma separated: gross 556 #[serde(default = "get_default_cursor_limit")] 557 limit: u64, 558 + // TODO: allow reverse (er, forward) order as well 559 } 560 #[derive(Template, Serialize)] 561 #[template(path = "links.html.j2")] ··· 609 &query.target, 610 &query.collection, 611 &query.path, 612 limit, 613 until, 614 &filter_dids, ··· 697 #[serde(skip_serializing)] 698 query: GetAllLinksQuery, 699 } 700 fn count_all_links( 701 accept: ExtractAccept, 702 query: Query<GetAllLinksQuery>, ··· 706 #[serde(skip_serializing)] 707 query: GetAllLinksQuery, 708 } 709 + #[deprecated] 
710 fn count_all_links( 711 accept: ExtractAccept, 712 query: Query<GetAllLinksQuery>,
-38
constellation/templates/get-counts.html.j2
··· 1 - {% extends "base.html.j2" %} 2 - {% import "try-it-macros.html.j2" as try_it %} 3 - 4 - {% block title %}Link Count{% endblock %} 5 - {% block description %}Count of {{ query.source }} records linking to {{ query.subject }}{% endblock %} 6 - 7 - {% block content %} 8 - 9 - {% call try_it::get_counts( 10 - query.subject, 11 - query.source, 12 - ) %} 13 - 14 - <h2> 15 - Total links to <code>{{ query.subject }}</code> 16 - {% if let Some(browseable_uri) = query.subject|to_browseable %} 17 - <small style="font-weight: normal; font-size: 1rem"><a href="{{ browseable_uri }}">browse record</a></small> 18 - {% endif %} 19 - </h2> 20 - 21 - <p><strong><code>{{ total|human_number }}</code></strong> total links from <code>{{ query.source }}</code> to <code>{{ query.subject }}</code></p> 22 - 23 - <ul> 24 - <li> 25 - See direct backlinks at <code>/xrpc/blue.microcosm.links.getBacklinks</code>: 26 - <a href="/xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject|urlencode }}&source={{ query.source|urlencode }}"> 27 - /xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject }}&source={{ query.source }} 28 - </a> 29 - </li> 30 - <li>See all links to this target at <code>/links/all</code>: <a href="/links/all?target={{ query.subject|urlencode }}">/links/all?target={{ query.subject }}</a></li> 31 - </ul> 32 - 33 - <details> 34 - <summary>Raw JSON response</summary> 35 - <pre class="code">{{ self|tojson }}</pre> 36 - </details> 37 - 38 - {% endblock %}
···
constellation/templates/hello.html.j2

This patch was likely rebased, as context lines do not match.

constellation/templates/try-it-macros.html.j2

This patch was likely rebased, as context lines do not match.

lexicons/blue.microcosm/links/getBacklinks.json

This file has not been changed.

-38
lexicons/blue.microcosm/links/getCounts.json
··· 1 - { 2 - "lexicon": 1, 3 - "id": "blue.microcosm.links.getCounts", 4 - "defs": { 5 - "main": { 6 - "type": "query", 7 - "description": "count records that link to another record", 8 - "parameters": { 9 - "type": "params", 10 - "required": ["subject", "source"], 11 - "properties": { 12 - "subject": { 13 - "type": "string", 14 - "format": "uri", 15 - "description": "the primary target being linked to (at-uri, did, or uri)" 16 - }, 17 - "source": { 18 - "type": "string", 19 - "description": "collection and path specification for the primary link" 20 - } 21 - } 22 - }, 23 - "output": { 24 - "encoding": "application/json", 25 - "schema": { 26 - "type": "object", 27 - "required": ["total"], 28 - "properties": { 29 - "total": { 30 - "type": "integer", 31 - "description": "total number of matching links" 32 - } 33 - } 34 - } 35 - } 36 - } 37 - } 38 - }
···
lexicons/blue.microcosm/links/getManyToMany.json

This file has not been changed.

lexicons/blue.microcosm/links/getManyToManyCounts.json

This file has not been changed.

+53 -55
constellation/src/storage/mem_store.rs
··· 1 use super::{ 2 - LinkReader, LinkStorage, Order, PagedAppendingCollection, PagedOrderedCollection, StorageStats, 3 }; 4 use crate::{ActionableEvent, CountsByCount, Did, RecordId}; 5 use anyhow::Result; ··· 147 ) -> Result<PagedOrderedCollection<(String, u64, u64), String>> { 148 let data = self.0.lock().unwrap(); 149 let Some(paths) = data.targets.get(&Target::new(target)) else { 150 - return Ok(PagedOrderedCollection::empty()); 151 }; 152 let Some(linkers) = paths.get(&Source::new(collection, path)) else { 153 - return Ok(PagedOrderedCollection::empty()); 154 }; 155 156 let path_to_other = RecordPath::new(path_to_other); ··· 239 target: &str, 240 collection: &str, 241 path: &str, 242 - order: Order, 243 limit: u64, 244 - until: Option<u64>, // paged iteration endpoint 245 filter_dids: &HashSet<Did>, 246 ) -> Result<PagedAppendingCollection<RecordId>> { 247 let data = self.0.lock().unwrap(); 248 let Some(paths) = data.targets.get(&Target::new(target)) else { 249 - return Ok(PagedAppendingCollection::empty()); 250 }; 251 let Some(did_rkeys) = paths.get(&Source::new(collection, path)) else { 252 - return Ok(PagedAppendingCollection::empty()); 253 }; 254 255 let did_rkeys: Vec<_> = if !filter_dids.is_empty() { ··· 266 did_rkeys.to_vec() 267 }; 268 269 - let total = did_rkeys.len() as u64; 270 - 271 - // backlinks are stored oldest-to-newest (ascending index with increasing age) 272 - let (start, take, next_until) = match order { 273 - Order::OldestToNewest => { 274 - let start = until.unwrap_or(0); 275 - let next = start + limit + 1; 276 - let next_until = if next < total { Some(next) } else { None }; 277 - (start, limit, next_until) 278 - } 279 - Order::NewestToOldest => { 280 - let until = until.unwrap_or(total); 281 - match until.checked_sub(limit) { 282 - Some(s) if s > 0 => (s, limit, Some(s)), 283 - Some(s) => (s, limit, None), 284 - None => (0, until, None), 285 - } 286 - } 287 - }; 288 289 - let alive = did_rkeys.iter().flatten().count() as u64; 290 let 
gone = total - alive; 291 292 - let items = did_rkeys 293 .iter() 294 - .skip(start as usize) 295 - .take(take as usize) 296 .flatten() 297 .filter(|(did, _)| *data.dids.get(did).expect("did must be in dids")) 298 .map(|(did, rkey)| RecordId { 299 did: did.clone(), 300 rkey: rkey.0.clone(), 301 collection: collection.to_string(), 302 - }); 303 - 304 - let items: Vec<_> = match order { 305 - Order::OldestToNewest => items.collect(), // links are stored oldest first 306 - Order::NewestToOldest => items.rev().collect(), 307 - }; 308 309 Ok(PagedAppendingCollection { 310 - version: (total, gone), 311 items, 312 - next: next_until, 313 - total: alive, 314 }) 315 } 316 ··· 324 ) -> Result<PagedAppendingCollection<Did>> { 325 let data = self.0.lock().unwrap(); 326 let Some(paths) = data.targets.get(&Target::new(target)) else { 327 - return Ok(PagedAppendingCollection::empty()); 328 }; 329 let Some(did_rkeys) = paths.get(&Source::new(collection, path)) else { 330 - return Ok(PagedAppendingCollection::empty()); 331 }; 332 333 let dids: Vec<Option<Did>> = { ··· 347 .collect() 348 }; 349 350 - let total = dids.len() as u64; 351 - let until = until.unwrap_or(total); 352 - let (start, take, next_until) = match until.checked_sub(limit) { 353 - Some(s) if s > 0 => (s, limit, Some(s)), 354 - Some(s) => (s, limit, None), 355 - None => (0, until, None), 356 - }; 357 358 - let alive = dids.iter().flatten().count() as u64; 359 let gone = total - alive; 360 361 - let items: Vec<Did> = dids 362 .iter() 363 - .skip(start as usize) 364 - .take(take as usize) 365 .rev() 366 .flatten() 367 .filter(|did| *data.dids.get(did).expect("did must be in dids")) ··· 369 .collect(); 370 371 Ok(PagedAppendingCollection { 372 - version: (total, gone), 373 items, 374 - next: next_until, 375 - total: alive, 376 }) 377 } 378
··· 1 use super::{ 2 + LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection, StorageStats, 3 }; 4 use crate::{ActionableEvent, CountsByCount, Did, RecordId}; 5 use anyhow::Result; ··· 147 ) -> Result<PagedOrderedCollection<(String, u64, u64), String>> { 148 let data = self.0.lock().unwrap(); 149 let Some(paths) = data.targets.get(&Target::new(target)) else { 150 + return Ok(PagedOrderedCollection::default()); 151 }; 152 let Some(linkers) = paths.get(&Source::new(collection, path)) else { 153 + return Ok(PagedOrderedCollection::default()); 154 }; 155 156 let path_to_other = RecordPath::new(path_to_other); ··· 239 target: &str, 240 collection: &str, 241 path: &str, 242 limit: u64, 243 + until: Option<u64>, 244 filter_dids: &HashSet<Did>, 245 ) -> Result<PagedAppendingCollection<RecordId>> { 246 let data = self.0.lock().unwrap(); 247 let Some(paths) = data.targets.get(&Target::new(target)) else { 248 + return Ok(PagedAppendingCollection { 249 + version: (0, 0), 250 + items: Vec::new(), 251 + next: None, 252 + total: 0, 253 + }); 254 }; 255 let Some(did_rkeys) = paths.get(&Source::new(collection, path)) else { 256 + return Ok(PagedAppendingCollection { 257 + version: (0, 0), 258 + items: Vec::new(), 259 + next: None, 260 + total: 0, 261 + }); 262 }; 263 264 let did_rkeys: Vec<_> = if !filter_dids.is_empty() { ··· 275 did_rkeys.to_vec() 276 }; 277 278 + let total = did_rkeys.len(); 279 + let end = until 280 + .map(|u| std::cmp::min(u as usize, total)) 281 + .unwrap_or(total); 282 + let begin = end.saturating_sub(limit as usize); 283 + let next = if begin == 0 { None } else { Some(begin as u64) }; 284 285 + let alive = did_rkeys.iter().flatten().count(); 286 let gone = total - alive; 287 288 + let items: Vec<_> = did_rkeys[begin..end] 289 .iter() 290 + .rev() 291 .flatten() 292 .filter(|(did, _)| *data.dids.get(did).expect("did must be in dids")) 293 .map(|(did, rkey)| RecordId { 294 did: did.clone(), 295 rkey: rkey.0.clone(), 296 collection: 
collection.to_string(), 297 + }) 298 + .collect(); 299 300 Ok(PagedAppendingCollection { 301 + version: (total as u64, gone as u64), 302 items, 303 + next, 304 + total: alive as u64, 305 }) 306 } 307 ··· 315 ) -> Result<PagedAppendingCollection<Did>> { 316 let data = self.0.lock().unwrap(); 317 let Some(paths) = data.targets.get(&Target::new(target)) else { 318 + return Ok(PagedAppendingCollection { 319 + version: (0, 0), 320 + items: Vec::new(), 321 + next: None, 322 + total: 0, 323 + }); 324 }; 325 let Some(did_rkeys) = paths.get(&Source::new(collection, path)) else { 326 + return Ok(PagedAppendingCollection { 327 + version: (0, 0), 328 + items: Vec::new(), 329 + next: None, 330 + total: 0, 331 + }); 332 }; 333 334 let dids: Vec<Option<Did>> = { ··· 348 .collect() 349 }; 350 351 + let total = dids.len(); 352 + let end = until 353 + .map(|u| std::cmp::min(u as usize, total)) 354 + .unwrap_or(total); 355 + let begin = end.saturating_sub(limit as usize); 356 + let next = if begin == 0 { None } else { Some(begin as u64) }; 357 358 + let alive = dids.iter().flatten().count(); 359 let gone = total - alive; 360 361 + let items: Vec<Did> = dids[begin..end] 362 .iter() 363 .rev() 364 .flatten() 365 .filter(|did| *data.dids.get(did).expect("did must be in dids")) ··· 367 .collect(); 368 369 Ok(PagedAppendingCollection { 370 + version: (total as u64, gone as u64), 371 items, 372 + next, 373 + total: alive as u64, 374 }) 375 } 376
+76 -195
constellation/src/storage/mod.rs
··· 11 #[cfg(feature = "rocks")] 12 pub use rocks_store::RocksStorage; 13 14 - /// Ordering for paginated link queries 15 - #[derive(Debug, Clone, Copy, PartialEq, Eq)] 16 - pub enum Order { 17 - /// Newest links first (default) 18 - NewestToOldest, 19 - /// Oldest links first 20 - OldestToNewest, 21 - } 22 - 23 - #[derive(Debug, Default, PartialEq)] 24 pub struct PagedAppendingCollection<T> { 25 pub version: (u64, u64), // (collection length, deleted item count) // TODO: change to (total, active)? since dedups isn't "deleted" 26 pub items: Vec<T>, ··· 28 pub total: u64, 29 } 30 31 - impl<T> PagedAppendingCollection<T> { 32 - pub(crate) fn empty() -> Self { 33 - Self { 34 - version: (0, 0), 35 - items: Vec::new(), 36 - next: None, 37 - total: 0, 38 - } 39 - } 40 - } 41 - 42 /// A paged collection whose keys are sorted instead of indexed 43 /// 44 /// this has weaker guarantees than PagedAppendingCollection: it might 45 /// return a totally consistent snapshot. but it should avoid duplicates 46 /// and each page should at least be internally consistent. 47 - #[derive(Debug, PartialEq)] 48 pub struct PagedOrderedCollection<T, K: Ord> { 49 pub items: Vec<T>, 50 pub next: Option<K>, 51 } 52 53 - impl<T, K: Ord> PagedOrderedCollection<T, K> { 54 - pub(crate) fn empty() -> Self { 55 - Self { 56 - items: Vec::new(), 57 - next: None, 58 - } 59 - } 60 - } 61 - 62 #[derive(Debug, Deserialize, Serialize, PartialEq)] 63 pub struct StorageStats { 64 /// estimate of how many accounts we've seen create links. the _subjects_ of any links are not represented here. 
··· 111 112 fn get_distinct_did_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>; 113 114 - #[allow(clippy::too_many_arguments)] 115 fn get_links( 116 &self, 117 target: &str, 118 collection: &str, 119 path: &str, 120 - order: Order, 121 limit: u64, 122 until: Option<u64>, 123 filter_dids: &HashSet<Did>, ··· 211 "a.com", 212 "app.t.c", 213 ".abc.uri", 214 - Order::NewestToOldest, 215 100, 216 None, 217 &HashSet::default() 218 )?, 219 - PagedAppendingCollection::empty() 220 ); 221 assert_eq!( 222 storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 100, None)?, 223 - PagedAppendingCollection::empty() 224 ); 225 assert_eq!(storage.get_all_counts("bad-example.com")?, HashMap::new()); 226 assert_eq!( ··· 705 "a.com", 706 "app.t.c", 707 ".abc.uri", 708 - Order::NewestToOldest, 709 100, 710 None, 711 &HashSet::default() ··· 750 0, 751 )?; 752 } 753 - 754 - let sub = "a.com"; 755 - let col = "app.t.c"; 756 - let path = ".abc.uri"; 757 - let order = Order::NewestToOldest; 758 - let dids_filter = HashSet::new(); 759 - 760 - // --- --- round one! --- --- // 761 - // all backlinks 762 - let links = storage.get_links(sub, col, path, order, 2, None, &dids_filter)?; 763 assert_eq!( 764 links, 765 PagedAppendingCollection { ··· 767 items: vec![ 768 RecordId { 769 did: "did:plc:asdf-5".into(), 770 - collection: col.into(), 771 rkey: "asdf".into(), 772 }, 773 RecordId { 774 did: "did:plc:asdf-4".into(), 775 - collection: col.into(), 776 rkey: "asdf".into(), 777 }, 778 ], ··· 780 total: 5, 781 } 782 ); 783 - // distinct dids 784 - let dids = storage.get_distinct_dids(sub, col, path, 2, None)?; 785 assert_eq!( 786 dids, 787 PagedAppendingCollection { ··· 791 total: 5, 792 } 793 ); 794 - 795 - // --- --- round two! 
--- --- // 796 - // all backlinks 797 - let links = storage.get_links(sub, col, path, order, 2, links.next, &dids_filter)?; 798 assert_eq!( 799 links, 800 PagedAppendingCollection { ··· 802 items: vec![ 803 RecordId { 804 did: "did:plc:asdf-3".into(), 805 - collection: col.into(), 806 rkey: "asdf".into(), 807 }, 808 RecordId { 809 did: "did:plc:asdf-2".into(), 810 - collection: col.into(), 811 rkey: "asdf".into(), 812 }, 813 ], ··· 815 total: 5, 816 } 817 ); 818 - // distinct dids 819 - let dids = storage.get_distinct_dids(sub, col, path, 2, dids.next)?; 820 assert_eq!( 821 dids, 822 PagedAppendingCollection { ··· 826 total: 5, 827 } 828 ); 829 - 830 - // --- --- round three! --- --- // 831 - // all backlinks 832 - let links = storage.get_links(sub, col, path, order, 2, links.next, &dids_filter)?; 833 assert_eq!( 834 links, 835 PagedAppendingCollection { 836 version: (5, 0), 837 items: vec![RecordId { 838 did: "did:plc:asdf-1".into(), 839 - collection: col.into(), 840 rkey: "asdf".into(), 841 },], 842 next: None, 843 total: 5, 844 } 845 ); 846 - // distinct dids 847 - let dids = storage.get_distinct_dids(sub, col, path, 2, dids.next)?; 848 assert_eq!( 849 dids, 850 PagedAppendingCollection { ··· 854 total: 5, 855 } 856 ); 857 - 858 assert_stats(storage.get_stats()?, 5..=5, 1..=1, 5..=5); 859 }); 860 861 - test_each_storage!(get_links_reverse_order, |storage| { 862 - for i in 1..=5 { 863 - storage.push( 864 - &ActionableEvent::CreateLinks { 865 - record_id: RecordId { 866 - did: format!("did:plc:asdf-{i}").into(), 867 - collection: "app.t.c".into(), 868 - rkey: "asdf".into(), 869 - }, 870 - links: vec![CollectedLink { 871 - target: Link::Uri("a.com".into()), 872 - path: ".abc.uri".into(), 873 - }], 874 - }, 875 - 0, 876 - )?; 877 - } 878 - 879 - // Test OldestToNewest order (oldest first) 880 let links = storage.get_links( 881 "a.com", 882 "app.t.c", 883 ".abc.uri", 884 - Order::OldestToNewest, 885 2, 886 None, 887 - &HashSet::default(), 888 )?; 889 assert_eq!( 890 
links, 891 PagedAppendingCollection { 892 - version: (5, 0), 893 - items: vec![ 894 - RecordId { 895 - did: "did:plc:asdf-1".into(), 896 - collection: "app.t.c".into(), 897 - rkey: "asdf".into(), 898 - }, 899 - RecordId { 900 - did: "did:plc:asdf-2".into(), 901 - collection: "app.t.c".into(), 902 - rkey: "asdf".into(), 903 - }, 904 - ], 905 - next: Some(3), 906 - total: 5, 907 } 908 ); 909 - // Test NewestToOldest order (newest first) 910 - let links = storage.get_links( 911 - "a.com", 912 - "app.t.c", 913 - ".abc.uri", 914 - Order::NewestToOldest, 915 - 2, 916 - None, 917 - &HashSet::default(), 918 - )?; 919 - assert_eq!( 920 - links, 921 - PagedAppendingCollection { 922 - version: (5, 0), 923 - items: vec![ 924 - RecordId { 925 - did: "did:plc:asdf-5".into(), 926 - collection: "app.t.c".into(), 927 - rkey: "asdf".into(), 928 - }, 929 - RecordId { 930 - did: "did:plc:asdf-4".into(), 931 - collection: "app.t.c".into(), 932 - rkey: "asdf".into(), 933 - }, 934 - ], 935 - next: Some(3), 936 - total: 5, 937 - } 938 - ); 939 - assert_stats(storage.get_stats()?, 5..=5, 1..=1, 5..=5); 940 - }); 941 - 942 - test_each_storage!(get_filtered_links, |storage| { 943 - let links = storage.get_links( 944 - "a.com", 945 - "app.t.c", 946 - ".abc.uri", 947 - Order::NewestToOldest, 948 - 2, 949 - None, 950 - &HashSet::from([Did("did:plc:linker".to_string())]), 951 - )?; 952 - assert_eq!(links, PagedAppendingCollection::empty()); 953 954 storage.push( 955 &ActionableEvent::CreateLinks { ··· 970 "a.com", 971 "app.t.c", 972 ".abc.uri", 973 - Order::NewestToOldest, 974 2, 975 None, 976 &HashSet::from([Did("did:plc:linker".to_string())]), ··· 993 "a.com", 994 "app.t.c", 995 ".abc.uri", 996 - Order::NewestToOldest, 997 2, 998 None, 999 &HashSet::from([Did("did:plc:someone-else".to_string())]), 1000 )?; 1001 - assert_eq!(links, PagedAppendingCollection::empty()); 1002 1003 storage.push( 1004 &ActionableEvent::CreateLinks { ··· 1033 "a.com", 1034 "app.t.c", 1035 ".abc.uri", 1036 - 
Order::NewestToOldest, 1037 2, 1038 None, 1039 &HashSet::from([Did("did:plc:linker".to_string())]), ··· 1063 "a.com", 1064 "app.t.c", 1065 ".abc.uri", 1066 - Order::NewestToOldest, 1067 2, 1068 None, 1069 &HashSet::from([ ··· 1096 "a.com", 1097 "app.t.c", 1098 ".abc.uri", 1099 - Order::NewestToOldest, 1100 2, 1101 None, 1102 &HashSet::from([Did("did:plc:someone-unknown".to_string())]), 1103 )?; 1104 - assert_eq!(links, PagedAppendingCollection::empty()); 1105 }); 1106 1107 test_each_storage!(get_links_exact_multiple, |storage| { ··· 1121 0, 1122 )?; 1123 } 1124 - let links = storage.get_links( 1125 - "a.com", 1126 - "app.t.c", 1127 - ".abc.uri", 1128 - Order::NewestToOldest, 1129 - 2, 1130 - None, 1131 - &HashSet::default(), 1132 - )?; 1133 assert_eq!( 1134 links, 1135 PagedAppendingCollection { ··· 1154 "a.com", 1155 "app.t.c", 1156 ".abc.uri", 1157 - Order::NewestToOldest, 1158 2, 1159 links.next, 1160 &HashSet::default(), ··· 1199 0, 1200 )?; 1201 } 1202 - let links = storage.get_links( 1203 - "a.com", 1204 - "app.t.c", 1205 - ".abc.uri", 1206 - Order::NewestToOldest, 1207 - 2, 1208 - None, 1209 - &HashSet::default(), 1210 - )?; 1211 assert_eq!( 1212 links, 1213 PagedAppendingCollection { ··· 1246 "a.com", 1247 "app.t.c", 1248 ".abc.uri", 1249 - Order::NewestToOldest, 1250 2, 1251 links.next, 1252 &HashSet::default(), ··· 1291 0, 1292 )?; 1293 } 1294 - let links = storage.get_links( 1295 - "a.com", 1296 - "app.t.c", 1297 - ".abc.uri", 1298 - Order::NewestToOldest, 1299 - 2, 1300 - None, 1301 - &HashSet::default(), 1302 - )?; 1303 assert_eq!( 1304 links, 1305 PagedAppendingCollection { ··· 1332 "a.com", 1333 "app.t.c", 1334 ".abc.uri", 1335 - Order::NewestToOldest, 1336 2, 1337 links.next, 1338 &HashSet::default(), ··· 1370 0, 1371 )?; 1372 } 1373 - let links = storage.get_links( 1374 - "a.com", 1375 - "app.t.c", 1376 - ".abc.uri", 1377 - Order::NewestToOldest, 1378 - 2, 1379 - None, 1380 - &HashSet::default(), 1381 - )?; 1382 assert_eq!( 1383 links, 1384 
PagedAppendingCollection { ··· 1407 "a.com", 1408 "app.t.c", 1409 ".abc.uri", 1410 - Order::NewestToOldest, 1411 2, 1412 links.next, 1413 &HashSet::default(), ··· 1494 &HashSet::new(), 1495 &HashSet::new(), 1496 )?, 1497 - PagedOrderedCollection::empty() 1498 ); 1499 }); 1500
··· 11 #[cfg(feature = "rocks")] 12 pub use rocks_store::RocksStorage; 13 14 + #[derive(Debug, PartialEq)] 15 pub struct PagedAppendingCollection<T> { 16 pub version: (u64, u64), // (collection length, deleted item count) // TODO: change to (total, active)? since dedups isn't "deleted" 17 pub items: Vec<T>, ··· 19 pub total: u64, 20 } 21 22 /// A paged collection whose keys are sorted instead of indexed 23 /// 24 /// this has weaker guarantees than PagedAppendingCollection: it might 25 /// return a totally consistent snapshot. but it should avoid duplicates 26 /// and each page should at least be internally consistent. 27 + #[derive(Debug, PartialEq, Default)] 28 pub struct PagedOrderedCollection<T, K: Ord> { 29 pub items: Vec<T>, 30 pub next: Option<K>, 31 } 32 33 #[derive(Debug, Deserialize, Serialize, PartialEq)] 34 pub struct StorageStats { 35 /// estimate of how many accounts we've seen create links. the _subjects_ of any links are not represented here. ··· 82 83 fn get_distinct_did_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>; 84 85 fn get_links( 86 &self, 87 target: &str, 88 collection: &str, 89 path: &str, 90 limit: u64, 91 until: Option<u64>, 92 filter_dids: &HashSet<Did>, ··· 180 "a.com", 181 "app.t.c", 182 ".abc.uri", 183 100, 184 None, 185 &HashSet::default() 186 )?, 187 + PagedAppendingCollection { 188 + version: (0, 0), 189 + items: vec![], 190 + next: None, 191 + total: 0, 192 + } 193 ); 194 assert_eq!( 195 storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 100, None)?, 196 + PagedAppendingCollection { 197 + version: (0, 0), 198 + items: vec![], 199 + next: None, 200 + total: 0, 201 + } 202 ); 203 assert_eq!(storage.get_all_counts("bad-example.com")?, HashMap::new()); 204 assert_eq!( ··· 683 "a.com", 684 "app.t.c", 685 ".abc.uri", 686 100, 687 None, 688 &HashSet::default() ··· 727 0, 728 )?; 729 } 730 + let links = 731 + storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?; 732 + let dids 
= storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, None)?; 733 assert_eq!( 734 links, 735 PagedAppendingCollection { ··· 737 items: vec![ 738 RecordId { 739 did: "did:plc:asdf-5".into(), 740 + collection: "app.t.c".into(), 741 rkey: "asdf".into(), 742 }, 743 RecordId { 744 did: "did:plc:asdf-4".into(), 745 + collection: "app.t.c".into(), 746 rkey: "asdf".into(), 747 }, 748 ], ··· 750 total: 5, 751 } 752 ); 753 assert_eq!( 754 dids, 755 PagedAppendingCollection { ··· 759 total: 5, 760 } 761 ); 762 + let links = storage.get_links( 763 + "a.com", 764 + "app.t.c", 765 + ".abc.uri", 766 + 2, 767 + links.next, 768 + &HashSet::default(), 769 + )?; 770 + let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?; 771 assert_eq!( 772 links, 773 PagedAppendingCollection { ··· 775 items: vec![ 776 RecordId { 777 did: "did:plc:asdf-3".into(), 778 + collection: "app.t.c".into(), 779 rkey: "asdf".into(), 780 }, 781 RecordId { 782 did: "did:plc:asdf-2".into(), 783 + collection: "app.t.c".into(), 784 rkey: "asdf".into(), 785 }, 786 ], ··· 788 total: 5, 789 } 790 ); 791 assert_eq!( 792 dids, 793 PagedAppendingCollection { ··· 797 total: 5, 798 } 799 ); 800 + let links = storage.get_links( 801 + "a.com", 802 + "app.t.c", 803 + ".abc.uri", 804 + 2, 805 + links.next, 806 + &HashSet::default(), 807 + )?; 808 + let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?; 809 assert_eq!( 810 links, 811 PagedAppendingCollection { 812 version: (5, 0), 813 items: vec![RecordId { 814 did: "did:plc:asdf-1".into(), 815 + collection: "app.t.c".into(), 816 rkey: "asdf".into(), 817 },], 818 next: None, 819 total: 5, 820 } 821 ); 822 assert_eq!( 823 dids, 824 PagedAppendingCollection { ··· 828 total: 5, 829 } 830 ); 831 assert_stats(storage.get_stats()?, 5..=5, 1..=1, 5..=5); 832 }); 833 834 + test_each_storage!(get_filtered_links, |storage| { 835 let links = storage.get_links( 836 "a.com", 837 "app.t.c", 838 ".abc.uri", 839 2, 840 None, 
841 + &HashSet::from([Did("did:plc:linker".to_string())]), 842 )?; 843 assert_eq!( 844 links, 845 PagedAppendingCollection { 846 + version: (0, 0), 847 + items: vec![], 848 + next: None, 849 + total: 0, 850 } 851 ); 852 853 storage.push( 854 &ActionableEvent::CreateLinks { ··· 869 "a.com", 870 "app.t.c", 871 ".abc.uri", 872 2, 873 None, 874 &HashSet::from([Did("did:plc:linker".to_string())]), ··· 891 "a.com", 892 "app.t.c", 893 ".abc.uri", 894 2, 895 None, 896 &HashSet::from([Did("did:plc:someone-else".to_string())]), 897 )?; 898 + assert_eq!( 899 + links, 900 + PagedAppendingCollection { 901 + version: (0, 0), 902 + items: vec![], 903 + next: None, 904 + total: 0, 905 + } 906 + ); 907 908 storage.push( 909 &ActionableEvent::CreateLinks { ··· 938 "a.com", 939 "app.t.c", 940 ".abc.uri", 941 2, 942 None, 943 &HashSet::from([Did("did:plc:linker".to_string())]), ··· 967 "a.com", 968 "app.t.c", 969 ".abc.uri", 970 2, 971 None, 972 &HashSet::from([ ··· 999 "a.com", 1000 "app.t.c", 1001 ".abc.uri", 1002 2, 1003 None, 1004 &HashSet::from([Did("did:plc:someone-unknown".to_string())]), 1005 )?; 1006 + assert_eq!( 1007 + links, 1008 + PagedAppendingCollection { 1009 + version: (0, 0), 1010 + items: vec![], 1011 + next: None, 1012 + total: 0, 1013 + } 1014 + ); 1015 }); 1016 1017 test_each_storage!(get_links_exact_multiple, |storage| { ··· 1031 0, 1032 )?; 1033 } 1034 + let links = 1035 + storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?; 1036 assert_eq!( 1037 links, 1038 PagedAppendingCollection { ··· 1057 "a.com", 1058 "app.t.c", 1059 ".abc.uri", 1060 2, 1061 links.next, 1062 &HashSet::default(), ··· 1101 0, 1102 )?; 1103 } 1104 + let links = 1105 + storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?; 1106 assert_eq!( 1107 links, 1108 PagedAppendingCollection { ··· 1141 "a.com", 1142 "app.t.c", 1143 ".abc.uri", 1144 2, 1145 links.next, 1146 &HashSet::default(), ··· 1185 0, 1186 )?; 1187 } 1188 + let links = 1189 + 
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?; 1190 assert_eq!( 1191 links, 1192 PagedAppendingCollection { ··· 1219 "a.com", 1220 "app.t.c", 1221 ".abc.uri", 1222 2, 1223 links.next, 1224 &HashSet::default(), ··· 1256 0, 1257 )?; 1258 } 1259 + let links = 1260 + storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?; 1261 assert_eq!( 1262 links, 1263 PagedAppendingCollection { ··· 1286 "a.com", 1287 "app.t.c", 1288 ".abc.uri", 1289 2, 1290 links.next, 1291 &HashSet::default(), ··· 1372 &HashSet::new(), 1373 &HashSet::new(), 1374 )?, 1375 + PagedOrderedCollection { 1376 + items: vec![], 1377 + next: None, 1378 + } 1379 ); 1380 }); 1381
+25 -41
constellation/src/storage/rocks_store.rs
··· 1 use super::{ 2 - ActionableEvent, LinkReader, LinkStorage, Order, PagedAppendingCollection, 3 - PagedOrderedCollection, StorageStats, 4 }; 5 use crate::{CountsByCount, Did, RecordId}; 6 use anyhow::{bail, Result}; ··· 960 961 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else { 962 eprintln!("nothin doin for this target, {target_key:?}"); 963 - return Ok(PagedOrderedCollection::empty()); 964 }; 965 966 let filter_did_ids: HashMap<DidId, bool> = filter_dids ··· 1127 target: &str, 1128 collection: &str, 1129 path: &str, 1130 - order: Order, 1131 limit: u64, 1132 until: Option<u64>, 1133 filter_dids: &HashSet<Did>, ··· 1139 ); 1140 1141 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else { 1142 - return Ok(PagedAppendingCollection::empty()); 1143 }; 1144 1145 let mut linkers = self.get_target_linkers(&target_id)?; ··· 1163 1164 let (alive, gone) = linkers.count(); 1165 let total = alive + gone; 1166 - 1167 - let (start, take, next_until) = match order { 1168 - // OldestToNewest: start from the beginning, paginate forward 1169 - Order::OldestToNewest => { 1170 - let start = until.unwrap_or(0); 1171 - let next = start + limit + 1; 1172 - let next_until = if next < total { Some(next) } else { None }; 1173 - (start, limit, next_until) 1174 - } 1175 - // NewestToOldest: start from the end, paginate backward 1176 - Order::NewestToOldest => { 1177 - let until = until.unwrap_or(total); 1178 - match until.checked_sub(limit) { 1179 - Some(s) if s > 0 => (s, limit, Some(s)), 1180 - Some(s) => (s, limit, None), 1181 - None => (0, until, None), 1182 - } 1183 - } 1184 - }; 1185 1186 - let did_id_rkeys = linkers.0.iter().skip(start as usize).take(take as usize); 1187 - let did_id_rkeys: Vec<_> = match order { 1188 - Order::OldestToNewest => did_id_rkeys.collect(), 1189 - Order::NewestToOldest => did_id_rkeys.rev().collect(), 1190 - }; 1191 1192 let mut items = Vec::with_capacity(did_id_rkeys.len()); 1193 // TODO: 
use get-many (or multi-get or whatever it's called) ··· 1217 Ok(PagedAppendingCollection { 1218 version: (total, gone), 1219 items, 1220 - next: next_until, 1221 total: alive, 1222 }) 1223 } ··· 1237 ); 1238 1239 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else { 1240 - return Ok(PagedAppendingCollection::empty()); 1241 }; 1242 1243 let linkers = self.get_distinct_target_linkers(&target_id)?; 1244 1245 let (alive, gone) = linkers.count(); 1246 let total = alive + gone; 1247 1248 - let until = until.unwrap_or(total); 1249 - let (start, take, next_until) = match until.checked_sub(limit) { 1250 - Some(s) if s > 0 => (s, limit, Some(s)), 1251 - Some(s) => (s, limit, None), 1252 - None => (0, until, None), 1253 - }; 1254 - 1255 - let did_id_rkeys = linkers.0.iter().skip(start as usize).take(take as usize); 1256 - let did_id_rkeys: Vec<_> = did_id_rkeys.rev().collect(); 1257 1258 let mut items = Vec::with_capacity(did_id_rkeys.len()); 1259 // TODO: use get-many (or multi-get or whatever it's called) ··· 1279 Ok(PagedAppendingCollection { 1280 version: (total, gone), 1281 items, 1282 - next: next_until, 1283 total: alive, 1284 }) 1285 }
··· 1 use super::{ 2 + ActionableEvent, LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection, 3 + StorageStats, 4 }; 5 use crate::{CountsByCount, Did, RecordId}; 6 use anyhow::{bail, Result}; ··· 960 961 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else { 962 eprintln!("nothin doin for this target, {target_key:?}"); 963 + return Ok(Default::default()); 964 }; 965 966 let filter_did_ids: HashMap<DidId, bool> = filter_dids ··· 1127 target: &str, 1128 collection: &str, 1129 path: &str, 1130 limit: u64, 1131 until: Option<u64>, 1132 filter_dids: &HashSet<Did>, ··· 1138 ); 1139 1140 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else { 1141 + return Ok(PagedAppendingCollection { 1142 + version: (0, 0), 1143 + items: Vec::new(), 1144 + next: None, 1145 + total: 0, 1146 + }); 1147 }; 1148 1149 let mut linkers = self.get_target_linkers(&target_id)?; ··· 1167 1168 let (alive, gone) = linkers.count(); 1169 let total = alive + gone; 1170 + let end = until.map(|u| std::cmp::min(u, total)).unwrap_or(total) as usize; 1171 + let begin = end.saturating_sub(limit as usize); 1172 + let next = if begin == 0 { None } else { Some(begin as u64) }; 1173 1174 + let did_id_rkeys = linkers.0[begin..end].iter().rev().collect::<Vec<_>>(); 1175 1176 let mut items = Vec::with_capacity(did_id_rkeys.len()); 1177 // TODO: use get-many (or multi-get or whatever it's called) ··· 1201 Ok(PagedAppendingCollection { 1202 version: (total, gone), 1203 items, 1204 + next, 1205 total: alive, 1206 }) 1207 } ··· 1221 ); 1222 1223 let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? 
else { 1224 + return Ok(PagedAppendingCollection { 1225 + version: (0, 0), 1226 + items: Vec::new(), 1227 + next: None, 1228 + total: 0, 1229 + }); 1230 }; 1231 1232 let linkers = self.get_distinct_target_linkers(&target_id)?; 1233 1234 let (alive, gone) = linkers.count(); 1235 let total = alive + gone; 1236 + let end = until.map(|u| std::cmp::min(u, total)).unwrap_or(total) as usize; 1237 + let begin = end.saturating_sub(limit as usize); 1238 + let next = if begin == 0 { None } else { Some(begin as u64) }; 1239 1240 + let did_id_rkeys = linkers.0[begin..end].iter().rev().collect::<Vec<_>>(); 1241 1242 let mut items = Vec::with_capacity(did_id_rkeys.len()); 1243 // TODO: use get-many (or multi-get or whatever it's called) ··· 1263 Ok(PagedAppendingCollection { 1264 version: (total, gone), 1265 items, 1266 + next, 1267 total: alive, 1268 }) 1269 }
-4
constellation/templates/base.html.j2
··· 40 padding: 0.5em 0.3em; 41 max-width: 100%; 42 } 43 - pre.code input { 44 - margin: 0; 45 - padding: 0; 46 - } 47 .stat { 48 color: #f90; 49 font-size: 1.618rem;
··· 40 padding: 0.5em 0.3em; 41 max-width: 100%; 42 } 43 .stat { 44 color: #f90; 45 font-size: 1.618rem;
+1 -2
constellation/templates/get-backlinks.html.j2
··· 6 7 {% block content %} 8 9 - {% call try_it::get_backlinks(query.subject, query.source, query.did, query.limit, query.reverse) %} 10 11 <h2> 12 Links to <code>{{ query.subject }}</code> ··· 40 <input type="hidden" name="did" value="{{ did }}" /> 41 {% endfor %} 42 <input type="hidden" name="cursor" value={{ c|json|safe }} /> 43 - <input type="hidden" name="reverse" value="{{ query.reverse }}"> 44 <button type="submit">next page&hellip;</button> 45 </form> 46 {% else %}
··· 6 7 {% block content %} 8 9 + {% call try_it::get_backlinks(query.subject, query.source, query.did, query.limit) %} 10 11 <h2> 12 Links to <code>{{ query.subject }}</code> ··· 40 <input type="hidden" name="did" value="{{ did }}" /> 41 {% endfor %} 42 <input type="hidden" name="cursor" value={{ c|json|safe }} /> 43 <button type="submit">next page&hellip;</button> 44 </form> 45 {% else %}
+38
lexicons/blue.microcosm/links/getBacklinksCount.json
···
··· 1 + { 2 + "lexicon": 1, 3 + "id": "blue.microcosm.links.getBacklinksCount", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "description": "count records that link to another record", 8 + "parameters": { 9 + "type": "params", 10 + "required": ["subject", "source"], 11 + "properties": { 12 + "subject": { 13 + "type": "string", 14 + "format": "uri", 15 + "description": "the primary target being linked to (at-uri, did, or uri)" 16 + }, 17 + "source": { 18 + "type": "string", 19 + "description": "collection and path specification for the primary link" 20 + } 21 + } 22 + }, 23 + "output": { 24 + "encoding": "application/json", 25 + "schema": { 26 + "type": "object", 27 + "required": ["total"], 28 + "properties": { 29 + "total": { 30 + "type": "integer", 31 + "description": "total number of matching links" 32 + } 33 + } 34 + } 35 + } 36 + } 37 + } 38 + }

History

8 rounds 13 comments
sign up or login to add to the discussion
8 commits
expand
Add getCounts XRPC equivalent to REST /links/count
Modify backlinks counting XRPC endpoint name
Mark /links/count REST endpoint as deprecated
Remove .uri suffix
Reformat existing lexicons
Remove wrongly committed getManyToMany lexicon
Fix Git whitespace error in "hello" template
Format .prettierrc and fix Git whitespace error
expand 3 comments

I'm giving up on the whitespace issue...

max@max-mbpro ~/dev/microcosm-rs (xrpc_backlinks_count) $ git diff --check upstream/main
max@max-mbpro ~/dev/microcosm-rs (xrpc_backlinks_count) $

git diff --check doesn't indicate any errors either.

As far as I can tell this seems to be a confirmed Tangled issue

happy to do merging locally as needed!

merged. thanks!

closed without merging
7 commits
expand
Add getCounts XRPC equivalent to REST /links/count
Modify backlinks counting XRPC endpoint name
Mark /links/count REST endpoint as deprecated
Remove .uri suffix
Reformat existing lexicons
Remove wrongly committed getManyToMany lexicon
Fix Git whitespace error in "hello" template
expand 0 comments
6 commits
expand
Add getCounts XRPC equivalent to REST /links/count
Modify backlinks counting XRPC endpoint name
Mark /links/count REST endpoint as deprecated
Remove .uri suffix
Reformat existing lexicons
Remove wrongly committed getManyToMany lexicon
expand 0 comments
3 commits
expand
Add getCounts XRPC equivalent to REST /links/count
Modify backlinks counting XRPC endpoint name
Mark /links/count REST endpoint as deprecated
expand 4 comments

Tangled somehow complains about merge conflicts here, but I couldn't find any after rebasing on upstream/main again?!

very weird!

one tiny thing left: the source for blocks is app.bsky.graph.block:subject (no .uri suffix on the path) -- it's in the hello.html template.

i think some many-to-many order stuff ended up on this branch but i'm too tired for git rn.

if you don't get to it first i'm happy to fix the source and git stuff and merge when i can get to it :)

Sorry about the m2m stuff that I didn't catch before opening the PR. I did clean this up again, and nothing belonging there should be remaining here; Addresses your above comment regarding the .uri suffix as well.

3 commits
expand
Add getCounts XRPC equivalent to REST /links/count
Modify backlinks counting XRPC endpoint name
Mark /links/count REST endpoint as deprecated
expand 0 comments
1 commit
expand
Add getCounts XRPC equivalent to REST /links/count
expand 6 comments

Had to do some rebasing and cleanup, but good to go now I think.

sweet!

(btw the local/ gitignore is there for doing local/rocks.test, but i'm fine adding it to the top-level gitignore too!)

i think links.getCounts is a little too broad -- what do you think of links.getBacklinksCount?

we also throw a deprecated warning under /links/count on the main page template, matching the one for /links.

it's weird but the whitespace in the forms in try-it-macros is significant (one of my moments of knowingly doing things the wrong way because i was bored, sorry!)

the new form for this endpoint needs little tweaking to match the others.

Ah got it. The rocks tests ended up cluttering the top-level directory so I just added them to .gitignore. I often keep a .local file in .gitignores to ignore whatever's supposed to stay in one's local copy and somehow assumed this had the same intent.

Changed the function and endpoint/method name as requested to get_backlinks_count/getBacklinksCount

The endpoint in the try-it-macro should now match the other existing ones down to how they're formatted with whitespace

3 commits
expand
Make metrics collection opt-in
Increase constellation response limits
Add getCounts XRPC equivalent to REST /links/count
expand 0 comments
7 commits
expand
Make metrics collection opt-in
Increase constellation response limits
wip: m2m
Add tests for new get_many_to_many query handler
Fix get_m2m_empty test
Replace tuple with RecordsBySubject struct
Add getCounts XRPC equivalent to REST /links/count
expand 0 comments