···
use jacquard_common::types::crypto::PublicKey;
use jacquard_common::types::string::{Datetime, Did, Tid};
use jacquard_common::{CowStr, IntoStatic};
+use smol_str::ToSmolStr;

/// Firehose commit message (sync v1.0 and v1.1)
///
···
    pub prev: Option<CidLink<'a>>,
}

+impl<'a> RepoOp<'a> {
+    /// Convert to VerifiedWriteOp for v1.1 validation
+    ///
+    /// Validates that all required fields are present for inversion.
+    pub fn to_invertible_op(&self) -> Result<VerifiedWriteOp> {
+        let key = self.path.to_smolstr();
+
+        match self.action.as_ref() {
+            "create" => {
+                let cid = self
+                    .cid
+                    .as_ref()
+                    .ok_or_else(|| RepoError::invalid_commit("create operation missing cid field"))?
+                    .to_ipld()
+                    .map_err(|e| RepoError::invalid_cid_conversion(e, "create cid"))?;
+
+                Ok(VerifiedWriteOp::Create { key, cid })
+            }
+            "update" => {
+                let cid = self
+                    .cid
+                    .as_ref()
+                    .ok_or_else(|| RepoError::invalid_commit("update operation missing cid field"))?
+                    .to_ipld()
+                    .map_err(|e| RepoError::invalid_cid_conversion(e, "update cid"))?;
+
+                let prev = self
+                    .prev
+                    .as_ref()
+                    .ok_or_else(|| {
+                        RepoError::invalid_commit(
+                            "update operation missing prev field for v1.1 validation",
+                        )
+                    })?
+                    .to_ipld()
+                    .map_err(|e| RepoError::invalid_cid_conversion(e, "update prev"))?;
+
+                Ok(VerifiedWriteOp::Update { key, cid, prev })
+            }
+            "delete" => {
+                let prev = self
+                    .prev
+                    .as_ref()
+                    .ok_or_else(|| {
+                        RepoError::invalid_commit(
+                            "delete operation missing prev field for v1.1 validation",
+                        )
+                    })?
+                    .to_ipld()
+                    .map_err(|e| RepoError::invalid_cid_conversion(e, "delete prev"))?;
+
+                Ok(VerifiedWriteOp::Delete { key, prev })
+            }
+            action => Err(RepoError::invalid_commit(format!(
+                "unknown action type: {}",
+                action
+            ))),
+        }
+    }
+}
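// Usage sketch (not part of the patch): converting the raw firehose ops carried on a
// commit before the inversion check. The validation below uses filter_map plus a
// length check; an equivalent formulation that surfaces the first conversion error
// directly would be:
//
//     let invertible: Vec<VerifiedWriteOp> = commit
//         .ops
//         .iter()
//         .map(|op| op.to_invertible_op())
//         .collect::<Result<Vec<_>>>()?;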
+
impl IntoStatic for FirehoseCommit<'_> {
    type Output = FirehoseCommit<'static>;

···
///
/// These functions validate commits from the `com.atproto.sync.subscribeRepos` firehose.
use crate::error::{RepoError, Result};
-use crate::mst::Mst;
+use crate::mst::{Mst, VerifiedWriteOp};
use crate::storage::{BlockStore, LayeredBlockStore, MemoryBlockStore};
use cid::Cid as IpldCid;
use std::sync::Arc;
···
        // Verify signature
        commit.verify(pubkey)?;

-        // 4. Load previous MST from prev_data (all blocks should be in temp_storage)
-        let prev_mst = Mst::load(temp_storage.clone(), prev_data_cid, None);
-
        // 5. Load new MST from commit.data (claimed result)
        let expected_root = *commit.data();
-        let new_mst = Mst::load(temp_storage, expected_root, None);
+        let mut new_mst = Mst::load(temp_storage, expected_root, None);

-        // 6. Compute diff to get verified write ops (with actual prev values from tree state)
-        let diff = prev_mst.diff(&new_mst).await?;
-        let verified_ops = diff.to_verified_ops();
-
-        // 7. Apply verified ops to prev MST
-        let computed_mst = prev_mst.batch(&verified_ops).await?;
+        let verified_ops = self
+            .ops
+            .iter()
+            .filter_map(|op| op.to_invertible_op().ok())
+            .collect::<Vec<_>>();
+        if verified_ops.len() != self.ops.len() {
+            return Err(RepoError::invalid_commit(format!(
+                "Invalid commit: expected {} ops, got {}",
+                self.ops.len(),
+                verified_ops.len()
+            )));
+        }

-        // 8. Verify computed result matches claimed result
-        let computed_root = computed_mst.get_pointer().await?;
+        for op in verified_ops {
+            if let Ok(inverted) = new_mst.invert_op(op.clone()).await {
+                if !inverted {
+                    return Err(RepoError::invalid_commit(format!(
+                        "Invalid commit: op {:?} is not invertible",
+                        op
+                    )));
+                }
+            }
+        }
+        // 8. Verify computed previous state matches claimed previous state
+        let computed_root = new_mst.get_pointer().await?;

-        if computed_root != expected_root {
+        if computed_root != prev_data_cid {
            return Err(RepoError::cid_mismatch(format!(
                "MST root mismatch: expected {}, got {}",
-                expected_root, computed_root
+                prev_data_cid, computed_root
            )));
        }

        Ok(expected_root)
    }
}
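// For context: the v1.1 path above validates in reverse. Instead of replaying the ops
// onto the previous tree (the deleted v1.0 steps), it undoes each claimed op against
// the *new* tree and requires the rewound root to equal the advertised prev_data CID.
// In outline:
//
//     let mut tree = Mst::load(temp_storage, *commit.data(), None);
//     for op in verified_ops {
//         tree.invert_op(op).await?; // Create -> remove, Update -> restore prev, Delete -> re-add prev
//     }
//     assert_eq!(tree.get_pointer().await?, prev_data_cid);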
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::commit::{Commit, SigningKey as _};
+    use crate::mst::{Mst, RecordWriteOp};
+    use crate::storage::MemoryBlockStore;
+    use crate::{CommitData, Repository};
+    use jacquard_common::types::crypto::{KeyCodec, PublicKey};
+    use jacquard_common::types::recordkey::Rkey;
+    use jacquard_common::types::string::{Nsid, RecordKey};
+    use jacquard_common::types::tid::Ticker;
+    use jacquard_common::types::value::RawData;
+    use smol_str::SmolStr;
+    use std::collections::BTreeMap;
+
+    fn make_test_record(n: u32) -> BTreeMap<SmolStr, RawData<'static>> {
+        let mut record = BTreeMap::new();
+        record.insert(
+            SmolStr::new("$type"),
+            RawData::String("app.bsky.feed.post".into()),
+        );
+        record.insert(
+            SmolStr::new("text"),
+            RawData::String(format!("Test post #{}", n).into()),
+        );
+        record.insert(
+            SmolStr::new("createdAt"),
+            RawData::String("2024-01-01T00:00:00Z".to_string().into()),
+        );
+        record
+    }
+
+    async fn create_test_repo(storage: Arc<MemoryBlockStore>) -> Repository<MemoryBlockStore> {
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+
+        let mst = Mst::new(storage.clone());
+        let data = mst.persist().await.unwrap();
+
+        let rev = Ticker::new().next(None);
+        let commit = Commit::new_unsigned(did.into_static(), data, rev, None)
+            .sign(&signing_key)
+            .unwrap();
+
+        let commit_cbor = commit.to_cbor().unwrap();
+        let commit_cid = storage.put(&commit_cbor).await.unwrap();
+
+        Repository::new(storage, mst, commit.into_static(), commit_cid)
+    }
+
+    fn get_public_key(signing_key: &k256::ecdsa::SigningKey) -> PublicKey<'static> {
+        let verifying_key = signing_key.verifying_key();
+        let pubkey_bytes = verifying_key.to_encoded_point(true).as_bytes().to_vec();
+        PublicKey {
+            codec: KeyCodec::Secp256k1,
+            bytes: pubkey_bytes.into(),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_valid_v1_1_commit_roundtrip() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let rkey = RecordKey(Rkey::new("test1").unwrap());
+
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        // Create operation
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: rkey.clone(),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        // Convert to firehose commit (v1.1 includes prev_data)
+        let firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        // Validate using v1.1 validation
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        if let Err(ref e) = result {
+            eprintln!("Validation error: {}", e);
+        }
+        assert!(result.is_ok(), "Valid v1.1 commit should pass validation");
+
+        let validated_root = result.unwrap();
+        assert_eq!(
+            validated_root, commit_data.data,
+            "Validated root should match commit data root"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_valid_v1_0_commit_with_prev_storage() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let rkey = RecordKey(Rkey::new("test1").unwrap());
+
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let prev_root = *repo.current_commit().data();
+
+        // Create operation
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: rkey.clone(),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        // For v1.0, we strip prev_data
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        firehose_commit.prev_data = None;
+
+        // Validate using v1.0 validation with previous storage
+        let result = firehose_commit
+            .validate_v1_0(Some(prev_root), storage.clone(), &pubkey)
+            .await;
+
+        assert!(result.is_ok(), "Valid v1.0 commit should pass validation");
+
+        let validated_root = result.unwrap();
+        assert_eq!(
+            validated_root, commit_data.data,
+            "Validated root should match commit data root"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_multiple_operations_roundtrip() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        // First commit: create two records
+        let ops1 = vec![
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: RecordKey(Rkey::new("post1").unwrap()),
+                record: make_test_record(1),
+            },
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: RecordKey(Rkey::new("post2").unwrap()),
+                record: make_test_record(2),
+            },
+        ];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops1,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(result.is_ok(), "Multiple creates should validate");
+    }
+
+    #[tokio::test]
+    async fn test_update_and_delete_operations_roundtrip() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        // First: create records
+        let rkey1 = RecordKey(Rkey::new("post1").unwrap());
+        let rkey2 = RecordKey(Rkey::new("post2").unwrap());
+
+        let create_ops = vec![
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: rkey1.clone(),
+                record: make_test_record(1),
+            },
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: rkey2.clone(),
+                record: make_test_record(2),
+            },
+        ];
+
+        let (_, commit_data) = repo
+            .create_commit(
+                &create_ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        repo.apply_commit(commit_data).await.unwrap();
+
+        // Second: update one, delete the other
+        let update_ops = vec![
+            RecordWriteOp::Update {
+                collection: collection.clone(),
+                rkey: rkey1.clone(),
+                record: make_test_record(10),
+                prev: None,
+            },
+            RecordWriteOp::Delete {
+                collection: collection.clone(),
+                rkey: rkey2.clone(),
+                prev: None,
+            },
+        ];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &update_ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let firehose_commit = commit_data
+            .to_firehose_commit(&did, 2, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_ok(),
+            "Update and delete operations should validate"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_missing_commit_block_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        // Parse CAR and remove commit block
+        let parsed = parse_car_bytes(&firehose_commit.blocks).await.unwrap();
+        let commit_cid: IpldCid = firehose_commit.commit.to_ipld().unwrap();
+
+        let mut blocks_without_commit: BTreeMap<IpldCid, bytes::Bytes> = parsed
+            .blocks
+            .into_iter()
+            .filter(|(cid, _)| cid != &commit_cid)
+            .collect();
+
+        // Rebuild CAR without commit block
+        let bad_car = crate::car::write_car_bytes(commit_cid, blocks_without_commit)
+            .await
+            .unwrap();
+
+        firehose_commit.blocks = bad_car.into();
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail when commit block is missing"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_missing_mst_blocks_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        // Create multiple records to ensure MST has nodes
+        let ops = vec![
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: RecordKey(Rkey::new("aaa").unwrap()),
+                record: make_test_record(1),
+            },
+            RecordWriteOp::Create {
+                collection: collection.clone(),
+                rkey: RecordKey(Rkey::new("zzz").unwrap()),
+                record: make_test_record(2),
+            },
+        ];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        // Parse CAR and keep only commit block (remove MST nodes)
+        let parsed = parse_car_bytes(&firehose_commit.blocks).await.unwrap();
+        let commit_cid: IpldCid = firehose_commit.commit.to_ipld().unwrap();
+
+        let blocks_commit_only: BTreeMap<IpldCid, bytes::Bytes> = parsed
+            .blocks
+            .into_iter()
+            .filter(|(cid, _)| cid == &commit_cid)
+            .collect();
+
+        let bad_car = crate::car::write_car_bytes(commit_cid, blocks_commit_only)
+            .await
+            .unwrap();
+
+        firehose_commit.blocks = bad_car.into();
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail when MST blocks are missing"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_wrong_mst_root_in_commit_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, mut commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        // Create a fake commit with wrong data root
+        use crate::mst::util::compute_cid;
+        let wrong_root = compute_cid(&[1, 2, 3, 4]).unwrap();
+
+        let fake_commit = Commit::new_unsigned(
+            did.clone().into_static(),
+            wrong_root,
+            commit_data.rev.clone(),
+            commit_data.prev,
+        )
+        .sign(&signing_key)
+        .unwrap();
+
+        let fake_commit_cbor = fake_commit.to_cbor().unwrap();
+        let fake_commit_cid = compute_cid(&fake_commit_cbor).unwrap();
+
+        // Replace commit block in blocks
+        commit_data.blocks.remove(&commit_data.cid);
+        commit_data
+            .blocks
+            .insert(fake_commit_cid, bytes::Bytes::from(fake_commit_cbor));
+        commit_data.cid = fake_commit_cid;
+
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail when commit has wrong MST root"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_mismatched_did_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let wrong_did = Did::new("did:plc:wrong").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        // Create firehose commit with wrong DID
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        firehose_commit.repo = wrong_did;
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail with mismatched DID"
+        );
+
+        let err_msg = result.unwrap_err().to_string();
+        assert!(
+            err_msg.contains("DID mismatch"),
+            "Error should mention DID mismatch"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_invalid_signature_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+
+        // Use a different key for verification
+        let wrong_signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let wrong_pubkey = get_public_key(&wrong_signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        let result = firehose_commit.validate_v1_1(&wrong_pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail with wrong public key"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_missing_prev_data_for_v1_1_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        // Strip prev_data to make it invalid for v1.1
+        firehose_commit.prev_data = None;
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "v1.1 validation should fail without prev_data"
+        );
+
+        let err_msg = result.unwrap_err().to_string();
+        assert!(
+            err_msg.contains("prev_data"),
+            "Error should mention missing prev_data"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_wrong_prev_data_cid_fails() {
+        let storage = Arc::new(MemoryBlockStore::new());
+        let mut repo = create_test_repo(storage.clone()).await;
+
+        let collection = Nsid::new("app.bsky.feed.post").unwrap();
+        let did = Did::new("did:plc:test").unwrap();
+        let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
+        let pubkey = get_public_key(&signing_key);
+
+        let ops = vec![RecordWriteOp::Create {
+            collection: collection.clone(),
+            rkey: RecordKey(Rkey::new("test1").unwrap()),
+            record: make_test_record(1),
+        }];
+
+        let (repo_ops, commit_data) = repo
+            .create_commit(
+                &ops,
+                &did,
+                Some(repo.current_commit_cid().clone()),
+                &signing_key,
+            )
+            .await
+            .unwrap();
+
+        let mut firehose_commit = commit_data
+            .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
+            .await
+            .unwrap();
+
+        // Use wrong prev_data CID (point to commit instead of MST root)
+        firehose_commit.prev_data = Some(firehose_commit.commit.clone());
+
+        let result = firehose_commit.validate_v1_1(&pubkey).await;
+        assert!(
+            result.is_err(),
+            "Validation should fail with wrong prev_data CID"
+        );
+    }
+}
+2
crates/jacquard-repo/src/lib.rs
···
//! - Zero-copy deserialization where possible
//! - Support for both current and future sync protocol versions
//!
+//! Note: thank you very much to Rudy and Clinton; rsky was very helpful in figuring this all out.
+//!
//! # Example
//!
//! ```rust,ignore
+1-1
crates/jacquard-repo/src/mst/node.rs
···
    }
}

-impl<S: BlockStore> NodeEntry<S> {
+impl<S> NodeEntry<S> {
    /// Check if this is a tree entry
    pub fn is_tree(&self) -> bool {
        matches!(self, NodeEntry::Tree(_))
+191-27
crates/jacquard-repo/src/mst/tree.rs
···
use crate::error::{RepoError, Result};
use crate::mst::util::validate_key;
use crate::storage::BlockStore;
+use bytes::Bytes;
use cid::Cid as IpldCid;
use core::fmt;
use jacquard_common::types::recordkey::Rkey;
···
use jacquard_common::types::value::RawData;
use n0_future::try_join_all;
use smol_str::SmolStr;
+use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::future::Future;
use std::pin::Pin;
···
    ) -> Result<Self> {
        // Serialize and compute CID (don't persist yet)
        let node_data = util::serialize_node_data(&entries).await?;
-        let cbor = serde_ipld_dagcbor::to_vec(&node_data)
-            .map_err(|e| RepoError::serialization(e).with_context("serializing MST node during creation"))?;
+        let cbor = serde_ipld_dagcbor::to_vec(&node_data).map_err(|e| {
+            RepoError::serialization(e).with_context("serializing MST node during creation")
+        })?;
        let cid = util::compute_cid(&cbor)?;

        let mst = Self {
···
        })?;

        let node_data: super::node::NodeData = serde_ipld_dagcbor::from_slice(&node_bytes)
-            .map_err(|e| RepoError::serialization(e).with_context(format!("deserializing MST node from storage: {}", pointer)))?;
+            .map_err(|e| {
+                RepoError::serialization(e)
+                    .with_context(format!("deserializing MST node from storage: {}", pointer))
+            })?;

        let entries = util::deserialize_node_data(self.storage.clone(), &node_data, self.layer)?;

···
        // Now serialize and compute CID with fresh child CIDs
        let node_data = util::serialize_node_data(&entries).await?;
-        let cbor = serde_ipld_dagcbor::to_vec(&node_data)
-            .map_err(|e| RepoError::serialization(e).with_context("serializing MST node for CID computation"))?;
+        let cbor = serde_ipld_dagcbor::to_vec(&node_data).map_err(|e| {
+            RepoError::serialization(e).with_context("serializing MST node for CID computation")
+        })?;
        let cid = util::compute_cid(&cbor)?;

        // Update pointer and mark as fresh
···
            Ok(None)
        })
+    }
+
+    /// Add a key-value pair, mutating the current tree
+    pub async fn add_mut<'a>(&'a mut self, key: &'a str, cid: IpldCid) -> Result<()> {
+        *self = self.add(key, cid).await?;
+        Ok(())
    }

    /// Add a key-value pair (returns new tree)
···
        })
    }

+    /// Invert an update, returning the previous CID
+    pub async fn check_update(&mut self, key: &str, cid: IpldCid) -> Result<IpldCid> {
+        validate_key(key)?;
+
+        // Check key exists
+        let Ok(Some(prev)) = self.get(key).await else {
+            return Err(RepoError::not_found("key", key));
+        };
+
+        if prev == cid {
+            return Ok(prev);
+        }
+
+        // Update is just add (which replaces)
+        *self = self.add(key, cid).await?;
+        Ok(prev)
+    }
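    // Usage sketch (annotation, not part of the patch): check_update swaps in the new
    // value and hands back the CID it replaced, which is what invert_op relies on to
    // rewind an Update:
    //
    //     let old = tree.check_update("app.bsky.feed.post/post1", new_cid).await?;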
+
    /// Delete a key (returns new tree)
    pub fn delete<'a>(
        &'a self,
···
        })
    }

+    /// Delete a key in place, returning the CID of the deleted value
+    ///
+    /// Used to invert tree operations for verification
+    pub fn delete_cid<'a>(
+        &'a mut self,
+        key: &'a str,
+    ) -> Pin<Box<dyn Future<Output = Result<IpldCid>> + Send + 'a>> {
+        Box::pin(async move {
+            let cid = self
+                .get(key)
+                .await?
+                .ok_or(RepoError::not_found("cid for key", key))?;
+            *self = self.delete(key).await?;
+            Ok(cid)
+        })
+    }
+
    /// Recursively delete a key
    fn delete_recurse<'a>(
        &'a self,
···
        self.new_tree(entries).await
    }

+    /// Invert a tree operation in place for validation
+    pub async fn invert_op(&mut self, op: VerifiedWriteOp) -> Result<bool> {
+        //println!("tree before op inversion:\n{}", self);
+        match op {
+            VerifiedWriteOp::Create { key, cid: expected } => {
+                let Ok(found) = self.delete_cid(&key).await else {
+                    //println!("tree at failure:\n{}", self);
+                    return Ok(false);
+                };
+                if found == expected {
+                    Ok(true)
+                } else {
+                    //println!("tree at failure:\n{}", self);
+                    Ok(false)
+                }
+            }
+            VerifiedWriteOp::Update {
+                key,
+                cid: expected,
+                prev,
+            } => {
+                let Ok(found) = self.check_update(&key, prev).await else {
+                    //println!("tree at failure:\n{}", self);
+                    return Ok(false);
+                };
+                if found == expected {
+                    Ok(true)
+                } else {
+                    //println!("tree at failure:\n{}", self);
+                    Ok(false)
+                }
+            }
+            VerifiedWriteOp::Delete { key, prev } => {
+                if let Ok(Some(_)) = self.get(&key).await {
+                    Ok(false)
+                } else {
+                    self.add_mut(&key, prev).await?;
+                    Ok(true)
+                }
+            }
+        }
+    }
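    // Usage sketch (annotation): rewinding a single claimed Create against the new
    // tree. A `false` return means the tree does not contain what the op claimed, and
    // the caller (validate_v1_1) rejects the commit:
    //
    //     let ok = new_mst
    //         .invert_op(VerifiedWriteOp::Create { key, cid: claimed_cid })
    //         .await?;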
+
    /// Apply batch of verified write operations (returns new tree)
    ///
    /// More efficient than individual operations as it only rebuilds
···
        // Serialize this node
        let entries = self.get_entries().await?;
        let node_data = util::serialize_node_data(&entries).await?;
-        let cbor = serde_ipld_dagcbor::to_vec(&node_data)
-            .map_err(|e| RepoError::serialization(e).with_context("serializing MST node for block collection"))?;
+        let cbor = serde_ipld_dagcbor::to_vec(&node_data).map_err(|e| {
+            RepoError::serialization(e)
+                .with_context("serializing MST node for block collection")
+        })?;
        blocks.insert(pointer, Bytes::from(cbor));

        // Recursively collect from subtrees
···
            let mut cids = vec![self.get_pointer().await?];
            let entries = self.get_entries().await?;
-            let index = Self::find_gt_or_equal_leaf_index_in(&entries, key);
+            let index = Self::find_gt_or_equal_leaf_index_in(&entries, key) as isize;

-            // Check if we found exact match at this level
-            if index < entries.len() {
-                if let NodeEntry::Leaf {
-                    key: leaf_key,
-                    value,
-                } = &entries[index]
-                {
-                    if leaf_key.as_str() == key {
-                        cids.push(*value);
-                        return Ok(cids);
+            let found = self.at_index(index).await?;
+
+            if let Some(NodeEntry::Leaf {
+                key: leaf_key,
+                value,
+            }) = found
+            {
+                if leaf_key.as_str() == key {
+                    cids.push(value);
+                    return Ok(cids);
+                }
+            }
+
+            // Not found at this level - check subtree before this index
+            if let Some(NodeEntry::Tree(subtree)) = self.at_index(index - 1).await? {
+                let mut subtree_cids = subtree.cids_for_path(key).await?;
+                cids.append(&mut subtree_cids);
+                return Ok(cids);
+            }
+
+            // Key not found in tree
+            Ok(cids)
+        })
+    }
+
+    /// Serialize the tree's root node, returning its CID and the encoded bytes
+    pub fn serialize_tree<'a>(
+        &'a self,
+    ) -> Pin<Box<dyn Future<Output = Result<(IpldCid, Bytes)>> + Send + 'a>> {
+        Box::pin(async move {
+            let mut entries = self.get_entries().await?;
+            let mut outdated: Vec<Self> = Vec::new();
+            for entry in &entries {
+                if let NodeEntry::Tree(mst) = entry {
+                    let is_outdated = *mst.outdated_pointer.read().await;
+                    if is_outdated {
+                        outdated.push(mst.clone());
                    }
                }
            }

-            // Not found at this level - check subtree before this index
-            if index > 0 {
-                if let NodeEntry::Tree(subtree) = &entries[index - 1] {
-                    let mut subtree_cids = subtree.cids_for_path(key).await?;
-                    cids.append(&mut subtree_cids);
-                    return Ok(cids);
+            if !outdated.is_empty() {
+                for outdated_entry in &outdated {
+                    let _ = outdated_entry.get_pointer().await?;
+                }
+                entries = self.get_entries().await?
+            }
+            let data = util::serialize_node_data(entries.as_slice()).await?;
+            let bytes = serde_ipld_dagcbor::to_vec(&data).map_err(|e| RepoError::car(e))?;
+            let cid = util::compute_cid(&bytes)?;
+
+            Ok((cid, Bytes::from_owner(bytes)))
+        })
+    }
+
+    /// Find the node at the given index if any
+    pub async fn at_index(&self, index: isize) -> Result<Option<NodeEntry<S>>> {
+        let entries = self.get_entries().await?;
+        if index < 0 || index as usize >= entries.len() {
+            return Ok(None);
+        }
+        Ok(entries
+            .into_iter()
+            .nth(index as usize)
+            .map(|entry| entry.clone()))
+    }
+
+    /// Add any relevant blocks along the path to the given key to the map
+    pub fn blocks_for_path<'a>(
+        &'a self,
+        key: &'a str,
+        blocks: &'a mut BTreeMap<IpldCid, bytes::Bytes>,
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'a>> {
+        Box::pin(async move {
+            validate_key(key)?;
+            let (cid, bytes) = self.serialize_tree().await?;
+            blocks.insert(cid, bytes);
+
+            let entries = self.get_entries().await?;
+            let index = Self::find_gt_or_equal_leaf_index_in(&entries, key) as isize;
+            let found = self.at_index(index).await?;
+
+            if let Some(NodeEntry::Leaf { key: leaf_key, .. }) = found {
+                if leaf_key.as_str() == key {
+                    return Ok(());
                }
            }
+            if let Some(NodeEntry::Tree(subtree)) = self.at_index(index - 1).await? {
+                subtree.blocks_for_path(key, blocks).await?;
+                return Ok(());
+            }

            // Key not found in tree
-            Ok(cids)
+            Ok(())
        })
    }
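    // blocks_for_path is what lets create_commit ship inclusion/exclusion proofs:
    // serializing every node on the search path for a key, in both the old and the
    // new tree, gives a consumer enough blocks to run the inversion check without
    // the full repo. Sketch:
    //
    //     let mut proof = BTreeMap::new();
    //     new_tree.blocks_for_path("app.bsky.feed.post/post1", &mut proof).await?;
    //     old_tree.blocks_for_path("app.bsky.feed.post/post1", &mut proof).await?;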

···
        // Serialize this node
        let entries = tree.get_entries().await?;
        let node_data = util::serialize_node_data(&entries).await?;
-        let cbor = serde_ipld_dagcbor::to_vec(&node_data)
-            .map_err(|e| RepoError::serialization(e).with_context("serializing MST node for parallel block collection"))?;
+        let cbor = serde_ipld_dagcbor::to_vec(&node_data).map_err(|e| {
+            RepoError::serialization(e)
+                .with_context("serializing MST node for parallel block collection")
+        })?;
        blocks.insert(pointer, Bytes::from(cbor));

        // Spawn tasks for each subtree
+186-38
crates/jacquard-repo/src/repo.rs
···
use crate::error::{RepoError, Result};
use crate::mst::{Mst, MstDiff, RecordWriteOp};
use crate::storage::BlockStore;
+use bytes::Bytes;
use cid::Cid as IpldCid;
use jacquard_common::IntoStatic;
use jacquard_common::types::cid::CidLink;
···
use jacquard_common::types::tid::Ticker;
use smol_str::format_smolstr;
use std::collections::BTreeMap;
+use std::fmt::{self, Display, Formatter};
use std::path::Path;
use std::sync::Arc;

···
        ops: Vec<RepoOp<'static>>,
        blobs: Vec<CidLink<'static>>,
    ) -> Result<FirehoseCommit<'static>> {
+        let mut proof_blocks = self.blocks.clone();
+        proof_blocks.append(&mut self.relevant_blocks.clone());
        // Convert relevant blocks to CAR format
-        let blocks_car =
-            crate::car::write_car_bytes(self.cid, self.relevant_blocks.clone()).await?;
+        let blocks_car = crate::car::write_car_bytes(self.cid, proof_blocks).await?;

        Ok(FirehoseCommit {
            repo: repo.clone().into_static(),
···
        })
    }

+    /// Format an initial commit for a new repository
+    ///
+    /// Creates an empty MST, optionally applies initial record writes, signs the commit,
+    /// and returns CommitData ready to apply to storage.
+    ///
+    /// This does NOT persist to storage - use `create_from_commit` or `create` for that.
+    pub async fn format_init_commit<K>(
+        storage: Arc<S>,
+        did: Did<'static>,
+        signing_key: &K,
+        initial_writes: Option<&[RecordWriteOp<'_>]>,
+    ) -> Result<CommitData>
+    where
+        K: SigningKey,
+    {
+        let mut mst = Mst::new(storage.clone());
+        let mut blocks = BTreeMap::new();
+
+        // Apply initial writes if provided
+        if let Some(ops) = initial_writes {
+            for op in ops {
+                let key = format_smolstr!("{}/{}", op.collection().as_ref(), op.rkey().as_ref());
+
+                match op {
+                    RecordWriteOp::Create { record, .. } => {
+                        // Serialize and store record
+                        let cbor = serde_ipld_dagcbor::to_vec(record)
+                            .map_err(|e| RepoError::serialization(e))?;
+                        let cid = storage.put(&cbor).await?;
+                        blocks.insert(cid, bytes::Bytes::from(cbor));
+
+                        mst = mst.add(key.as_str(), cid).await?;
+                    }
+                    RecordWriteOp::Update { .. } | RecordWriteOp::Delete { .. } => {
+                        return Err(RepoError::invalid_commit(
+                            "Initial commit can only contain creates",
+                        ));
+                    }
+                }
+            }
+        }
+
+        // Persist MST and collect blocks
+        let data = mst.persist().await?;
+        let diff = Mst::new(storage.clone()).diff(&mst).await?;
+        blocks.extend(diff.new_mst_blocks);
+
+        // Create and sign initial commit
+        let rev = Ticker::new().next(None);
+        let commit = Commit::new_unsigned(did, data, rev.clone(), None).sign(signing_key)?;
+
+        let commit_cbor = commit.to_cbor()?;
+        let commit_cid = crate::mst::util::compute_cid(&commit_cbor)?;
+        let commit_bytes = bytes::Bytes::from(commit_cbor);
+
+        blocks.insert(commit_cid, commit_bytes.clone());
+
+        Ok(CommitData {
+            cid: commit_cid,
+            rev,
+            since: None,
+            prev: None,
+            data,
+            prev_data: None,
+            blocks: blocks.clone(),
+            relevant_blocks: blocks,
+            deleted_cids: Vec::new(),
+        })
+    }
+
+    /// Create repository from CommitData
+    ///
+    /// Applies the commit to storage and loads the repository from it.
+    pub async fn create_from_commit(storage: Arc<S>, commit_data: CommitData) -> Result<Self> {
+        let commit_cid = commit_data.cid;
+        storage.apply_commit(commit_data).await?;
+        Self::from_commit(storage, &commit_cid).await
+    }
+
+    /// Create a new repository
+    ///
+    /// Convenience method that formats an initial commit and applies it to storage.
+    pub async fn create<K>(
+        storage: Arc<S>,
+        did: Did<'static>,
+        signing_key: &K,
+        initial_writes: Option<&[RecordWriteOp<'_>]>,
+    ) -> Result<Self>
+    where
+        K: SigningKey,
+    {
+        let commit =
+            Self::format_init_commit(storage.clone(), did, signing_key, initial_writes).await?;
+        Self::create_from_commit(storage, commit).await
+    }
264264+166265 /// Get a record by collection and rkey
167266 pub async fn get_record<T: RecordKeyType>(
168267 &self,
···268367 let key = format_smolstr!("{}/{}", collection.as_ref(), rkey.as_ref());
269368270369 // Serialize record to DAG-CBOR
271271- let cbor = serde_ipld_dagcbor::to_vec(record)
272272- .map_err(|e| RepoError::serialization(e).with_context(format!("serializing record data for {}/{}", collection.as_ref(), rkey.as_ref())))?;
370370+ let cbor = serde_ipld_dagcbor::to_vec(record).map_err(|e| {
371371+ RepoError::serialization(e).with_context(format!(
372372+ "serializing record data for {}/{}",
373373+ collection.as_ref(),
374374+ rkey.as_ref()
375375+ ))
376376+ })?;
273377274378 // Compute CID and store data
275379 let cid = self.storage.put(&cbor).await?;
···285389 let key = format_smolstr!("{}/{}", collection.as_ref(), rkey.as_ref());
286390287391 // Serialize record to DAG-CBOR
288288- let cbor = serde_ipld_dagcbor::to_vec(record)
289289- .map_err(|e| RepoError::serialization(e).with_context(format!("serializing record data for {}/{}", collection.as_ref(), rkey.as_ref())))?;
392392+ let cbor = serde_ipld_dagcbor::to_vec(record).map_err(|e| {
393393+ RepoError::serialization(e).with_context(format!(
394394+ "serializing record data for {}/{}",
395395+ collection.as_ref(),
396396+ rkey.as_ref()
397397+ ))
398398+ })?;
290399291400 // Compute CID and store data
292401 let cid = self.storage.put(&cbor).await?;
···335444 // Compute diff before updating
336445 let diff = self.mst.diff(&updated_tree).await?;
337446447447+ println!("Repo before:\n{}", self);
338448 // Update mst
339449 self.mst = updated_tree;
340450451451+ println!("Repo after:\n{}", self);
341452 Ok(diff)
342453 }
343454···360471 where
361472 K: SigningKey,
362473 {
363363- // Step 1: Apply all write operations to build new MST
474474+ // Step 1: Apply all write operations to build new MST and collect leaf blocks
364475 let mut updated_tree = self.mst.clone();
476476+ let mut leaf_blocks = BTreeMap::new();
365477366478 for op in ops {
367479 updated_tree = match op {
···373485 let key = format_smolstr!("{}/{}", collection.as_ref(), rkey.as_ref());
374486375487 // Serialize record to DAG-CBOR
376376- let cbor = serde_ipld_dagcbor::to_vec(record)
377377- .map_err(|e| RepoError::serialization(e).with_context(format!("serializing record data for {}/{}", collection.as_ref(), rkey.as_ref())))?;
488488+ let cbor = serde_ipld_dagcbor::to_vec(record).map_err(|e| {
489489+ RepoError::serialization(e).with_context(format!(
490490+ "serializing record data for {}/{}",
491491+ collection.as_ref(),
492492+ rkey.as_ref()
493493+ ))
494494+ })?;
378495379496 // Compute CID and store data
380497 let cid = self.storage.put(&cbor).await?;
498498+ leaf_blocks.insert(cid.clone(), Bytes::from(cbor));
381499382500 updated_tree.add(key.as_str(), cid).await?
383501 }
···390508 let key = format_smolstr!("{}/{}", collection.as_ref(), rkey.as_ref());
391509392510 // Serialize record to DAG-CBOR
393393- let cbor = serde_ipld_dagcbor::to_vec(record)
394394- .map_err(|e| RepoError::serialization(e).with_context(format!("serializing record data for {}/{}", collection.as_ref(), rkey.as_ref())))?;
511511+ let cbor = serde_ipld_dagcbor::to_vec(record).map_err(|e| {
512512+ RepoError::serialization(e).with_context(format!(
513513+ "serializing record data for {}/{}",
514514+ collection.as_ref(),
515515+ rkey.as_ref()
516516+ ))
517517+ })?;
395518396519 // Compute CID and store data
397520 let cid = self.storage.put(&cbor).await?;
···405528 )));
406529 }
407530 }
531531+532532+ leaf_blocks.insert(cid.clone(), Bytes::from(cbor));
408533409534 updated_tree.add(key.as_str(), cid).await?
410535 }
···414539 prev,
415540 } => {
416541 let key = format_smolstr!("{}/{}", collection.as_ref(), rkey.as_ref());
417417-418418- // Check exists
419419- let current = self
420420- .mst
421421- .get(key.as_str())
422422- .await?
423423- .ok_or_else(|| RepoError::not_found("record", key.as_str()))?;
424542425543 // Validate prev if provided
426544 if let Some(prev_cid) = prev {
545545+ // Check exists
546546+ let current = self
547547+ .mst
548548+ .get(key.as_str())
549549+ .await?
550550+ .ok_or_else(|| RepoError::not_found("record", key.as_str()))?;
427551 if ¤t != prev_cid {
428552 return Err(RepoError::cid_mismatch(format!(
429553 "Delete prev CID mismatch for key {}: expected {}, got {}",
···442566 let prev_data = *self.commit.data();
443567 let diff = self.mst.diff(&updated_tree).await?;
444568445445- // Step 3: Extract everything we need from diff before moving it
446446- let new_leaf_blocks = diff.fetch_new_blocks(self.storage.as_ref()).await?;
569569+ // Step 3: Extract everything we need from diff
447570 let repo_ops = diff
448571 .to_repo_ops()
449572 .into_iter()
···451574 .collect();
452575 let deleted_cids = diff.removed_cids;
453576454454- // Step 4: Use diff.new_mst_blocks instead of collect_blocks()
577577+ // Step 4: Build blocks and relevant_blocks collections
455578 let mut blocks = diff.new_mst_blocks;
579579+ let mut relevant_blocks = BTreeMap::new();
456580457457- // Step 5: Build relevant_blocks by walking paths for ORIGINAL operations
458458- let mut relevant_blocks = BTreeMap::new();
581581+ // Add the previous MST root block (needed to load prev_data in validation)
582582+ if let Some(prev_root_block) = self.storage.get(&prev_data).await? {
583583+ relevant_blocks.insert(prev_data, prev_root_block);
584584+ }
585585+586586+ // Walk paths in both old and new trees for each operation
459587 for op in ops {
460588 let key = format_smolstr!("{}/{}", op.collection().as_ref(), op.rkey().as_ref());
461461- let path_cids = updated_tree.cids_for_path(key.as_str()).await?;
462589463463- for path_cid in path_cids {
464464- if let Some(block) = blocks.get(&path_cid) {
465465- relevant_blocks.insert(path_cid, block.clone());
466466- } else if let Some(block) = self.storage.get(&path_cid).await? {
467467- relevant_blocks.insert(path_cid, block);
468468- }
469469- }
590590+ updated_tree
591591+ .blocks_for_path(&key, &mut relevant_blocks)
592592+ .await?;
593593+594594+ self.mst.blocks_for_path(&key, &mut relevant_blocks).await?;
470595 }
471596472472- // Step 6: Add new leaf blocks (record data) to both collections
473473- for (cid, block) in new_leaf_blocks {
474474- blocks.insert(cid, block.clone());
475475- relevant_blocks.insert(cid, block);
597597+ // Add new leaf blocks to both collections (single iteration)
598598+ for (cid, block) in &leaf_blocks {
599599+ if diff.new_leaf_cids.contains(cid) {
600600+ blocks.insert(*cid, block.clone());
601601+ relevant_blocks.insert(*cid, block.clone());
602602+ }
476603 }
477604478478- // Step 7: Create and sign commit
605605+ // Step 6: Create and sign commit
479606 let rev = Ticker::new().next(Some(self.commit.rev.clone()));
480607 let commit = Commit::new_unsigned(did.clone().into_static(), data, rev.clone(), prev)
481608 .sign(signing_key)?;
···484611 let commit_cid = crate::mst::util::compute_cid(&commit_cbor)?;
485612 let commit_bytes = bytes::Bytes::from(commit_cbor);
486613487487- // Step 8: Add commit block to both collections
614614+ // Step 7: Add commit block to both collections
488615 blocks.insert(commit_cid, commit_bytes.clone());
489616 relevant_blocks.insert(commit_cid, commit_bytes);
490617491491- // Step 9: Update internal MST state
618618+ // Step 8: Update internal MST state
492619 self.mst = updated_tree;
493620494621 Ok((
···583710 /// Get the DID from the current commit
584711 pub fn did(&self) -> &Did<'_> {
585712 self.commit.did()
713713+ }
714714+}
715715+716716+impl<S: BlockStore> Display for Repository<S> {
717717+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
718718+ use crate::mst::tree::short_cid;
719719+720720+ writeln!(f, "Repository {{")?;
721721+ writeln!(f, " DID: {}", self.commit.did())?;
722722+ writeln!(f, " Commit: {}", short_cid(&self.commit_cid))?;
723723+ writeln!(f, " Rev: {}", self.commit.rev)?;
724724+ writeln!(f, " Data: {}", short_cid(self.commit.data()))?;
725725+ writeln!(f, " MST:")?;
726726+727727+ // Format MST with indentation
728728+ let mst_display = format!("{}", self.mst);
729729+ for line in mst_display.lines() {
730730+ writeln!(f, " {}", line)?;
731731+ }
732732+733733+ write!(f, "}}")
586734 }
587735}
588736
+451
crates/jacquard-repo/tests/firehose_stress.rs
···11+//! Stress tests for firehose commit validation
22+//!
33+//! Generates thousands of random operations to catch edge cases in v1.1 validation.
44+55+use jacquard_common::IntoStatic;
66+use jacquard_common::types::crypto::{KeyCodec, PublicKey};
77+use jacquard_common::types::recordkey::Rkey;
88+use jacquard_common::types::string::{Datetime, Did, Nsid, RecordKey};
99+use jacquard_common::types::tid::Ticker;
1010+use jacquard_common::types::value::RawData;
1111+use jacquard_repo::Repository;
1212+use jacquard_repo::car::read_car_header;
1313+use jacquard_repo::mst::RecordWriteOp;
1414+use jacquard_repo::storage::{BlockStore, MemoryBlockStore};
1515+use rand::Rng;
1616+use rand::seq::SliceRandom;
1717+use smol_str::SmolStr;
1818+use std::collections::{BTreeMap, HashMap};
1919+use std::sync::Arc;
2020+2121+// Test configuration
2222+const INITIAL_RECORDS: usize = 50;
2323+const STRESS_OPERATIONS: usize = 100;
2424+const BATCH_SIZE_RANGE: (usize, usize) = (1, 10);
2525+2626+fn make_test_record(n: u32, text: &str) -> BTreeMap<SmolStr, RawData<'static>> {
2727+ let mut record = BTreeMap::new();
2828+ record.insert(
2929+ SmolStr::new("$type"),
3030+ RawData::String("app.bsky.feed.post".into()),
3131+ );
3232+ record.insert(
3333+ SmolStr::new("text"),
3434+ RawData::String(format!("{} #{}", text, n).into()),
3535+ );
3636+ record.insert(
3737+ SmolStr::new("createdAt"),
3838+ RawData::String("2024-01-01T00:00:00Z".to_string().into()),
3939+ );
4040+ record
4141+}
4242+4343+fn get_public_key(signing_key: &k256::ecdsa::SigningKey) -> PublicKey<'static> {
4444+ let verifying_key = signing_key.verifying_key();
4545+ let pubkey_bytes = verifying_key.to_encoded_point(true).as_bytes().to_vec();
4646+ PublicKey {
4747+ codec: KeyCodec::Secp256k1,
4848+ bytes: pubkey_bytes.into(),
4949+ }
5050+}
5151+5252+async fn create_test_repo(storage: Arc<MemoryBlockStore>) -> Repository<MemoryBlockStore> {
5353+ let did = Did::new("did:plc:stresstest").unwrap();
5454+ let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
5555+5656+ Repository::create(storage, did.into_static(), &signing_key, None)
5757+ .await
5858+ .unwrap()
5959+}
6060+6161+/// Track existing records for generating realistic updates/deletes
6262+struct RecordTracker {
6363+ records: HashMap<String, u32>,
6464+ ticker: Ticker,
6565+}
6666+6767+impl RecordTracker {
6868+ fn new() -> Self {
6969+ Self {
7070+ records: HashMap::new(),
7171+ ticker: Ticker::new(),
7272+ }
7373+ }
7474+7575+ fn gen_new_rkey(&mut self) -> String {
7676+ self.ticker.next(None).into_static().to_string()
7777+ }
7878+7979+ fn pick_random_existing<R: Rng>(&self, rng: &mut R) -> Option<String> {
8080+ let keys: Vec<_> = self.records.keys().cloned().collect();
8181+ keys.choose(rng).cloned()
8282+ }
8383+8484+ fn add(&mut self, rkey: String, counter: u32) {
8585+ self.records.insert(rkey, counter);
8686+ }
8787+8888+ fn remove(&mut self, rkey: &str) {
8989+ self.records.remove(rkey);
9090+ }
9191+9292+ fn len(&self) -> usize {
9393+ self.records.len()
9494+ }
9595+}
9696+9797+#[derive(Debug, Clone)]
9898+enum TestOp {
9999+ Create { rkey: String, counter: u32 },
100100+ Update { rkey: String, counter: u32 },
101101+ Delete { rkey: String },
102102+}
103103+104104+fn generate_creates_only<R: Rng>(
105105+ rng: &mut R,
106106+ tracker: &mut RecordTracker,
107107+ count: usize,
108108+) -> Vec<TestOp> {
109109+ let mut ops = Vec::new();
110110+ for _ in 0..count {
111111+ let rkey = tracker.gen_new_rkey();
112112+ let counter: u32 = rng.r#gen();
113113+ tracker.add(rkey.clone(), counter);
114114+ ops.push(TestOp::Create { rkey, counter });
115115+ }
116116+ ops
117117+}
118118+119119+fn generate_random_ops<R: Rng>(
120120+ rng: &mut R,
121121+ tracker: &mut RecordTracker,
122122+ count: usize,
123123+) -> Vec<TestOp> {
124124+ let mut ops = Vec::new();
125125+126126+ for _ in 0..count {
127127+ // Weighted random choice: 50% create, 30% update, 20% delete
128128+ let action = rng.gen_range(0..100);
129129+130130+ let op = if action < 50 || tracker.len() == 0 {
131131+ // Create
132132+ let rkey = tracker.gen_new_rkey();
133133+ let counter: u32 = rng.r#gen();
134134+ tracker.add(rkey.clone(), counter);
135135+ TestOp::Create { rkey, counter }
136136+ } else if action < 80 {
137137+ // Update
138138+ if let Some(rkey) = tracker.pick_random_existing(rng) {
139139+ let counter: u32 = rng.r#gen();
140140+ tracker.add(rkey.clone(), counter);
141141+ TestOp::Update { rkey, counter }
142142+ } else {
143143+ // Fall back to create if no records exist
144144+ let rkey = tracker.gen_new_rkey();
145145+ let counter: u32 = rng.r#gen();
146146+ tracker.add(rkey.clone(), counter);
147147+ TestOp::Create { rkey, counter }
148148+ }
149149+ } else {
150150+ // Delete
151151+ if let Some(rkey) = tracker.pick_random_existing(rng) {
152152+ tracker.remove(&rkey);
153153+ TestOp::Delete { rkey }
154154+ } else {
155155+ // Fall back to create if no records exist
156156+ let rkey = tracker.gen_new_rkey();
157157+ let counter: u32 = rng.r#gen();
158158+ tracker.add(rkey.clone(), counter);
159159+ TestOp::Create { rkey, counter }
160160+ }
161161+ };
162162+163163+ ops.push(op);
164164+ }
165165+166166+ ops
167167+}
168168+169169+fn test_ops_to_record_writes(ops: Vec<TestOp>, collection: &Nsid) -> Vec<RecordWriteOp<'static>> {
170170+ let collection_static = collection.clone().into_static();
171171+ ops.into_iter()
172172+ .map(|op| match op {
173173+ TestOp::Create { rkey, counter } => RecordWriteOp::Create {
174174+ collection: collection_static.clone(),
175175+ rkey: RecordKey(Rkey::new(&rkey).unwrap()).into_static(),
176176+ record: make_test_record(counter, "Random post"),
177177+ },
178178+ TestOp::Update { rkey, counter } => RecordWriteOp::Update {
179179+ collection: collection_static.clone(),
180180+ rkey: RecordKey(Rkey::new(&rkey).unwrap()).into_static(),
181181+ record: make_test_record(counter, "Updated post"),
182182+ prev: None,
183183+ },
184184+ TestOp::Delete { rkey } => RecordWriteOp::Delete {
185185+ collection: collection_static.clone(),
186186+ rkey: RecordKey(Rkey::new(&rkey).unwrap()).into_static(),
187187+ prev: None,
188188+ },
189189+ })
190190+ .collect()
191191+}
192192+193193+#[tokio::test]
194194+async fn test_stress_random_operations() {
195195+ let storage = Arc::new(MemoryBlockStore::new());
196196+ let mut repo = create_test_repo(storage.clone()).await;
197197+198198+ let collection = Nsid::new("app.bsky.feed.post").unwrap();
199199+ let did = Did::new("did:plc:stresstest").unwrap();
200200+ let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
201201+ let pubkey = get_public_key(&signing_key);
202202+203203+ let mut rng = rand::thread_rng();
204204+ let mut tracker = RecordTracker::new();
205205+206206+ // Step 1: Create initial batch of records
207207+ println!("Creating {} initial records...", INITIAL_RECORDS);
208208+ println!("Repo before initial commit:\n{}", repo);
209209+210210+ let initial_ops = generate_creates_only(&mut rng, &mut tracker, INITIAL_RECORDS);
211211+ let record_writes = test_ops_to_record_writes(initial_ops, &collection);
212212+213213+ let (repo_ops, commit_data) = repo
214214+ .create_commit(&record_writes, &did, None, &signing_key)
215215+ .await
216216+ .unwrap();
217217+218218+ repo.apply_commit(commit_data.clone()).await.unwrap();
219219+ println!("Repo after initial commit:\n{}", repo);
220220+221221+ // Validate initial commit
222222+ let firehose_commit = commit_data
223223+ .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
224224+ .await
225225+ .unwrap();
226226+227227+ firehose_commit
228228+ .validate_v1_1(&pubkey)
229229+ .await
230230+ .expect("Initial batch should validate");
231231+232232+ println!(
233233+ "Initial repo created with {} records",
234234+ tracker.records.len()
235235+ );
236236+237237+ // Step 2: Generate and apply random operations in batches
238238+ let mut commit_count = 1;
239239+ let mut total_ops = 0;
240240+241241+ while total_ops < STRESS_OPERATIONS {
242242+ let batch_size = rng.gen_range(BATCH_SIZE_RANGE.0..=BATCH_SIZE_RANGE.1);
243243+ let remaining = STRESS_OPERATIONS - total_ops;
244244+ let ops_count = batch_size.min(remaining);
245245+246246+ let ops = generate_random_ops(&mut rng, &mut tracker, ops_count);
247247+ let record_writes = test_ops_to_record_writes(ops, &collection);
248248+249249+ let (repo_ops, commit_data) = repo
250250+ .create_commit(&record_writes, &did, None, &signing_key)
251251+ .await
252252+ .unwrap();
253253+254254+ repo.apply_commit(commit_data.clone()).await.unwrap();
255255+256256+ // Validate firehose commit
257257+ commit_count += 1;
258258+ let firehose_commit = commit_data
259259+ .to_firehose_commit(
260260+ &did,
261261+ commit_count,
262262+ Datetime::now(),
263263+ repo_ops.clone(),
264264+ vec![],
265265+ )
266266+ .await
267267+ .unwrap();
268268+269269+ firehose_commit
270270+ .validate_v1_1(&pubkey)
271271+ .await
272272+ .unwrap_or_else(|e| {
273273+ eprintln!(
274274+ "Validation failed at commit {} (batch size {})",
275275+ commit_count, ops_count
276276+ );
277277+ eprintln!("Error: {}", e);
278278+ eprintln!("Operations:\n{:?}", repo_ops);
279279+ eprintln!("Relevant blocks:\n{:?}", commit_data.relevant_blocks.keys());
280280+ eprintln!("All blocks:\n{:?}", commit_data.blocks.keys());
281281+ panic!(
282282+ "Validation failed at commit {} (batch size {}): {}",
283283+ commit_count, ops_count, e
284284+ )
285285+ });
286286+287287+ total_ops += ops_count;
288288+289289+ if commit_count % 50 == 0 {
290290+ println!(
291291+ "Processed {} commits, {} total operations, {} records in repo",
292292+ commit_count,
293293+ total_ops,
294294+ tracker.records.len()
295295+ );
296296+ }
297297+ }
298298+299299+ println!(
300300+ "Stress test complete: {} commits, {} operations, {} final records",
301301+ commit_count,
302302+ total_ops,
303303+ tracker.records.len()
304304+ );
305305+}
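/// Runs 2000 consecutive commits with batch sizes drawn from 1..=20,
/// validating each firehose commit with sync v1.1.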
306306+307307+#[tokio::test]
308308+async fn test_stress_large_batches() {
309309+ let storage = Arc::new(MemoryBlockStore::new());
310310+ let mut repo = create_test_repo(storage.clone()).await;
311311+312312+ let collection = Nsid::new("app.bsky.feed.post").unwrap();
313313+ let did = Did::new("did:plc:stresstest").unwrap();
314314+ let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
315315+ let pubkey = get_public_key(&signing_key);
316316+317317+ let mut rng = rand::thread_rng();
318318+ let mut tracker = RecordTracker::new();
319319+320320+ // Create initial records
321321+ let initial_ops = generate_creates_only(&mut rng, &mut tracker, 100);
322322+ let record_writes = test_ops_to_record_writes(initial_ops, &collection);
323323+ let (repo_ops, commit_data) = repo
324324+ .create_commit(
325325+ &record_writes,
326326+ &did,
327327+ Some(repo.current_commit_cid().clone()),
328328+ &signing_key,
329329+ )
330330+ .await
331331+ .unwrap();
332332+ repo.apply_commit(commit_data.clone()).await.unwrap();
333333+334334+ let firehose_commit = commit_data
335335+ .to_firehose_commit(&did, 1, Datetime::now(), repo_ops, vec![])
336336+ .await
337337+ .unwrap();
338338+339339+ firehose_commit.validate_v1_1(&pubkey).await.unwrap();
340340+341341+ for batch_num in 1..=2000 {
342342+ let batch_size = rng.gen_range(1..=20);
343343+ let ops = generate_random_ops(&mut rng, &mut tracker, batch_size);
344344+ let record_writes = test_ops_to_record_writes(ops, &collection);
345345+346346+ let (repo_ops, commit_data) = repo
347347+ .create_commit(&record_writes, &did, None, &signing_key)
348348+ .await
349349+ .unwrap();
350350+351351+ repo.apply_commit(commit_data.clone()).await.unwrap();
352352+353353+ let firehose_commit = commit_data
354354+ .to_firehose_commit(&did, batch_num + 1, Datetime::now(), repo_ops, vec![])
355355+ .await
356356+ .unwrap();
357357+358358+ firehose_commit
359359+ .validate_v1_1(&pubkey)
360360+ .await
361361+ .unwrap_or_else(|e| {
362362+ panic!(
363363+ "Large batch validation failed (batch size {}): {}",
364364+ batch_size, e
365365+ )
366366+ });
367367+368368+ repo.apply_commit(commit_data).await.unwrap();
369369+ // println!(
370370+ // "Validated large batch {} with {} ops",
371371+ // batch_num, batch_size
372372+ // );
373373+ }
374374+}
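/// Loads a real repository from a CAR fixture on disk, then appends 20
/// batches of random operations and validates each firehose commit.
/// Skips silently if the fixture file is not present.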
375375+376376+#[tokio::test]
377377+async fn test_stress_with_fixture() {
378378+ use jacquard_repo::car::read_car;
379379+ use std::path::Path;
380380+ let fixture_path =
381381+ Path::new("tests/fixtures/repo-nonbinary.computer-2025-10-21T13_05_55.090Z.car");
382382+383383+ // Skip test in CI if fixture doesn't exist
384384+ if !fixture_path.exists() {
385385+ println!(
386386+ "Skipping fixture test - fixture not found at {:?}",
387387+ fixture_path
388388+ );
389389+ return;
390390+ }
391391+392392+ println!("Loading fixture repo from {:?}", fixture_path);
393393+394394+ // Import CAR into storage
395395+ let storage = Arc::new(MemoryBlockStore::new());
396396+ let header = read_car_header(fixture_path).await.unwrap();
397397+ let parsed_car = read_car(fixture_path).await.unwrap();
398398+399399+ storage.put_many(parsed_car).await.unwrap();
400400+401401+ let root_cid = header.first().unwrap();
402402+403403+ // Load repository from fixture
404404+ let mut repo = Repository::from_commit(storage.clone(), root_cid)
405405+ .await
406406+ .unwrap();
407407+408408+ println!(
409409+ "Loaded fixture repo with commit at {}",
410410+ repo.current_commit_cid()
411411+ );
412412+413413+ let collection = Nsid::new("app.bsky.feed.post").unwrap();
414414+ let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rngs::OsRng);
415415+ let pubkey = get_public_key(&signing_key);
416416+ let did = repo.did().clone().into_static();
417417+418418+ let mut rng = rand::thread_rng();
419419+ let mut tracker = RecordTracker::new();
420420+421421+ // Perform random operations on fixture repo
422422+ for batch_num in 1..=20 {
423423+ let batch_size = rng.gen_range(10..=50);
424424+ let ops = generate_random_ops(&mut rng, &mut tracker, batch_size);
425425+ let record_writes = test_ops_to_record_writes(ops, &collection);
426426+427427+ let (repo_ops, commit_data) = repo
428428+ .create_commit(
429429+ &record_writes,
430430+ &did,
431431+ Some(repo.current_commit_cid().clone()),
432432+ &signing_key,
433433+ )
434434+ .await
435435+ .unwrap();
436436+437437+ repo.apply_commit(commit_data.clone()).await.unwrap();
438438+439439+ let firehose_commit = commit_data
440440+ .to_firehose_commit(&did, batch_num, Datetime::now(), repo_ops, vec![])
441441+ .await
442442+ .unwrap();
443443+444444+ firehose_commit
445445+ .validate_v1_1(&pubkey)
446446+ .await
447447+ .unwrap_or_else(|e| panic!("Fixture validation failed at batch {}: {}", batch_num, e));
448448+ }
449449+450450+ println!("Fixture stress test complete - 20 batches validated");
451451+}
crates/jacquard-repo/tests/interop.rs (+1, -5)
···888888 let blocks = read_car(fixture_path).await.expect("Failed to read CAR");
889889 let storage = Arc::new(MemoryBlockStore::new());
890890891891- let mut block_vec = Vec::new();
892892- for (cid, data) in blocks.iter() {
893893- block_vec.push((*cid, data.clone()));
894894- }
895891 storage
896896- .put_many(block_vec)
892892+ .put_many(blocks)
897893 .await
898894 .expect("Failed to store blocks");
899895