···3333 // BlobRef is an enum with Blob variant, which has a ref field (CidLink)
3434 let blob_ref = &file_node.blob;
3535 let cid_string = blob_ref.blob().r#ref.to_string();
3636-3636+3737 // Store with full path (mirrors TypeScript implementation)
3838 blob_map.insert(
3939 full_path,
···4343 EntryNode::Directory(subdir) => {
4444 let sub_map = extract_blob_map_recursive(subdir, full_path);
4545 blob_map.extend(sub_map);
4646+ }
4747+ EntryNode::Subfs(_) => {
4848+ // Subfs nodes don't contain blobs directly - they reference other records
4949+ // Skip them in blob map extraction
4650 }
4751 EntryNode::Unknown(_) => {
4852 // Skip unknown node types
+9
cli/src/lib.rs
···11+// @generated by jacquard-lexicon. DO NOT EDIT.
22+//
33+// This file was automatically generated from Lexicon schemas.
44+// Any manual changes will be overwritten on the next regeneration.
55+66+pub mod builder_types;
77+88+#[cfg(feature = "place_wisp")]
99+pub mod place_wisp;
+195-12
cli/src/main.rs
···66mod download;
77mod pull;
88mod serve;
99+mod subfs_utils;
9101011use clap::{Parser, Subcommand};
1112use jacquard::CowStr;
···204205 println!("Deploying site '{}'...", site_name);
205206206207 // Try to fetch existing manifest for incremental updates
207207- let existing_blob_map: HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)> = {
208208+ let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
208209 use jacquard_common::types::string::AtUri;
209209-210210+210211 // Get the DID for this session
211212 let session_info = agent.session_info().await;
212213 if let Some((did, _)) = session_info {
···218219 match response.into_output() {
219220 Ok(record_output) => {
220221 let existing_manifest = record_output.value;
221221- let blob_map = blob_map::extract_blob_map(&existing_manifest.root);
222222- println!("Found existing manifest with {} files, checking for changes...", blob_map.len());
223223- blob_map
222222+ let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
223223+ println!("Found existing manifest with {} files in main record", blob_map.len());
224224+225225+ // Extract subfs URIs from main record
226226+ let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());
227227+228228+ if !subfs_uris.is_empty() {
229229+ println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());
230230+231231+ // Merge blob maps from all subfs records
232232+ match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
233233+ Ok(merged_count) => {
234234+ println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
235235+ }
236236+ Err(e) => {
237237+ eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
238238+ }
239239+ }
240240+241241+ (blob_map, subfs_uris)
242242+ } else {
243243+ (blob_map, Vec::new())
244244+ }
224245 }
225246 Err(_) => {
226247 println!("No existing manifest found, uploading all files...");
227227- HashMap::new()
248248+ (HashMap::new(), Vec::new())
228249 }
229250 }
230251 }
231252 Err(_) => {
232253 // Record doesn't exist yet - this is a new site
233254 println!("No existing manifest found, uploading all files...");
234234- HashMap::new()
255255+ (HashMap::new(), Vec::new())
235256 }
236257 }
237258 } else {
238259 println!("No existing manifest found (invalid URI), uploading all files...");
239239- HashMap::new()
260260+ (HashMap::new(), Vec::new())
240261 }
241262 } else {
242263 println!("No existing manifest found (could not get DID), uploading all files...");
243243- HashMap::new()
264264+ (HashMap::new(), Vec::new())
244265 }
245266 };
246267···248269 let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new()).await?;
249270 let uploaded_count = total_files - reused_count;
250271251251- // Create the Fs record
272272+ // Check if we need to split into subfs records
273273+ const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
274274+ const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
275275+ const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this
276276+277277+ let mut working_directory = root_dir;
278278+ let mut current_file_count = total_files;
279279+ let mut new_subfs_uris: Vec<(String, String)> = Vec::new();
280280+281281+ // Estimate initial manifest size
282282+ let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);
283283+284284+ if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
285285+ println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
286286+ total_files, manifest_size as f64 / 1024.0);
287287+288288+ let mut attempts = 0;
289289+ const MAX_SPLIT_ATTEMPTS: usize = 50;
290290+291291+ while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
292292+ attempts += 1;
293293+294294+ // Find large directories to split
295295+ let directories = subfs_utils::find_large_directories(&working_directory, String::new());
296296+297297+ if let Some(largest_dir) = directories.first() {
298298+ println!(" Split #{}: {} ({} files, {:.1}KB)",
299299+ attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);
300300+301301+ // Create a subfs record for this directory
302302+ use jacquard_common::types::string::Tid;
303303+ let subfs_tid = Tid::now_0();
304304+ let subfs_rkey = subfs_tid.to_string();
305305+306306+ let subfs_manifest = crate::place_wisp::subfs::SubfsRecord::new()
307307+ .root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
308308+ .file_count(Some(largest_dir.file_count as i64))
309309+ .created_at(Datetime::now())
310310+ .build();
311311+312312+ // Upload subfs record
313313+ let subfs_output = agent.put_record(
314314+ RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
315315+ subfs_manifest
316316+ ).await.into_diagnostic()?;
317317+318318+ let subfs_uri = subfs_output.uri.to_string();
319319+ println!(" ✅ Created subfs: {}", subfs_uri);
320320+321321+ // Replace directory with subfs node (flat: false to preserve structure)
322322+ working_directory = subfs_utils::replace_directory_with_subfs(
323323+ working_directory,
324324+ &largest_dir.path,
325325+ &subfs_uri,
326326+ false // Preserve directory structure
327327+ )?;
328328+329329+ new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
330330+ current_file_count -= largest_dir.file_count;
331331+332332+ // Recalculate manifest size
333333+ manifest_size = subfs_utils::estimate_directory_size(&working_directory);
334334+ println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
335335+ manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());
336336+337337+ if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
338338+ println!("✅ Manifest now fits within limits");
339339+ break;
340340+ }
341341+ } else {
342342+ println!(" No more subdirectories to split - stopping");
343343+ break;
344344+ }
345345+ }
346346+347347+ if attempts >= MAX_SPLIT_ATTEMPTS {
348348+ return Err(miette::miette!(
349349+ "Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
350350+ MAX_SPLIT_ATTEMPTS,
351351+ manifest_size as f64 / 1024.0,
352352+ current_file_count
353353+ ));
354354+ }
355355+356356+ println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
357357+ new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
358358+ } else {
359359+ println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
360360+ total_files, manifest_size as f64 / 1024.0);
361361+ }
362362+363363+ // Create the final Fs record
252364 let fs_record = Fs::new()
253365 .site(CowStr::from(site_name.clone()))
254254- .root(root_dir)
255255- .file_count(total_files as i64)
366366+ .root(working_directory)
367367+ .file_count(current_file_count as i64)
256368 .created_at(Datetime::now())
257369 .build();
258370···270382 println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
271383 println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
272384 println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);
385385+386386+ // Clean up old subfs records
387387+ if !old_subfs_uris.is_empty() {
388388+ println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());
389389+390390+ let mut deleted_count = 0;
391391+ let mut failed_count = 0;
392392+393393+ for (uri, _path) in old_subfs_uris {
394394+ match subfs_utils::delete_subfs_record(agent, &uri).await {
395395+ Ok(_) => {
396396+ deleted_count += 1;
397397+ println!(" 🗑️ Deleted old subfs: {}", uri);
398398+ }
399399+ Err(e) => {
400400+ failed_count += 1;
401401+ eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
402402+ }
403403+ }
404404+ }
405405+406406+ if failed_count > 0 {
407407+ eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
408408+ } else {
409409+ println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
410410+ }
411411+ }
273412274413 Ok(())
275414}
···448587 ))
449588}
450589590590+/// Convert fs::Directory to subfs::Directory
591591+/// They have the same structure, but different types
592592+fn convert_fs_dir_to_subfs_dir(fs_dir: place_wisp::fs::Directory<'static>) -> place_wisp::subfs::Directory<'static> {
593593+ use place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
594594+595595+ let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
596596+ let node = match entry.node {
597597+ place_wisp::fs::EntryNode::File(file) => {
598598+ SubfsEntryNode::File(Box::new(SubfsFile::new()
599599+ .r#type(file.r#type)
600600+ .blob(file.blob)
601601+ .encoding(file.encoding)
602602+ .mime_type(file.mime_type)
603603+ .base64(file.base64)
604604+ .build()))
605605+ }
606606+ place_wisp::fs::EntryNode::Directory(dir) => {
607607+ SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
608608+ }
609609+ place_wisp::fs::EntryNode::Subfs(subfs) => {
610610+ // Nested subfs in the directory we're converting
611611+ // Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
612612+ SubfsEntryNode::Subfs(Box::new(place_wisp::subfs::Subfs::new()
613613+ .r#type(subfs.r#type)
614614+ .subject(subfs.subject)
615615+ .build()))
616616+ }
617617+ place_wisp::fs::EntryNode::Unknown(unknown) => {
618618+ SubfsEntryNode::Unknown(unknown)
619619+ }
620620+ };
621621+622622+ SubfsEntry::new()
623623+ .name(entry.name)
624624+ .node(node)
625625+ .build()
626626+ }).collect();
627627+628628+ SubfsDirectory::new()
629629+ .r#type(fs_dir.r#type)
630630+ .entries(subfs_entries)
631631+ .build()
632632+}
633633+
+2-1
cli/src/place_wisp.rs
···33// This file was automatically generated from Lexicon schemas.
44// Any manual changes will be overwritten on the next regeneration.
5566-pub mod fs;66+pub mod fs;
77+pub mod subfs;
+261-1
cli/src/place_wisp/fs.rs
···251251 description: None,
252252 refs: vec![
253253 ::jacquard_common::CowStr::new_static("#file"),
254254- ::jacquard_common::CowStr::new_static("#directory")
254254+ ::jacquard_common::CowStr::new_static("#directory"),
255255+ ::jacquard_common::CowStr::new_static("#subfs")
255256 ],
256257 closed: None,
257258 }),
···428429 }),
429430 }),
430431 );
432432+ map.insert(
433433+ ::jacquard_common::smol_str::SmolStr::new_static("subfs"),
434434+ ::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
435435+ description: None,
436436+ required: Some(
437437+ vec![
438438+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
439439+ ::jacquard_common::smol_str::SmolStr::new_static("subject")
440440+ ],
441441+ ),
442442+ nullable: None,
443443+ properties: {
444444+ #[allow(unused_mut)]
445445+ let mut map = ::std::collections::BTreeMap::new();
446446+ map.insert(
447447+ ::jacquard_common::smol_str::SmolStr::new_static("flat"),
448448+ ::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
449449+ description: None,
450450+ default: None,
451451+ r#const: None,
452452+ }),
453453+ );
454454+ map.insert(
455455+ ::jacquard_common::smol_str::SmolStr::new_static("subject"),
456456+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
457457+ description: Some(
458458+ ::jacquard_common::CowStr::new_static(
459459+ "AT-URI pointing to a place.wisp.subfs record containing this subtree.",
460460+ ),
461461+ ),
462462+ format: Some(
463463+ ::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
464464+ ),
465465+ default: None,
466466+ min_length: None,
467467+ max_length: None,
468468+ min_graphemes: None,
469469+ max_graphemes: None,
470470+ r#enum: None,
471471+ r#const: None,
472472+ known_values: None,
473473+ }),
474474+ );
475475+ map.insert(
476476+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
477477+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
478478+ description: None,
479479+ format: None,
480480+ default: None,
481481+ min_length: None,
482482+ max_length: None,
483483+ min_graphemes: None,
484484+ max_graphemes: None,
485485+ r#enum: None,
486486+ r#const: None,
487487+ known_values: None,
488488+ }),
489489+ );
490490+ map
491491+ },
492492+ }),
493493+ );
431494 map
432495 },
433496 }
···638701 File(Box<crate::place_wisp::fs::File<'a>>),
639702 #[serde(rename = "place.wisp.fs#directory")]
640703 Directory(Box<crate::place_wisp::fs::Directory<'a>>),
704704+ #[serde(rename = "place.wisp.fs#subfs")]
705705+ Subfs(Box<crate::place_wisp::fs::Subfs<'a>>),
641706}
642707643708impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
···12251290 });
12261291 }
12271292 }
12931293+ Ok(())
12941294+ }
12951295+}
12961296+12971297+#[jacquard_derive::lexicon]
12981298+#[derive(
12991299+ serde::Serialize,
13001300+ serde::Deserialize,
13011301+ Debug,
13021302+ Clone,
13031303+ PartialEq,
13041304+ Eq,
13051305+ jacquard_derive::IntoStatic
13061306+)]
13071307+#[serde(rename_all = "camelCase")]
13081308+pub struct Subfs<'a> {
13091309+ /// If true, the subfs record's root entries are merged (flattened) into the parent directory, replacing the subfs entry. If false (default), the subfs entries are placed in a subdirectory with the subfs entry's name. Flat merging is useful for splitting large directories across multiple records while maintaining a flat structure.
13101310+ #[serde(skip_serializing_if = "std::option::Option::is_none")]
13111311+ pub flat: Option<bool>,
13121312+ /// AT-URI pointing to a place.wisp.subfs record containing this subtree.
13131313+ #[serde(borrow)]
13141314+ pub subject: jacquard_common::types::string::AtUri<'a>,
13151315+ #[serde(borrow)]
13161316+ pub r#type: jacquard_common::CowStr<'a>,
13171317+}
13181318+13191319+pub mod subfs_state {
13201320+13211321+ pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
13221322+ #[allow(unused)]
13231323+ use ::core::marker::PhantomData;
13241324+ mod sealed {
13251325+ pub trait Sealed {}
13261326+ }
13271327+ /// State trait tracking which required fields have been set
13281328+ pub trait State: sealed::Sealed {
13291329+ type Type;
13301330+ type Subject;
13311331+ }
13321332+ /// Empty state - all required fields are unset
13331333+ pub struct Empty(());
13341334+ impl sealed::Sealed for Empty {}
13351335+ impl State for Empty {
13361336+ type Type = Unset;
13371337+ type Subject = Unset;
13381338+ }
13391339+ ///State transition - sets the `type` field to Set
13401340+ pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
13411341+ impl<S: State> sealed::Sealed for SetType<S> {}
13421342+ impl<S: State> State for SetType<S> {
13431343+ type Type = Set<members::r#type>;
13441344+ type Subject = S::Subject;
13451345+ }
13461346+ ///State transition - sets the `subject` field to Set
13471347+ pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
13481348+ impl<S: State> sealed::Sealed for SetSubject<S> {}
13491349+ impl<S: State> State for SetSubject<S> {
13501350+ type Type = S::Type;
13511351+ type Subject = Set<members::subject>;
13521352+ }
13531353+ /// Marker types for field names
13541354+ #[allow(non_camel_case_types)]
13551355+ pub mod members {
13561356+ ///Marker type for the `type` field
13571357+ pub struct r#type(());
13581358+ ///Marker type for the `subject` field
13591359+ pub struct subject(());
13601360+ }
13611361+}
13621362+13631363+/// Builder for constructing an instance of this type
13641364+pub struct SubfsBuilder<'a, S: subfs_state::State> {
13651365+ _phantom_state: ::core::marker::PhantomData<fn() -> S>,
13661366+ __unsafe_private_named: (
13671367+ ::core::option::Option<bool>,
13681368+ ::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
13691369+ ::core::option::Option<jacquard_common::CowStr<'a>>,
13701370+ ),
13711371+ _phantom: ::core::marker::PhantomData<&'a ()>,
13721372+}
13731373+13741374+impl<'a> Subfs<'a> {
13751375+ /// Create a new builder for this type
13761376+ pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
13771377+ SubfsBuilder::new()
13781378+ }
13791379+}
13801380+13811381+impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
13821382+ /// Create a new builder with all fields unset
13831383+ pub fn new() -> Self {
13841384+ SubfsBuilder {
13851385+ _phantom_state: ::core::marker::PhantomData,
13861386+ __unsafe_private_named: (None, None, None),
13871387+ _phantom: ::core::marker::PhantomData,
13881388+ }
13891389+ }
13901390+}
13911391+13921392+impl<'a, S: subfs_state::State> SubfsBuilder<'a, S> {
13931393+ /// Set the `flat` field (optional)
13941394+ pub fn flat(mut self, value: impl Into<Option<bool>>) -> Self {
13951395+ self.__unsafe_private_named.0 = value.into();
13961396+ self
13971397+ }
13981398+ /// Set the `flat` field to an Option value (optional)
13991399+ pub fn maybe_flat(mut self, value: Option<bool>) -> Self {
14001400+ self.__unsafe_private_named.0 = value;
14011401+ self
14021402+ }
14031403+}
14041404+14051405+impl<'a, S> SubfsBuilder<'a, S>
14061406+where
14071407+ S: subfs_state::State,
14081408+ S::Subject: subfs_state::IsUnset,
14091409+{
14101410+ /// Set the `subject` field (required)
14111411+ pub fn subject(
14121412+ mut self,
14131413+ value: impl Into<jacquard_common::types::string::AtUri<'a>>,
14141414+ ) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
14151415+ self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
14161416+ SubfsBuilder {
14171417+ _phantom_state: ::core::marker::PhantomData,
14181418+ __unsafe_private_named: self.__unsafe_private_named,
14191419+ _phantom: ::core::marker::PhantomData,
14201420+ }
14211421+ }
14221422+}
14231423+14241424+impl<'a, S> SubfsBuilder<'a, S>
14251425+where
14261426+ S: subfs_state::State,
14271427+ S::Type: subfs_state::IsUnset,
14281428+{
14291429+ /// Set the `type` field (required)
14301430+ pub fn r#type(
14311431+ mut self,
14321432+ value: impl Into<jacquard_common::CowStr<'a>>,
14331433+ ) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
14341434+ self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
14351435+ SubfsBuilder {
14361436+ _phantom_state: ::core::marker::PhantomData,
14371437+ __unsafe_private_named: self.__unsafe_private_named,
14381438+ _phantom: ::core::marker::PhantomData,
14391439+ }
14401440+ }
14411441+}
14421442+14431443+impl<'a, S> SubfsBuilder<'a, S>
14441444+where
14451445+ S: subfs_state::State,
14461446+ S::Type: subfs_state::IsSet,
14471447+ S::Subject: subfs_state::IsSet,
14481448+{
14491449+ /// Build the final struct
14501450+ pub fn build(self) -> Subfs<'a> {
14511451+ Subfs {
14521452+ flat: self.__unsafe_private_named.0,
14531453+ subject: self.__unsafe_private_named.1.unwrap(),
14541454+ r#type: self.__unsafe_private_named.2.unwrap(),
14551455+ extra_data: Default::default(),
14561456+ }
14571457+ }
14581458+ /// Build the final struct with custom extra_data
14591459+ pub fn build_with_data(
14601460+ self,
14611461+ extra_data: std::collections::BTreeMap<
14621462+ jacquard_common::smol_str::SmolStr,
14631463+ jacquard_common::types::value::Data<'a>,
14641464+ >,
14651465+ ) -> Subfs<'a> {
14661466+ Subfs {
14671467+ flat: self.__unsafe_private_named.0,
14681468+ subject: self.__unsafe_private_named.1.unwrap(),
14691469+ r#type: self.__unsafe_private_named.2.unwrap(),
14701470+ extra_data: Some(extra_data),
14711471+ }
14721472+ }
14731473+}
14741474+14751475+impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
14761476+ fn nsid() -> &'static str {
14771477+ "place.wisp.fs"
14781478+ }
14791479+ fn def_name() -> &'static str {
14801480+ "subfs"
14811481+ }
14821482+ fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
14831483+ lexicon_doc_place_wisp_fs()
14841484+ }
14851485+ fn validate(
14861486+ &self,
14871487+ ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
12281488 Ok(())
12291489 }
12301490}
+1408
cli/src/place_wisp/subfs.rs
···11+// @generated by jacquard-lexicon. DO NOT EDIT.
22+//
33+// Lexicon: place.wisp.subfs
44+//
55+// This file was automatically generated from Lexicon schemas.
66+// Any manual changes will be overwritten on the next regeneration.
77+88+#[jacquard_derive::lexicon]
99+#[derive(
1010+ serde::Serialize,
1111+ serde::Deserialize,
1212+ Debug,
1313+ Clone,
1414+ PartialEq,
1515+ Eq,
1616+ jacquard_derive::IntoStatic
1717+)]
1818+#[serde(rename_all = "camelCase")]
1919+pub struct Directory<'a> {
2020+ #[serde(borrow)]
2121+ pub entries: Vec<crate::place_wisp::subfs::Entry<'a>>,
2222+ #[serde(borrow)]
2323+ pub r#type: jacquard_common::CowStr<'a>,
2424+}
2525+2626+pub mod directory_state {
2727+2828+ pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
2929+ #[allow(unused)]
3030+ use ::core::marker::PhantomData;
3131+ mod sealed {
3232+ pub trait Sealed {}
3333+ }
3434+ /// State trait tracking which required fields have been set
3535+ pub trait State: sealed::Sealed {
3636+ type Type;
3737+ type Entries;
3838+ }
3939+ /// Empty state - all required fields are unset
4040+ pub struct Empty(());
4141+ impl sealed::Sealed for Empty {}
4242+ impl State for Empty {
4343+ type Type = Unset;
4444+ type Entries = Unset;
4545+ }
4646+ ///State transition - sets the `type` field to Set
4747+ pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
4848+ impl<S: State> sealed::Sealed for SetType<S> {}
4949+ impl<S: State> State for SetType<S> {
5050+ type Type = Set<members::r#type>;
5151+ type Entries = S::Entries;
5252+ }
5353+ ///State transition - sets the `entries` field to Set
5454+ pub struct SetEntries<S: State = Empty>(PhantomData<fn() -> S>);
5555+ impl<S: State> sealed::Sealed for SetEntries<S> {}
5656+ impl<S: State> State for SetEntries<S> {
5757+ type Type = S::Type;
5858+ type Entries = Set<members::entries>;
5959+ }
6060+ /// Marker types for field names
6161+ #[allow(non_camel_case_types)]
6262+ pub mod members {
6363+ ///Marker type for the `type` field
6464+ pub struct r#type(());
6565+ ///Marker type for the `entries` field
6666+ pub struct entries(());
6767+ }
6868+}
6969+7070+/// Builder for constructing an instance of this type
7171+pub struct DirectoryBuilder<'a, S: directory_state::State> {
7272+ _phantom_state: ::core::marker::PhantomData<fn() -> S>,
7373+ __unsafe_private_named: (
7474+ ::core::option::Option<Vec<crate::place_wisp::subfs::Entry<'a>>>,
7575+ ::core::option::Option<jacquard_common::CowStr<'a>>,
7676+ ),
7777+ _phantom: ::core::marker::PhantomData<&'a ()>,
7878+}
7979+8080+impl<'a> Directory<'a> {
8181+ /// Create a new builder for this type
8282+ pub fn new() -> DirectoryBuilder<'a, directory_state::Empty> {
8383+ DirectoryBuilder::new()
8484+ }
8585+}
8686+8787+impl<'a> DirectoryBuilder<'a, directory_state::Empty> {
8888+ /// Create a new builder with all fields unset
8989+ pub fn new() -> Self {
9090+ DirectoryBuilder {
9191+ _phantom_state: ::core::marker::PhantomData,
9292+ __unsafe_private_named: (None, None),
9393+ _phantom: ::core::marker::PhantomData,
9494+ }
9595+ }
9696+}
9797+9898+impl<'a, S> DirectoryBuilder<'a, S>
9999+where
100100+ S: directory_state::State,
101101+ S::Entries: directory_state::IsUnset,
102102+{
103103+ /// Set the `entries` field (required)
104104+ pub fn entries(
105105+ mut self,
106106+ value: impl Into<Vec<crate::place_wisp::subfs::Entry<'a>>>,
107107+ ) -> DirectoryBuilder<'a, directory_state::SetEntries<S>> {
108108+ self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
109109+ DirectoryBuilder {
110110+ _phantom_state: ::core::marker::PhantomData,
111111+ __unsafe_private_named: self.__unsafe_private_named,
112112+ _phantom: ::core::marker::PhantomData,
113113+ }
114114+ }
115115+}
116116+117117+impl<'a, S> DirectoryBuilder<'a, S>
118118+where
119119+ S: directory_state::State,
120120+ S::Type: directory_state::IsUnset,
121121+{
122122+ /// Set the `type` field (required)
123123+ pub fn r#type(
124124+ mut self,
125125+ value: impl Into<jacquard_common::CowStr<'a>>,
126126+ ) -> DirectoryBuilder<'a, directory_state::SetType<S>> {
127127+ self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
128128+ DirectoryBuilder {
129129+ _phantom_state: ::core::marker::PhantomData,
130130+ __unsafe_private_named: self.__unsafe_private_named,
131131+ _phantom: ::core::marker::PhantomData,
132132+ }
133133+ }
134134+}
135135+136136+impl<'a, S> DirectoryBuilder<'a, S>
137137+where
138138+ S: directory_state::State,
139139+ S::Type: directory_state::IsSet,
140140+ S::Entries: directory_state::IsSet,
141141+{
142142+ /// Build the final struct
143143+ pub fn build(self) -> Directory<'a> {
144144+ Directory {
145145+ entries: self.__unsafe_private_named.0.unwrap(),
146146+ r#type: self.__unsafe_private_named.1.unwrap(),
147147+ extra_data: Default::default(),
148148+ }
149149+ }
150150+ /// Build the final struct with custom extra_data
151151+ pub fn build_with_data(
152152+ self,
153153+ extra_data: std::collections::BTreeMap<
154154+ jacquard_common::smol_str::SmolStr,
155155+ jacquard_common::types::value::Data<'a>,
156156+ >,
157157+ ) -> Directory<'a> {
158158+ Directory {
159159+ entries: self.__unsafe_private_named.0.unwrap(),
160160+ r#type: self.__unsafe_private_named.1.unwrap(),
161161+ extra_data: Some(extra_data),
162162+ }
163163+ }
164164+}
165165+166166+fn lexicon_doc_place_wisp_subfs() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
167167+ ::jacquard_lexicon::lexicon::LexiconDoc {
168168+ lexicon: ::jacquard_lexicon::lexicon::Lexicon::Lexicon1,
169169+ id: ::jacquard_common::CowStr::new_static("place.wisp.subfs"),
170170+ revision: None,
171171+ description: None,
172172+ defs: {
173173+ let mut map = ::std::collections::BTreeMap::new();
174174+ map.insert(
175175+ ::jacquard_common::smol_str::SmolStr::new_static("directory"),
176176+ ::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
177177+ description: None,
178178+ required: Some(
179179+ vec![
180180+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
181181+ ::jacquard_common::smol_str::SmolStr::new_static("entries")
182182+ ],
183183+ ),
184184+ nullable: None,
185185+ properties: {
186186+ #[allow(unused_mut)]
187187+ let mut map = ::std::collections::BTreeMap::new();
188188+ map.insert(
189189+ ::jacquard_common::smol_str::SmolStr::new_static("entries"),
190190+ ::jacquard_lexicon::lexicon::LexObjectProperty::Array(::jacquard_lexicon::lexicon::LexArray {
191191+ description: None,
192192+ items: ::jacquard_lexicon::lexicon::LexArrayItem::Ref(::jacquard_lexicon::lexicon::LexRef {
193193+ description: None,
194194+ r#ref: ::jacquard_common::CowStr::new_static("#entry"),
195195+ }),
196196+ min_length: None,
197197+ max_length: Some(500usize),
198198+ }),
199199+ );
200200+ map.insert(
201201+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
202202+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
203203+ description: None,
204204+ format: None,
205205+ default: None,
206206+ min_length: None,
207207+ max_length: None,
208208+ min_graphemes: None,
209209+ max_graphemes: None,
210210+ r#enum: None,
211211+ r#const: None,
212212+ known_values: None,
213213+ }),
214214+ );
215215+ map
216216+ },
217217+ }),
218218+ );
219219+ map.insert(
220220+ ::jacquard_common::smol_str::SmolStr::new_static("entry"),
221221+ ::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
222222+ description: None,
223223+ required: Some(
224224+ vec![
225225+ ::jacquard_common::smol_str::SmolStr::new_static("name"),
226226+ ::jacquard_common::smol_str::SmolStr::new_static("node")
227227+ ],
228228+ ),
229229+ nullable: None,
230230+ properties: {
231231+ #[allow(unused_mut)]
232232+ let mut map = ::std::collections::BTreeMap::new();
233233+ map.insert(
234234+ ::jacquard_common::smol_str::SmolStr::new_static("name"),
235235+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
236236+ description: None,
237237+ format: None,
238238+ default: None,
239239+ min_length: None,
240240+ max_length: Some(255usize),
241241+ min_graphemes: None,
242242+ max_graphemes: None,
243243+ r#enum: None,
244244+ r#const: None,
245245+ known_values: None,
246246+ }),
247247+ );
248248+ map.insert(
249249+ ::jacquard_common::smol_str::SmolStr::new_static("node"),
250250+ ::jacquard_lexicon::lexicon::LexObjectProperty::Union(::jacquard_lexicon::lexicon::LexRefUnion {
251251+ description: None,
252252+ refs: vec![
253253+ ::jacquard_common::CowStr::new_static("#file"),
254254+ ::jacquard_common::CowStr::new_static("#directory"),
255255+ ::jacquard_common::CowStr::new_static("#subfs")
256256+ ],
257257+ closed: None,
258258+ }),
259259+ );
260260+ map
261261+ },
262262+ }),
263263+ );
264264+ map.insert(
265265+ ::jacquard_common::smol_str::SmolStr::new_static("file"),
266266+ ::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
267267+ description: None,
268268+ required: Some(
269269+ vec![
270270+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
271271+ ::jacquard_common::smol_str::SmolStr::new_static("blob")
272272+ ],
273273+ ),
274274+ nullable: None,
275275+ properties: {
276276+ #[allow(unused_mut)]
277277+ let mut map = ::std::collections::BTreeMap::new();
278278+ map.insert(
279279+ ::jacquard_common::smol_str::SmolStr::new_static("base64"),
280280+ ::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
281281+ description: None,
282282+ default: None,
283283+ r#const: None,
284284+ }),
285285+ );
286286+ map.insert(
287287+ ::jacquard_common::smol_str::SmolStr::new_static("blob"),
288288+ ::jacquard_lexicon::lexicon::LexObjectProperty::Blob(::jacquard_lexicon::lexicon::LexBlob {
289289+ description: None,
290290+ accept: None,
291291+ max_size: None,
292292+ }),
293293+ );
294294+ map.insert(
295295+ ::jacquard_common::smol_str::SmolStr::new_static("encoding"),
296296+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
297297+ description: Some(
298298+ ::jacquard_common::CowStr::new_static(
299299+ "Content encoding (e.g., gzip for compressed files)",
300300+ ),
301301+ ),
302302+ format: None,
303303+ default: None,
304304+ min_length: None,
305305+ max_length: None,
306306+ min_graphemes: None,
307307+ max_graphemes: None,
308308+ r#enum: None,
309309+ r#const: None,
310310+ known_values: None,
311311+ }),
312312+ );
313313+ map.insert(
314314+ ::jacquard_common::smol_str::SmolStr::new_static("mimeType"),
315315+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
316316+ description: Some(
317317+ ::jacquard_common::CowStr::new_static(
318318+ "Original MIME type before compression",
319319+ ),
320320+ ),
321321+ format: None,
322322+ default: None,
323323+ min_length: None,
324324+ max_length: None,
325325+ min_graphemes: None,
326326+ max_graphemes: None,
327327+ r#enum: None,
328328+ r#const: None,
329329+ known_values: None,
330330+ }),
331331+ );
332332+ map.insert(
333333+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
334334+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
335335+ description: None,
336336+ format: None,
337337+ default: None,
338338+ min_length: None,
339339+ max_length: None,
340340+ min_graphemes: None,
341341+ max_graphemes: None,
342342+ r#enum: None,
343343+ r#const: None,
344344+ known_values: None,
345345+ }),
346346+ );
347347+ map
348348+ },
349349+ }),
350350+ );
351351+ map.insert(
352352+ ::jacquard_common::smol_str::SmolStr::new_static("main"),
353353+ ::jacquard_lexicon::lexicon::LexUserType::Record(::jacquard_lexicon::lexicon::LexRecord {
354354+ description: Some(
355355+ ::jacquard_common::CowStr::new_static(
356356+ "Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",
357357+ ),
358358+ ),
359359+ key: None,
360360+ record: ::jacquard_lexicon::lexicon::LexRecordRecord::Object(::jacquard_lexicon::lexicon::LexObject {
361361+ description: None,
362362+ required: Some(
363363+ vec![
364364+ ::jacquard_common::smol_str::SmolStr::new_static("root"),
365365+ ::jacquard_common::smol_str::SmolStr::new_static("createdAt")
366366+ ],
367367+ ),
368368+ nullable: None,
369369+ properties: {
370370+ #[allow(unused_mut)]
371371+ let mut map = ::std::collections::BTreeMap::new();
372372+ map.insert(
373373+ ::jacquard_common::smol_str::SmolStr::new_static(
374374+ "createdAt",
375375+ ),
376376+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
377377+ description: None,
378378+ format: Some(
379379+ ::jacquard_lexicon::lexicon::LexStringFormat::Datetime,
380380+ ),
381381+ default: None,
382382+ min_length: None,
383383+ max_length: None,
384384+ min_graphemes: None,
385385+ max_graphemes: None,
386386+ r#enum: None,
387387+ r#const: None,
388388+ known_values: None,
389389+ }),
390390+ );
391391+ map.insert(
392392+ ::jacquard_common::smol_str::SmolStr::new_static(
393393+ "fileCount",
394394+ ),
395395+ ::jacquard_lexicon::lexicon::LexObjectProperty::Integer(::jacquard_lexicon::lexicon::LexInteger {
396396+ description: None,
397397+ default: None,
398398+ minimum: Some(0i64),
399399+ maximum: Some(1000i64),
400400+ r#enum: None,
401401+ r#const: None,
402402+ }),
403403+ );
404404+ map.insert(
405405+ ::jacquard_common::smol_str::SmolStr::new_static("root"),
406406+ ::jacquard_lexicon::lexicon::LexObjectProperty::Ref(::jacquard_lexicon::lexicon::LexRef {
407407+ description: None,
408408+ r#ref: ::jacquard_common::CowStr::new_static("#directory"),
409409+ }),
410410+ );
411411+ map
412412+ },
413413+ }),
414414+ }),
415415+ );
416416+ map.insert(
417417+ ::jacquard_common::smol_str::SmolStr::new_static("subfs"),
418418+ ::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
419419+ description: None,
420420+ required: Some(
421421+ vec![
422422+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
423423+ ::jacquard_common::smol_str::SmolStr::new_static("subject")
424424+ ],
425425+ ),
426426+ nullable: None,
427427+ properties: {
428428+ #[allow(unused_mut)]
429429+ let mut map = ::std::collections::BTreeMap::new();
430430+ map.insert(
431431+ ::jacquard_common::smol_str::SmolStr::new_static("subject"),
432432+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
433433+ description: Some(
434434+ ::jacquard_common::CowStr::new_static(
435435+ "AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.",
436436+ ),
437437+ ),
438438+ format: Some(
439439+ ::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
440440+ ),
441441+ default: None,
442442+ min_length: None,
443443+ max_length: None,
444444+ min_graphemes: None,
445445+ max_graphemes: None,
446446+ r#enum: None,
447447+ r#const: None,
448448+ known_values: None,
449449+ }),
450450+ );
451451+ map.insert(
452452+ ::jacquard_common::smol_str::SmolStr::new_static("type"),
453453+ ::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
454454+ description: None,
455455+ format: None,
456456+ default: None,
457457+ min_length: None,
458458+ max_length: None,
459459+ min_graphemes: None,
460460+ max_graphemes: None,
461461+ r#enum: None,
462462+ r#const: None,
463463+ known_values: None,
464464+ }),
465465+ );
466466+ map
467467+ },
468468+ }),
469469+ );
470470+ map
471471+ },
472472+ }
473473+}
// Schema identity + validation glue for the `place.wisp.subfs#directory` def.
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Directory<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "directory"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// Enforces the lexicon constraint `entries.maxLength = 500`.
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        {
            let value = &self.entries;
            #[allow(unused_comparisons)]
            if value.len() > 500usize {
                return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "entries",
                    ),
                    max: 500usize,
                    actual: value.len(),
                });
            }
        }
        Ok(())
    }
}
/// A single named entry inside a subfs directory (`place.wisp.subfs#entry`):
/// a `name` paired with the node it points at (file, directory, or nested subfs).
#[jacquard_derive::lexicon]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct Entry<'a> {
    /// Entry name; length constraints are enforced in `LexiconSchema::validate`.
    #[serde(borrow)]
    pub name: jacquard_common::CowStr<'a>,
    /// The node payload — see [`EntryNode`] for the open union of variants.
    #[serde(borrow)]
    pub node: EntryNode<'a>,
}
/// Typestate machinery for [`EntryBuilder`]: each required field (`name`, `node`)
/// is tracked at the type level so `build()` is only callable once both are set.
pub mod entry_state {

    pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
    #[allow(unused)]
    use ::core::marker::PhantomData;
    // Private supertrait so downstream crates cannot implement `State`.
    mod sealed {
        pub trait Sealed {}
    }
    /// State trait tracking which required fields have been set
    pub trait State: sealed::Sealed {
        type Name;
        type Node;
    }
    /// Empty state - all required fields are unset
    pub struct Empty(());
    impl sealed::Sealed for Empty {}
    impl State for Empty {
        type Name = Unset;
        type Node = Unset;
    }
    ///State transition - sets the `name` field to Set
    pub struct SetName<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetName<S> {}
    impl<S: State> State for SetName<S> {
        type Name = Set<members::name>;
        type Node = S::Node;
    }
    ///State transition - sets the `node` field to Set
    pub struct SetNode<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetNode<S> {}
    impl<S: State> State for SetNode<S> {
        type Name = S::Name;
        type Node = Set<members::node>;
    }
    /// Marker types for field names
    #[allow(non_camel_case_types)]
    pub mod members {
        ///Marker type for the `name` field
        pub struct name(());
        ///Marker type for the `node` field
        pub struct node(());
    }
}
/// Builder for constructing an instance of this type
pub struct EntryBuilder<'a, S: entry_state::State> {
    _phantom_state: ::core::marker::PhantomData<fn() -> S>,
    // Tuple slots in declaration order: (name, node).
    // The setter impls below index into this tuple positionally.
    __unsafe_private_named: (
        ::core::option::Option<jacquard_common::CowStr<'a>>,
        ::core::option::Option<EntryNode<'a>>,
    ),
    _phantom: ::core::marker::PhantomData<&'a ()>,
}
impl<'a> Entry<'a> {
    /// Create a new builder for this type
    pub fn new() -> EntryBuilder<'a, entry_state::Empty> {
        EntryBuilder::new()
    }
}

impl<'a> EntryBuilder<'a, entry_state::Empty> {
    /// Create a new builder with all fields unset
    pub fn new() -> Self {
        EntryBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: (None, None),
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Setter available only while `name` is still unset; returns the builder in
// the `SetName<S>` state so the field cannot be set twice.
impl<'a, S> EntryBuilder<'a, S>
where
    S: entry_state::State,
    S::Name: entry_state::IsUnset,
{
    /// Set the `name` field (required)
    pub fn name(
        mut self,
        value: impl Into<jacquard_common::CowStr<'a>>,
    ) -> EntryBuilder<'a, entry_state::SetName<S>> {
        self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
        EntryBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Setter available only while `node` is still unset.
impl<'a, S> EntryBuilder<'a, S>
where
    S: entry_state::State,
    S::Node: entry_state::IsUnset,
{
    /// Set the `node` field (required)
    pub fn node(
        mut self,
        value: impl Into<EntryNode<'a>>,
    ) -> EntryBuilder<'a, entry_state::SetNode<S>> {
        self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
        EntryBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// `build` is only reachable once both required fields are `IsSet`, so the
// `unwrap()`s below cannot fail: the setters are the only state transitions
// and each stores `Some` before transitioning.
impl<'a, S> EntryBuilder<'a, S>
where
    S: entry_state::State,
    S::Name: entry_state::IsSet,
    S::Node: entry_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> Entry<'a> {
        Entry {
            name: self.__unsafe_private_named.0.unwrap(),
            node: self.__unsafe_private_named.1.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> Entry<'a> {
        Entry {
            name: self.__unsafe_private_named.0.unwrap(),
            node: self.__unsafe_private_named.1.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
/// Open union of directory-entry payloads, discriminated on the wire by the
/// `$type` field (internally tagged serde enum). Variants are boxed to keep
/// the enum itself small.
#[jacquard_derive::open_union]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(tag = "$type")]
#[serde(bound(deserialize = "'de: 'a"))]
pub enum EntryNode<'a> {
    #[serde(rename = "place.wisp.subfs#file")]
    File(Box<crate::place_wisp::subfs::File<'a>>),
    #[serde(rename = "place.wisp.subfs#directory")]
    Directory(Box<crate::place_wisp::subfs::Directory<'a>>),
    #[serde(rename = "place.wisp.subfs#subfs")]
    Subfs(Box<crate::place_wisp::subfs::Subfs<'a>>),
}
// Schema identity + validation glue for the `place.wisp.subfs#entry` def.
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "entry"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// Enforces `name.maxLength = 255`.
    /// NOTE(review): `<str>::len` counts UTF-8 bytes, not characters/graphemes —
    /// presumably intentional (lexicon `maxLength` vs `maxGraphemes`); confirm
    /// against the generator's spec handling.
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        {
            let value = &self.name;
            #[allow(unused_comparisons)]
            if <str>::len(value.as_ref()) > 255usize {
                return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "name",
                    ),
                    max: 255usize,
                    actual: <str>::len(value.as_ref()),
                });
            }
        }
        Ok(())
    }
}
/// File node (`place.wisp.subfs#file`): a content blob plus optional metadata
/// describing how the bytes were stored (base64 wrapping, encoding, MIME type).
#[jacquard_derive::lexicon]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct File<'a> {
    /// True if blob content is base64-encoded (used to bypass PDS content sniffing)
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub base64: Option<bool>,
    /// Content blob ref
    #[serde(borrow)]
    pub blob: jacquard_common::types::blob::BlobRef<'a>,
    /// Content encoding (e.g., gzip for compressed files)
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    #[serde(borrow)]
    pub encoding: Option<jacquard_common::CowStr<'a>>,
    /// Original MIME type before compression
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    #[serde(borrow)]
    pub mime_type: Option<jacquard_common::CowStr<'a>>,
    /// Node discriminator string (serialized as `type`).
    #[serde(borrow)]
    pub r#type: jacquard_common::CowStr<'a>,
}
/// Typestate machinery for [`FileBuilder`]: required fields (`type`, `blob`)
/// are tracked at the type level so `build()` only exists once both are set.
pub mod file_state {

    pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
    #[allow(unused)]
    use ::core::marker::PhantomData;
    // Private supertrait so downstream crates cannot implement `State`.
    mod sealed {
        pub trait Sealed {}
    }
    /// State trait tracking which required fields have been set
    pub trait State: sealed::Sealed {
        type Type;
        type Blob;
    }
    /// Empty state - all required fields are unset
    pub struct Empty(());
    impl sealed::Sealed for Empty {}
    impl State for Empty {
        type Type = Unset;
        type Blob = Unset;
    }
    ///State transition - sets the `type` field to Set
    pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetType<S> {}
    impl<S: State> State for SetType<S> {
        type Type = Set<members::r#type>;
        type Blob = S::Blob;
    }
    ///State transition - sets the `blob` field to Set
    pub struct SetBlob<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetBlob<S> {}
    impl<S: State> State for SetBlob<S> {
        type Type = S::Type;
        type Blob = Set<members::blob>;
    }
    /// Marker types for field names
    #[allow(non_camel_case_types)]
    pub mod members {
        ///Marker type for the `type` field
        pub struct r#type(());
        ///Marker type for the `blob` field
        pub struct blob(());
    }
}
/// Builder for constructing an instance of this type
pub struct FileBuilder<'a, S: file_state::State> {
    _phantom_state: ::core::marker::PhantomData<fn() -> S>,
    // Tuple slots in declaration order: (base64, blob, encoding, mime_type, type).
    // The setter impls below index into this tuple positionally.
    __unsafe_private_named: (
        ::core::option::Option<bool>,
        ::core::option::Option<jacquard_common::types::blob::BlobRef<'a>>,
        ::core::option::Option<jacquard_common::CowStr<'a>>,
        ::core::option::Option<jacquard_common::CowStr<'a>>,
        ::core::option::Option<jacquard_common::CowStr<'a>>,
    ),
    _phantom: ::core::marker::PhantomData<&'a ()>,
}
impl<'a> File<'a> {
    /// Create a new builder for this type
    pub fn new() -> FileBuilder<'a, file_state::Empty> {
        FileBuilder::new()
    }
}

impl<'a> FileBuilder<'a, file_state::Empty> {
    /// Create a new builder with all fields unset
    pub fn new() -> Self {
        FileBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: (None, None, None, None, None),
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Optional fields keep `self`'s state (no typestate transition) and may be
// overwritten freely; required fields below consume the builder and advance it.
impl<'a, S: file_state::State> FileBuilder<'a, S> {
    /// Set the `base64` field (optional)
    pub fn base64(mut self, value: impl Into<Option<bool>>) -> Self {
        self.__unsafe_private_named.0 = value.into();
        self
    }
    /// Set the `base64` field to an Option value (optional)
    pub fn maybe_base64(mut self, value: Option<bool>) -> Self {
        self.__unsafe_private_named.0 = value;
        self
    }
}

// Setter available only while `blob` is still unset.
impl<'a, S> FileBuilder<'a, S>
where
    S: file_state::State,
    S::Blob: file_state::IsUnset,
{
    /// Set the `blob` field (required)
    pub fn blob(
        mut self,
        value: impl Into<jacquard_common::types::blob::BlobRef<'a>>,
    ) -> FileBuilder<'a, file_state::SetBlob<S>> {
        self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
        FileBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

impl<'a, S: file_state::State> FileBuilder<'a, S> {
    /// Set the `encoding` field (optional)
    pub fn encoding(
        mut self,
        value: impl Into<Option<jacquard_common::CowStr<'a>>>,
    ) -> Self {
        self.__unsafe_private_named.2 = value.into();
        self
    }
    /// Set the `encoding` field to an Option value (optional)
    pub fn maybe_encoding(mut self, value: Option<jacquard_common::CowStr<'a>>) -> Self {
        self.__unsafe_private_named.2 = value;
        self
    }
}

impl<'a, S: file_state::State> FileBuilder<'a, S> {
    /// Set the `mimeType` field (optional)
    pub fn mime_type(
        mut self,
        value: impl Into<Option<jacquard_common::CowStr<'a>>>,
    ) -> Self {
        self.__unsafe_private_named.3 = value.into();
        self
    }
    /// Set the `mimeType` field to an Option value (optional)
    pub fn maybe_mime_type(
        mut self,
        value: Option<jacquard_common::CowStr<'a>>,
    ) -> Self {
        self.__unsafe_private_named.3 = value;
        self
    }
}

// Setter available only while `type` is still unset.
impl<'a, S> FileBuilder<'a, S>
where
    S: file_state::State,
    S::Type: file_state::IsUnset,
{
    /// Set the `type` field (required)
    pub fn r#type(
        mut self,
        value: impl Into<jacquard_common::CowStr<'a>>,
    ) -> FileBuilder<'a, file_state::SetType<S>> {
        self.__unsafe_private_named.4 = ::core::option::Option::Some(value.into());
        FileBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// `build` is reachable only when both required fields are `IsSet`, so the
// `unwrap()`s below cannot fail.
impl<'a, S> FileBuilder<'a, S>
where
    S: file_state::State,
    S::Type: file_state::IsSet,
    S::Blob: file_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> File<'a> {
        File {
            base64: self.__unsafe_private_named.0,
            blob: self.__unsafe_private_named.1.unwrap(),
            encoding: self.__unsafe_private_named.2,
            mime_type: self.__unsafe_private_named.3,
            r#type: self.__unsafe_private_named.4.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> File<'a> {
        File {
            base64: self.__unsafe_private_named.0,
            blob: self.__unsafe_private_named.1.unwrap(),
            encoding: self.__unsafe_private_named.2,
            mime_type: self.__unsafe_private_named.3,
            r#type: self.__unsafe_private_named.4.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
// Schema identity glue for the `place.wisp.subfs#file` def.
// The schema declares no value constraints on `file`, so `validate` is a no-op.
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for File<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "file"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        Ok(())
    }
}
/// Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.
///
/// This is the record (`main`) def of `place.wisp.subfs`; see the
/// `LexiconSchema` impl for the `fileCount` range constraints (0..=1000).
#[jacquard_derive::lexicon]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct SubfsRecord<'a> {
    pub created_at: jacquard_common::types::string::Datetime,
    /// Optional file count; omitted from serialization when `None`.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub file_count: Option<i64>,
    /// Root directory of this subtree.
    #[serde(borrow)]
    pub root: crate::place_wisp::subfs::Directory<'a>,
}
/// Typestate machinery for [`SubfsRecordBuilder`]: required fields (`root`,
/// `createdAt`) are tracked at the type level; `build()` needs both set.
pub mod subfs_record_state {

    pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
    #[allow(unused)]
    use ::core::marker::PhantomData;
    // Private supertrait so downstream crates cannot implement `State`.
    mod sealed {
        pub trait Sealed {}
    }
    /// State trait tracking which required fields have been set
    pub trait State: sealed::Sealed {
        type Root;
        type CreatedAt;
    }
    /// Empty state - all required fields are unset
    pub struct Empty(());
    impl sealed::Sealed for Empty {}
    impl State for Empty {
        type Root = Unset;
        type CreatedAt = Unset;
    }
    ///State transition - sets the `root` field to Set
    pub struct SetRoot<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetRoot<S> {}
    impl<S: State> State for SetRoot<S> {
        type Root = Set<members::root>;
        type CreatedAt = S::CreatedAt;
    }
    ///State transition - sets the `created_at` field to Set
    pub struct SetCreatedAt<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetCreatedAt<S> {}
    impl<S: State> State for SetCreatedAt<S> {
        type Root = S::Root;
        type CreatedAt = Set<members::created_at>;
    }
    /// Marker types for field names
    #[allow(non_camel_case_types)]
    pub mod members {
        ///Marker type for the `root` field
        pub struct root(());
        ///Marker type for the `created_at` field
        pub struct created_at(());
    }
}
/// Builder for constructing an instance of this type
pub struct SubfsRecordBuilder<'a, S: subfs_record_state::State> {
    _phantom_state: ::core::marker::PhantomData<fn() -> S>,
    // Tuple slots in declaration order: (created_at, file_count, root).
    // The setter impls below index into this tuple positionally.
    __unsafe_private_named: (
        ::core::option::Option<jacquard_common::types::string::Datetime>,
        ::core::option::Option<i64>,
        ::core::option::Option<crate::place_wisp::subfs::Directory<'a>>,
    ),
    _phantom: ::core::marker::PhantomData<&'a ()>,
}
impl<'a> SubfsRecord<'a> {
    /// Create a new builder for this type
    pub fn new() -> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
        SubfsRecordBuilder::new()
    }
}

impl<'a> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
    /// Create a new builder with all fields unset
    pub fn new() -> Self {
        SubfsRecordBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: (None, None, None),
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Setter available only while `createdAt` is still unset.
impl<'a, S> SubfsRecordBuilder<'a, S>
where
    S: subfs_record_state::State,
    S::CreatedAt: subfs_record_state::IsUnset,
{
    /// Set the `createdAt` field (required)
    pub fn created_at(
        mut self,
        value: impl Into<jacquard_common::types::string::Datetime>,
    ) -> SubfsRecordBuilder<'a, subfs_record_state::SetCreatedAt<S>> {
        self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
        SubfsRecordBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Optional field: no typestate transition, may be overwritten freely.
impl<'a, S: subfs_record_state::State> SubfsRecordBuilder<'a, S> {
    /// Set the `fileCount` field (optional)
    pub fn file_count(mut self, value: impl Into<Option<i64>>) -> Self {
        self.__unsafe_private_named.1 = value.into();
        self
    }
    /// Set the `fileCount` field to an Option value (optional)
    pub fn maybe_file_count(mut self, value: Option<i64>) -> Self {
        self.__unsafe_private_named.1 = value;
        self
    }
}

// Setter available only while `root` is still unset.
impl<'a, S> SubfsRecordBuilder<'a, S>
where
    S: subfs_record_state::State,
    S::Root: subfs_record_state::IsUnset,
{
    /// Set the `root` field (required)
    pub fn root(
        mut self,
        value: impl Into<crate::place_wisp::subfs::Directory<'a>>,
    ) -> SubfsRecordBuilder<'a, subfs_record_state::SetRoot<S>> {
        self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
        SubfsRecordBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// `build` is reachable only when both required fields are `IsSet`, so the
// `unwrap()`s below cannot fail.
impl<'a, S> SubfsRecordBuilder<'a, S>
where
    S: subfs_record_state::State,
    S::Root: subfs_record_state::IsSet,
    S::CreatedAt: subfs_record_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> SubfsRecord<'a> {
        SubfsRecord {
            created_at: self.__unsafe_private_named.0.unwrap(),
            file_count: self.__unsafe_private_named.1,
            root: self.__unsafe_private_named.2.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> SubfsRecord<'a> {
        SubfsRecord {
            created_at: self.__unsafe_private_named.0.unwrap(),
            file_count: self.__unsafe_private_named.1,
            root: self.__unsafe_private_named.2.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
impl<'a> SubfsRecord<'a> {
    /// Parse an AT-URI string into a typed record URI for this collection.
    ///
    /// Returns a `UriError` if the input is not a valid AT-URI.
    pub fn uri(
        uri: impl Into<jacquard_common::CowStr<'a>>,
    ) -> Result<
        jacquard_common::types::uri::RecordUri<'a, SubfsRecordRecord>,
        jacquard_common::types::uri::UriError,
    > {
        jacquard_common::types::uri::RecordUri::try_from_uri(
            jacquard_common::types::string::AtUri::new_cow(uri.into())?,
        )
    }
}
/// Typed wrapper for GetRecord response with this collection's record type.
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct SubfsRecordGetRecordOutput<'a> {
    /// Record CID, when the server includes one; omitted from output if `None`.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    #[serde(borrow)]
    pub cid: std::option::Option<jacquard_common::types::string::Cid<'a>>,
    /// AT-URI of the fetched record.
    #[serde(borrow)]
    pub uri: jacquard_common::types::string::AtUri<'a>,
    /// The deserialized record value itself.
    #[serde(borrow)]
    pub value: SubfsRecord<'a>,
}
impl From<SubfsRecordGetRecordOutput<'_>> for SubfsRecord<'_> {
    // Unwrap the GetRecord envelope, discarding `uri`/`cid`.
    // `into_static()` deep-copies borrowed data so the result can outlive the
    // response buffer.
    fn from(output: SubfsRecordGetRecordOutput<'_>) -> Self {
        use jacquard_common::IntoStatic;
        output.value.into_static()
    }
}

// Collection binding: the record lives under the `place.wisp.subfs` NSID.
impl jacquard_common::types::collection::Collection for SubfsRecord<'_> {
    const NSID: &'static str = "place.wisp.subfs";
    type Record = SubfsRecordRecord;
}

/// Marker type for deserializing records from this collection.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SubfsRecordRecord;
// XRPC response binding: getRecord for this collection decodes into
// `SubfsRecordGetRecordOutput`.
impl jacquard_common::xrpc::XrpcResp for SubfsRecordRecord {
    const NSID: &'static str = "place.wisp.subfs";
    const ENCODING: &'static str = "application/json";
    type Output<'de> = SubfsRecordGetRecordOutput<'de>;
    type Err<'de> = jacquard_common::types::collection::RecordError<'de>;
}

impl jacquard_common::types::collection::Collection for SubfsRecordRecord {
    const NSID: &'static str = "place.wisp.subfs";
    type Record = SubfsRecordRecord;
}
// Schema identity + validation glue for the `place.wisp.subfs` record (`main`).
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for SubfsRecord<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "main"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// Enforces `fileCount` bounds from the schema: minimum 0, maximum 1000.
    /// A `None` fileCount is valid (the field is optional).
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        if let Some(ref value) = self.file_count {
            if *value > 1000i64 {
                return Err(::jacquard_lexicon::validation::ConstraintError::Maximum {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "file_count",
                    ),
                    max: 1000i64,
                    actual: *value,
                });
            }
        }
        if let Some(ref value) = self.file_count {
            if *value < 0i64 {
                return Err(::jacquard_lexicon::validation::ConstraintError::Minimum {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "file_count",
                    ),
                    min: 0i64,
                    actual: *value,
                });
            }
        }
        Ok(())
    }
}
/// Subfs pointer node (`place.wisp.subfs#subfs`): references another
/// `place.wisp.subfs` record by AT-URI instead of embedding entries inline.
#[jacquard_derive::lexicon]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct Subfs<'a> {
    /// AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.
    #[serde(borrow)]
    pub subject: jacquard_common::types::string::AtUri<'a>,
    /// Node discriminator string (serialized as `type`).
    #[serde(borrow)]
    pub r#type: jacquard_common::CowStr<'a>,
}
pub mod subfs_state {
    // Typestate machinery for `SubfsBuilder`: each required field's set/unset
    // status is tracked at the type level so `build()` only exists once all
    // required fields have been provided.

    pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
    #[allow(unused)]
    use ::core::marker::PhantomData;
    mod sealed {
        pub trait Sealed {}
    }
    /// State trait tracking which required fields have been set
    pub trait State: sealed::Sealed {
        type Type;
        type Subject;
    }
    /// Empty state - all required fields are unset
    pub struct Empty(());
    impl sealed::Sealed for Empty {}
    impl State for Empty {
        type Type = Unset;
        type Subject = Unset;
    }
    ///State transition - sets the `type` field to Set
    pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetType<S> {}
    impl<S: State> State for SetType<S> {
        type Type = Set<members::r#type>;
        type Subject = S::Subject;
    }
    ///State transition - sets the `subject` field to Set
    pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetSubject<S> {}
    impl<S: State> State for SetSubject<S> {
        type Type = S::Type;
        type Subject = Set<members::subject>;
    }
    /// Marker types for field names
    #[allow(non_camel_case_types)]
    pub mod members {
        ///Marker type for the `type` field
        pub struct r#type(());
        ///Marker type for the `subject` field
        pub struct subject(());
    }
}
/// Builder for constructing an instance of this type
pub struct SubfsBuilder<'a, S: subfs_state::State> {
    _phantom_state: ::core::marker::PhantomData<fn() -> S>,
    // Tuple of (subject, type); each slot is filled by its setter method.
    __unsafe_private_named: (
        ::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
        ::core::option::Option<jacquard_common::CowStr<'a>>,
    ),
    _phantom: ::core::marker::PhantomData<&'a ()>,
}

impl<'a> Subfs<'a> {
    /// Create a new builder for this type
    pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
        SubfsBuilder::new()
    }
}

impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
    /// Create a new builder with all fields unset
    pub fn new() -> Self {
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: (None, None),
            _phantom: ::core::marker::PhantomData,
        }
    }
}
impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Subject: subfs_state::IsUnset,
{
    /// Set the `subject` field (required)
    pub fn subject(
        mut self,
        value: impl Into<jacquard_common::types::string::AtUri<'a>>,
    ) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
        self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Type: subfs_state::IsUnset,
{
    /// Set the `type` field (required)
    pub fn r#type(
        mut self,
        value: impl Into<jacquard_common::CowStr<'a>>,
    ) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
        self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// `build` is only available once both required fields are Set; the unwraps
// below cannot panic because the typestate guarantees both Options are Some.
impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Type: subfs_state::IsSet,
    S::Subject: subfs_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> Subfs<'a> {
        Subfs {
            subject: self.__unsafe_private_named.0.unwrap(),
            r#type: self.__unsafe_private_named.1.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> Subfs<'a> {
        Subfs {
            subject: self.__unsafe_private_named.0.unwrap(),
            r#type: self.__unsafe_private_named.1.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "subfs"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// The `subfs` def declares no value constraints, so validation always
    /// succeeds.
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        Ok(())
    }
}
+433-55
cli/src/pull.rs
···22use crate::download;
33use crate::metadata::SiteMetadata;
44use crate::place_wisp::fs::*;
55+use crate::subfs_utils;
56use jacquard::CowStr;
67use jacquard::prelude::IdentityResolver;
78use jacquard_common::types::string::Did;
···6667 let fs_record: Fs = from_data(&record_output.value).into_diagnostic()?;
67686869 let file_count = fs_record.file_count.map(|c| c.to_string()).unwrap_or_else(|| "?".to_string());
6969- println!("Found site '{}' with {} files", fs_record.site, file_count);
7070+ println!("Found site '{}' with {} files (in main record)", fs_record.site, file_count);
7171+7272+ // Check for and expand subfs nodes
7373+ let expanded_root = expand_subfs_in_pull(&fs_record.root, &pds_url, did.as_str()).await?;
7474+ let total_file_count = subfs_utils::count_files_in_directory(&expanded_root);
7575+7676+ if total_file_count as i64 != fs_record.file_count.unwrap_or(0) {
7777+ println!("Total files after expanding subfs: {}", total_file_count);
7878+ }
70797180 // Load existing metadata for incremental updates
7281 let existing_metadata = SiteMetadata::load(&output_dir)?;
···7584 .map(|m| m.file_cids.clone())
7685 .unwrap_or_default();
77867878- // Extract blob map from the new manifest
7979- let new_blob_map = blob_map::extract_blob_map(&fs_record.root);
8787+ // Extract blob map from the expanded manifest
8888+ let new_blob_map = blob_map::extract_blob_map(&expanded_root);
8089 let new_file_cids: HashMap<String, String> = new_blob_map
8190 .iter()
8291 .map(|(path, (_blob_ref, cid))| (path.clone(), cid.clone()))
···96105 }
97106 }
981079999- // Check if we need to update (but only if output directory actually exists with files)
108108+ // Check if we need to update (verify files actually exist, not just metadata)
100109 if let Some(metadata) = &existing_metadata {
101110 if metadata.record_cid == record_cid {
102102- // Verify that the output directory actually exists and has content
103103- let has_content = output_dir.exists() &&
104104- output_dir.read_dir()
105105- .map(|mut entries| entries.any(|e| {
106106- if let Ok(entry) = e {
107107- !entry.file_name().to_string_lossy().starts_with(".wisp-metadata")
108108- } else {
109109- false
111111+ // Verify that the output directory actually exists and has the expected files
112112+ let has_all_files = output_dir.exists() && {
113113+ // Count actual files on disk (excluding metadata)
114114+ let mut actual_file_count = 0;
115115+ if let Ok(entries) = std::fs::read_dir(&output_dir) {
116116+ for entry in entries.flatten() {
117117+ let name = entry.file_name();
118118+ if !name.to_string_lossy().starts_with(".wisp-metadata") {
119119+ if entry.path().is_file() {
120120+ actual_file_count += 1;
121121+ }
110122 }
111111- }))
112112- .unwrap_or(false);
113113-114114- if has_content {
123123+ }
124124+ }
125125+126126+ // Compare with expected file count from metadata
127127+ let expected_count = metadata.file_cids.len();
128128+ actual_file_count > 0 && actual_file_count >= expected_count
129129+ };
130130+131131+ if has_all_files {
115132 println!("Site is already up to date!");
116133 return Ok(());
134134+ } else {
135135+ println!("Site metadata exists but files are missing, re-downloading...");
117136 }
118137 }
119138 }
···133152 let mut downloaded = 0;
134153 let mut reused = 0;
135154136136- // Download files recursively
155155+ // Download files recursively (using expanded root)
137156 let download_result = download_directory(
138138- &fs_record.root,
157157+ &expanded_root,
139158 &temp_dir,
140159 &pds_url,
141160 did.as_str(),
···218237 Ok(())
219238}
220239221221-/// Recursively download a directory
240240+/// Recursively download a directory with concurrent downloads
222241fn download_directory<'a>(
223242 dir: &'a Directory<'_>,
224243 output_dir: &'a Path,
···232251 reused: &'a mut usize,
233252) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<()>> + Send + 'a>> {
234253 Box::pin(async move {
254254+ use futures::stream::{self, StreamExt};
255255+256256+ // Collect download tasks and directory tasks separately
257257+ struct DownloadTask {
258258+ path: String,
259259+ output_path: PathBuf,
260260+ blob: jacquard_common::types::blob::BlobRef<'static>,
261261+ base64: bool,
262262+ gzip: bool,
263263+ }
264264+265265+ struct CopyTask {
266266+ path: String,
267267+ from: PathBuf,
268268+ to: PathBuf,
269269+ }
270270+271271+ let mut download_tasks = Vec::new();
272272+ let mut copy_tasks = Vec::new();
273273+ let mut dir_tasks = Vec::new();
274274+235275 for entry in &dir.entries {
236276 let entry_name = entry.name.as_str();
237277 let current_path = if path_prefix.is_empty() {
···245285 let output_path = output_dir.join(entry_name);
246286247287 // Check if file CID matches existing
248248- if let Some((_blob_ref, new_cid)) = new_blob_map.get(¤t_path) {
288288+ let should_copy = if let Some((_blob_ref, new_cid)) = new_blob_map.get(¤t_path) {
249289 if let Some(existing_cid) = existing_file_cids.get(¤t_path) {
250290 if existing_cid == new_cid {
251251- // File unchanged, copy from existing directory
252291 let existing_path = existing_output_dir.join(¤t_path);
253292 if existing_path.exists() {
254254- std::fs::copy(&existing_path, &output_path).into_diagnostic()?;
255255- *reused += 1;
256256- println!(" ✓ Reused {}", current_path);
257257- continue;
293293+ copy_tasks.push(CopyTask {
294294+ path: current_path.clone(),
295295+ from: existing_path,
296296+ to: output_path.clone(),
297297+ });
298298+ true
299299+ } else {
300300+ false
258301 }
302302+ } else {
303303+ false
259304 }
305305+ } else {
306306+ false
260307 }
261261- }
262262-263263- // File is new or changed, download it
264264- println!(" ↓ Downloading {}", current_path);
265265- let data = download::download_and_decompress_blob(
266266- pds_url,
267267- &file.blob,
268268- did,
269269- file.base64.unwrap_or(false),
270270- file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
271271- )
272272- .await?;
308308+ } else {
309309+ false
310310+ };
273311274274- std::fs::write(&output_path, data).into_diagnostic()?;
275275- *downloaded += 1;
312312+ if !should_copy {
313313+ use jacquard_common::IntoStatic;
314314+ // File needs to be downloaded
315315+ download_tasks.push(DownloadTask {
316316+ path: current_path,
317317+ output_path,
318318+ blob: file.blob.clone().into_static(),
319319+ base64: file.base64.unwrap_or(false),
320320+ gzip: file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
321321+ });
322322+ }
276323 }
277324 EntryNode::Directory(subdir) => {
278325 let subdir_path = output_dir.join(entry_name);
279279- std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
280280-281281- download_directory(
282282- subdir,
283283- &subdir_path,
284284- pds_url,
285285- did,
286286- new_blob_map,
287287- existing_file_cids,
288288- existing_output_dir,
289289- current_path,
290290- downloaded,
291291- reused,
292292- )
293293- .await?;
326326+ dir_tasks.push((subdir.as_ref().clone(), subdir_path, current_path));
327327+ }
328328+ EntryNode::Subfs(_) => {
329329+ println!(" ⚠ Skipping subfs node at {} (should have been expanded)", current_path);
294330 }
295331 EntryNode::Unknown(_) => {
296296- // Skip unknown node types
297332 println!(" ⚠ Skipping unknown node type for {}", current_path);
298333 }
299334 }
300335 }
301336337337+ // Execute copy tasks (fast, do them all)
338338+ for task in copy_tasks {
339339+ std::fs::copy(&task.from, &task.to).into_diagnostic()?;
340340+ *reused += 1;
341341+ println!(" ✓ Reused {}", task.path);
342342+ }
343343+344344+ // Execute download tasks with concurrency limit (20 concurrent downloads)
345345+ const DOWNLOAD_CONCURRENCY: usize = 20;
346346+347347+ let pds_url_clone = pds_url.clone();
348348+ let did_str = did.to_string();
349349+350350+ let download_results: Vec<miette::Result<(String, PathBuf, Vec<u8>)>> = stream::iter(download_tasks)
351351+ .map(|task| {
352352+ let pds = pds_url_clone.clone();
353353+ let did_copy = did_str.clone();
354354+355355+ async move {
356356+ println!(" ↓ Downloading {}", task.path);
357357+ let data = download::download_and_decompress_blob(
358358+ &pds,
359359+ &task.blob,
360360+ &did_copy,
361361+ task.base64,
362362+ task.gzip,
363363+ )
364364+ .await?;
365365+366366+ Ok::<_, miette::Report>((task.path, task.output_path, data))
367367+ }
368368+ })
369369+ .buffer_unordered(DOWNLOAD_CONCURRENCY)
370370+ .collect()
371371+ .await;
372372+373373+ // Write downloaded files to disk
374374+ for result in download_results {
375375+ let (path, output_path, data) = result?;
376376+ std::fs::write(&output_path, data).into_diagnostic()?;
377377+ *downloaded += 1;
378378+ println!(" ✓ Downloaded {}", path);
379379+ }
380380+381381+ // Recursively process directories
382382+ for (subdir, subdir_path, current_path) in dir_tasks {
383383+ std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
384384+385385+ download_directory(
386386+ &subdir,
387387+ &subdir_path,
388388+ pds_url,
389389+ did,
390390+ new_blob_map,
391391+ existing_file_cids,
392392+ existing_output_dir,
393393+ current_path,
394394+ downloaded,
395395+ reused,
396396+ )
397397+ .await?;
398398+ }
399399+302400 Ok(())
303401 })
304402}
305403404404+/// Expand subfs nodes in a directory tree by fetching and merging subfs records (RECURSIVELY)
405405+async fn expand_subfs_in_pull<'a>(
406406+ directory: &Directory<'a>,
407407+ pds_url: &Url,
408408+ _did: &str,
409409+) -> miette::Result<Directory<'static>> {
410410+ use crate::place_wisp::subfs::SubfsRecord;
411411+ use jacquard_common::types::value::from_data;
412412+ use jacquard_common::IntoStatic;
413413+414414+ // Recursively fetch ALL subfs records (including nested ones)
415415+ let mut all_subfs_map: HashMap<String, crate::place_wisp::subfs::Directory> = HashMap::new();
416416+ let mut to_fetch = subfs_utils::extract_subfs_uris(directory, String::new());
417417+418418+ if to_fetch.is_empty() {
419419+ return Ok((*directory).clone().into_static());
420420+ }
421421+422422+ println!("Found {} subfs records, fetching recursively...", to_fetch.len());
423423+ let client = reqwest::Client::new();
424424+425425+ // Keep fetching until we've resolved all subfs (including nested ones)
426426+ let mut iteration = 0;
427427+ const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
428428+429429+ while !to_fetch.is_empty() && iteration < MAX_ITERATIONS {
430430+ iteration += 1;
431431+ println!(" Iteration {}: fetching {} subfs records...", iteration, to_fetch.len());
432432+433433+ let mut fetch_tasks = Vec::new();
434434+435435+ for (uri, path) in to_fetch.clone() {
436436+ let client = client.clone();
437437+ let pds_url = pds_url.clone();
438438+439439+ fetch_tasks.push(async move {
440440+ let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();
441441+ if parts.len() < 3 {
442442+ return Err(miette::miette!("Invalid subfs URI: {}", uri));
443443+ }
444444+445445+ let _did = parts[0];
446446+ let collection = parts[1];
447447+ let rkey = parts[2];
448448+449449+ if collection != "place.wisp.subfs" {
450450+ return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
451451+ }
452452+453453+ use jacquard::api::com_atproto::repo::get_record::GetRecord;
454454+ use jacquard_common::types::string::Rkey as RkeyType;
455455+ use jacquard_common::types::ident::AtIdentifier;
456456+ use jacquard_common::types::string::{RecordKey, Did as DidType};
457457+458458+ let rkey_parsed = RkeyType::new(rkey).into_diagnostic()?;
459459+ let did_parsed = DidType::new(_did).into_diagnostic()?;
460460+461461+ let request = GetRecord::new()
462462+ .repo(AtIdentifier::Did(did_parsed))
463463+ .collection(CowStr::from("place.wisp.subfs"))
464464+ .rkey(RecordKey::from(rkey_parsed))
465465+ .build();
466466+467467+ let response = client
468468+ .xrpc(pds_url)
469469+ .send(&request)
470470+ .await
471471+ .into_diagnostic()?;
472472+473473+ let record_output = response.into_output().into_diagnostic()?;
474474+ let subfs_record: SubfsRecord = from_data(&record_output.value).into_diagnostic()?;
475475+ let subfs_record_static = subfs_record.into_static();
476476+477477+ Ok::<_, miette::Report>((path, subfs_record_static))
478478+ });
479479+ }
480480+481481+ let results: Vec<_> = futures::future::join_all(fetch_tasks).await;
482482+483483+ // Process results and find nested subfs
484484+ let mut newly_fetched = Vec::new();
485485+ for result in results {
486486+ match result {
487487+ Ok((path, record)) => {
488488+ println!(" ✓ Fetched subfs at {}", path);
489489+490490+ // Check for nested subfs in this record
491491+ let nested_subfs = extract_subfs_from_subfs_dir(&record.root, path.clone());
492492+ newly_fetched.extend(nested_subfs);
493493+494494+ all_subfs_map.insert(path, record.root);
495495+ }
496496+ Err(e) => {
497497+ eprintln!(" ⚠️ Failed to fetch subfs: {}", e);
498498+ }
499499+ }
500500+ }
501501+502502+ // Update to_fetch with only the NEW subfs we haven't fetched yet
503503+ to_fetch = newly_fetched
504504+ .into_iter()
505505+ .filter(|(uri, _)| !all_subfs_map.iter().any(|(k, _)| k == uri))
506506+ .collect();
507507+ }
508508+509509+ if iteration >= MAX_ITERATIONS {
510510+ return Err(miette::miette!("Max iterations reached while fetching nested subfs"));
511511+ }
512512+513513+ println!(" Total subfs records fetched: {}", all_subfs_map.len());
514514+515515+ // Now replace all subfs nodes with their content
516516+ Ok(replace_subfs_with_content(directory.clone(), &all_subfs_map, String::new()))
517517+}
518518+519519+/// Extract subfs URIs from a subfs::Directory
520520+fn extract_subfs_from_subfs_dir(
521521+ directory: &crate::place_wisp::subfs::Directory,
522522+ current_path: String,
523523+) -> Vec<(String, String)> {
524524+ let mut uris = Vec::new();
525525+526526+ for entry in &directory.entries {
527527+ let full_path = if current_path.is_empty() {
528528+ entry.name.to_string()
529529+ } else {
530530+ format!("{}/{}", current_path, entry.name)
531531+ };
532532+533533+ match &entry.node {
534534+ crate::place_wisp::subfs::EntryNode::Subfs(subfs_node) => {
535535+ uris.push((subfs_node.subject.to_string(), full_path.clone()));
536536+ }
537537+ crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
538538+ let nested = extract_subfs_from_subfs_dir(subdir, full_path);
539539+ uris.extend(nested);
540540+ }
541541+ _ => {}
542542+ }
543543+ }
544544+545545+ uris
546546+}
/// Recursively replace subfs nodes with their actual content
///
/// `subfs_map` is keyed by each subfs node's mount path (the same paths
/// produced by `extract_subfs_uris`). A subfs node whose record is missing
/// from the map is dropped with a warning. Returns a fully-owned (`'static`)
/// copy of the tree.
fn replace_subfs_with_content(
    directory: Directory,
    subfs_map: &HashMap<String, crate::place_wisp::subfs::Directory>,
    current_path: String,
) -> Directory<'static> {
    use jacquard_common::IntoStatic;

    // flat_map: a subfs node may expand into several sibling entries (flat
    // merge), exactly one (nested merge), or none (missing record).
    let new_entries: Vec<Entry<'static>> = directory
        .entries
        .into_iter()
        .flat_map(|entry| {
            let full_path = if current_path.is_empty() {
                entry.name.to_string()
            } else {
                format!("{}/{}", current_path, entry.name)
            };

            match entry.node {
                EntryNode::Subfs(subfs_node) => {
                    // Check if we have this subfs record
                    if let Some(subfs_dir) = subfs_map.get(&full_path) {
                        let flat = subfs_node.flat.unwrap_or(true); // Default to flat merge

                        if flat {
                            // Flat merge: hoist subfs entries into parent
                            println!(" Merging subfs {} (flat)", full_path);
                            let converted_entries: Vec<Entry<'static>> = subfs_dir
                                .entries
                                .iter()
                                .map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
                                .collect();

                            converted_entries
                        } else {
                            // Nested: create a directory with the subfs name
                            println!(" Merging subfs {} (nested)", full_path);
                            let converted_entries: Vec<Entry<'static>> = subfs_dir
                                .entries
                                .iter()
                                .map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
                                .collect();

                            vec![Entry::new()
                                .name(entry.name.into_static())
                                .node(EntryNode::Directory(Box::new(
                                    Directory::new()
                                        .r#type(CowStr::from("directory"))
                                        .entries(converted_entries)
                                        .build()
                                )))
                                .build()]
                        }
                    } else {
                        // Subfs not found, skip with warning
                        eprintln!(" ⚠️ Subfs not found: {}", full_path);
                        vec![]
                    }
                }
                EntryNode::Directory(dir) => {
                    // Recursively process subdirectories
                    vec![Entry::new()
                        .name(entry.name.into_static())
                        .node(EntryNode::Directory(Box::new(
                            replace_subfs_with_content(*dir, subfs_map, full_path)
                        )))
                        .build()]
                }
                EntryNode::File(_) => {
                    // Files pass through unchanged.
                    vec![entry.into_static()]
                }
                EntryNode::Unknown(_) => {
                    // Unrecognized nodes are preserved verbatim.
                    vec![entry.into_static()]
                }
            }
        })
        .collect();

    Directory::new()
        .r#type(CowStr::from("directory"))
        .entries(new_entries)
        .build()
}
/// Convert a subfs entry to a fs entry (they have the same structure but different types)
fn convert_subfs_entry_to_fs(subfs_entry: crate::place_wisp::subfs::Entry<'static>) -> Entry<'static> {
    use jacquard_common::IntoStatic;

    let node = match subfs_entry.node {
        crate::place_wisp::subfs::EntryNode::File(file) => {
            // Rebuild the file node field-by-field in the fs namespace.
            EntryNode::File(Box::new(
                File::new()
                    .r#type(file.r#type.into_static())
                    .blob(file.blob.into_static())
                    .encoding(file.encoding.map(|e| e.into_static()))
                    .mime_type(file.mime_type.map(|m| m.into_static()))
                    .base64(file.base64)
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Directory(dir) => {
            // Convert the subtree entry-by-entry, recursively.
            let converted_entries: Vec<Entry<'static>> = dir
                .entries
                .into_iter()
                .map(|e| convert_subfs_entry_to_fs(e.into_static()))
                .collect();

            EntryNode::Directory(Box::new(
                Directory::new()
                    .r#type(dir.r#type.into_static())
                    .entries(converted_entries)
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
            // Nested subfs should have been expanded already - if we get here, it means expansion failed
            // Treat it like a directory reference that should have been expanded
            eprintln!(" ⚠️ Warning: unexpanded nested subfs at path, treating as empty directory");
            EntryNode::Directory(Box::new(
                Directory::new()
                    .r#type(CowStr::from("directory"))
                    .entries(vec![])
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Unknown(unknown) => {
            // Unrecognized nodes are carried across unchanged.
            EntryNode::Unknown(unknown)
        }
    };

    Entry::new()
        .name(subfs_entry.name.into_static())
        .node(node)
        .build()
}
683683+
+336
cli/src/subfs_utils.rs
···11+use jacquard_common::types::string::AtUri;
22+use jacquard_common::types::blob::BlobRef;
33+use jacquard_common::IntoStatic;
44+use jacquard::client::{Agent, AgentSession, AgentSessionExt};
55+use jacquard::prelude::IdentityResolver;
66+use miette::IntoDiagnostic;
77+use std::collections::HashMap;
88+99+use crate::place_wisp::fs::{Directory as FsDirectory, EntryNode as FsEntryNode};
1010+use crate::place_wisp::subfs::SubfsRecord;
1111+1212+/// Extract all subfs URIs from a directory tree with their mount paths
1313+pub fn extract_subfs_uris(directory: &FsDirectory, current_path: String) -> Vec<(String, String)> {
1414+ let mut uris = Vec::new();
1515+1616+ for entry in &directory.entries {
1717+ let full_path = if current_path.is_empty() {
1818+ entry.name.to_string()
1919+ } else {
2020+ format!("{}/{}", current_path, entry.name)
2121+ };
2222+2323+ match &entry.node {
2424+ FsEntryNode::Subfs(subfs_node) => {
2525+ // Found a subfs node - store its URI and mount path
2626+ uris.push((subfs_node.subject.to_string(), full_path.clone()));
2727+ }
2828+ FsEntryNode::Directory(subdir) => {
2929+ // Recursively search subdirectories
3030+ let sub_uris = extract_subfs_uris(subdir, full_path);
3131+ uris.extend(sub_uris);
3232+ }
3333+ FsEntryNode::File(_) => {
3434+ // Files don't contain subfs
3535+ }
3636+ FsEntryNode::Unknown(_) => {
3737+ // Skip unknown nodes
3838+ }
3939+ }
4040+ }
4141+4242+ uris
4343+}
4444+4545+/// Fetch a subfs record from the PDS
4646+pub async fn fetch_subfs_record(
4747+ agent: &Agent<impl AgentSession + IdentityResolver>,
4848+ uri: &str,
4949+) -> miette::Result<SubfsRecord<'static>> {
5050+ // Parse URI: at://did/collection/rkey
5151+ let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();
5252+5353+ if parts.len() < 3 {
5454+ return Err(miette::miette!("Invalid subfs URI: {}", uri));
5555+ }
5656+5757+ let _did = parts[0];
5858+ let collection = parts[1];
5959+ let _rkey = parts[2];
6060+6161+ if collection != "place.wisp.subfs" {
6262+ return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
6363+ }
6464+6565+ // Construct AT-URI for fetching
6666+ let at_uri = AtUri::new(uri).into_diagnostic()?;
6767+6868+ // Fetch the record
6969+ let response = agent.get_record::<SubfsRecord>(&at_uri).await.into_diagnostic()?;
7070+ let record_output = response.into_output().into_diagnostic()?;
7171+7272+ Ok(record_output.value.into_static())
7373+}
/// Merge blob maps from subfs records into the main blob map
/// Returns the total number of blobs merged from all subfs records
pub async fn merge_subfs_blob_maps(
    agent: &Agent<impl AgentSession + IdentityResolver>,
    subfs_uris: Vec<(String, String)>,
    main_blob_map: &mut HashMap<String, (BlobRef<'static>, String)>,
) -> miette::Result<usize> {
    let mut total_merged = 0;

    println!("Fetching {} subfs records for blob reuse...", subfs_uris.len());

    // Fetch all subfs records in parallel (but with some concurrency limit)
    use futures::stream::{self, StreamExt};

    let subfs_results: Vec<_> = stream::iter(subfs_uris)
        .map(|(uri, mount_path)| async move {
            match fetch_subfs_record(agent, &uri).await {
                Ok(record) => Some((record, mount_path)),
                Err(e) => {
                    // Fetch failures are non-fatal: the affected blobs simply
                    // won't be reused and will be re-uploaded by the caller.
                    eprintln!(" ⚠️ Failed to fetch subfs {}: {}", uri, e);
                    None
                }
            }
        })
        .buffer_unordered(5) // at most 5 record fetches in flight
        .collect()
        .await;

    // Convert subfs Directory to fs Directory for blob extraction
    // Note: We need to extract blobs from the subfs record's root
    for result in subfs_results {
        if let Some((subfs_record, mount_path)) = result {
            // Extract blobs from this subfs record's root
            // The blob_map module works with fs::Directory, but subfs::Directory has the same structure
            // We need to convert or work directly with the entries

            let subfs_blob_map = extract_subfs_blobs(&subfs_record.root, mount_path.clone());
            let count = subfs_blob_map.len();

            // Paths from the subfs are already prefixed with the mount path,
            // so inserting overwrites any stale entries for the same files.
            for (path, blob_info) in subfs_blob_map {
                main_blob_map.insert(path, blob_info);
            }

            total_merged += count;
            println!(" ✓ Merged {} blobs from subfs at {}", count, mount_path);
        }
    }

    Ok(total_merged)
}
125125+126126+/// Extract blobs from a subfs directory (works with subfs::Directory)
127127+/// Returns a map of file paths to their blob refs and CIDs
128128+fn extract_subfs_blobs(
129129+ directory: &crate::place_wisp::subfs::Directory,
130130+ current_path: String,
131131+) -> HashMap<String, (BlobRef<'static>, String)> {
132132+ let mut blob_map = HashMap::new();
133133+134134+ for entry in &directory.entries {
135135+ let full_path = if current_path.is_empty() {
136136+ entry.name.to_string()
137137+ } else {
138138+ format!("{}/{}", current_path, entry.name)
139139+ };
140140+141141+ match &entry.node {
142142+ crate::place_wisp::subfs::EntryNode::File(file_node) => {
143143+ let blob_ref = &file_node.blob;
144144+ let cid_string = blob_ref.blob().r#ref.to_string();
145145+ blob_map.insert(
146146+ full_path,
147147+ (blob_ref.clone().into_static(), cid_string)
148148+ );
149149+ }
150150+ crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
151151+ let sub_map = extract_subfs_blobs(subdir, full_path);
152152+ blob_map.extend(sub_map);
153153+ }
154154+ crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
155155+ // Nested subfs - these should be resolved recursively in the main flow
156156+ // For now, we skip them (they'll be fetched separately)
157157+ eprintln!(" ⚠️ Found nested subfs at {}, skipping (should be fetched separately)", full_path);
158158+ }
159159+ crate::place_wisp::subfs::EntryNode::Unknown(_) => {
160160+ // Skip unknown nodes
161161+ }
162162+ }
163163+ }
164164+165165+ blob_map
166166+}
167167+168168+/// Count total files in a directory tree
169169+pub fn count_files_in_directory(directory: &FsDirectory) -> usize {
170170+ let mut count = 0;
171171+172172+ for entry in &directory.entries {
173173+ match &entry.node {
174174+ FsEntryNode::File(_) => count += 1,
175175+ FsEntryNode::Directory(subdir) => {
176176+ count += count_files_in_directory(subdir);
177177+ }
178178+ FsEntryNode::Subfs(_) => {
179179+ // Subfs nodes don't count towards the main manifest file count
180180+ }
181181+ FsEntryNode::Unknown(_) => {}
182182+ }
183183+ }
184184+185185+ count
186186+}
187187+188188+/// Estimate JSON size of a directory tree
189189+pub fn estimate_directory_size(directory: &FsDirectory) -> usize {
190190+ // Serialize to JSON and measure
191191+ match serde_json::to_string(directory) {
192192+ Ok(json) => json.len(),
193193+ Err(_) => 0,
194194+ }
195195+}
/// Information about a directory that could be split into a subfs record
#[derive(Debug)]
pub struct SplittableDirectory {
    // Slash-separated path of the directory relative to the site root.
    pub path: String,
    // Owned copy of the subtree that would become the subfs record's root.
    pub directory: FsDirectory<'static>,
    // Estimated serialized JSON size in bytes (see `estimate_directory_size`).
    pub size: usize,
    // Number of files in the subtree (see `count_files_in_directory`).
    pub file_count: usize,
}
/// Find large directories that could be split into subfs records
/// Returns directories sorted by size (largest first)
///
/// NOTE(review): every directory in the tree is deep-cloned into the result,
/// and each recursive call sorts its own slice before the parent re-sorts the
/// combined list — fine for CLI-sized trees, but worth revisiting for very
/// large sites.
pub fn find_large_directories(directory: &FsDirectory, current_path: String) -> Vec<SplittableDirectory> {
    let mut result = Vec::new();

    for entry in &directory.entries {
        if let FsEntryNode::Directory(subdir) = &entry.node {
            let dir_path = if current_path.is_empty() {
                entry.name.to_string()
            } else {
                format!("{}/{}", current_path, entry.name)
            };

            // JSON length serves as a proxy for the record size on the PDS.
            let size = estimate_directory_size(subdir);
            let file_count = count_files_in_directory(subdir);

            result.push(SplittableDirectory {
                path: dir_path.clone(),
                directory: (*subdir.clone()).into_static(),
                size,
                file_count,
            });

            // Recursively find subdirectories
            let subdirs = find_large_directories(subdir, dir_path);
            result.extend(subdirs);
        }
    }

    // Sort by size (largest first)
    result.sort_by(|a, b| b.size.cmp(&a.size));

    result
}
240240+241241+/// Replace a directory with a subfs node in the tree
242242+pub fn replace_directory_with_subfs(
243243+ directory: FsDirectory<'static>,
244244+ target_path: &str,
245245+ subfs_uri: &str,
246246+ flat: bool,
247247+) -> miette::Result<FsDirectory<'static>> {
248248+ use jacquard_common::CowStr;
249249+ use crate::place_wisp::fs::{Entry, Subfs};
250250+251251+ let path_parts: Vec<&str> = target_path.split('/').collect();
252252+253253+ if path_parts.is_empty() {
254254+ return Err(miette::miette!("Cannot replace root directory"));
255255+ }
256256+257257+ // Parse the subfs URI and make it owned/'static
258258+ let at_uri = AtUri::new_cow(jacquard_common::CowStr::from(subfs_uri.to_string())).into_diagnostic()?;
259259+260260+ // If this is a root-level directory
261261+ if path_parts.len() == 1 {
262262+ let target_name = path_parts[0];
263263+ let new_entries: Vec<Entry> = directory.entries.into_iter().map(|entry| {
264264+ if entry.name == target_name {
265265+ // Replace this directory with a subfs node
266266+ Entry::new()
267267+ .name(entry.name)
268268+ .node(FsEntryNode::Subfs(Box::new(
269269+ Subfs::new()
270270+ .r#type(CowStr::from("subfs"))
271271+ .subject(at_uri.clone())
272272+ .flat(Some(flat))
273273+ .build()
274274+ )))
275275+ .build()
276276+ } else {
277277+ entry
278278+ }
279279+ }).collect();
280280+281281+ return Ok(FsDirectory::new()
282282+ .r#type(CowStr::from("directory"))
283283+ .entries(new_entries)
284284+ .build());
285285+ }
286286+287287+ // Recursively navigate to parent directory
288288+ let first_part = path_parts[0];
289289+ let remaining_path = path_parts[1..].join("/");
290290+291291+ let new_entries: Vec<Entry> = directory.entries.into_iter().filter_map(|entry| {
292292+ if entry.name == first_part {
293293+ if let FsEntryNode::Directory(subdir) = entry.node {
294294+ // Recursively process this subdirectory
295295+ match replace_directory_with_subfs((*subdir).into_static(), &remaining_path, subfs_uri, flat) {
296296+ Ok(updated_subdir) => {
297297+ Some(Entry::new()
298298+ .name(entry.name)
299299+ .node(FsEntryNode::Directory(Box::new(updated_subdir)))
300300+ .build())
301301+ }
302302+ Err(_) => None, // Skip entries that fail to update
303303+ }
304304+ } else {
305305+ Some(entry)
306306+ }
307307+ } else {
308308+ Some(entry)
309309+ }
310310+ }).collect();
311311+312312+ Ok(FsDirectory::new()
313313+ .r#type(CowStr::from("directory"))
314314+ .entries(new_entries)
315315+ .build())
316316+}
/// Delete a subfs record from the PDS
///
/// Parses `uri` as an AT-URI, extracts the record key, and issues a
/// delete for the subfs collection through the authenticated agent.
///
/// # Errors
/// - `uri` is not a valid AT-URI or not a valid subfs record URI
/// - the URI has no rkey component (e.g. a collection-level URI)
/// - the PDS delete call fails
pub async fn delete_subfs_record(
    agent: &Agent<impl AgentSession + IdentityResolver>,
    uri: &str,
) -> miette::Result<()> {
    use jacquard_common::types::uri::RecordUri;

    // Construct AT-URI and convert to RecordUri
    // (typed to the subfs record so try_from_uri validates the collection)
    let at_uri = AtUri::new(uri).into_diagnostic()?;
    let record_uri: RecordUri<'_, crate::place_wisp::subfs::SubfsRecordRecord> = RecordUri::try_from_uri(at_uri).into_diagnostic()?;

    // rkey is optional on AT-URIs; a collection-level URI has none.
    let rkey = record_uri.rkey()
        .ok_or_else(|| miette::miette!("Invalid subfs URI: missing rkey"))?
        .clone();

    agent.delete_record::<SubfsRecord>(rkey).await.into_diagnostic()?;

    Ok(())
}