//! On-disk format definitions: block references, inodes, B-tree nodes,
//! the superblock, and freemap leaves, plus the constants that fix their
//! binary layout.
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
2
// --- Block size limits ----------------------------------------------------

/// Smallest supported block size in bytes (1 << BLOCK_SIZE_MIN_LOG2).
pub const BLOCK_SIZE_MIN: u32 = 4096;
/// Largest supported block size in bytes (1 << BLOCK_SIZE_MAX_LOG2).
pub const BLOCK_SIZE_MAX: u32 = 65536;
/// log2 of `BLOCK_SIZE_MIN` (4096 = 1 << 12).
pub const BLOCK_SIZE_MIN_LOG2: u8 = 12;
/// log2 of `BLOCK_SIZE_MAX` (65536 = 1 << 16).
pub const BLOCK_SIZE_MAX_LOG2: u8 = 16;

// --- On-disk structure sizes (enforced by the const asserts below) --------

/// Serialized size of a `BlockRef`, in bytes.
pub const BLOCKREF_SIZE: usize = 64;
/// Serialized size of an `Inode`, in bytes.
pub const INODE_SIZE: usize = 1024;
/// Serialized size of a `BTreeNode` (also used for `FreemapLeaf`), in bytes.
pub const BTREE_NODE_SIZE: usize = 4096;
/// Serialized size of a `Superblock`, in bytes.
pub const SUPERBLOCK_SIZE: usize = 4096;

/// Max `BlockRef` entries per node: 8 B header + 63 * 64 B + 56 B padding = 4096 B.
pub const BTREE_MAX_ENTRIES: usize = 63;
/// Entry count below which a node is considered underflowed (see `is_underflow`).
pub const BTREE_MIN_ENTRIES: usize = 31;
/// Number of direct block references embedded in each inode.
pub const INODE_DIRECT_REFS: usize = 4;
/// Bytes of inline payload that fit in the inode's direct-ref area (4 * 64 = 256).
pub const INODE_INLINE_MAX: usize = INODE_DIRECT_REFS * BLOCKREF_SIZE;

/// Allocation bits per freemap leaf: one bit per byte of the 4 KiB leaf * 8.
pub const FREEMAP_BITS_PER_LEAF: usize = BTREE_NODE_SIZE * 8;
/// Bytes of disk covered by one freemap leaf, at one bit per min-size block.
pub const FREEMAP_COVERAGE_PER_LEAF: u64 = FREEMAP_BITS_PER_LEAF as u64 * BLOCK_SIZE_MIN as u64;

// --- Magic numbers and versions -------------------------------------------

/// B-tree node magic; bytes are ASCII 'L' (0x4C), 'B' (0x42).
pub const BTREE_NODE_MAGIC: u16 = 0x4C42;
/// Superblock magic; the bytes spell ASCII "LancerFS" (most-significant first).
pub const SUPERBLOCK_MAGIC: u64 = 0x4C61_6E63_6572_4653;
/// Current on-disk format version (checked alongside the magic).
pub const SUPERBLOCK_VERSION: u32 = 2;
/// Number of dedup index shards rooted in the superblock.
pub const DEDUP_SHARDS: usize = 16;

// --- BlockRef address packing ----------------------------------------------
// `BlockRef::physical_addr` packs a 16-byte-aligned address in the high bits
// and a block-size shift in the low 4 bits.

/// Mask selecting the address portion of `BlockRef::physical_addr`.
pub const ADDR_MASK: u64 = !0xF;
/// Mask selecting the size-shift portion of `BlockRef::physical_addr`.
pub const SIZE_SHIFT_MASK: u64 = 0xF;
/// Largest encodable size shift (block size = BLOCK_SIZE_MIN << shift).
pub const MAX_SIZE_SHIFT: u8 = BLOCK_SIZE_MAX_LOG2 - BLOCK_SIZE_MIN_LOG2;
/// Maximum symlink hops before resolution gives up.
pub const MAX_SYMLINK_DEPTH: u8 = 8;
30
/// Kind of payload stored in the block a `BlockRef` points at.
///
/// Discriminants are the raw on-disk tag values.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BlockType {
    Data = 0,
    Inode = 1,
    Indirect = 2,
    Freemap = 3,
    Directory = 4,
}

impl BlockType {
    /// Decodes a raw on-disk tag; unknown values yield `None`.
    pub const fn from_u8(v: u8) -> Option<Self> {
        Some(match v {
            0 => Self::Data,
            1 => Self::Inode,
            2 => Self::Indirect,
            3 => Self::Freemap,
            4 => Self::Directory,
            _ => return None,
        })
    }
}
53
/// Compression algorithm applied to a block's payload.
///
/// Discriminants are the raw on-disk tag values.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Compression {
    None = 0,
    Lz4 = 1,
}

impl Compression {
    /// Decodes a raw on-disk tag; unknown values yield `None`.
    pub const fn from_u8(v: u8) -> Option<Self> {
        Some(match v {
            0 => Self::None,
            1 => Self::Lz4,
            _ => return None,
        })
    }
}
70
/// What kind of filesystem object an inode describes.
///
/// Discriminants are the raw on-disk tag values.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum InodeType {
    File = 0,
    Directory = 1,
    Symlink = 2,
}

impl InodeType {
    /// Decodes a raw on-disk tag; unknown values yield `None`.
    pub const fn from_u8(v: u8) -> Option<Self> {
        Some(match v {
            0 => Self::File,
            1 => Self::Directory,
            2 => Self::Symlink,
            _ => return None,
        })
    }
}
89
/// Per-inode compression policy tag.
///
/// Discriminants are the raw on-disk tag values. `Inherit` defers the
/// decision to whatever resolves the effective policy (outside this file).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum CompressionPolicy {
    Inherit = 0,
    Lz4 = 1,
    Disabled = 2,
}

impl CompressionPolicy {
    /// Decodes a raw on-disk tag; unknown values yield `None`.
    pub const fn from_u8(v: u8) -> Option<Self> {
        Some(match v {
            0 => Self::Inherit,
            1 => Self::Lz4,
            2 => Self::Disabled,
            _ => return None,
        })
    }
}
108
/// Namespace for the bit flags stored in `Inode::flags`.
pub struct InodeFlags;

impl InodeFlags {
    /// Payload (file data or symlink target) is stored inline in `Inode::direct`.
    pub const INLINE: u8 = 0x01;
    /// Payload is reached through indirect blocks.
    // NOTE(review): never set anywhere in this file — confirm semantics at call sites.
    pub const INDIRECT: u8 = 0x02;
}
115
/// On-disk reference to a block: packed physical location plus identity
/// and integrity metadata.
///
/// Exactly `BLOCKREF_SIZE` (64) bytes; `#[repr(C)]` with zerocopy derives
/// so it can be read/written as raw bytes.
#[derive(Clone, Copy, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct BlockRef {
    /// Bits 4..63: 16-byte-aligned physical address; bits 0..3: size shift
    /// (block size = BLOCK_SIZE_MIN << shift). See `ADDR_MASK` / `SIZE_SHIFT_MASK`.
    pub physical_addr: u64,
    /// Sort/lookup key within the parent B-tree node.
    pub key: u64,
    /// Transaction that wrote this reference.
    pub transaction_id: u64,
    /// 128-bit content hash, little-endian (see `content_hash_u128`).
    pub content_hash: [u8; 16],
    /// CRC used for integrity verification of the referenced data.
    pub integrity_crc: u32,
    /// Uncompressed (logical) payload size in bytes.
    pub logical_size: u32,
    /// Raw `BlockType` tag; decode with `block_type_enum`.
    pub block_type: u8,
    /// Raw `Compression` tag; decode with `compression_enum`.
    pub compression: u8,
    /// Flag bits.
    // NOTE(review): no BlockRef flag values are defined in this file — confirm meanings.
    pub flags: u8,
    // Pads the struct to exactly 64 bytes; must stay zero.
    _reserved: [u8; 13],
}

// Compile-time guarantee that the layout matches the on-disk size.
const _: () = assert!(core::mem::size_of::<BlockRef>() == BLOCKREF_SIZE);
132
133impl BlockRef {
134 pub const ZERO: Self = Self {
135 physical_addr: 0,
136 key: 0,
137 transaction_id: 0,
138 content_hash: [0; 16],
139 integrity_crc: 0,
140 logical_size: 0,
141 block_type: 0,
142 compression: 0,
143 flags: 0,
144 _reserved: [0; 13],
145 };
146
147 pub const fn is_null(&self) -> bool {
148 self.physical_addr == 0 && self.key == 0
149 }
150
151 pub const fn physical_block_addr(&self) -> u64 {
152 self.physical_addr & ADDR_MASK
153 }
154
155 pub const fn size_shift(&self) -> u8 {
156 (self.physical_addr & SIZE_SHIFT_MASK) as u8
157 }
158
159 pub const fn is_valid_size_shift(&self) -> bool {
160 self.size_shift() <= MAX_SIZE_SHIFT
161 }
162
163 pub const fn block_size_log2(&self) -> Option<u8> {
164 match self.is_valid_size_shift() {
165 true => Some(self.size_shift() + BLOCK_SIZE_MIN_LOG2),
166 false => None,
167 }
168 }
169
170 pub const fn block_size(&self) -> Option<u32> {
171 match self.block_size_log2() {
172 Some(log2) => Some(1u32 << log2),
173 None => None,
174 }
175 }
176
177 pub const fn block_type_enum(&self) -> Option<BlockType> {
178 BlockType::from_u8(self.block_type)
179 }
180
181 pub const fn compression_enum(&self) -> Option<Compression> {
182 Compression::from_u8(self.compression)
183 }
184
185 pub const fn content_hash_u128(&self) -> u128 {
186 u128::from_le_bytes(self.content_hash)
187 }
188
189 #[allow(clippy::too_many_arguments)]
190 pub const fn new(
191 addr: u64,
192 size_log2: u8,
193 key: u64,
194 transaction_id: u64,
195 content_hash: u128,
196 integrity_crc: u32,
197 block_type: BlockType,
198 compression: Compression,
199 logical_size: u32,
200 flags: u8,
201 ) -> Self {
202 let size_shift = size_log2 - BLOCK_SIZE_MIN_LOG2;
203 assert!(size_shift <= MAX_SIZE_SHIFT);
204 assert!(addr & 0xF == 0);
205
206 Self {
207 physical_addr: addr | (size_shift as u64),
208 key,
209 transaction_id,
210 content_hash: content_hash.to_le_bytes(),
211 integrity_crc,
212 logical_size,
213 block_type: block_type as u8,
214 compression: compression as u8,
215 flags,
216 _reserved: [0; 13],
217 }
218 }
219}
220
221impl core::fmt::Debug for BlockRef {
222 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
223 f.debug_struct("BlockRef")
224 .field("addr", &self.physical_block_addr())
225 .field("size", &self.block_size().unwrap_or(0))
226 .field("key", &self.key)
227 .field("txn", &self.transaction_id)
228 .field("type", &self.block_type_enum())
229 .field("compression", &self.compression_enum())
230 .field("crc", &self.integrity_crc)
231 .field("flags", &self.flags)
232 .finish()
233 }
234}
235
/// On-disk inode, exactly `INODE_SIZE` (1024) bytes.
///
/// `#[repr(C)]` with zerocopy derives so it can be read/written as raw bytes.
#[derive(Clone, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct Inode {
    /// Stable object identifier.
    pub object_id: u64,
    /// Generation counter for this object id.
    pub generation: u64,
    /// Transaction that last wrote this inode.
    pub transaction_id: u64,
    /// Logical size in bytes (also the inline-payload length when INLINE is set).
    pub size: u64,
    /// Number of blocks attributed to this inode.
    pub block_count: u64,
    /// Creation timestamp.
    // NOTE(review): epoch/units not defined in this file — confirm.
    pub create_time: u64,
    /// Last-modification timestamp (same unit as `create_time`).
    pub modify_time: u64,
    /// 128-bit hash, little-endian (see `subtree_hash_u128`).
    pub subtree_hash: [u8; 16],
    /// Hard-link count; constructors start it at 1.
    pub link_count: u32,
    /// Rights/permissions template identifier.
    pub rights_template: u16,
    /// Raw `InodeType` tag; decode with `inode_type_enum`.
    pub inode_type: u8,
    /// Raw `CompressionPolicy` tag; decode with `compression_policy_enum`.
    pub compression_policy: u8,
    /// Flag bits (see `InodeFlags`).
    pub flags: u8,
    // Alignment padding; must stay zero.
    _reserved: [u8; 7],
    /// Direct block references, or inline payload bytes when INLINE is set.
    pub direct: [BlockRef; INODE_DIRECT_REFS],
    /// Opaque extended-attribute area, padding the struct to 1024 bytes.
    pub extended_attrs: [u8; 680],
}

// Compile-time guarantee that the layout matches the on-disk size.
const _: () = assert!(core::mem::size_of::<Inode>() == INODE_SIZE);
258
259impl Inode {
260 pub const ZERO: Self = Self {
261 object_id: 0,
262 generation: 0,
263 transaction_id: 0,
264 size: 0,
265 block_count: 0,
266 create_time: 0,
267 modify_time: 0,
268 subtree_hash: [0; 16],
269 link_count: 0,
270 rights_template: 0,
271 inode_type: 0,
272 compression_policy: 0,
273 flags: 0,
274 _reserved: [0; 7],
275 direct: [BlockRef::ZERO; INODE_DIRECT_REFS],
276 extended_attrs: [0; 680],
277 };
278
279 pub const fn inode_type_enum(&self) -> Option<InodeType> {
280 InodeType::from_u8(self.inode_type)
281 }
282
283 pub const fn compression_policy_enum(&self) -> Option<CompressionPolicy> {
284 CompressionPolicy::from_u8(self.compression_policy)
285 }
286
287 pub const fn is_inline(&self) -> bool {
288 self.flags & InodeFlags::INLINE != 0
289 }
290
291 pub const fn subtree_hash_u128(&self) -> u128 {
292 u128::from_le_bytes(self.subtree_hash)
293 }
294
295 pub fn inline_data(&self) -> Option<&[u8]> {
296 match self.is_inline() {
297 true => {
298 let len = core::cmp::min(self.size as usize, INODE_INLINE_MAX);
299 Some(&self.direct.as_bytes()[..len])
300 }
301 false => None,
302 }
303 }
304
305 pub fn new_file(
306 object_id: u64,
307 generation: u64,
308 transaction_id: u64,
309 rights_template: u16,
310 create_time: u64,
311 ) -> Self {
312 Self {
313 object_id,
314 generation,
315 transaction_id,
316 inode_type: InodeType::File as u8,
317 compression_policy: CompressionPolicy::Inherit as u8,
318 rights_template,
319 create_time,
320 modify_time: create_time,
321 link_count: 1,
322 ..Self::ZERO
323 }
324 }
325
326 pub fn new_directory(
327 object_id: u64,
328 generation: u64,
329 transaction_id: u64,
330 rights_template: u16,
331 create_time: u64,
332 ) -> Self {
333 Self {
334 object_id,
335 generation,
336 transaction_id,
337 inode_type: InodeType::Directory as u8,
338 compression_policy: CompressionPolicy::Inherit as u8,
339 rights_template,
340 create_time,
341 modify_time: create_time,
342 link_count: 1,
343 ..Self::ZERO
344 }
345 }
346
347 pub fn new_symlink(
348 object_id: u64,
349 generation: u64,
350 transaction_id: u64,
351 rights_template: u16,
352 create_time: u64,
353 target: &[u8],
354 ) -> Self {
355 assert!(target.len() <= INODE_INLINE_MAX);
356 let mut inode = Self {
357 object_id,
358 generation,
359 transaction_id,
360 inode_type: InodeType::Symlink as u8,
361 compression_policy: CompressionPolicy::Disabled as u8,
362 rights_template,
363 create_time,
364 modify_time: create_time,
365 size: target.len() as u64,
366 flags: InodeFlags::INLINE,
367 link_count: 1,
368 ..Self::ZERO
369 };
370 inode.direct.as_mut_bytes()[..target.len()].copy_from_slice(target);
371 inode
372 }
373}
374
375impl core::fmt::Debug for Inode {
376 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
377 f.debug_struct("Inode")
378 .field("object_id", &self.object_id)
379 .field("generation", &self.generation)
380 .field("txn", &self.transaction_id)
381 .field("type", &self.inode_type_enum())
382 .field("size", &self.size)
383 .field("blocks", &self.block_count)
384 .field("links", &self.link_count)
385 .field("inline", &self.is_inline())
386 .finish()
387 }
388}
389
/// 8-byte header at the start of every B-tree node.
#[derive(Clone, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct BTreeNodeHeader {
    /// Must equal `BTREE_NODE_MAGIC` for the node to be considered valid.
    pub magic: u16,
    /// Number of live entries (at most `BTREE_MAX_ENTRIES`).
    pub entry_count: u16,
    /// Tree level: 0 = leaf, > 0 = interior node.
    pub level: u8,
    /// Flag bits.
    // NOTE(review): no node flag values are defined in this file — confirm meanings.
    pub flags: u8,
    // Alignment padding; must stay zero.
    _reserved: [u8; 2],
}

// Compile-time guarantee that the header is exactly 8 bytes.
const _: () = assert!(core::mem::size_of::<BTreeNodeHeader>() == 8);
401
402impl BTreeNodeHeader {
403 pub const fn new(level: u8) -> Self {
404 Self {
405 magic: BTREE_NODE_MAGIC,
406 entry_count: 0,
407 level,
408 flags: 0,
409 _reserved: [0; 2],
410 }
411 }
412
413 pub const fn is_valid(&self) -> bool {
414 self.magic == BTREE_NODE_MAGIC
415 }
416
417 pub const fn is_leaf(&self) -> bool {
418 self.level == 0
419 }
420}
421
422impl core::fmt::Debug for BTreeNodeHeader {
423 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
424 f.debug_struct("BTreeNodeHeader")
425 .field("magic", &format_args!("{:#06x}", self.magic))
426 .field("level", &self.level)
427 .field("entries", &self.entry_count)
428 .field("flags", &self.flags)
429 .finish()
430 }
431}
432
/// Fixed-size B-tree node: 8 B header + 63 * 64 B entries + 56 B padding = 4096 B.
#[derive(Clone, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct BTreeNode {
    /// Node metadata (magic, level, live entry count).
    pub header: BTreeNodeHeader,
    /// Entry slots; only the first `header.entry_count` are live.
    pub entries: [BlockRef; BTREE_MAX_ENTRIES],
    // Pads the node to exactly BTREE_NODE_SIZE; must stay zero.
    _padding: [u8; 56],
}

// Compile-time guarantee that the layout matches the on-disk size.
const _: () = assert!(core::mem::size_of::<BTreeNode>() == BTREE_NODE_SIZE);
442
443impl BTreeNode {
444 pub const ZEROED: Self = Self {
445 header: BTreeNodeHeader {
446 magic: 0,
447 entry_count: 0,
448 level: 0,
449 flags: 0,
450 _reserved: [0; 2],
451 },
452 entries: [BlockRef::ZERO; BTREE_MAX_ENTRIES],
453 _padding: [0; 56],
454 };
455
456 pub const EMPTY_LEAF: Self = Self {
457 header: BTreeNodeHeader {
458 magic: BTREE_NODE_MAGIC,
459 entry_count: 0,
460 level: 0,
461 flags: 0,
462 _reserved: [0; 2],
463 },
464 entries: [BlockRef::ZERO; BTREE_MAX_ENTRIES],
465 _padding: [0; 56],
466 };
467
468 pub const fn new(level: u8) -> Self {
469 Self {
470 header: BTreeNodeHeader {
471 magic: BTREE_NODE_MAGIC,
472 entry_count: 0,
473 level,
474 flags: 0,
475 _reserved: [0; 2],
476 },
477 entries: [BlockRef::ZERO; BTREE_MAX_ENTRIES],
478 _padding: [0; 56],
479 }
480 }
481
482 pub const fn is_valid(&self) -> bool {
483 self.header.is_valid()
484 }
485
486 pub const fn is_leaf(&self) -> bool {
487 self.header.is_leaf()
488 }
489
490 pub const fn entry_count(&self) -> usize {
491 self.header.entry_count as usize
492 }
493
494 pub const fn is_full(&self) -> bool {
495 self.entry_count() >= BTREE_MAX_ENTRIES
496 }
497
498 pub const fn is_underflow(&self) -> bool {
499 self.entry_count() < BTREE_MIN_ENTRIES
500 }
501
502 pub fn active_entries(&self) -> &[BlockRef] {
503 &self.entries[..self.entry_count()]
504 }
505
506 pub fn search_by_key(&self, target: u64) -> Result<usize, usize> {
507 self.active_entries()
508 .binary_search_by_key(&target, |entry| entry.key)
509 }
510
511 pub fn find_child_index(&self, target: u64) -> usize {
512 let count = self.entry_count();
513 match self.search_by_key(target) {
514 Ok(idx) => idx,
515 Err(idx) => idx.min(count.saturating_sub(1)),
516 }
517 }
518}
519
520impl core::fmt::Debug for BTreeNode {
521 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
522 f.debug_struct("BTreeNode")
523 .field("header", &self.header)
524 .field("active_entries", &self.entry_count())
525 .finish()
526 }
527}
528
/// On-disk superblock, exactly `SUPERBLOCK_SIZE` (4096) bytes.
///
/// Layout: 48 B scalar header + 2 * 64 B roots + 16 * 64 B dedup roots
/// + 64 B snapshot root + 16 B cursors + 2812 B reserved + 4 B checksum.
#[derive(Clone, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct Superblock {
    /// Must equal `SUPERBLOCK_MAGIC`.
    pub magic: u64,
    /// Must equal `SUPERBLOCK_VERSION` (checked by `is_valid_magic`).
    pub version: u32,
    // Alignment padding; must stay zero.
    _pad0: u32,
    /// Monotonic commit sequence; the higher of the two copies is newer.
    pub sequence: u64,
    /// Last committed transaction id.
    pub transaction_id: u64,
    /// Total blocks in the volume.
    pub total_blocks: u64,
    /// Volume block size in bytes.
    pub block_size: u32,
    // Alignment padding; must stay zero.
    _pad1: u32,
    /// Root of the main object tree.
    pub tree_root: BlockRef,
    /// Root of the free-space map.
    pub freemap_root: BlockRef,
    /// Roots of the `DEDUP_SHARDS` dedup index shards.
    pub dedup_roots: [BlockRef; DEDUP_SHARDS],
    /// Root of the snapshot tree.
    pub snapshot_root: BlockRef,
    /// Resume position for background scrubbing.
    pub scrub_cursor: u64,
    /// Next object id to hand out.
    pub next_object_id: u64,
    // Reserved space padding the struct to 4096 bytes; must stay zero.
    _reserved: [u8; 2812],
    /// Integrity checksum over the superblock.
    // NOTE(review): computed/verified elsewhere — this file never checks it.
    pub checksum: u32,
}

// Compile-time guarantee that the layout matches the on-disk size.
const _: () = assert!(core::mem::size_of::<Superblock>() == SUPERBLOCK_SIZE);
551
552impl Superblock {
553 pub const fn new(total_blocks: u64, block_size: u32) -> Self {
554 Self {
555 magic: SUPERBLOCK_MAGIC,
556 version: SUPERBLOCK_VERSION,
557 _pad0: 0,
558 sequence: 0,
559 transaction_id: 0,
560 total_blocks,
561 block_size,
562 _pad1: 0,
563 tree_root: BlockRef::ZERO,
564 freemap_root: BlockRef::ZERO,
565 dedup_roots: [BlockRef::ZERO; DEDUP_SHARDS],
566 snapshot_root: BlockRef::ZERO,
567 scrub_cursor: 0,
568 next_object_id: 0,
569 _reserved: [0; 2812],
570 checksum: 0,
571 }
572 }
573
574 pub const fn is_valid_magic(&self) -> bool {
575 self.magic == SUPERBLOCK_MAGIC && self.version == SUPERBLOCK_VERSION
576 }
577
578 pub const fn next_sequence(&self) -> Option<u64> {
579 self.sequence.checked_add(1)
580 }
581
582 pub const fn next_transaction(&self) -> Option<u64> {
583 self.transaction_id.checked_add(1)
584 }
585}
586
587impl core::fmt::Debug for Superblock {
588 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
589 f.debug_struct("Superblock")
590 .field("magic", &format_args!("{:#018x}", self.magic))
591 .field("version", &self.version)
592 .field("sequence", &self.sequence)
593 .field("txn", &self.transaction_id)
594 .field("total_blocks", &self.total_blocks)
595 .field("block_size", &self.block_size)
596 .field("tree_root", &self.tree_root)
597 .field("freemap_root", &self.freemap_root)
598 .field("checksum", &format_args!("{:#010x}", self.checksum))
599 .finish()
600 }
601}
602
603pub fn select_superblock<'a>(a: &'a Superblock, b: &'a Superblock) -> &'a Superblock {
604 let a_valid = a.is_valid_magic();
605 let b_valid = b.is_valid_magic();
606
607 match (a_valid, b_valid) {
608 (true, true) => match a.sequence >= b.sequence {
609 true => a,
610 false => b,
611 },
612 (true, false) => a,
613 (false, true) => b,
614 (false, false) => a,
615 }
616}
617
618pub fn commit_target<'a>(a: &'a Superblock, b: &'a Superblock) -> SuperblockSlot {
619 let a_valid = a.is_valid_magic();
620 let b_valid = b.is_valid_magic();
621
622 match (a_valid, b_valid) {
623 (true, true) => match a.sequence < b.sequence {
624 true => SuperblockSlot::A,
625 false => SuperblockSlot::B,
626 },
627 (true, false) => SuperblockSlot::B,
628 (false, true) => SuperblockSlot::A,
629 (false, false) => SuperblockSlot::A,
630 }
631}
632
/// Which of the two redundant superblock copies is meant.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SuperblockSlot {
    A,
    B,
}

impl SuperblockSlot {
    /// Disk block number holding this copy (A = block 0, B = block 1).
    pub const fn block_number(self) -> u64 {
        // Fieldless enum with default discriminants (A = 0, B = 1), so the
        // cast yields exactly the values the original match produced.
        self as u64
    }
}
647
/// One leaf of the free-space map: a 4 KiB bitmap, one bit per block
/// (set = allocated, clear = free).
#[derive(Clone, PartialEq, Eq, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct FreemapLeaf {
    /// Raw bitmap bytes; bit i of the leaf is `bits[i / 8] >> (i % 8) & 1`.
    pub bits: [u8; BTREE_NODE_SIZE],
}

// Compile-time guarantee that the layout matches the on-disk size.
const _: () = assert!(core::mem::size_of::<FreemapLeaf>() == BTREE_NODE_SIZE);
655
impl FreemapLeaf {
    /// Leaf with every bit clear: all covered blocks free.
    pub const ZERO: Self = Self {
        bits: [0; BTREE_NODE_SIZE],
    };
    /// Leaf with every bit set: all covered blocks allocated.
    pub const FULL: Self = Self {
        bits: [0xFF; BTREE_NODE_SIZE],
    };

    /// True when bit `index` is set (block allocated).
    /// Panics on an out-of-range index (beyond `FREEMAP_BITS_PER_LEAF`).
    pub const fn is_allocated(&self, index: usize) -> bool {
        let byte = index / 8;
        let bit = index % 8;
        (self.bits[byte] & (1 << bit)) != 0
    }

    /// Returns a copy of `self` with bit `index` set (usable in const context).
    pub const fn with_bit_set(mut self, index: usize) -> Self {
        let byte = index / 8;
        let bit = index % 8;
        self.bits[byte] |= 1 << bit;
        self
    }

    /// Returns a copy of `self` with bit `index` cleared (usable in const context).
    pub const fn with_bit_cleared(mut self, index: usize) -> Self {
        let byte = index / 8;
        let bit = index % 8;
        self.bits[byte] &= !(1 << bit);
        self
    }

    /// Marks bit `index` allocated, in place.
    pub fn set_bit(&mut self, index: usize) {
        let byte = index / 8;
        let bit = index % 8;
        self.bits[byte] |= 1 << bit;
    }

    /// Marks bit `index` free, in place.
    pub fn clear_bit(&mut self, index: usize) {
        let byte = index / 8;
        let bit = index % 8;
        self.bits[byte] &= !(1 << bit);
    }

    /// Marks bits `start..start + count` allocated.
    pub fn set_range(&mut self, start: usize, count: usize) {
        (start..start + count).for_each(|i| self.set_bit(i));
    }

    /// Marks bits `start..start + count` free.
    pub fn clear_range(&mut self, start: usize, count: usize) {
        (start..start + count).for_each(|i| self.clear_bit(i));
    }

    /// True when every bit in `start..start + count` is set.
    pub fn is_range_allocated(&self, start: usize, count: usize) -> bool {
        (start..start + count).all(|i| self.is_allocated(i))
    }

    /// First index of `count` consecutive free bits, scanning from bit 0.
    pub fn find_contiguous_free(&self, count: usize) -> Option<usize> {
        self.find_contiguous_free_from(0, count)
    }

    /// First index (>= `start_bit`) starting a run of `count` consecutive
    /// free (clear) bits, or `None` if no such run exists. `count == 0`
    /// always returns `None`.
    ///
    /// Scans 64 bits at a time; the current free run is tracked in
    /// (`run_start`, `run_len`) and may span word boundaries.
    pub fn find_contiguous_free_from(&self, start_bit: usize, count: usize) -> Option<usize> {
        if count == 0 {
            return None;
        }

        let total_bits = self.bits.len() * 8;
        let start_word = start_bit / 64;
        let (mut run_start, mut run_len) = (start_bit, 0usize);

        self.bits
            .chunks_exact(8)
            .enumerate()
            .skip(start_word)
            .find_map(|(word_idx, chunk)| {
                // Reassemble the 8 bytes as a little-endian u64.
                let word = u64::from_le_bytes([
                    chunk[0], chunk[1], chunk[2], chunk[3], chunk[4], chunk[5], chunk[6], chunk[7],
                ]);
                // Only the first scanned word may begin mid-word.
                let skip_bits = match word_idx == start_word {
                    true => (start_bit % 64) as u32,
                    false => 0,
                };
                // Force the bits below `start_bit` to 1 ("allocated") so
                // they can never be counted as part of a free run.
                let masked = match skip_bits {
                    0 => word,
                    n => word | ((1u64 << n) - 1),
                };
                match masked {
                    // Fully allocated word: reset the run; the next possible
                    // run start is the first bit of the next word.
                    u64::MAX => {
                        run_len = 0;
                        run_start = (word_idx + 1) * 64;
                        None
                    }
                    // Fully free word entered at its first bit: extend the
                    // current run by all 64 bits at once.
                    _ if masked == 0 && skip_bits == 0 => {
                        if run_len == 0 {
                            run_start = word_idx * 64;
                        }
                        run_len += 64;
                        match run_len >= count {
                            true => Some(run_start),
                            false => None,
                        }
                    }
                    // Mixed word (or a free word entered mid-word): walk it
                    // bit by bit, extending or resetting the run.
                    partial => {
                        let base = word_idx * 64;
                        (skip_bits..64).find_map(|bit| match (partial >> bit) & 1 != 0 {
                            true => {
                                // Allocated bit ends the run; next candidate
                                // start is the following bit.
                                run_len = 0;
                                run_start = base + bit as usize + 1;
                                None
                            }
                            false => {
                                if run_len == 0 {
                                    run_start = base + bit as usize;
                                }
                                run_len += 1;
                                match run_len >= count {
                                    true => Some(run_start),
                                    false => None,
                                }
                            }
                        })
                    }
                }
            })
            // Defensive: never report a run that would extend past the leaf.
            .filter(|&start| start + count <= total_bits)
    }
}
778
779impl core::fmt::Debug for FreemapLeaf {
780 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
781 let allocated = self
782 .bits
783 .iter()
784 .fold(0u32, |acc, &byte| acc + byte.count_ones());
785 let total = self.bits.len() * 8;
786 f.debug_struct("FreemapLeaf")
787 .field("allocated", &allocated)
788 .field("total", &total)
789 .finish()
790 }
791}