//! Write-back block cache: fixed slots, LRU eviction, per-CPU dirty bitmaps.
1use crate::block_io::BlockIo;
2use crate::error::FsError;
3use lancer_core::static_vec::StaticVec;
4
/// Number of block slots held in the cache.
const CACHE_ENTRIES: usize = 512;
/// Sentinel index meaning "no slot" in the LRU links and head/tail pointers.
const NULL_IDX: u16 = u16::MAX;
/// Size of one cached block in bytes.
const BLOCK_SIZE: usize = 4096;
/// Upper bound on distinct CPU slots tracked by the per-CPU dirty bitmaps.
const MAX_CPU_SLOTS: usize = 1024;
/// u64 words needed for one bit per cache entry (per-CPU dirty bitmap).
const DIRTY_BITMAP_WORDS: usize = CACHE_ENTRIES / 64;
/// u64 words needed for one bit per cache entry (inode-block bitmap).
const INODE_BITMAP_WORDS: usize = CACHE_ENTRIES / 64;
11
/// One bit per cache slot, recording which slots a given CPU has dirtied.
#[derive(Clone, Copy)]
struct CpuDirtyBitmap {
    // Bit i set => cache slot i was dirtied while this CPU slot was current.
    bits: [u64; DIRTY_BITMAP_WORDS],
}
16
17impl CpuDirtyBitmap {
18 const EMPTY: Self = Self {
19 bits: [0u64; DIRTY_BITMAP_WORDS],
20 };
21
22 fn mark(&mut self, idx: u16) {
23 let word = idx as usize / 64;
24 let bit = idx as usize % 64;
25 self.bits[word] |= 1u64 << bit;
26 }
27
28 fn clear(&mut self) {
29 self.bits.iter_mut().for_each(|w| *w = 0);
30 }
31
32 fn unmark(&mut self, idx: u16) {
33 let word = idx as usize / 64;
34 let bit = idx as usize % 64;
35 self.bits[word] &= !(1u64 << bit);
36 }
37
38 fn is_empty(&self) -> bool {
39 self.bits.iter().all(|&w| w == 0)
40 }
41}
42
/// Lifecycle state of a single cache slot.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum CacheState {
    /// Slot holds no block.
    Free,
    /// Slot contents match the backing device.
    Clean,
    /// Slot has been modified and must be written back.
    Dirty,
    /// Slot is pinned via `cache_lock`; eviction scans skip it.
    Locked,
}
50
/// Per-slot metadata: which device block is cached and where the slot sits
/// in the intrusive doubly linked LRU list.
#[derive(Clone, Copy)]
struct CacheEntry {
    /// Device block number cached in this slot (meaningless while `Free`).
    block_num: u64,
    state: CacheState,
    /// Neighbor toward the LRU head (more recent); `NULL_IDX` at the head.
    lru_prev: u16,
    /// Neighbor toward the LRU tail (less recent); `NULL_IDX` at the tail.
    lru_next: u16,
}
58
impl CacheEntry {
    /// An unoccupied slot: `Free` state, detached from the LRU list.
    const FREE: Self = Self {
        block_num: 0,
        state: CacheState::Free,
        lru_prev: NULL_IDX,
        lru_next: NULL_IDX,
    };
}
67
/// Fixed-capacity write-back block cache with LRU eviction, per-CPU dirty
/// tracking for batched flushing, and a bitmap tagging inode-bearing blocks.
pub struct BlockCache {
    /// Slot metadata, parallel to `data`.
    meta: [CacheEntry; CACHE_ENTRIES],
    /// Cached block payloads, parallel to `meta`.
    data: [[u8; BLOCK_SIZE]; CACHE_ENTRIES],
    /// Most-recently-used slot, or `NULL_IDX` when the list is empty.
    lru_head: u16,
    /// Least-recently-used slot, or `NULL_IDX` when the list is empty.
    lru_tail: u16,
    /// Number of non-`Free` slots.
    used_count: u16,
    /// One dirty bitmap per CPU slot; flushed and cleared per CPU.
    cpu_dirty: [CpuDirtyBitmap; MAX_CPU_SLOTS],
    /// Bit per slot marking blocks flagged via `mark_inode`.
    inode_bits: [u64; INODE_BITMAP_WORDS],
    /// CPU slot used to attribute new dirty marks.
    current_cpu: usize,
    /// Highest CPU slot seen so far; bounds per-CPU bitmap scans.
    max_cpu: usize,
}
79
80impl BlockCache {
81 pub const fn new() -> Self {
82 Self {
83 meta: [CacheEntry::FREE; CACHE_ENTRIES],
84 data: [[0u8; BLOCK_SIZE]; CACHE_ENTRIES],
85 lru_head: NULL_IDX,
86 lru_tail: NULL_IDX,
87 used_count: 0,
88 cpu_dirty: [CpuDirtyBitmap::EMPTY; MAX_CPU_SLOTS],
89 inode_bits: [0u64; INODE_BITMAP_WORDS],
90 current_cpu: 0,
91 max_cpu: 0,
92 }
93 }
94
95 pub fn set_current_cpu(&mut self, cpu: usize) {
96 let clamped = cpu.min(MAX_CPU_SLOTS - 1);
97 self.current_cpu = clamped;
98 if clamped > self.max_cpu {
99 self.max_cpu = clamped;
100 }
101 }
102
103 fn mark_cpu_dirty(&mut self, idx: u16) {
104 self.cpu_dirty[self.current_cpu].mark(idx);
105 }
106
107 fn clear_cpu_dirty_slot(&mut self, idx: u16) {
108 (0..=self.max_cpu).for_each(|cpu| self.cpu_dirty[cpu].unmark(idx));
109 }
110
111 pub fn mark_inode(&mut self, block_num: u64) {
112 self.find_entry(block_num).into_iter().for_each(|idx| {
113 let word = idx as usize / 64;
114 let bit = idx as usize % 64;
115 self.inode_bits[word] |= 1u64 << bit;
116 });
117 }
118
119 fn clear_inode_bit(&mut self, idx: u16) {
120 let word = idx as usize / 64;
121 let bit = idx as usize % 64;
122 self.inode_bits[word] &= !(1u64 << bit);
123 }
124
125 pub fn dirty_inode_block_nums(&self, mut f: impl FnMut(u64)) {
126 (0..CACHE_ENTRIES as u16)
127 .filter(|&i| {
128 let word = i as usize / 64;
129 let bit = i as usize % 64;
130 self.meta[i as usize].state == CacheState::Dirty
131 && self.inode_bits[word] & (1u64 << bit) != 0
132 })
133 .for_each(|i| f(self.meta[i as usize].block_num));
134 }
135}
136
137impl Default for BlockCache {
138 fn default() -> Self {
139 Self::new()
140 }
141}
142
143impl BlockCache {
144 fn find_entry(&self, block_num: u64) -> Option<u16> {
145 (0..CACHE_ENTRIES as u16).find(|&scan| {
146 let idx = scan as usize;
147 self.meta[idx].state != CacheState::Free && self.meta[idx].block_num == block_num
148 })
149 }
150
151 fn unlink(&mut self, idx: u16) {
152 let entry = self.meta[idx as usize];
153 match entry.lru_prev {
154 NULL_IDX => self.lru_head = entry.lru_next,
155 prev => self.meta[prev as usize].lru_next = entry.lru_next,
156 }
157 match entry.lru_next {
158 NULL_IDX => self.lru_tail = entry.lru_prev,
159 next => self.meta[next as usize].lru_prev = entry.lru_prev,
160 }
161 self.meta[idx as usize].lru_prev = NULL_IDX;
162 self.meta[idx as usize].lru_next = NULL_IDX;
163 }
164
165 fn link_head(&mut self, idx: u16) {
166 self.meta[idx as usize].lru_prev = NULL_IDX;
167 self.meta[idx as usize].lru_next = self.lru_head;
168 match self.lru_head {
169 NULL_IDX => self.lru_tail = idx,
170 old_head => self.meta[old_head as usize].lru_prev = idx,
171 }
172 self.lru_head = idx;
173 }
174
175 fn promote(&mut self, idx: u16) {
176 match self.lru_head == idx {
177 true => {}
178 false => {
179 self.unlink(idx);
180 self.link_head(idx);
181 }
182 }
183 }
184
185 fn find_free_slot(&self) -> Option<u16> {
186 (0..CACHE_ENTRIES as u16).find(|&i| self.meta[i as usize].state == CacheState::Free)
187 }
188
189 fn find_lru_clean(&self) -> Option<u16> {
190 let mut cursor = self.lru_tail;
191 core::iter::from_fn(|| match cursor {
192 NULL_IDX => None,
193 idx => {
194 cursor = self.meta[idx as usize].lru_prev;
195 Some(idx)
196 }
197 })
198 .find(|&idx| self.meta[idx as usize].state == CacheState::Clean)
199 }
200
201 fn find_lru_dirty(&self) -> Option<u16> {
202 let mut cursor = self.lru_tail;
203 core::iter::from_fn(|| match cursor {
204 NULL_IDX => None,
205 idx => {
206 cursor = self.meta[idx as usize].lru_prev;
207 Some(idx)
208 }
209 })
210 .find(|&idx| self.meta[idx as usize].state == CacheState::Dirty)
211 }
212
213 fn evict_clean(&mut self, idx: u16) -> u16 {
214 self.clear_cpu_dirty_slot(idx);
215 self.clear_inode_bit(idx);
216 self.unlink(idx);
217 self.meta[idx as usize].state = CacheState::Free;
218 self.used_count -= 1;
219 idx
220 }
221
222 fn flush_and_evict(&mut self, idx: u16, bio: &mut BlockIo) -> Result<u16, FsError> {
223 bio.write_blocks(
224 self.meta[idx as usize].block_num,
225 1,
226 &self.data[idx as usize],
227 )?;
228 self.clear_cpu_dirty_slot(idx);
229 self.clear_inode_bit(idx);
230 self.unlink(idx);
231 self.meta[idx as usize].state = CacheState::Free;
232 self.used_count -= 1;
233 Ok(idx)
234 }
235
236 fn evict_one(&mut self, bio: &mut BlockIo) -> Result<u16, FsError> {
237 match self.find_lru_clean() {
238 Some(idx) => Ok(self.evict_clean(idx)),
239 None => match self.find_lru_dirty() {
240 Some(idx) => self.flush_and_evict(idx, bio),
241 None => Err(FsError::CacheFull),
242 },
243 }
244 }
245
246 fn allocate_slot(&mut self, bio: &mut BlockIo) -> Result<u16, FsError> {
247 match self.find_free_slot() {
248 Some(idx) => Ok(idx),
249 None => self.evict_one(bio),
250 }
251 }
252
253 fn insert_at(&mut self, idx: u16, block_num: u64, state: CacheState) {
254 self.meta[idx as usize].block_num = block_num;
255 self.meta[idx as usize].state = state;
256 self.link_head(idx);
257 self.used_count += 1;
258 }
259
260 pub fn cache_read<'a>(
261 &'a mut self,
262 bio: &mut BlockIo,
263 block_num: u64,
264 ) -> Result<&'a [u8; BLOCK_SIZE], FsError> {
265 match self.find_entry(block_num) {
266 Some(idx) => {
267 self.promote(idx);
268 Ok(&self.data[idx as usize])
269 }
270 None => {
271 let idx = self.allocate_slot(bio)?;
272 bio.read_blocks(block_num, 1, &mut self.data[idx as usize])?;
273 self.insert_at(idx, block_num, CacheState::Clean);
274 Ok(&self.data[idx as usize])
275 }
276 }
277 }
278
279 pub fn cache_write(
280 &mut self,
281 bio: &mut BlockIo,
282 block_num: u64,
283 data: &[u8; BLOCK_SIZE],
284 ) -> Result<(), FsError> {
285 match self.find_entry(block_num) {
286 Some(idx) => {
287 self.data[idx as usize] = *data;
288 self.meta[idx as usize].state = CacheState::Dirty;
289 self.promote(idx);
290 self.mark_cpu_dirty(idx);
291 Ok(())
292 }
293 None => {
294 let idx = self.allocate_slot(bio)?;
295 self.data[idx as usize] = *data;
296 self.insert_at(idx, block_num, CacheState::Dirty);
297 self.mark_cpu_dirty(idx);
298 Ok(())
299 }
300 }
301 }
302
303 pub fn cache_get_mut(
304 &mut self,
305 bio: &mut BlockIo,
306 block_num: u64,
307 ) -> Result<&mut [u8; BLOCK_SIZE], FsError> {
308 match self.find_entry(block_num) {
309 Some(idx) => {
310 self.meta[idx as usize].state = CacheState::Dirty;
311 self.promote(idx);
312 self.mark_cpu_dirty(idx);
313 Ok(&mut self.data[idx as usize])
314 }
315 None => {
316 let idx = self.allocate_slot(bio)?;
317 bio.read_blocks(block_num, 1, &mut self.data[idx as usize])?;
318 self.insert_at(idx, block_num, CacheState::Dirty);
319 self.mark_cpu_dirty(idx);
320 Ok(&mut self.data[idx as usize])
321 }
322 }
323 }
324
325 pub(crate) fn reset_lru(&mut self) {
326 self.lru_head = NULL_IDX;
327 self.lru_tail = NULL_IDX;
328 (0..=self.max_cpu).for_each(|cpu| self.cpu_dirty[cpu].clear());
329 }
330
331 pub fn cache_lock(&mut self, block_num: u64) -> bool {
332 match self.find_entry(block_num) {
333 Some(idx) => {
334 self.meta[idx as usize].state = CacheState::Locked;
335 true
336 }
337 None => false,
338 }
339 }
340
341 pub fn cache_unlock(&mut self, block_num: u64, dirty: bool) {
342 if let Some(idx) = self.find_entry(block_num) {
343 self.meta[idx as usize].state = match dirty {
344 true => {
345 self.mark_cpu_dirty(idx);
346 CacheState::Dirty
347 }
348 false => CacheState::Clean,
349 };
350 }
351 }
352
353 pub fn dirty_block_nums(&self, mut f: impl FnMut(u64)) {
354 (0..CACHE_ENTRIES as u16)
355 .filter(|&i| self.meta[i as usize].state == CacheState::Dirty)
356 .for_each(|i| f(self.meta[i as usize].block_num));
357 }
358
359 pub fn cache_flush(&mut self, bio: &mut BlockIo) -> Result<(), FsError> {
360 let max = self.max_cpu;
361 (0..=max).try_for_each(|cpu| match self.cpu_dirty[cpu].is_empty() {
362 true => Ok(()),
363 false => {
364 let bitmap = self.cpu_dirty[cpu].bits;
365 let mut dirty_indices: StaticVec<u16, CACHE_ENTRIES> = StaticVec::new();
366
367 collect_dirty_from_bitmap(&bitmap, &self.meta, &mut dirty_indices);
368
369 insertion_sort_by_block(&mut dirty_indices, &self.meta);
370
371 dirty_indices.as_slice().iter().try_for_each(|&idx| {
372 bio.write_blocks(
373 self.meta[idx as usize].block_num,
374 1,
375 &self.data[idx as usize],
376 )?;
377 self.meta[idx as usize].state = CacheState::Clean;
378 Ok::<(), FsError>(())
379 })?;
380
381 self.cpu_dirty[cpu].clear();
382 Ok(())
383 }
384 })
385 }
386}
387
388fn collect_dirty_from_bitmap(
389 bitmap: &[u64; DIRTY_BITMAP_WORDS],
390 meta: &[CacheEntry; CACHE_ENTRIES],
391 out: &mut StaticVec<u16, CACHE_ENTRIES>,
392) {
393 bitmap.iter().enumerate().for_each(|(word_idx, &word)| {
394 let base = word_idx as u16 * 64;
395 let mut remaining = word;
396 core::iter::from_fn(|| match remaining {
397 0 => None,
398 _ => {
399 let bit = remaining.trailing_zeros() as u16;
400 remaining &= remaining - 1;
401 Some(base + bit)
402 }
403 })
404 .filter(|&idx| meta[idx as usize].state == CacheState::Dirty)
405 .for_each(|idx| {
406 let _ = out.push(idx);
407 });
408 });
409}
410
411fn insertion_sort_by_block(
412 indices: &mut StaticVec<u16, CACHE_ENTRIES>,
413 meta: &[CacheEntry; CACHE_ENTRIES],
414) {
415 let slice = indices.as_mut_slice();
416 (1..slice.len()).for_each(|i| {
417 let key = slice[i];
418 let key_block = meta[key as usize].block_num;
419 let insert_pos = slice[..i]
420 .iter()
421 .rposition(|&idx| meta[idx as usize].block_num <= key_block)
422 .map_or(0, |p| p + 1);
423 if insert_pos < i {
424 let val = slice[i];
425 slice.copy_within(insert_pos..i, insert_pos + 1);
426 slice[insert_pos] = val;
427 }
428 });
429}