//! Kernel object pool: generation-checked `ObjectData` slots backed by one
//! contiguous physical region allocated at boot.
1use super::object::ObjectData;
2use super::object_table::{DecRefOutcome, ObjectTable};
3use crate::error::KernelError;
4use crate::mem::addr;
5use crate::mem::phys::BitmapFrameAllocator;
6use crate::sync::IrqMutex;
7use crate::types::{Generation, ObjectId};
8use x86_64::PhysAddr;
9
/// Number of 4 KiB frames in the pool's contiguous backing region
/// (128 * 4096 B = 512 KiB total).
const SLAB_FRAMES: usize = 128;
11
/// Fixed-capacity pool of kernel `ObjectData` slots stored in a single
/// contiguous physical region. Handles are `(ObjectId, Generation)` pairs
/// validated by the slot table.
pub struct ObjectPool {
    // Per-slot metadata: generations, refcounts, free list.
    table: ObjectTable,
    // Physical base address of the backing region; 0 until `init` runs.
    kernel_region_phys: u64,
    // Number of `ObjectData` slots that fit in the backing region.
    kernel_region_capacity: u32,
}
17
18impl ObjectPool {
19 const fn new() -> Self {
20 Self {
21 table: ObjectTable::new(),
22 kernel_region_phys: 0,
23 kernel_region_capacity: 0,
24 }
25 }
26
27 pub fn init(&mut self, allocator: &mut BitmapFrameAllocator) {
28 let base_frame = allocator
29 .allocate_contiguous(SLAB_FRAMES)
30 .expect("ObjectPool kernel region allocation failed");
31
32 self.kernel_region_phys = base_frame.as_u64();
33
34 let base_virt = addr::phys_to_virt(base_frame);
35 unsafe {
36 core::ptr::write_bytes(base_virt.as_mut_ptr::<u8>(), 0, SLAB_FRAMES * 4096);
37 }
38
39 let obj_size = core::mem::size_of::<ObjectData>();
40 let total_bytes = SLAB_FRAMES * 4096;
41 self.kernel_region_capacity = (total_bytes / obj_size) as u32;
42
43 self.table.init();
44
45 crate::kprintln!(
46 " ObjectPool: {} kernel region slots ({} frames, entry={}B)",
47 self.kernel_region_capacity,
48 SLAB_FRAMES,
49 obj_size,
50 );
51 }
52
53 pub fn capacity(&self) -> usize {
54 self.kernel_region_capacity as usize
55 }
56
57 pub fn kernel_region(&self) -> (u64, usize) {
58 (self.kernel_region_phys, SLAB_FRAMES * 4096)
59 }
60
61 fn slot_phys(&self, id: u16) -> u64 {
62 let obj_size = core::mem::size_of::<ObjectData>() as u64;
63 self.kernel_region_phys + (id as u64) * obj_size
64 }
65
66 fn is_kernel_region_slot(&self, id: u16) -> bool {
67 (id as u32) < self.kernel_region_capacity
68 }
69
70 pub fn allocate(&mut self, data: ObjectData) -> Result<(ObjectId, Generation), KernelError> {
71 debug_assert!(self.kernel_region_phys != 0, "ObjectPool not initialized");
72
73 let id = self.table.peek_free_head().ok_or(KernelError::PoolExhausted)?;
74 match self.is_kernel_region_slot(id) {
75 true => {
76 let phys = self.slot_phys(id);
77 let (id, generation) = self.table.allocate(phys)?;
78 let virt = addr::phys_to_virt(PhysAddr::new(phys));
79 unsafe {
80 core::ptr::write(virt.as_mut_ptr::<ObjectData>(), data);
81 }
82 Ok((ObjectId::new(id), Generation::new(generation as u64)))
83 }
84 false => Err(KernelError::PoolExhausted),
85 }
86 }
87
88 pub fn get(&self, id: ObjectId, generation: Generation) -> Result<&ObjectData, KernelError> {
89 let phys = self.table.get(id.raw(), generation.raw() as u32)?;
90 Ok(unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<ObjectData>()) })
91 }
92
93 pub fn get_mut(
94 &mut self,
95 id: ObjectId,
96 generation: Generation,
97 ) -> Result<&mut ObjectData, KernelError> {
98 let phys = self.table.get(id.raw(), generation.raw() as u32)?;
99 Ok(unsafe { &mut *(addr::phys_to_virt(PhysAddr::new(phys)).as_mut_ptr::<ObjectData>()) })
100 }
101
102 pub fn inc_ref(&mut self, id: ObjectId, generation: Generation) -> Result<(), KernelError> {
103 self.table.inc_ref(id.raw(), generation.raw() as u32)
104 }
105
106 pub fn dec_ref(&mut self, id: ObjectId, generation: Generation) -> Option<ObjectData> {
107 let outcome = self.table.dec_ref(id.raw(), generation.raw() as u32);
108 match outcome {
109 DecRefOutcome::Underflow => {
110 crate::kprintln!(
111 "[cap] BUG: refcount underflow on object {}. indicates double-free",
112 id.raw()
113 );
114 None
115 }
116 DecRefOutcome::Freed(phys) => {
117 let data = unsafe {
118 core::ptr::read(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<ObjectData>())
119 };
120 Some(data)
121 }
122 DecRefOutcome::Alive | DecRefOutcome::Stale => None,
123 }
124 }
125
126 pub fn revoke(
127 &mut self,
128 id: ObjectId,
129 generation: Generation,
130 ) -> Result<(Generation, Option<ObjectData>), KernelError> {
131 let (new_gen, old_phys) = self.table.revoke(id.raw(), generation.raw() as u32)?;
132 let data = unsafe {
133 core::ptr::read(addr::phys_to_virt(PhysAddr::new(old_phys)).as_ptr::<ObjectData>())
134 };
135 Ok((Generation::new(new_gen as u64), Some(data)))
136 }
137
138 pub fn free(
139 &mut self,
140 id: ObjectId,
141 generation: Generation,
142 ) -> Result<Option<ObjectData>, KernelError> {
143 let (old_phys, _new_gen) = self.table.free(id.raw(), generation.raw() as u32)?;
144 match old_phys {
145 0 => Ok(None),
146 _ => {
147 let data = unsafe {
148 core::ptr::read(
149 addr::phys_to_virt(PhysAddr::new(old_phys)).as_ptr::<ObjectData>(),
150 )
151 };
152 Ok(Some(data))
153 }
154 }
155 }
156
157 pub fn for_each_active_mut(&mut self, mut f: impl FnMut(&mut ObjectData)) {
158 self.table.for_each_active(|_id, phys| {
159 let data = unsafe {
160 &mut *(addr::phys_to_virt(PhysAddr::new(phys)).as_mut_ptr::<ObjectData>())
161 };
162 f(data);
163 });
164 }
165
166 pub fn invalidate_process_object(
167 &mut self,
168 target_pid: crate::types::Pid,
169 ) -> crate::static_vec::StaticVec<(ObjectId, Generation), 4> {
170 let mut freed = crate::static_vec::StaticVec::<(ObjectId, Generation), 4>::new();
171 self.table.free_where(
172 |_id, phys, _gen| {
173 let data = unsafe {
174 &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<ObjectData>())
175 };
176 matches!(data, ObjectData::Process(p) if p.pid == target_pid)
177 },
178 |id, stale_gen, phys| {
179 let _ = freed.push((ObjectId::new(id), Generation::new(stale_gen as u64)));
180 let data = unsafe {
181 core::ptr::read(
182 addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<ObjectData>(),
183 )
184 };
185 crate::cap::ops::cleanup_object_data(&data);
186 },
187 );
188 freed
189 }
190
191 #[allow(dead_code)]
192 pub fn table(&self) -> &ObjectTable {
193 &self.table
194 }
195
196 #[allow(dead_code)]
197 pub fn table_mut(&mut self) -> &mut ObjectTable {
198 &mut self.table
199 }
200}
201
/// Global kernel object pool, guarded by an `IrqMutex`.
/// NOTE(review): the `1` const parameter's meaning is defined by `IrqMutex`
/// (presumably a lock level/class) — confirm against `crate::sync`.
pub static POOL: IrqMutex<ObjectPool, 1> = IrqMutex::new(ObjectPool::new());