//! Process table: slab-arena-backed `ProcessManager` with guarded kernel stacks.
1use lancer_core::arena::{ArenaEntry, SlabArena};
2use x86_64::structures::paging::page_table::PageTableFlags;
3use x86_64::structures::paging::{Mapper, OffsetPageTable, Page, Size4KiB};
4use x86_64::{PhysAddr, VirtAddr};
5
6use crate::mem::addr;
7use crate::mem::phys::BitmapFrameAllocator;
8use crate::sync::IrqMutex;
9use crate::types::{BlockedPid, CreatedPid, Generation, MAX_PIDS, Pid, Priority};
10
11use super::address_space::{self, Pml4ReleaseResult};
12use super::context::{CpuContext, FpuState, IpcMessage};
13use super::{
14 Process, ProcessState, GUARD_PAGE_SIZE, KERNEL_STACK_PAGES, KERNEL_STACK_SIZE, PROC_NAME_LEN,
15 STACK_CANARY, STACK_PAINT_BYTE,
16};
17use crate::mem::typed_addr::Pml4Phys;
18use crate::ring::RingIndex;
19
/// Number of contiguous 4 KiB frames backing the process-table slab arena.
const SLAB_FRAMES: usize = 128;
/// Frames per kernel-stack allocation: the stack pages plus one extra frame
/// that serves as the guard page below the stack (see `GUARD_PAGE_SIZE` use).
const STACK_TOTAL_FRAMES: usize = KERNEL_STACK_PAGES + 1;
22
23fn try_unmap_guard_page(mapper: &mut OffsetPageTable, target_virt: VirtAddr) -> bool {
24 let page_4k: Page<Size4KiB> = Page::containing_address(target_virt);
25 match mapper.unmap(page_4k) {
26 Ok((_, flush)) => {
27 flush.flush();
28 true
29 }
30 Err(x86_64::structures::paging::mapper::UnmapError::ParentEntryHugePage) => false,
31 Err(_) => false,
32 }
33}
34
35fn remap_guard_and_free_stack(stack_base_phys: PhysAddr, allocator: &mut BitmapFrameAllocator) {
36 let guard_virt = addr::phys_to_virt(stack_base_phys);
37 let hhdm_offset = addr::hhdm_offset();
38 let mut mapper = unsafe { crate::arch::paging::init(hhdm_offset) };
39 let guard_page: Page<Size4KiB> = Page::containing_address(guard_virt);
40 let frame = x86_64::structures::paging::PhysFrame::containing_address(stack_base_phys);
41 let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
42 match unsafe { mapper.map_to(guard_page, frame, flags, &mut *allocator) } {
43 Ok(flush) => flush.flush(),
44 Err(_) => {}
45 }
46 (0..STACK_TOTAL_FRAMES).for_each(|i| {
47 BitmapFrameAllocator::free_frame_by_addr(PhysAddr::new(
48 stack_base_phys.as_u64() + (i as u64) * 4096,
49 ));
50 });
51}
52
53
/// Fixed-capacity process table backed by a slab arena of `Process` entries.
pub struct ProcessManager {
    // Slab arena holding every process slot; slots are addressed by raw pid.
    arena: SlabArena<Process>,
    // Physical base address of the slab region (reported by `slab_region`).
    slab_base_phys: u64,
}
58
59impl ProcessManager {
60 pub const fn empty() -> Self {
61 Self {
62 arena: SlabArena::new_null(),
63 slab_base_phys: 0,
64 }
65 }
66
    /// Carve the process table out of `SLAB_FRAMES` contiguous physical
    /// frames and hand the zeroed region to the slab arena.
    ///
    /// Panics if the contiguous allocation fails (boot-time invariant).
    pub fn init(&mut self, allocator: &mut BitmapFrameAllocator) {
        let entry_size = core::mem::size_of::<ArenaEntry<Process>>();
        let total_bytes = SLAB_FRAMES * 4096;
        // Capacity = whatever fits in the slab, capped at MAX_PIDS.
        let cap = total_bytes / entry_size;
        // NOTE(review): the `as u16` cast assumes `MAX_PIDS <= u16::MAX`;
        // verify against the MAX_PIDS definition, otherwise this truncates.
        let cap = cap.min(MAX_PIDS) as u16;

        let base_frame = allocator
            .allocate_contiguous(SLAB_FRAMES)
            .expect("ProcessManager slab allocation failed");

        self.slab_base_phys = base_frame.as_u64();
        let base_virt = addr::phys_to_virt(base_frame);
        let ptr = base_virt.as_mut_ptr::<ArenaEntry<Process>>();

        // SAFETY: `ptr` addresses `total_bytes` of freshly allocated,
        // HHDM-mapped memory referenced by nothing else; it is zeroed before
        // the arena takes ownership of it.
        unsafe {
            core::ptr::write_bytes(ptr as *mut u8, 0, total_bytes);
            self.arena.init_from_raw_parts(ptr, cap);
        }

        crate::kprintln!(
            " ProcessManager: {} slots ({} frames, entry={}B)",
            cap,
            SLAB_FRAMES,
            entry_size,
        );
    }
93
94 pub fn capacity(&self) -> usize {
95 self.arena.capacity()
96 }
97
98 pub fn slab_region(&self) -> (u64, usize) {
99 (self.slab_base_phys, SLAB_FRAMES * 4096)
100 }
101
    /// Allocate a new process: a private PML4, a guarded kernel stack, and an
    /// arena slot, returning its pid in the `Created` state.
    ///
    /// Every acquisition step is rolled back in reverse order on failure, so
    /// a `None` return leaves no leaked frames or page tables.
    pub fn allocate(&mut self, allocator: &mut BitmapFrameAllocator) -> Option<CreatedPid> {
        // 1) Fresh top-level page table for the new address space.
        let pml4_phys = Pml4Phys::from_create(address_space::create_user_pml4(allocator)?);

        // 2) Contiguous frames: one guard frame followed by the stack pages.
        let stack_base_phys = match allocator.allocate_contiguous(STACK_TOTAL_FRAMES) {
            Some(phys) => phys,
            None => {
                // Roll back step 1.
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };

        // 3) Punch the guard page out of the HHDM so a stack overflow faults
        //    instead of silently corrupting adjacent memory. Result ignored:
        //    if a huge page covers the region the guard simply stays mapped.
        let stack_base_virt = addr::phys_to_virt(stack_base_phys);
        let hhdm_offset = addr::hhdm_offset();
        let mut mapper = unsafe { crate::arch::paging::init(hhdm_offset) };
        let _guard_unmapped = try_unmap_guard_page(&mut mapper, stack_base_virt);

        // 4) Paint the usable stack (for high-water-mark tracking) and plant
        //    the canary word at its lowest address.
        let data_base_virt = VirtAddr::new(stack_base_virt.as_u64() + GUARD_PAGE_SIZE as u64);
        // SAFETY: the region above the guard page is KERNEL_STACK_SIZE bytes
        // of freshly allocated, HHDM-mapped memory owned by this stack.
        unsafe {
            let data_ptr = data_base_virt.as_mut_ptr::<u8>();
            core::ptr::write_bytes(data_ptr, STACK_PAINT_BYTE, KERNEL_STACK_SIZE);
            core::ptr::copy_nonoverlapping(
                STACK_CANARY.to_le_bytes().as_ptr(),
                data_ptr,
                8,
            );
        }

        let kernel_stack_top =
            VirtAddr::new(stack_base_virt.as_u64() + (STACK_TOTAL_FRAMES as u64) * 4096);

        // Placeholder pid/generation; stamped with real values after the
        // arena assigns a slot.
        let process = Process {
            pid: Pid::new(0),
            generation: Generation::new(0),
            state: ProcessState::Created,
            pml4_phys,
            kernel_stack_top,
            saved_context: CpuContext::zero(),
            fpu_state: FpuState::default_init(),
            priority: Priority::IDLE,
            next_ipc: None,
            blocked_reason: None,
            ipc_message: IpcMessage::zero(),
            ipc_badge: 0,
            reply_target: None,
            sched_context: None,
            death_notification: None,
            bound_notification: None,
            effective_priority: Priority::IDLE,
            ring_region_id: None,
            ring_sq_head: RingIndex::new(0),
            ring_cq_tail: RingIndex::new(0),
            allocated_frames: 0,
            run_cpu: None,
            context_checksum: 0,
            name: [0u8; PROC_NAME_LEN],
            name_len: 0,
            stack_phys_base: stack_base_phys,
            fs_base: 0,
            root_cnode: None,
            cnode_depth: 0,
        };

        // 5) Claim an arena slot; roll back stack + PML4 if the table is full.
        let (idx, generation) = match self.arena.allocate(process) {
            Ok(pair) => pair,
            Err(_) => {
                remap_guard_and_free_stack(stack_base_phys, allocator);
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };

        let pid = Pid::new(idx);

        // 6) Register the pid as the PML4's owner; full rollback on failure.
        if address_space::pml4_ref_create(pml4_phys.raw(), pid).is_err() {
            self.arena
                .free(idx, generation)
                .expect("pml4_ref_create rollback: arena free failed");
            remap_guard_and_free_stack(stack_base_phys, allocator);
            address_space::teardown_user_space(pml4_phys.raw(), allocator);
            return None;
        }

        // Stamp the real identity and seal the context checksum.
        let proc = self.arena.get_by_index_mut(idx).unwrap();
        proc.pid = pid;
        proc.generation = generation;
        proc.seal_context();

        Some(CreatedPid::trust(pid))
    }
191
    /// Allocate a thread: a new process entry that shares `parent_pid`'s
    /// address space (PML4) and root CNode but owns its own kernel stack.
    ///
    /// Largely mirrors [`Self::allocate`], except the PML4 is reference-
    /// shared (`pml4_ref_share`) instead of created, so no teardown of the
    /// address space happens on rollback.
    pub fn allocate_thread(
        &mut self,
        parent_pid: Pid,
        allocator: &mut BitmapFrameAllocator,
    ) -> Option<CreatedPid> {
        // Snapshot what the thread inherits from its parent.
        let parent = self.get(parent_pid)?;
        let parent_pml4 = parent.pml4_phys;
        let parent_root_cnode = parent.root_cnode;
        let parent_cnode_depth = parent.cnode_depth;

        // Guard frame + stack pages, physically contiguous.
        let stack_base_phys = allocator.allocate_contiguous(STACK_TOTAL_FRAMES)?;

        // Unmap the guard page from the HHDM (best-effort; see allocate()).
        let stack_base_virt = addr::phys_to_virt(stack_base_phys);
        let hhdm_offset = addr::hhdm_offset();
        let mut mapper = unsafe { crate::arch::paging::init(hhdm_offset) };
        let _guard_unmapped = try_unmap_guard_page(&mut mapper, stack_base_virt);

        // Paint the stack and plant the canary at its lowest address.
        let data_base_virt = VirtAddr::new(stack_base_virt.as_u64() + GUARD_PAGE_SIZE as u64);
        // SAFETY: freshly allocated, HHDM-mapped region of KERNEL_STACK_SIZE
        // bytes owned by this stack.
        unsafe {
            let data_ptr = data_base_virt.as_mut_ptr::<u8>();
            core::ptr::write_bytes(data_ptr, STACK_PAINT_BYTE, KERNEL_STACK_SIZE);
            core::ptr::copy_nonoverlapping(
                STACK_CANARY.to_le_bytes().as_ptr(),
                data_ptr,
                8,
            );
        }

        let kernel_stack_top =
            VirtAddr::new(stack_base_virt.as_u64() + (STACK_TOTAL_FRAMES as u64) * 4096);

        // Placeholder pid/generation; stamped after the arena assigns a slot.
        let process = Process {
            pid: Pid::new(0),
            generation: Generation::new(0),
            state: ProcessState::Created,
            pml4_phys: parent_pml4,
            kernel_stack_top,
            saved_context: CpuContext::zero(),
            fpu_state: FpuState::default_init(),
            priority: Priority::IDLE,
            next_ipc: None,
            blocked_reason: None,
            ipc_message: IpcMessage::zero(),
            ipc_badge: 0,
            reply_target: None,
            sched_context: None,
            death_notification: None,
            bound_notification: None,
            effective_priority: Priority::IDLE,
            ring_region_id: None,
            ring_sq_head: RingIndex::new(0),
            ring_cq_tail: RingIndex::new(0),
            allocated_frames: 0,
            run_cpu: None,
            context_checksum: 0,
            name: [0u8; PROC_NAME_LEN],
            name_len: 0,
            stack_phys_base: stack_base_phys,
            fs_base: 0,
            root_cnode: parent_root_cnode,
            cnode_depth: parent_cnode_depth,
        };

        // Claim an arena slot; free the stack if the table is full.
        let (idx, generation) = match self.arena.allocate(process) {
            Ok(pair) => pair,
            Err(_) => {
                remap_guard_and_free_stack(stack_base_phys, allocator);
                return None;
            }
        };

        let pid = Pid::new(idx);

        // Bump the shared PML4's refcount; roll back slot + stack on failure.
        if address_space::pml4_ref_share(parent_pml4.raw()).is_err() {
            self.arena
                .free(idx, generation)
                .expect("allocate_thread rollback: arena free failed");
            remap_guard_and_free_stack(stack_base_phys, allocator);
            return None;
        }

        // Stamp the real identity and seal the context checksum.
        let proc = self.arena.get_by_index_mut(idx).unwrap();
        proc.pid = pid;
        proc.generation = generation;
        proc.seal_context();

        Some(CreatedPid::trust(pid))
    }
280
281 pub fn get(&self, pid: Pid) -> Option<&Process> {
282 self.arena.get_by_index(pid.raw()).inspect(|p| {
283 debug_assert!(
284 p.state() != ProcessState::Free,
285 "occupied arena slot has Free state for pid {}",
286 pid.raw()
287 );
288 })
289 }
290
291 pub fn get_mut(&mut self, pid: Pid) -> Option<&mut Process> {
292 self.arena.get_by_index_mut(pid.raw()).inspect(|p| {
293 debug_assert!(
294 p.state() != ProcessState::Free,
295 "occupied arena slot has Free state for pid {}",
296 pid.raw()
297 );
298 })
299 }
300
301 pub fn as_created(&self, pid: Pid) -> Option<CreatedPid> {
302 self.get(pid)
303 .filter(|p| p.state() == ProcessState::Created)
304 .map(|_| CreatedPid::trust(pid))
305 }
306
307 pub fn start(&mut self, created: CreatedPid) -> Result<(), crate::error::KernelError> {
308 self[created.pid()].transition_to(ProcessState::Ready)
309 }
310
311 pub fn clear_reply_targets_for(&mut self, target: Pid) {
312 self.arena.for_each_active_mut(|proc| {
313 if proc.reply_target == Some(target) {
314 proc.reply_target = None;
315 }
316 });
317 }
318
    /// Transition `pid` to `Zombie`: remove it from IPC queues, fire its
    /// death notification (if one is registered), and clear any dangling
    /// reply targets pointing at it.
    ///
    /// Returns `false` when the pid is unknown, already Free/Zombie, or the
    /// state transition is rejected. Physical resources are reclaimed later
    /// by [`Self::reap`].
    pub fn zombify(&mut self, pid: Pid) -> bool {
        let proc = match self.arena.get_by_index_mut(pid.raw()) {
            Some(p) => p,
            None => return false,
        };

        // Idempotence guard: never re-zombify or resurrect a freed slot.
        if matches!(proc.state(), ProcessState::Free | ProcessState::Zombie) {
            return false;
        }

        if proc.zombify_state().is_err() {
            crate::kprintln!(
                "[proc] BUG: pid {} failed -> Zombie (state={:?})",
                pid.raw(),
                proc.state()
            );
            return false;
        }

        // Pull the dying process off every endpoint wait queue while holding
        // the capability pool lock.
        let mut pool = crate::cap::pool::POOL.lock();
        crate::ipc::endpoint::remove_from_queues(pid, &mut pool, self);

        // Signal the registered death notification, waking direct waiters
        // and any receivers bound to that notification object.
        match self[pid].death_notification() {
            Some((notif_id, notif_gen, bits)) => {
                match pool
                    .get_mut(notif_id, notif_gen)
                    .and_then(|d| d.as_notification_mut())
                {
                    Ok(notif) => {
                        let has_waiters = crate::ipc::notification::signal_inner(notif, bits);
                        if has_waiters {
                            crate::ipc::notification::drain_and_wake(notif, self);
                        }
                    }
                    // Stale/invalid notification cap: best-effort, ignore.
                    Err(_) => {}
                }
                let _ = crate::ipc::notification::wake_bound_receivers_with_pool(
                    notif_id, notif_gen, bits, self, &mut pool,
                );
            }
            None => {}
        }

        // Release the pool lock before walking the whole table again.
        drop(pool);
        self.clear_reply_targets_for(pid);

        true
    }
367
    /// Reclaim every resource held by a `Zombie` process and free its arena
    /// slot. No-op unless `pid` is currently a Zombie.
    ///
    /// Order matters here: capability-pool cleanup happens under the pool
    /// lock first; the PML4 teardown / stack free / slot free happen after
    /// the lock is dropped.
    pub fn reap(&mut self, pid: Pid, allocator: &mut BitmapFrameAllocator) {
        let proc = match self.arena.get_by_index(pid.raw()) {
            Some(p) => p,
            None => return,
        };

        if proc.state() != ProcessState::Zombie {
            return;
        }

        // Snapshot what we need before mutating the entry.
        let pml4 = proc.pml4_phys.raw();
        let stack_base = proc.stack_phys_base;
        let root_cnode = proc.root_cnode;

        // Detach the root CNode from the entry so nothing re-enters it
        // during the teardown below.
        self.arena.get_by_index_mut(pid.raw()).unwrap().root_cnode = None;

        {
            let mut pool = crate::cap::pool::POOL.lock();

            // Invalidate Process-object capabilities referring to this pid,
            // then sweep stale copies of them out of every CNode.
            let freed_proc_objs = pool.invalidate_process_object(pid);
            freed_proc_objs.iter().for_each(|&(oid, stale_gen)| {
                crate::cap::ops::invalidate_stale_caps_via_cnode(self, &pool, oid, stale_gen);
            });

            // Detach the scheduling context, if one is still attached.
            if let Some((sc_id, sc_gen)) = self[pid].sched_context()
                && let Ok(sc) = pool
                    .get_mut(sc_id, sc_gen)
                    .and_then(|d| d.as_sched_context_mut())
            {
                sc.attached_pid = None;
            }

            // Drop our reference on the root CNode; if that was the last
            // reference, drain and destroy it (releasing each contained cap).
            if let Some((cnode_id, cnode_gen)) = root_cnode {
                match pool.dec_ref(cnode_id, cnode_gen) {
                    Some(crate::cap::object::ObjectData::CNode(ref cnode_data)) => {
                        crate::cap::cnode::drain_cnode_data(
                            cnode_data,
                            &mut pool,
                            &mut |cap, pool| {
                                if let Some(ref data) =
                                    pool.dec_ref(cap.object_id(), cap.generation())
                                {
                                    crate::cap::ops::cleanup_object_data_with_ptable(data, self);
                                }
                            },
                        );
                        // NOTE(review): `&BitmapFrameAllocator` here looks like a
                        // unit-struct value, not the `allocator` parameter — confirm
                        // this is intentional in destroy_cnode's signature.
                        crate::cap::cnode::destroy_cnode(
                            cnode_data,
                            &crate::mem::phys::BitmapFrameAllocator,
                        );
                    }
                    Some(ref data) => {
                        crate::cap::ops::cleanup_object_data(data);
                    }
                    None => {}
                }
            }
        }

        // Capture the generation before freeing so a stale free is caught.
        let entry_gen = self
            .arena
            .generation_of(pid.raw())
            .expect("reap: arena slot empty before free");
        // Tear down the address space only when we held the last reference
        // (threads share the PML4 with their parent).
        match address_space::pml4_ref_release(pml4) {
            Pml4ReleaseResult::LastRef => address_space::teardown_user_space(pml4, allocator),
            Pml4ReleaseResult::StillShared => {}
        }
        remap_guard_and_free_stack(stack_base, allocator);

        self.arena
            .free(pid.raw(), entry_gen)
            .expect("reap: arena free failed (generation mismatch)");
    }
441
442 pub fn destroy(&mut self, pid: Pid, allocator: &mut BitmapFrameAllocator) -> bool {
443 let proc = match self.arena.get_by_index(pid.raw()) {
444 Some(p) => p,
445 None => return true,
446 };
447
448 if proc.state() == ProcessState::Free {
449 return true;
450 }
451
452 if proc.state() != ProcessState::Zombie && !self.zombify(pid) {
453 return false;
454 }
455
456 self.reap(pid, allocator);
457 true
458 }
459
460 pub fn identify_guard_stack(&self, rsp: u64) -> Option<usize> {
461 let cap = self.arena.capacity();
462 (0..cap as u16).find(|&idx| {
463 self.arena
464 .get_by_index(idx)
465 .filter(|p| p.state() != ProcessState::Free)
466 .is_some_and(|p| {
467 let guard_virt = addr::phys_to_virt(p.stack_phys_base).as_u64();
468 rsp >= guard_virt && rsp < guard_virt + GUARD_PAGE_SIZE as u64
469 })
470 })
471 .map(|idx| idx as usize)
472 }
473
474 pub fn verify_stack_canary(&self, pid: Pid) {
475 let proc = self
476 .arena
477 .get_by_index(pid.raw())
478 .unwrap_or_else(|| panic!("verify_stack_canary: pid {} not in arena", pid.raw()));
479 let data_virt =
480 addr::phys_to_virt(proc.stack_phys_base).as_u64() + GUARD_PAGE_SIZE as u64;
481 let actual = unsafe {
482 let ptr = data_virt as *const u8;
483 u64::from_le_bytes(core::slice::from_raw_parts(ptr, 8).try_into().unwrap())
484 };
485 assert!(
486 actual == STACK_CANARY,
487 "stack canary corrupted for pid {} (expected={:#x}, found={:#x})",
488 pid.raw(),
489 STACK_CANARY,
490 actual
491 );
492 }
493
494 #[cfg(lancer_test)]
495 pub fn stack_high_water_mark(&self, pid: Pid) -> usize {
496 let proc = match self.arena.get_by_index(pid.raw()) {
497 Some(p) => p,
498 None => return 0,
499 };
500 let data_virt =
501 addr::phys_to_virt(proc.stack_phys_base).as_u64() + GUARD_PAGE_SIZE as u64;
502 let data = unsafe { core::slice::from_raw_parts(data_virt as *const u8, KERNEL_STACK_SIZE) };
503 let unpainted = data.iter().skip(8).take_while(|&&b| b == STACK_PAINT_BYTE).count();
504 KERNEL_STACK_SIZE - 8 - unpainted
505 }
506
507 #[cfg(lancer_test)]
508 pub fn stack_data_mut(&mut self, pid: Pid) -> Option<&'static mut [u8; KERNEL_STACK_SIZE]> {
509 let proc = self.arena.get_by_index(pid.raw())?;
510 let data_virt =
511 addr::phys_to_virt(proc.stack_phys_base).as_u64() + GUARD_PAGE_SIZE as u64;
512 Some(unsafe { &mut *(data_virt as *mut [u8; KERNEL_STACK_SIZE]) })
513 }
514}
515
516impl core::ops::Index<Pid> for ProcessManager {
517 type Output = Process;
518 fn index(&self, pid: Pid) -> &Process {
519 self.arena
520 .get_by_index(pid.raw())
521 .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
522 }
523}
524
525impl core::ops::IndexMut<Pid> for ProcessManager {
526 fn index_mut(&mut self, pid: Pid) -> &mut Process {
527 self.arena
528 .get_by_index_mut(pid.raw())
529 .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
530 }
531}
532
533impl core::ops::Index<&BlockedPid> for ProcessManager {
534 type Output = Process;
535 fn index(&self, bp: &BlockedPid) -> &Process {
536 &self[bp.pid()]
537 }
538}
539
540impl core::ops::IndexMut<&BlockedPid> for ProcessManager {
541 fn index_mut(&mut self, bp: &BlockedPid) -> &mut Process {
542 &mut self[bp.pid()]
543 }
544}
545
546impl core::ops::Index<CreatedPid> for ProcessManager {
547 type Output = Process;
548 fn index(&self, cp: CreatedPid) -> &Process {
549 &self[cp.pid()]
550 }
551}
552
553impl core::ops::IndexMut<CreatedPid> for ProcessManager {
554 fn index_mut(&mut self, cp: CreatedPid) -> &mut Process {
555 &mut self[cp.pid()]
556 }
557}
558
/// Global process table, guarded by an IRQ-safe mutex.
// NOTE(review): the `0` const parameter on IrqMutex is presumably a lock
// class/ordering level — confirm against `crate::sync::IrqMutex`.
pub static PROCESSES: IrqMutex<ProcessManager, 0> = IrqMutex::new(ProcessManager::empty());