//! Kernel-mode tests for VT-d IOMMU / DMAR support: unit discovery,
//! register sanity, DMA alloc/free parameter validation, and the IOVA allocator.
1crate::kernel_test!( 2 fn dmar_table_present() { 3 let rsdp = crate::arch::boot::rsdp_address(); 4 assert!(rsdp.is_some(), "no RSDP, cannot test DMAR"); 5 } 6); 7 8crate::kernel_test!( 9 fn iommu_units_discovered() { 10 let units = crate::iommu::IOMMU_UNITS.lock(); 11 assert!( 12 !units.is_empty(), 13 "no IOMMU units discovered (DMAR parsing may have failed)" 14 ); 15 } 16); 17 18crate::kernel_test!( 19 fn iommu_version_valid() { 20 let units = crate::iommu::IOMMU_UNITS.lock(); 21 units.iter().for_each(|unit| { 22 let version = unit.regs.version(); 23 assert!(version != 0, "IOMMU version register is zero"); 24 let major = (version >> 4) & 0xF; 25 assert!(major >= 1, "IOMMU major version {} < 1", major); 26 }); 27 } 28); 29 30crate::kernel_test!( 31 fn iommu_capability_sagaw() { 32 let units = crate::iommu::IOMMU_UNITS.lock(); 33 units.iter().for_each(|unit| { 34 let sagaw = crate::iommu::regs::cap_sagaw(unit.cap); 35 assert!( 36 sagaw & crate::iommu::regs::SAGAW_48BIT != 0, 37 "IOMMU SAGAW {:#x} does not support 48-bit AGAW (bit 2)", 38 sagaw 39 ); 40 }); 41 } 42); 43 44crate::kernel_test!( 45 fn iommu_translation_enabled() { 46 let units = crate::iommu::IOMMU_UNITS.lock(); 47 units.iter().for_each(|unit| { 48 let gsts = unit.regs.global_status(); 49 assert!( 50 gsts & crate::iommu::regs::GSTS_TES != 0, 51 "IOMMU translation not enabled (GSTS={:#x})", 52 gsts 53 ); 54 }); 55 } 56); 57 58crate::kernel_test!( 59 fn iommu_fsts_constants() { 60 assert_eq!(crate::iommu::regs::FSTS_PFO, 1 << 0, "PFO must be bit 0"); 61 assert_eq!(crate::iommu::regs::FSTS_PPF, 1 << 1, "PPF must be bit 1"); 62 } 63); 64 65crate::kernel_test!( 66 fn iommu_no_pending_faults() { 67 let units = crate::iommu::IOMMU_UNITS.lock(); 68 units.iter().for_each(|unit| { 69 let fsts = unit.regs.fault_status(); 70 assert!( 71 fsts & crate::iommu::regs::FSTS_PPF == 0, 72 "IOMMU has pending faults (FSTS={:#x})", 73 fsts 74 ); 75 }); 76 } 77); 78 79crate::kernel_test!( 80 fn iommu_root_table_present() { 81 
assert!( 82 crate::iommu::root_table_present(), 83 "IOMMU root table should be set after enable" 84 ); 85 } 86); 87 88crate::kernel_test!( 89 fn dma_alloc_rejects_zero_pages() { 90 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 91 let pid = crate::types::Pid::new(0); 92 let mut alloc = crate::mem::phys::BitmapFrameAllocator; 93 let hhdm = crate::mem::addr::hhdm_offset(); 94 let result = crate::iommu::dma_alloc(0, 0, 0, 0x1000, pml4, pid, &mut alloc, hhdm); 95 assert!( 96 matches!(result, Err(crate::error::KernelError::InvalidParameter)), 97 "dma_alloc with 0 pages must return InvalidParameter" 98 ); 99 } 100); 101 102crate::kernel_test!( 103 fn dma_alloc_rejects_excessive_pages() { 104 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 105 let pid = crate::types::Pid::new(0); 106 let mut alloc = crate::mem::phys::BitmapFrameAllocator; 107 let hhdm = crate::mem::addr::hhdm_offset(); 108 let result = crate::iommu::dma_alloc(0, 0, 17, 0x1000, pml4, pid, &mut alloc, hhdm); 109 assert!( 110 matches!(result, Err(crate::error::KernelError::InvalidParameter)), 111 "dma_alloc with >16 pages must return InvalidParameter" 112 ); 113 } 114); 115 116crate::kernel_test!( 117 fn dma_free_rejects_misaligned_iova() { 118 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 119 let pid = crate::types::Pid::new(0); 120 let hhdm = crate::mem::addr::hhdm_offset(); 121 let result = crate::iommu::dma_free(0, 0, 0x1001, 1, pml4, pid, hhdm); 122 assert!( 123 matches!(result, Err(crate::error::KernelError::InvalidAddress)), 124 "dma_free with misaligned IOVA must return InvalidAddress" 125 ); 126 } 127); 128 129crate::kernel_test!( 130 fn dma_free_rejects_zero_pages() { 131 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 132 let pid = crate::types::Pid::new(0); 133 let hhdm = crate::mem::addr::hhdm_offset(); 134 let result = 
crate::iommu::dma_free(0, 0, 0x1000, 0, pml4, pid, hhdm); 135 assert!( 136 matches!(result, Err(crate::error::KernelError::InvalidParameter)), 137 "dma_free with 0 pages must return InvalidParameter" 138 ); 139 } 140); 141 142crate::kernel_test!( 143 fn dma_free_rejects_unknown_device() { 144 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 145 let pid = crate::types::Pid::new(0); 146 let hhdm = crate::mem::addr::hhdm_offset(); 147 let result = crate::iommu::dma_free(0xFF, 0xFF, 0x1000_0000, 1, pml4, pid, hhdm); 148 assert!( 149 matches!(result, Err(crate::error::KernelError::NotFound)), 150 "dma_free for non-existent device must return NotFound" 151 ); 152 } 153); 154 155crate::kernel_test!( 156 fn iova_alloc_returns_page_aligned() { 157 let mut alloc = crate::iommu::iova::IovaAllocator::new(); 158 let iova = alloc.allocate(1).expect("single page IOVA alloc failed"); 159 assert!(iova & 0xFFF == 0, "IOVA {:#x} not page-aligned", iova); 160 assert!(iova >= 0x1000_0000, "IOVA {:#x} below start range", iova); 161 } 162); 163 164crate::kernel_test!( 165 fn iova_alloc_free_reuse() { 166 let mut alloc = crate::iommu::iova::IovaAllocator::new(); 167 let first = alloc.allocate(2).expect("first alloc failed"); 168 alloc.free(first, 2).expect("free failed"); 169 let second = alloc.allocate(2).expect("second alloc failed"); 170 assert_eq!(first, second, "freed IOVA range should be reused"); 171 } 172); 173 174crate::kernel_test!( 175 fn iova_alloc_free_partial_reuse() { 176 let mut alloc = crate::iommu::iova::IovaAllocator::new(); 177 let base = alloc.allocate(4).expect("4-page alloc failed"); 178 alloc.free(base, 4).expect("free failed"); 179 let small = alloc 180 .allocate(1) 181 .expect("1-page alloc from freed range failed"); 182 assert_eq!(small, base, "should reuse start of freed range"); 183 let next = alloc.allocate(1).expect("second 1-page alloc failed"); 184 assert_eq!( 185 next, 186 base + 4096, 187 "remainder of freed range 
should be available" 188 ); 189 } 190); 191 192crate::kernel_test!( 193 fn iommu_high_iova_not_mapped() { 194 let devices = crate::iommu::device_count(); 195 if devices == 0 { 196 return; 197 } 198 let dev_table = crate::pci::DEVICE_TABLE.lock(); 199 let nvme = dev_table 200 .iter() 201 .find(|d| d.class_code == 0x01 && d.subclass == 0x08 && d.prog_if == 0x02); 202 let (bus, devfn) = match nvme { 203 Some(d) => (d.bus, (d.device << 3) | d.function), 204 None => return, 205 }; 206 drop(dev_table); 207 208 let high_iova = 0xFFFF_F000_0000_0000u64; 209 let result = crate::iommu::is_iova_mapped(bus, devfn, high_iova); 210 match result { 211 Some(mapped) => assert!(!mapped, "high IOVA should not be mapped"), 212 None => {} 213 } 214 } 215); 216 217crate::kernel_test!( 218 fn iommu_fault_status_readable() { 219 assert!( 220 crate::iommu::fault_status_readable(), 221 "fault status registers must be readable without panic" 222 ); 223 } 224); 225 226crate::kernel_test!( 227 fn iommu_dma_alloc_rejects_wrong_pid() { 228 let pml4 = crate::mem::typed_addr::Pml4Phys::from_create(x86_64::PhysAddr::new(0x1000)); 229 let wrong_pid = crate::types::Pid::new(63); 230 let mut alloc = crate::mem::phys::BitmapFrameAllocator; 231 let hhdm = crate::mem::addr::hhdm_offset(); 232 let result = 233 crate::iommu::dma_alloc(0xFF, 0xFF, 1, 0x1000, pml4, wrong_pid, &mut alloc, hhdm); 234 assert!( 235 result.is_err(), 236 "dma_alloc for non-existent device/pid must fail" 237 ); 238 } 239); 240 241crate::kernel_test!( 242 fn iova_free_coalesces_adjacent() { 243 let mut alloc = crate::iommu::iova::IovaAllocator::new(); 244 let a = alloc.allocate(1).expect("alloc a"); 245 let b = alloc.allocate(1).expect("alloc b"); 246 let c = alloc.allocate(1).expect("alloc c"); 247 alloc.free(a, 1).expect("free a"); 248 alloc.free(c, 1).expect("free c"); 249 alloc.free(b, 1).expect("free b (should coalesce a+b+c)"); 250 let big = alloc 251 .allocate(3) 252 .expect("3-page alloc should succeed after coalescing"); 
253 assert_eq!(big, a, "coalesced range should start at original base"); 254 } 255);