strat9_kernel/memory/paging.rs

use x86_64::{
    registers::control::Cr3,
    structures::paging::{
        FrameAllocator as X86FrameAllocator, Mapper, OffsetPageTable, Page, PageTable,
        PageTableFlags, PhysFrame as X86PhysFrame, Size4KiB, Translate,
    },
    PhysAddr, VirtAddr,
};

use crate::{
    memory::frame::{FrameAllocOptions, FramePurpose},
    sync::SpinLock,
};

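/// Zero-sized adapter that exposes the kernel's buddy allocator through the
/// `x86_64` crate's `FrameAllocator` trait, so the mapping helpers below can
/// allocate intermediate page-table frames on demand.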
pub struct BuddyFrameAllocator;

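// `FrameAllocator` is an unsafe trait: implementations must only hand out
// frames that are not in use elsewhere. Here that responsibility is delegated
// to the kernel's buddy allocator, with frames tagged for page-table use.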
unsafe impl X86FrameAllocator<Size4KiB> for BuddyFrameAllocator {
    fn allocate_frame(&mut self) -> Option<X86PhysFrame<Size4KiB>> {
        // `token_from_trusted_context` encodes the assumption that the mapping
        // paths invoking this allocator run in a context where holding an
        // IRQ-disabled token is sound.
        let token = unsafe { crate::sync::IrqDisabledToken::token_from_trusted_context() };

        // Pull one 4 KiB frame from the buddy allocator, tagged as a page-table frame.
        let frame = FrameAllocOptions::new()
            .purpose(FramePurpose::PageTable)
            .allocate(&token)
            .ok()?;

        X86PhysFrame::from_start_address(frame.start_address).ok()
    }
}

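/// Set once `init` has run; checked by `is_initialized` before any page-table
/// manipulation.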
static mut PAGING_READY: bool = false;

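/// Physical address of the kernel's level-4 page table, captured from CR3 in
/// `init` and used by the `*_kernel` mapping helpers.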
static mut KERNEL_CR3: PhysAddr = PhysAddr::new_truncate(0);

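/// Serializes updates to the kernel page tables performed through
/// `map_page_kernel` and `unmap_page_kernel`.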
static KERNEL_PT_LOCK: SpinLock<()> = SpinLock::new(());

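/// Returns `true` once `init` has recorded the kernel page table and the
/// mapping helpers are safe to use.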
pub fn is_initialized() -> bool {
    unsafe { *(&raw const PAGING_READY) }
}

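/// Captures the paging state handed over by the bootloader: reads the active
/// level-4 table from CR3, records it as the kernel page table, and marks
/// paging as initialized. `hhdm_offset` is the base of the higher-half direct
/// map through which physical memory is accessed.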
pub fn init(hhdm_offset: u64) {
    let phys_offset = VirtAddr::new(hhdm_offset);
    let (level_4_frame, _flags) = Cr3::read();
    let level_4_phys = level_4_frame.start_address().as_u64();
    let level_4_virt = phys_offset + level_4_phys;

    unsafe {
        // Record the currently active L4 table as the kernel page table and
        // flip the ready flag.
        let kcr3 = &raw mut KERNEL_CR3;
        *kcr3 = level_4_frame.start_address();
        let ready = &raw mut PAGING_READY;
        *ready = true;
    }

    log::info!(
        "Paging initialized: CR3={:#x}, HHDM={:#x}, L4 table @ {:#x}",
        level_4_phys,
        hhdm_offset,
        level_4_virt.as_u64(),
    );
}

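/// Ensures every free or reclaimable RAM region reported by the bootloader is
/// reachable through the HHDM window.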
pub fn map_all_ram(memory_regions: &[crate::boot::entry::MemoryRegion]) {
    use crate::boot::entry::MemoryKind;

    for region in memory_regions {
        if matches!(region.kind, MemoryKind::Free | MemoryKind::Reclaim) {
            log::debug!(
                "Mapping RAM region to HHDM: phys=0x{:x}..0x{:x}",
                region.base,
                region.base + region.size
            );
            ensure_identity_map_range(region.base, region.size);
        }
    }
}

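/// Maps a single 4 KiB page in the currently active address space, allocating
/// any missing intermediate page tables from the buddy allocator.
///
/// A minimal usage sketch (addresses purely illustrative; assumes `init` has
/// already run):
///
/// ```ignore
/// let page = Page::<Size4KiB>::containing_address(VirtAddr::new(0xFFFF_9000_0000_0000));
/// let frame = X86PhysFrame::containing_address(PhysAddr::new(0x0020_0000));
/// let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
/// map_page(page, frame, flags)?;
/// ```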
pub fn map_page(
    page: Page<Size4KiB>,
    frame: X86PhysFrame<Size4KiB>,
    flags: PageTableFlags,
) -> Result<(), &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    // Reach the active L4 table through the HHDM and wrap it in an
    // `OffsetPageTable` so the x86_64 crate can walk it.
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let mut allocator = BuddyFrameAllocator;

    unsafe {
        mapper
            .map_to(page, frame, flags, &mut allocator)
            .map_err(|_| "Failed to map page")?
            .flush();
    }
    Ok(())
}

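/// Like `map_page`, but always targets the kernel's level-4 table recorded in
/// `KERNEL_CR3` (rather than whatever CR3 currently points at) and serializes
/// the update with `KERNEL_PT_LOCK`.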
pub fn map_page_kernel(
    page: Page<Size4KiB>,
    frame: X86PhysFrame<Size4KiB>,
    flags: PageTableFlags,
) -> Result<(), &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    // Hold the kernel page-table lock and walk the L4 table recorded at init
    // time rather than the currently active one.
    let _guard = KERNEL_PT_LOCK.lock();
    let kernel_cr3 = unsafe { *(&raw const KERNEL_CR3) };
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let level_4_virt = phys_offset + kernel_cr3.as_u64();
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let mut allocator = BuddyFrameAllocator;

    unsafe {
        mapper
            .map_to(page, frame, flags, &mut allocator)
            .map_err(|_| "Failed to map page (kernel)")?
            .flush();
    }
    Ok(())
}

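/// Unmaps a 4 KiB page from the currently active address space and returns the
/// frame that backed it; the TLB entry is flushed before returning.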
pub fn unmap_page(page: Page<Size4KiB>) -> Result<X86PhysFrame<Size4KiB>, &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let (frame, flush) = mapper.unmap(page).map_err(|_| "Failed to unmap page")?;
    flush.flush();
    Ok(frame)
}

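/// Kernel-page-table counterpart of `unmap_page`, taking `KERNEL_PT_LOCK` and
/// walking the table recorded in `KERNEL_CR3`.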
pub fn unmap_page_kernel(page: Page<Size4KiB>) -> Result<X86PhysFrame<Size4KiB>, &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    let _guard = KERNEL_PT_LOCK.lock();
    let kernel_cr3 = unsafe { *(&raw const KERNEL_CR3) };
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let level_4_virt = phys_offset + kernel_cr3.as_u64();
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let (frame, flush) = mapper
        .unmap(page)
        .map_err(|_| "Failed to unmap page (kernel)")?;
    flush.flush();
    Ok(frame)
}

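/// Translates a virtual address through the currently active page tables using
/// the `x86_64` crate's `Translate` implementation; returns `None` if paging is
/// not initialized or the address is unmapped.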
pub fn translate(addr: VirtAddr) -> Option<PhysAddr> {
    if !is_initialized() {
        return None;
    }
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    mapper.translate_addr(addr)
}

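/// Manual 4-level page-table walk through the HHDM, independent of the
/// `OffsetPageTable` machinery. Handles 1 GiB and 2 MiB huge pages and returns
/// `None` as soon as a non-present entry is found.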
fn translate_via_active_page_tables(addr: VirtAddr) -> Option<PhysAddr> {
    let hhdm = crate::memory::hhdm_offset();
    let (level_4_frame, _) = Cr3::read();

    unsafe {
        // Level 4: index with bits 47..39 of the virtual address.
        let l4_ptr = (level_4_frame.start_address().as_u64() + hhdm) as *const u64;
        let l4e = *l4_ptr.add(((addr.as_u64() >> 39) & 0x1FF) as usize);
        if l4e & 1 == 0 {
            return None;
        }

        // Level 3: bits 38..30. Bit 7 set means a 1 GiB huge page.
        let l3_ptr = ((l4e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l3e = *l3_ptr.add(((addr.as_u64() >> 30) & 0x1FF) as usize);
        if l3e & 1 == 0 {
            return None;
        }
        if l3e & 0x80 != 0 {
            return Some(PhysAddr::new(
                (l3e & 0x000F_FFFF_C000_0000) + (addr.as_u64() & 0x3FFF_FFFF),
            ));
        }

        // Level 2: bits 29..21. Bit 7 set means a 2 MiB huge page.
        let l2_ptr = ((l3e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l2e = *l2_ptr.add(((addr.as_u64() >> 21) & 0x1FF) as usize);
        if l2e & 1 == 0 {
            return None;
        }
        if l2e & 0x80 != 0 {
            return Some(PhysAddr::new(
                (l2e & 0x000F_FFFF_FFE0_0000) + (addr.as_u64() & 0x1F_FFFF),
            ));
        }

        // Level 1: bits 20..12, then add the page offset.
        let l1_ptr = ((l2e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l1e = *l1_ptr.add(((addr.as_u64() >> 12) & 0x1FF) as usize);
        if l1e & 1 == 0 {
            return None;
        }

        Some(PhysAddr::new(
            (l1e & 0x000F_FFFF_FFFF_F000) + (addr.as_u64() & 0xFFF),
        ))
    }
}

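/// Checks, page by page, that the HHDM alias of `phys_base..phys_base + size`
/// is currently mapped and points back at the expected physical frames.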
pub fn is_hhdm_range_mapped_now(phys_base: u64, size: u64) -> bool {
    if size == 0 {
        return true;
    }

    // Round the range outwards to 4 KiB page boundaries.
    let start = phys_base & !0xFFF;
    let end = phys_base.saturating_add(size).saturating_add(0xFFF) & !0xFFF;

    let mut phys = start;
    while phys < end {
        let virt = VirtAddr::new(crate::memory::phys_to_virt(phys));
        let Some(mapped) = translate_via_active_page_tables(virt) else {
            return false;
        };
        if mapped.as_u64() != phys {
            return false;
        }
        phys = phys.saturating_add(4096);
    }
    true
}

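/// Physical address of the level-4 table the CPU is using right now (CR3).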
pub fn active_page_table() -> PhysAddr {
    let (frame, _) = Cr3::read();
    frame.start_address()
}

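/// Physical address of the kernel's level-4 table as recorded during `init`.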
pub fn kernel_l4_phys() -> PhysAddr {
    unsafe { *(&raw const KERNEL_CR3) }
}

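/// Makes sure the HHDM alias of a single physical page is mapped, creating a
/// PRESENT | WRITABLE mapping if it is missing.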
pub fn ensure_identity_map(phys_addr: u64) {
    let virt_addr = crate::memory::phys_to_virt(phys_addr);
    let page = Page::<Size4KiB>::containing_address(VirtAddr::new(virt_addr));
    let frame = X86PhysFrame::containing_address(PhysAddr::new(phys_addr));

    if translate(VirtAddr::new(virt_addr)).is_none() {
        log::debug!(
            "Identity mapping missing page: {:#x} -> {:#x}",
            phys_addr,
            virt_addr
        );
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        if let Err(e) = map_page(page, frame, flags) {
            log::error!("Failed to identity map {:#x}: {}", phys_addr, e);
        }
    }
}

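/// Walks `phys_base..phys_base + size` page by page and maps any HHDM alias
/// that is not yet present, reusing one `OffsetPageTable` for the whole range.
///
/// A usage sketch (the region is purely illustrative):
///
/// ```ignore
/// // Make a freshly discovered RAM region reachable through the HHDM before
/// // handing it to the frame allocator.
/// ensure_identity_map_range(0x1_0000_0000, 16 * 1024 * 1024);
/// ```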
pub fn ensure_identity_map_range(phys_base: u64, size: u64) {
    if size == 0 || !is_initialized() {
        return;
    }

    // Round the physical range outwards to page boundaries.
    let page_size = 4096u64;
    let start = phys_base & !(page_size - 1);
    let end = (phys_base.saturating_add(size).saturating_add(page_size - 1)) & !(page_size - 1);
    if start >= end {
        return;
    }

    // Build one OffsetPageTable over the active L4 and reuse it for the whole range.
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();

    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let mut allocator = BuddyFrameAllocator;
    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;

    let mut mapped_count: u64 = 0;
    let mut p = start;
    while p < end {
        let virt = VirtAddr::new(crate::memory::phys_to_virt(p));
        if mapper.translate_addr(virt).is_none() {
            let page = Page::<Size4KiB>::containing_address(virt);
            let frame = X86PhysFrame::containing_address(PhysAddr::new(p));
            match unsafe { mapper.map_to(page, frame, flags, &mut allocator) } {
                Ok(flush) => {
                    flush.flush();
                    mapped_count += 1;
                }
                Err(_) => {
                    log::error!("ensure_identity_map_range: failed to map {:#x}", p);
                }
            }
        }
        p = p.saturating_add(page_size);
    }

    if mapped_count > 0 {
        log::debug!(
            "Identity mapped {} pages: phys {:#x}..{:#x}",
            mapped_count,
            start,
            end,
        );
    }
}