// strat9_kernel/memory/paging.rs
1//! Virtual Memory Management (Paging) for Strat9-OS
2//!
3//! Uses the `x86_64` crate's `OffsetPageTable` which is designed for the HHDM
4//! (Higher Half Direct Map) pattern — exactly what Limine provides.
5//!
6//! Provides map/unmap/translate operations on the active page table.
7
8use x86_64::{
9    registers::control::Cr3,
10    structures::paging::{
11        FrameAllocator as X86FrameAllocator, Mapper, OffsetPageTable, Page, PageTable,
12        PageTableFlags, PhysFrame as X86PhysFrame, Size4KiB, Translate,
13    },
14    PhysAddr, VirtAddr,
15};
16
17/// Wrapper around our buddy allocator implementing the x86_64 crate's FrameAllocator trait.
18///
19/// This is used by `OffsetPageTable` when it needs to allocate intermediate page tables.
20pub struct BuddyFrameAllocator;
21
22// SAFETY: allocate_frame returns valid, unused, 4KiB-aligned physical frames
23// from the buddy allocator.
24unsafe impl X86FrameAllocator<Size4KiB> for BuddyFrameAllocator {
25    /// Performs the allocate frame operation.
26    fn allocate_frame(&mut self) -> Option<X86PhysFrame<Size4KiB>> {
27        // SAFETY: BuddyFrameAllocator is called from OffsetPageTable during page-table
28        // operations, which always occur either in early single-threaded init or while
29        // holding the scheduler SpinLock (which disables IRQs). IRQs are therefore
30        // guaranteed to be off at this point.
31        let token = unsafe { crate::sync::IrqDisabledToken::new_unchecked() };
32        let frame = crate::memory::allocate_frame(&token).ok()?;
33        X86PhysFrame::from_start_address(frame.start_address).ok()
34    }
35}
36
/// Paging initialization flag.
///
/// Written exactly once (set to `true`) in `init()` during single-threaded
/// boot; read afterwards via `is_initialized()` through `&raw const`.
static mut PAGING_READY: bool = false;

/// Physical address of the kernel's level-4 page table (set at init, never changes).
///
/// Captured from CR3 in `init()`; exposed read-only via `kernel_l4_phys()`.
static mut KERNEL_CR3: PhysAddr = PhysAddr::new_truncate(0);
42
43/// Returns whether initialized.
44pub fn is_initialized() -> bool {
45    unsafe { *(&raw const PAGING_READY) }
46}
47
48/// Initialize the paging subsystem.
49///
50/// Reads the active CR3 (level-4 page table) and creates an `OffsetPageTable`
51/// mapper using the HHDM offset for physical-to-virtual translation.
52///
53/// Must be called after the buddy allocator and HHDM offset are initialized.
54pub fn init(hhdm_offset: u64) {
55    let phys_offset = VirtAddr::new(hhdm_offset);
56    let (level_4_frame, _flags) = Cr3::read();
57    let level_4_phys = level_4_frame.start_address().as_u64();
58    let level_4_virt = phys_offset + level_4_phys;
59
60    // SAFETY: Called once during single-threaded init. The HHDM offset correctly
61    // maps all physical RAM to virtual addresses. CR3 points to a valid page table
62    // set up by Limine.
63    unsafe {
64        let kcr3 = &raw mut KERNEL_CR3;
65        *kcr3 = level_4_frame.start_address();
66        let ready = &raw mut PAGING_READY;
67        *ready = true;
68    }
69
70    log::info!(
71        "Paging initialized: CR3={:#x}, HHDM={:#x}, L4 table @ {:#x}",
72        level_4_phys,
73        hhdm_offset,
74        level_4_virt.as_u64(),
75    );
76}
77
78/// Map all RAM regions from the memory map into the HHDM.
79///
80/// This ensures that every byte of physical RAM is accessible through the
81/// higher-half direct map. Should be called after paging::init.
82/// Fix for VMWare Workstation which doesn't identity-map all RAM by default, causing
83/// the kernel to crash when it tries to access unmapped RAM (e.g. for the buddy allocator's
84/// metadata array). Limine's initial map only covers the first 1GB of RAM, which is not enough
85/// for our 2GB test VM. This function lazily maps any missing RAM regions on
86/// demand using `ensure_identity_map_range()`, which checks if the region is already mapped
87/// before mapping it. This allows the kernel to boot successfully on VMWare Workstation without
88/// requiring changes to the bootloader or Limine configuration.
89/// 
90pub fn map_all_ram(memory_regions: &[crate::boot::entry::MemoryRegion]) {
91    use crate::boot::entry::MemoryKind;
92
93    for region in memory_regions {
94        if matches!(region.kind, MemoryKind::Free | MemoryKind::Reclaim) {
95            log::debug!(
96                "Mapping RAM region to HHDM: phys=0x{:x}..0x{:x}",
97                region.base,
98                region.base + region.size
99            );
100            ensure_identity_map_range(region.base, region.size);
101        }
102    }
103}
104
105/// Map a virtual page to a physical frame with the given flags.
106///
107/// Intermediate page tables are allocated from the buddy allocator as needed.
108pub fn map_page(
109    page: Page<Size4KiB>,
110    frame: X86PhysFrame<Size4KiB>,
111    flags: PageTableFlags,
112) -> Result<(), &'static str> {
113    if !is_initialized() {
114        return Err("Paging not initialized");
115    }
116    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
117    let (level_4_frame, _) = Cr3::read();
118    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
119    // SAFETY: level_4_virt points to the active CR3 PML4 via HHDM.
120    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
121    let mut mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
122    let mut allocator = BuddyFrameAllocator;
123
124    unsafe {
125        mapper
126            .map_to(page, frame, flags, &mut allocator)
127            .map_err(|_| "Failed to map page")?
128            .flush();
129    }
130    Ok(())
131}
132
133/// Unmap a virtual page, returning the physical frame it was mapped to.
134pub fn unmap_page(page: Page<Size4KiB>) -> Result<X86PhysFrame<Size4KiB>, &'static str> {
135    if !is_initialized() {
136        return Err("Paging not initialized");
137    }
138    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
139    let (level_4_frame, _) = Cr3::read();
140    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
141    // SAFETY: level_4_virt points to the active CR3 PML4 via HHDM.
142    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
143    let mut mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
144    let (frame, flush) = mapper.unmap(page).map_err(|_| "Failed to unmap page")?;
145    flush.flush();
146    Ok(frame)
147}
148
149/// Translate a virtual address to its mapped physical address.
150///
151/// Returns `None` if the address is not mapped.
152pub fn translate(addr: VirtAddr) -> Option<PhysAddr> {
153    if !is_initialized() {
154        return None;
155    }
156    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
157    let (level_4_frame, _) = Cr3::read();
158    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
159    // SAFETY: level_4_virt points to the active CR3 PML4 via HHDM.
160    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
161    let mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
162    mapper.translate_addr(addr)
163}
164
165/// Read the current CR3 value (physical address of the active level-4 page table).
166pub fn active_page_table() -> PhysAddr {
167    let (frame, _) = Cr3::read();
168    frame.start_address()
169}
170
171/// Return the physical address of the kernel's level-4 page table.
172///
173/// This is the CR3 value captured at init time — used by `AddressSpace::new_user()`
174/// to clone kernel mappings (PML4 entries 256..512) into new address spaces.
175pub fn kernel_l4_phys() -> PhysAddr {
176    // SAFETY: Written once during single-threaded init, read-only after that.
177    unsafe { *(&raw const KERNEL_CR3) }
178}
179
180/// Ensure a physical address is identity-mapped in the HHDM region.
181///
182/// If the page is not present, it is mapped with Read/Write permissions.
183/// This is used to lazily map MMIO or legacy BIOS regions (like ACPI tables)
184/// that might have been skipped by the bootloader's initial map.
185pub fn ensure_identity_map(phys_addr: u64) {
186    let virt_addr = crate::memory::phys_to_virt(phys_addr);
187    let page = Page::<Size4KiB>::containing_address(VirtAddr::new(virt_addr));
188    let frame = X86PhysFrame::containing_address(PhysAddr::new(phys_addr));
189
190    if translate(VirtAddr::new(virt_addr)).is_none() {
191        log::debug!(
192            "Identity mapping missing page: {:#x} -> {:#x}",
193            phys_addr,
194            virt_addr
195        );
196        // Map as Present | Writable (generic safe default for MMIO/BIOS)
197        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
198        if let Err(e) = map_page(page, frame, flags) {
199            log::error!("Failed to identity map {:#x}: {}", phys_addr, e);
200        }
201    }
202}
203
204/// Ensure a physical range is mapped in the HHDM region.
205///
206/// Builds a single `OffsetPageTable` for the entire range instead of
207/// one per page, and emits a single summary log instead of per-page noise.
208pub fn ensure_identity_map_range(phys_base: u64, size: u64) {
209    if size == 0 || !is_initialized() {
210        return;
211    }
212
213    let page_size = 4096u64;
214    let start = phys_base & !(page_size - 1);
215    let end = (phys_base.saturating_add(size).saturating_add(page_size - 1)) & !(page_size - 1);
216    if start >= end {
217        return;
218    }
219
220    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
221    let (level_4_frame, _) = Cr3::read();
222    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
223
224    // SAFETY: level_4_virt points to the active CR3 PML4 via HHDM.
225    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
226    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
227    let mut allocator = BuddyFrameAllocator;
228    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
229
230    let mut mapped_count: u64 = 0;
231    let mut p = start;
232    while p < end {
233        let virt = VirtAddr::new(crate::memory::phys_to_virt(p));
234        // Only map if not already present.
235        if mapper.translate_addr(virt).is_none() {
236            let page = Page::<Size4KiB>::containing_address(virt);
237            let frame = X86PhysFrame::containing_address(PhysAddr::new(p));
238            // SAFETY: frame is a valid physical page; mapper uses HHDM offset.
239            match unsafe { mapper.map_to(page, frame, flags, &mut allocator) } {
240                Ok(flush) => {
241                    flush.flush();
242                    mapped_count += 1;
243                }
244                Err(_) => {
245                    log::error!("ensure_identity_map_range: failed to map {:#x}", p);
246                }
247            }
248        }
249        p = p.saturating_add(page_size);
250    }
251
252    if mapped_count > 0 {
253        log::debug!(
254            "Identity mapped {} pages: phys {:#x}..{:#x}",
255            mapped_count,
256            start,
257            end,
258        );
259    }
260}