//! strat9_kernel/memory/paging.rs — x86_64 paging management.
use x86_64::{
    registers::control::Cr3,
    structures::paging::{
        FrameAllocator as X86FrameAllocator, Mapper, OffsetPageTable, Page, PageTable,
        PageTableFlags, PhysFrame as X86PhysFrame, Size4KiB, Translate,
    },
    PhysAddr, VirtAddr,
};
16
/// Zero-sized adapter that satisfies the `x86_64` crate's frame-allocator
/// trait by delegating to the kernel's global buddy allocator
/// (`crate::memory::allocate_frame`). Used when `map_to` needs fresh
/// frames for intermediate page tables.
pub struct BuddyFrameAllocator;
21
// SAFETY: the buddy allocator hands out unique, unused physical frames,
// which is the contract `FrameAllocator` requires (returned frames must
// not be aliased by other live mappings or allocations).
unsafe impl X86FrameAllocator<Size4KiB> for BuddyFrameAllocator {
    /// Allocate one 4 KiB physical frame, or `None` if the buddy
    /// allocator is exhausted (or the frame address is misaligned).
    fn allocate_frame(&mut self) -> Option<X86PhysFrame<Size4KiB>> {
        // NOTE(review): the IRQ-disabled token is fabricated unchecked —
        // this assumes every caller reaches here with interrupts already
        // disabled (or that a race is tolerable); confirm against the
        // contract of crate::sync::IrqDisabledToken.
        let token = unsafe { crate::sync::IrqDisabledToken::new_unchecked() };
        // Allocation failure is mapped to None, as the trait expects.
        let frame = crate::memory::allocate_frame(&token).ok()?;
        // Convert to the x86_64 crate's frame type; from_start_address
        // fails only if the address is not 4 KiB-aligned.
        X86PhysFrame::from_start_address(frame.start_address).ok()
    }
}
36
/// Set to `true` once `init` has run and recorded the boot CR3.
// NOTE(review): plain `static mut` with unsynchronized access — sound
// only while paging setup happens single-threaded/pre-SMP; an AtomicBool
// would make this robust. Confirm the boot sequencing guarantee.
static mut PAGING_READY: bool = false;

/// Physical address of the kernel's level-4 page table, captured from
/// CR3 by `init`. Remains 0 until `init` runs.
static mut KERNEL_CR3: PhysAddr = PhysAddr::new_truncate(0);
42
/// Whether `init` has run and the paging globals (`KERNEL_CR3`) are valid.
pub fn is_initialized() -> bool {
    // Read through `&raw const` so no shared reference to a `static mut`
    // is materialized (avoids UB under the 2024 static-mut rules).
    // NOTE(review): unsynchronized read — acceptable only pre-SMP.
    unsafe { *(&raw const PAGING_READY) }
}
47
/// Adopt the bootloader's page tables as the kernel's own and mark
/// paging as ready.
///
/// Reads CR3 (the level-4 table installed by the bootloader), stores its
/// physical address in `KERNEL_CR3`, and sets `PAGING_READY`. No tables
/// are built or switched here.
///
/// `hhdm_offset` is the higher-half direct-map base; it is used only to
/// compute the table's virtual address for the log line.
pub fn init(hhdm_offset: u64) {
    let phys_offset = VirtAddr::new(hhdm_offset);
    let (level_4_frame, _flags) = Cr3::read();
    let level_4_phys = level_4_frame.start_address().as_u64();
    // Where the L4 table is visible through the HHDM (logging only).
    let level_4_virt = phys_offset + level_4_phys;

    unsafe {
        // Write the statics through `&raw mut` pointers so no `&mut`
        // reference to a `static mut` is created.
        // NOTE(review): unsynchronized — assumes init runs once, pre-SMP.
        let kcr3 = &raw mut KERNEL_CR3;
        *kcr3 = level_4_frame.start_address();
        // PAGING_READY is set last, after KERNEL_CR3 is valid.
        let ready = &raw mut PAGING_READY;
        *ready = true;
    }

    log::info!(
        "Paging initialized: CR3={:#x}, HHDM={:#x}, L4 table @ {:#x}",
        level_4_phys,
        hhdm_offset,
        level_4_virt.as_u64(),
    );
}
77
78pub fn map_all_ram(memory_regions: &[crate::boot::entry::MemoryRegion]) {
91 use crate::boot::entry::MemoryKind;
92
93 for region in memory_regions {
94 if matches!(region.kind, MemoryKind::Free | MemoryKind::Reclaim) {
95 log::debug!(
96 "Mapping RAM region to HHDM: phys=0x{:x}..0x{:x}",
97 region.base,
98 region.base + region.size
99 );
100 ensure_identity_map_range(region.base, region.size);
101 }
102 }
103}
104
/// Map one 4 KiB virtual `page` to physical `frame` with `flags` in the
/// currently active (CR3) address space, allocating any missing
/// intermediate page tables from the buddy allocator.
///
/// Returns `Err` if paging has not been initialized or if the mapping
/// fails (e.g. the page is already mapped or a table frame could not be
/// allocated).
pub fn map_page(
    page: Page<Size4KiB>,
    frame: X86PhysFrame<Size4KiB>,
    flags: PageTableFlags,
) -> Result<(), &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    // Reach the active L4 table through the HHDM so we can edit it.
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    // SAFETY: assumes the HHDM maps the L4 frame and nothing else holds a
    // conflicting reference to these tables while we mutate — TODO confirm
    // against callers (pre-SMP / interrupts-off discipline).
    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
    let mut allocator = BuddyFrameAllocator;

    unsafe {
        // SAFETY: frame/flags are caller-supplied; creating aliased or
        // otherwise unsound mappings is the caller's responsibility.
        mapper
            .map_to(page, frame, flags, &mut allocator)
            .map_err(|_| "Failed to map page")?
            .flush(); // invalidate the TLB entry for this page
    }
    Ok(())
}
132
/// Remove the mapping for `page` from the currently active (CR3) address
/// space and return the physical frame it pointed to.
///
/// The frame is NOT freed — ownership passes to the caller. Returns
/// `Err` if paging is uninitialized or the page was not mapped.
pub fn unmap_page(page: Page<Size4KiB>) -> Result<X86PhysFrame<Size4KiB>, &'static str> {
    if !is_initialized() {
        return Err("Paging not initialized");
    }
    // Reach the active L4 table through the HHDM (same pattern as map_page).
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    // SAFETY: assumes the HHDM maps the L4 frame and we are the only
    // mutator of the active tables here — TODO confirm.
    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
    let (frame, flush) = mapper.unmap(page).map_err(|_| "Failed to unmap page")?;
    flush.flush(); // invalidate the stale TLB entry
    Ok(frame)
}
148
/// Translate a virtual address to its physical address by walking the
/// currently active (CR3) page tables.
///
/// Returns `None` if paging is uninitialized or `addr` is not mapped.
pub fn translate(addr: VirtAddr) -> Option<PhysAddr> {
    if !is_initialized() {
        return None;
    }
    // Reach the active L4 table through the HHDM, read-only walk.
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();
    // SAFETY: assumes the HHDM maps the L4 frame; the table is only read.
    let mapper = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mapper = unsafe { OffsetPageTable::new(mapper, phys_offset) };
    mapper.translate_addr(addr)
}
164
165pub fn active_page_table() -> PhysAddr {
167 let (frame, _) = Cr3::read();
168 frame.start_address()
169}
170
/// Physical address of the kernel's L4 page table as recorded by `init`
/// (zero if `init` has not run). Unlike `active_page_table`, this does
/// not read CR3, so it stays valid even after switching address spaces.
pub fn kernel_l4_phys() -> PhysAddr {
    // Raw-pointer read avoids forming a reference to a `static mut`.
    // NOTE(review): unsynchronized — relies on init-before-use ordering.
    unsafe { *(&raw const KERNEL_CR3) }
}
179
180pub fn ensure_identity_map(phys_addr: u64) {
186 let virt_addr = crate::memory::phys_to_virt(phys_addr);
187 let page = Page::<Size4KiB>::containing_address(VirtAddr::new(virt_addr));
188 let frame = X86PhysFrame::containing_address(PhysAddr::new(phys_addr));
189
190 if translate(VirtAddr::new(virt_addr)).is_none() {
191 log::debug!(
192 "Identity mapping missing page: {:#x} -> {:#x}",
193 phys_addr,
194 virt_addr
195 );
196 let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
198 if let Err(e) = map_page(page, frame, flags) {
199 log::error!("Failed to identity map {:#x}: {}", phys_addr, e);
200 }
201 }
202}
203
/// Ensure every 4 KiB page of the physical range
/// `[phys_base, phys_base + size)` is reachable through the HHDM,
/// mapping any page that does not currently translate.
///
/// Despite the name, pages are mapped at `phys_to_virt(p)` (the HHDM
/// alias), not at `p` itself. No-op when `size == 0` or paging is
/// uninitialized. Per-page failures are logged and skipped.
pub fn ensure_identity_map_range(phys_base: u64, size: u64) {
    if size == 0 || !is_initialized() {
        return;
    }

    // Round the range out to whole 4 KiB pages; saturating arithmetic
    // guards against overflow near the top of the physical address space.
    let page_size = 4096u64;
    let start = phys_base & !(page_size - 1);
    let end = (phys_base.saturating_add(size).saturating_add(page_size - 1)) & !(page_size - 1);
    if start >= end {
        return;
    }

    // Build a mapper over the active (CR3) tables via the HHDM, once for
    // the whole range rather than per page.
    let phys_offset = VirtAddr::new(crate::memory::hhdm_offset());
    let (level_4_frame, _) = Cr3::read();
    let level_4_virt = phys_offset + level_4_frame.start_address().as_u64();

    // SAFETY: assumes the L4 frame is HHDM-mapped and nothing else
    // mutates the active tables concurrently — TODO confirm callers.
    let l4_table = unsafe { &mut *level_4_virt.as_mut_ptr::<PageTable>() };
    let mut mapper = unsafe { OffsetPageTable::new(l4_table, phys_offset) };
    let mut allocator = BuddyFrameAllocator;
    // NOTE(review): RAM is mapped writable and without NO_EXECUTE here;
    // confirm whether HHDM mappings should be non-executable.
    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;

    let mut mapped_count: u64 = 0;
    let mut p = start;
    while p < end {
        let virt = VirtAddr::new(crate::memory::phys_to_virt(p));
        // Only touch pages that are not yet mapped.
        if mapper.translate_addr(virt).is_none() {
            let page = Page::<Size4KiB>::containing_address(virt);
            let frame = X86PhysFrame::containing_address(PhysAddr::new(p));
            // SAFETY: mapping bootloader-reported RAM; intermediate
            // tables come from the buddy allocator.
            match unsafe { mapper.map_to(page, frame, flags, &mut allocator) } {
                Ok(flush) => {
                    flush.flush(); // invalidate TLB for the new page
                    mapped_count += 1;
                }
                Err(_) => {
                    // Best-effort: log and continue with the next page.
                    log::error!("ensure_identity_map_range: failed to map {:#x}", p);
                }
            }
        }
        // Saturating step mirrors the saturating end computation above.
        p = p.saturating_add(page_size);
    }

    if mapped_count > 0 {
        log::debug!(
            "Identity mapped {} pages: phys {:#x}..{:#x}",
            mapped_count,
            start,
            end,
        );
    }
}