//! strat9_kernel/memory/mod.rs — memory management for the strat9 kernel.
1// Memory management module
2
3pub mod address_space;
4pub mod boot_alloc;
5pub mod buddy;
6pub mod cow;
7pub mod frame;
8pub mod heap;
9pub mod paging;
10pub mod userslice;
11pub mod zone;
12
13use crate::{boot::entry::MemoryRegion, sync::IrqDisabledToken};
14use core::sync::atomic::{AtomicU64, Ordering};
15
/// Higher Half Direct Map offset.
/// Set by Limine entry (non-zero) or left at 0 for BIOS/identity-mapped boot.
/// All physical-to-virtual conversions must add this offset.
static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);

/// Store the HHDM offset (call once, early in boot).
///
/// `Relaxed` ordering suffices here per the stated contract: the value is
/// published once during early boot, before concurrent readers exist.
pub fn set_hhdm_offset(offset: u64) {
    HHDM_OFFSET.store(offset, Ordering::Relaxed);
}

/// Get the current HHDM offset (0 until [`set_hhdm_offset`] has been called).
pub fn hhdm_offset() -> u64 {
    HHDM_OFFSET.load(Ordering::Relaxed)
}

/// Convert a physical address to a virtual address using the HHDM offset.
///
/// Uses wrapping arithmetic so an identity-mapped boot (offset 0) and the
/// canonical higher-half offset both work without overflow panics.
#[inline]
pub fn phys_to_virt(phys: u64) -> u64 {
    phys.wrapping_add(HHDM_OFFSET.load(Ordering::Relaxed))
}

/// Convert a virtual address back to a physical address (reverse of [`phys_to_virt`]).
#[inline]
pub fn virt_to_phys(virt: u64) -> u64 {
    virt.wrapping_sub(HHDM_OFFSET.load(Ordering::Relaxed))
}
42
43/// Initialize the memory management subsystem
44pub fn init_memory_manager(memory_regions: &[MemoryRegion]) {
45    buddy::init_buddy_allocator(memory_regions);
46}
47
48/// Initialize copy-on-write metadata.
49pub fn init_cow_subsystem(_memory_regions: &[MemoryRegion]) {}
50
51// Re-exports
52pub use address_space::{kernel_address_space, AddressSpace, VmaFlags, VmaPageSize, VmaType};
53pub use buddy::get_allocator;
54pub use frame::{AllocError, FrameAllocator, PhysFrame};
55pub use userslice::{UserSliceError, UserSliceRead, UserSliceReadWrite, UserSliceWrite};
56pub use crate::sync::with_irqs_disabled;
57
58/// Allocate `2^order` contiguous physical frames.
59///
60/// Requires an `IrqDisabledToken` proving that IRQs are disabled on the calling CPU,
61/// preventing re-entrant allocation from an interrupt handler on the same lock.
62#[inline]
63pub fn allocate_frames(token: &IrqDisabledToken, order: u8) -> Result<PhysFrame, AllocError> {
64    buddy::alloc(token, order)
65}
66
67/// Free `2^order` contiguous physical frames.
68///
69/// Requires an `IrqDisabledToken` proving that IRQs are disabled on the calling CPU.
70#[inline]
71pub fn free_frames(token: &IrqDisabledToken, frame: PhysFrame, order: u8) {
72    buddy::free(token, frame, order);
73}
74
75/// Allocate a single physical frame.
76#[inline]
77pub fn allocate_frame(token: &IrqDisabledToken) -> Result<PhysFrame, AllocError> {
78    allocate_frames(token, 0)
79}
80
81/// Free a single physical frame.
82#[inline]
83pub fn free_frame(token: &IrqDisabledToken, frame: PhysFrame) {
84    free_frames(token, frame, 0);
85}
86
87/// Allocate a zeroed 4KB frame suitable for DMA operations.
88///
89/// Disables IRQs internally to satisfy the allocator contract.
90pub fn allocate_dma_frame() -> Option<PhysFrame> {
91    with_irqs_disabled(|token| {
92        let frame = allocate_frame(token).ok()?;
93        // Zero the frame
94        let virt = phys_to_virt(frame.start_address.as_u64()) as *mut u8;
95        // SAFETY: frame is freshly allocated and HHDM-mapped; we own it exclusively.
96        unsafe {
97            core::ptr::write_bytes(virt, 0, 4096);
98        }
99        Some(frame)
100    })
101}
102
// TODO: Implement slab allocator