strat9_kernel/memory/mod.rs
// Memory management module

pub mod address_space;
pub mod block;
pub mod block_meta;
pub mod boot_alloc;
pub mod buddy;
pub mod cow;
pub mod frame;
pub mod heap;
pub mod mapping_index;
pub mod ownership;
pub mod paging;
pub mod region_cap;
pub mod userslice;
pub mod vmalloc;
pub mod zone;

use crate::{
    boot::entry::MemoryRegion, capability::CapId, process::get_task_by_pid, sync::IrqDisabledToken,
};
use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use spin::Once;

/// Higher Half Direct Map offset.
/// Set by the Limine entry path (non-zero) or left at 0 for BIOS/identity-mapped boot.
/// All physical-to-virtual conversions must add this offset.
static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);

/// Store the HHDM offset (call once, early in boot).
pub fn set_hhdm_offset(offset: u64) {
    HHDM_OFFSET.store(offset, Ordering::Relaxed);
}

/// Get the current HHDM offset.
pub fn hhdm_offset() -> u64 {
    HHDM_OFFSET.load(Ordering::Relaxed)
}

/// Convert a physical address to a virtual address using the HHDM offset.
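///
/// Illustrative round trip (the offset value is hypothetical):
///
/// ```ignore
/// // Assume set_hhdm_offset(0xffff_8000_0000_0000) ran during boot.
/// let phys: u64 = 0x1000;
/// let virt = phys_to_virt(phys); // 0xffff_8000_0000_1000
/// assert_eq!(virt_to_phys(virt), phys);
/// ```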
#[inline]
pub fn phys_to_virt(phys: u64) -> u64 {
    phys.wrapping_add(HHDM_OFFSET.load(Ordering::Relaxed))
}

/// Convert a virtual address back to a physical address (the inverse of
/// [`phys_to_virt`]; only meaningful for HHDM-mapped addresses).
#[inline]
pub fn virt_to_phys(virt: u64) -> u64 {
    virt.wrapping_sub(HHDM_OFFSET.load(Ordering::Relaxed))
}

/// Initialize the memory management subsystem.
pub fn init_memory_manager(memory_regions: &[MemoryRegion]) {
    buddy::init_buddy_allocator(memory_regions);
    // Race/corruption diagnostic: register the slab lock for E9 LOCK-A/LOCK-R traces.
    heap::debug_register_slab_trace();
}

/// Initialize copy-on-write metadata (currently a no-op; the regions are unused).
pub fn init_cow_subsystem(_memory_regions: &[MemoryRegion]) {}

static OWNERSHIP_TABLE: Once<OwnershipTable> = Once::new();
static GLOBAL_MAPPING_INDEX: Once<MappingIndex> = Once::new();
static MEMORY_REGION_REGISTRY: Once<MemoryRegionRegistry> = Once::new();

// Re-exports
pub use crate::sync::with_irqs_disabled;
pub use address_space::{
    kernel_address_space, AddressSpace, EffectiveMapping, VmaFlags, VmaPageSize, VmaType,
};
pub use block::{
    BlockHandle, BuddyReserved, Exclusive, MappedExclusive, MappedShared, PhysBlock, Released,
};
pub use block_meta::{get_block_meta, resolve_handle};
pub use buddy::{
    buddy_alloc_fail_counts_snapshot, get_allocator, poison_quarantine_pages_snapshot,
};
pub use frame::{
    block_phys_has_poison_guard, frame_meta_debug_snapshot, get_meta, get_meta_slot,
    invoke_vtable_on_last_ref, invoke_vtable_on_unmap, meta_generation_matches, meta_guard,
    AllocError, FrameAllocOptions, FrameAllocator, FrameMeta, FrameMetaVtable, FramePurpose,
    FreeListLink, MetaSlot, PhysFrame, DEFAULT_FRAME_META_VTABLE, META_SLOT_REFCOUNT_BYTE_OFFSET,
};
pub use mapping_index::{MappingIndex, MappingRef};
pub use ownership::{BlockState, OwnerEntry, OwnerError, OwnershipTable, RemoveRefResult};
pub use region_cap::{
    MemoryRegionRegistry, PublicMemoryRegionInfo, RegionCapError, ReleaseRegionResult,
};
pub use userslice::{UserSliceError, UserSliceRead, UserSliceReadWrite, UserSliceWrite};

/// Returns the global ownership table used by the memory runtime.
pub fn ownership_table() -> &'static OwnershipTable {
    OWNERSHIP_TABLE.call_once(OwnershipTable::new)
}

/// Returns the global reverse mapping index used by the memory runtime.
pub fn mapping_index() -> &'static MappingIndex {
    GLOBAL_MAPPING_INDEX.call_once(MappingIndex::new)
}

/// Returns the global public memory-region registry.
pub fn memory_region_registry() -> &'static MemoryRegionRegistry {
    MEMORY_REGION_REGISTRY.call_once(MemoryRegionRegistry::new)
}

/// Allocates a fresh internal mapping capability identifier.
pub fn allocate_mapping_cap_id() -> CapId {
    CapId::new()
}

/// Records that `cap_id` now names `handle` in the ownership table.
pub fn register_mapping_identity(handle: BlockHandle, cap_id: CapId) {
    match try_register_mapping_identity(handle, cap_id) {
        Ok(_) | Err(OwnerError::CapAlreadyPresent) => {}
        Err(error) => {
            log::warn!(
                "memory: failed to register block identity cap={} handle={:#x}/{}: {:?}",
                cap_id.as_u64(),
                handle.base.as_u64(),
                handle.order,
                error
            );
        }
    }
}

/// Fallible variant of mapping identity registration for transactional callers.
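///
/// Sketch of a transactional caller (assumes `handle` and `cap_id` come from
/// the surrounding mapping transaction):
///
/// ```ignore
/// let cap_id = allocate_mapping_cap_id();
/// if let Err(error) = try_register_mapping_identity(handle, cap_id) {
///     // Unwind the transaction instead of logging and continuing,
///     // which is what `register_mapping_identity` would do.
///     return Err(error);
/// }
/// ```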
pub fn try_register_mapping_identity(handle: BlockHandle, cap_id: CapId) -> Result<(), OwnerError> {
    ownership_table().ensure_ref(handle, cap_id).map(|_| ())
}

/// Releases a block back to the buddy allocator.
///
/// Lifecycle order:
/// 1. Invoke the per-block [`FrameMetaVtable::on_last_ref`] hook (once, for the head frame):
///    signals that the last shared ownership reference has been dropped.
/// 2. Invoke the per-page [`FrameMetaVtable::on_unmap`] hooks (once per constituent 4 KiB page):
///    signals that mappings are being torn down.
/// 3. Return the block to buddy. Poisoned frames ([`meta_guard::POISONED`]) are quarantined
///    and not recycled ([`poison_quarantine_pages_snapshot`]).
///
/// # Hook ordering guarantee
/// `on_last_ref` and `on_unmap` are called **before** the buddy allocator decides to recycle
/// or quarantine. Hooks that rely on the frame being recyclable may run pointlessly on
/// poisoned blocks; they must therefore be idempotent and side-effect-safe.
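///
/// Minimal usage sketch, assuming the block's last capability reference was
/// just dropped via [`unregister_mapping_identity`]:
///
/// ```ignore
/// if let Some(block) = unregister_mapping_identity(handle, cap_id) {
///     // `PhysBlock<Released>`: no capability references the block anymore.
///     release_owned_block(block);
/// }
/// ```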
pub fn release_owned_block(block: PhysBlock<Released>) {
    let handle = block.into_handle();
    with_irqs_disabled(|token| {
        let frame_phys = handle.base.as_u64();
        let order = handle.order;

        // 1. on_last_ref: once per block (head frame only).
        frame::invoke_vtable_on_last_ref(x86_64::PhysAddr::new(frame_phys));

        // 2. on_unmap: once per constituent page.
        for i in 0..(1u64 << order) {
            let p = x86_64::PhysAddr::new(frame_phys + i * frame::PAGE_SIZE);
            frame::invoke_vtable_on_unmap(p);
        }

        // 3. Hand the whole block back to buddy (which quarantines poisoned frames).
        buddy::free(
            token,
            PhysFrame {
                start_address: handle.base,
            },
            order,
        );
    });
}

/// Removes `cap_id` from the ownership table entry associated with `handle`.
pub fn unregister_mapping_identity(
    handle: BlockHandle,
    cap_id: CapId,
) -> Option<PhysBlock<Released>> {
    match ownership_table().remove_ref(handle, cap_id) {
        Ok(RemoveRefResult::Freed(block)) => Some(block),
        Ok(_) | Err(OwnerError::NotFound) | Err(OwnerError::CapNotFound) => None,
        Err(error) => {
            log::warn!(
                "memory: failed to unregister mapping identity cap={} handle={:#x}/{}: {:?}",
                cap_id.as_u64(),
                handle.base.as_u64(),
                handle.order,
                error
            );
            None
        }
    }
}

/// Revokes every live mapping associated with `cap_id`.
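///
/// Returns the number of mappings actually unmapped. Illustrative usage
/// sketch (a hypothetical capability-destruction path):
///
/// ```ignore
/// let revoked = revoke_mapping_cap_id(cap_id);
/// log::debug!("cap {}: revoked {} mapping(s)", cap_id.as_u64(), revoked);
/// ```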
pub fn revoke_mapping_cap_id(cap_id: CapId) -> usize {
    let mappings = mapping_index().lookup(cap_id);
    let mut revoked = 0usize;

    for mapping in mappings {
        let Some(task) = get_task_by_pid(mapping.pid) else {
            // Process is gone; the index entry is stale. Drop it.
            mapping_index().unregister(cap_id, mapping.pid, mapping.vaddr);
            continue;
        };

        let address_space = task.process.address_space_arc();
        match address_space.unmap_effective_mapping(mapping.vaddr.as_u64()) {
            Ok(()) => {
                revoked = revoked.saturating_add(1);
            }
            Err(error) => {
                // Unmap failed. If the mapping no longer exists, the index
                // entry was stale; otherwise keep it and report the failure.
                if address_space
                    .effective_mapping_by_start(mapping.vaddr.as_u64())
                    .is_none()
                {
                    mapping_index().unregister(cap_id, mapping.pid, mapping.vaddr);
                } else {
                    log::warn!(
                        "memory: failed to revoke mapping cap={} pid={} vaddr={:#x}: {}",
                        cap_id.as_u64(),
                        mapping.pid,
                        mapping.vaddr.as_u64(),
                        error
                    );
                }
            }
        }
    }

    // Flush stale translations on every CPU once, after all unmaps.
    if revoked != 0 {
        crate::arch::x86_64::tlb::shootdown_all();
    }

    revoked
}

/// Allocate `2^order` contiguous physical frames (raw, no zeroing).
///
/// **Deprecated**: use [`allocate_phys_contiguous`] for DMA / hardware-ring
/// allocations where physical contiguity is the explicit requirement, or
/// [`allocate_frame`] for single kernel-data frames. This name remains for
/// internal callers that pre-date the explicit-intent API.
#[deprecated(
    note = "use allocate_phys_contiguous() for DMA/contiguous allocations, \
            or allocate_frame() for single kernel-data frames"
)]
#[inline]
pub fn allocate_frames(token: &IrqDisabledToken, order: u8) -> Result<PhysFrame, AllocError> {
    buddy::alloc(token, order)
}

/// Total pages handed out by [`allocate_phys_contiguous`] (in units of 4 KiB pages, summed across orders).
///
/// Incremented on every successful contiguous alloc; never decremented.
/// Paired with [`CONTIGUOUS_FREE_PAGES`] to derive the live count.
static CONTIGUOUS_ALLOC_PAGES: AtomicUsize = AtomicUsize::new(0);
/// Total pages returned via [`free_phys_contiguous`].
static CONTIGUOUS_FREE_PAGES: AtomicUsize = AtomicUsize::new(0);
/// Total failed contiguous allocation attempts (fragmentation indicator).
static CONTIGUOUS_ALLOC_FAIL_COUNT: AtomicUsize = AtomicUsize::new(0);

/// Allocate a physically contiguous block of `2^order` pages.
///
/// Use when **physical contiguity** is required: DMA rings, MMIO-adjacent
/// buffers, hardware tables, copy-on-write multi-page copies, kernel stacks
/// ([`allocate_kernel_stack_frames`]), etc.
///
/// Do not use for general large kernel buffers; prefer [`allocate_kernel_virtual`]
/// (virtually contiguous, physically fragmented) or [`allocate_frame`] for single pages.
///
/// **Telemetry:** increments [`CONTIGUOUS_ALLOC_PAGES`] on success and
/// [`CONTIGUOUS_ALLOC_FAIL_COUNT`] on failure; [`free_phys_contiguous`]
/// increments [`CONTIGUOUS_FREE_PAGES`]. These counters cover **every** use of
/// this allocate/free pair (DMA buffers, kernel stacks, COW multi-page blocks,
/// etc.), not only hardware DMA. Treat [`phys_contiguous_diag`] as overall
/// buddy multi-page contiguous traffic.
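///
/// Illustrative sketch (order 2 = four contiguous 4 KiB pages for a
/// hypothetical device ring; error propagation elided):
///
/// ```ignore
/// let frame = with_irqs_disabled(|token| allocate_phys_contiguous(token, 2))?;
/// let ring_phys = frame.start_address.as_u64(); // contiguous 16 KiB base
/// let ring_virt = phys_to_virt(ring_phys);      // CPU-side access via HHDM
/// // ... later, once the device is quiesced:
/// with_irqs_disabled(|token| free_phys_contiguous(token, frame, 2));
/// ```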
#[inline]
pub(crate) fn allocate_phys_contiguous(
    token: &IrqDisabledToken,
    order: u8,
) -> Result<PhysFrame, AllocError> {
    match buddy::alloc(token, order) {
        Ok(frame) => {
            CONTIGUOUS_ALLOC_PAGES.fetch_add(1usize << order, Ordering::Relaxed);
            Ok(frame)
        }
        Err(e) => {
            CONTIGUOUS_ALLOC_FAIL_COUNT.fetch_add(1, Ordering::Relaxed);
            Err(e)
        }
    }
}

/// Free `2^order` contiguous physical frames.
///
/// **Deprecated**: use [`free_phys_contiguous`] for blocks returned by
/// [`allocate_phys_contiguous`], or [`free_frame`] for single frames.
#[deprecated(
    note = "use free_phys_contiguous() for blocks from allocate_phys_contiguous, \
            or free_frame() for single frames"
)]
#[inline]
pub fn free_frames(token: &IrqDisabledToken, frame: PhysFrame, order: u8) {
    buddy::free(token, frame, order);
}

/// Free a physically contiguous block previously returned by
/// [`allocate_phys_contiguous`].
///
/// Do **not** use this to free frames from [`allocate_frame`]; use
/// [`free_frame`] instead.
#[inline]
pub(crate) fn free_phys_contiguous(token: &IrqDisabledToken, frame: PhysFrame, order: u8) {
    CONTIGUOUS_FREE_PAGES.fetch_add(1usize << order, Ordering::Relaxed);
    buddy::free(token, frame, order);
}

/// Snapshot of contiguous-physical-allocation telemetry.
///
/// Covers all [`allocate_phys_contiguous`] / [`free_phys_contiguous`] usage
/// (DMA-style buffers, kernel stacks, COW multi-page blocks, etc.).
pub struct PhysContiguousDiag {
    pub pages_allocated: usize,
    pub pages_freed: usize,
    pub pages_live: usize,
    pub alloc_fail_count: usize,
}

/// Read current contiguous-allocation telemetry without locking.
///
/// Counters are not limited to device DMA; any code path using
/// [`allocate_phys_contiguous`] contributes (see the docs on that function).
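///
/// Sketch of a leak check (`expected_live` is a hypothetical threshold):
///
/// ```ignore
/// let diag = phys_contiguous_diag();
/// if diag.pages_live > expected_live {
///     log::warn!(
///         "contiguous pages: live={} alloc={} freed={} fails={}",
///         diag.pages_live,
///         diag.pages_allocated,
///         diag.pages_freed,
///         diag.alloc_fail_count
///     );
/// }
/// ```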
pub fn phys_contiguous_diag() -> PhysContiguousDiag {
    let alloc = CONTIGUOUS_ALLOC_PAGES.load(Ordering::Relaxed);
    let freed = CONTIGUOUS_FREE_PAGES.load(Ordering::Relaxed);
    PhysContiguousDiag {
        pages_allocated: alloc,
        pages_freed: freed,
        pages_live: alloc.saturating_sub(freed),
        alloc_fail_count: CONTIGUOUS_ALLOC_FAIL_COUNT.load(Ordering::Relaxed),
    }
}

/// Allocate physically contiguous pages for a kernel stack.
///
/// Kernel stacks need contiguous physical backing in the current design
/// because they are carved as a single block and accessed directly through
/// the HHDM. This is distinct from DMA intent, so keep a dedicated API.
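///
/// Sketch: a 16 KiB (order 2) stack and its HHDM top-of-stack pointer
/// (the layout arithmetic is illustrative):
///
/// ```ignore
/// let frames = with_irqs_disabled(|token| allocate_kernel_stack_frames(token, 2))?;
/// let base = phys_to_virt(frames.start_address.as_u64());
/// let stack_top = base + (4096 << 2); // x86-64 stacks grow down from the top
/// ```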
#[inline]
pub fn allocate_kernel_stack_frames(
    token: &IrqDisabledToken,
    order: u8,
) -> Result<PhysFrame, AllocError> {
    allocate_phys_contiguous(token, order)
}

/// Free pages previously returned by [`allocate_kernel_stack_frames`].
#[inline]
pub fn free_kernel_stack_frames(token: &IrqDisabledToken, frame: PhysFrame, order: u8) {
    free_phys_contiguous(token, frame, order);
}

/// Allocate a single **zeroed** physical frame with `KernelData` purpose.
///
/// This is the standard allocation path for all kernel-internal frames. It
/// uses `FrameAllocOptions::new()` (zeroed = true, purpose = KernelData) and
/// performs the UNUSED → 0 → 1 refcount CAS (Asterinas OSTD pattern).
///
/// For page-table node allocation use `BuddyFrameAllocator` (via paging.rs)
/// or `FrameAllocOptions::new().purpose(FramePurpose::PageTable).allocate()`.
/// For user-space frames use `FrameAllocOptions::new().purpose(FramePurpose::UserData)`.
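///
/// Illustrative sketch of the three purpose-specific paths:
///
/// ```ignore
/// let (kernel, user, table) = with_irqs_disabled(|token| {
///     let kernel = allocate_frame(token)?; // KernelData, zeroed
///     let user = allocate_user_frame(token)?; // UserData, movable
///     let table = allocate_frame_for_purpose(token, FramePurpose::PageTable)?;
///     Ok::<_, AllocError>((kernel, user, table))
/// })?;
/// ```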
#[inline]
pub fn allocate_frame(token: &IrqDisabledToken) -> Result<PhysFrame, AllocError> {
    allocate_frame_for_purpose(token, FramePurpose::KernelData)
}

/// Allocate a single zeroed 4 KiB frame for an explicit purpose.
///
/// This wrapper keeps the default zeroing policy while allowing callers to
/// select the ownership class that drives buddy migratetype selection.
#[inline]
pub fn allocate_frame_for_purpose(
    token: &IrqDisabledToken,
    purpose: FramePurpose,
) -> Result<PhysFrame, AllocError> {
    FrameAllocOptions::new().purpose(purpose).allocate(token)
}

/// Allocate a single zeroed 4 KiB frame intended for user-space memory.
///
/// The frame is tagged movable, which biases the buddy allocator's zone
/// selection toward preserving scarce low memory for pinned kernel pages.
#[inline]
pub fn allocate_user_frame(token: &IrqDisabledToken) -> Result<PhysFrame, AllocError> {
    allocate_frame_for_purpose(token, FramePurpose::UserData)
}

/// Free a single physical frame.
///
/// Requires an `IrqDisabledToken` proving that IRQs are disabled on the
/// calling CPU. The caller must ensure the frame is not currently mapped
/// anywhere and that the buddy allocator's metadata is consistent with the
/// frame's state (e.g. refcount = 0).
///
/// Prefer [`free_phys_contiguous`] for multi-frame blocks. This raw path is
/// kept for symmetry with [`allocate_frame`] and for special cases where the
/// caller manages zeroing and metadata explicitly. For standard single-frame
/// deallocation, prefer [`release_owned_block`], which also handles ownership
/// table updates and safety checks.
#[inline]
pub fn free_frame(token: &IrqDisabledToken, frame: PhysFrame) {
    buddy::free(token, frame, 0);
}

/// Allocate virtually contiguous kernel memory backed by fragmented physical
/// pages.
///
/// This is the explicit large-allocation API for kernel callers that require a
/// large virtually contiguous range but not physical contiguity.
///
/// Returned pointers are aligned to a **4 KiB** page boundary. For alignment
/// stricter than one page, do not route through [`GlobalAlloc`] / large
/// [`Layout`] on this heap; use a dedicated aligned mapping or slab path.
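///
/// Sketch: a large scratch buffer (the 1 MiB size is arbitrary) with no need
/// for physical contiguity:
///
/// ```ignore
/// let buf = with_irqs_disabled(|token| allocate_kernel_virtual(1 << 20, token))?;
/// // ... use `buf` as a page-aligned *mut u8 over the 1 MiB range ...
/// let released = with_irqs_disabled(|token| free_kernel_virtual(buf, token));
/// debug_assert!(released);
/// ```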
#[inline]
#[track_caller]
pub fn allocate_kernel_virtual(
    size: usize,
    token: &IrqDisabledToken,
) -> Result<*mut u8, vmalloc::VmallocError> {
    vmalloc::vmalloc(size, token)
}

/// Free memory previously returned by [`allocate_kernel_virtual`].
///
/// Returns `true` if a mapping was released. `false` means nothing was freed
/// (e.g. pointer not in the vmalloc arena or not a live allocation start).
#[inline]
pub fn free_kernel_virtual(ptr: *mut u8, token: &IrqDisabledToken) -> bool {
    vmalloc::vfree(ptr, token)
}


/// Allocate a single zeroed 4 KiB frame.
///
/// Disables IRQs internally. The frame is zeroed before being returned
/// (`FrameAllocOptions::new()` defaults to zeroed = true).
///
/// This is a convenience wrapper for hardware drivers that need a single
/// device-accessible page. It carries no DMA-zone or address-range
/// guarantee: the frame may come from any zone. For strict DMA
/// requirements (e.g. <16 MiB for ISA DMA), allocate from the DMA zone
/// directly via `FrameAllocOptions`.
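///
/// Sketch of typical driver usage (the command-page role is hypothetical;
/// the page is already zeroed on return):
///
/// ```ignore
/// let frame = allocate_zeroed_frame().expect("out of physical frames");
/// let cmd_phys = frame.start_address.as_u64();
/// // program `cmd_phys` into the device register
/// ```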
pub fn allocate_zeroed_frame() -> Option<PhysFrame> {
    with_irqs_disabled(|token| {
        FrameAllocOptions::new()
            .purpose(FramePurpose::KernelData)
            .allocate(token)
            .ok()
    })
}
449}