// strat9_kernel/ostd/mm.rs
1//! Memory management abstraction layer
2//!
3//! Provides safe abstractions for memory operations including:
4//! - Physical and virtual address types
5//! - Memory mapping abstractions (MappedPages)
6//! - Page table management
7//!
8//! Inspired by OSes Theseus MappedPages and Asterinas VM modules.
9
10#![allow(unsafe_code)]
11#![allow(unsafe_op_in_unsafe_fn)]
12
13extern crate alloc;
14
15use core::{marker::PhantomData, ops::Range};
16
/// Physical address type
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct PhysAddr(u64);

impl PhysAddr {
    /// Creates a new physical address from a raw `u64`.
    pub const fn new(addr: u64) -> Self {
        Self(addr)
    }

    /// Creates a null (zero) physical address.
    pub const fn null() -> Self {
        Self(0)
    }

    /// Returns the raw address value.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }

    /// Returns the raw address value as `usize`.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Checks if the address is null (zero).
    pub const fn is_null(&self) -> bool {
        self.0 == 0
    }

    /// Aligns the address up to the given alignment.
    ///
    /// `align` must be a non-zero power of two; this is checked in debug
    /// builds (previously `align == 0` underflowed `align - 1` and
    /// non-power-of-two values silently produced garbage).
    pub const fn align_up(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self((self.0 + align - 1) & !(align - 1))
    }

    /// Aligns the address down to the given alignment.
    ///
    /// `align` must be a non-zero power of two; checked in debug builds.
    pub const fn align_down(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self(self.0 & !(align - 1))
    }

    /// Checks if the address is aligned to the given alignment.
    ///
    /// `align` must be a non-zero power of two; checked in debug builds.
    pub const fn is_aligned(&self, align: u64) -> bool {
        debug_assert!(align.is_power_of_two());
        self.0 & (align - 1) == 0
    }

    /// Adds an offset to the address (panics on overflow in debug builds).
    pub const fn add(&self, offset: u64) -> Self {
        Self(self.0 + offset)
    }

    /// Subtracts an offset from the address (panics on underflow in debug builds).
    pub const fn sub(&self, offset: u64) -> Self {
        Self(self.0 - offset)
    }
}

impl From<u64> for PhysAddr {
    /// Converts a raw `u64` into a `PhysAddr`.
    fn from(addr: u64) -> Self {
        Self::new(addr)
    }
}

impl From<PhysAddr> for u64 {
    /// Extracts the raw `u64` from a `PhysAddr`.
    fn from(addr: PhysAddr) -> u64 {
        addr.as_u64()
    }
}
87
/// Virtual address type
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtAddr(u64);

impl VirtAddr {
    /// Creates a new virtual address from a raw `u64`.
    pub const fn new(addr: u64) -> Self {
        Self(addr)
    }

    /// Creates a null (zero) virtual address.
    pub const fn null() -> Self {
        Self(0)
    }

    /// Returns the raw address value.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }

    /// Returns the raw address value as `usize`.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Checks if the address is null (zero).
    pub const fn is_null(&self) -> bool {
        self.0 == 0
    }

    /// Aligns the address up to the given alignment.
    ///
    /// `align` must be a non-zero power of two; this is checked in debug
    /// builds (previously `align == 0` underflowed `align - 1` and
    /// non-power-of-two values silently produced garbage).
    pub const fn align_up(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self((self.0 + align - 1) & !(align - 1))
    }

    /// Aligns the address down to the given alignment.
    ///
    /// `align` must be a non-zero power of two; checked in debug builds.
    pub const fn align_down(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self(self.0 & !(align - 1))
    }

    /// Checks if the address is aligned to the given alignment.
    ///
    /// `align` must be a non-zero power of two; checked in debug builds.
    pub const fn is_aligned(&self, align: u64) -> bool {
        debug_assert!(align.is_power_of_two());
        self.0 & (align - 1) == 0
    }

    /// Adds an offset to the address (panics on overflow in debug builds).
    pub const fn add(&self, offset: u64) -> Self {
        Self(self.0 + offset)
    }

    /// Subtracts an offset from the address (panics on underflow in debug builds).
    pub const fn sub(&self, offset: u64) -> Self {
        Self(self.0 - offset)
    }
}

impl From<u64> for VirtAddr {
    /// Converts a raw `u64` into a `VirtAddr`.
    fn from(addr: u64) -> Self {
        Self::new(addr)
    }
}

impl From<VirtAddr> for u64 {
    /// Extracts the raw `u64` from a `VirtAddr`.
    fn from(addr: VirtAddr) -> u64 {
        addr.as_u64()
    }
}
158
/// Page size in bytes for a standard (small) 4 KiB page.
pub const PAGE_SIZE: usize = 4096;
161
162/// Converts a physical address to a virtual address using HHDM offset
163#[inline]
164pub fn phys_to_virt(phys: PhysAddr) -> VirtAddr {
165    VirtAddr::new(crate::memory::phys_to_virt(phys.as_u64()))
166}
167
168/// Converts a virtual address to a physical address
169#[inline]
170pub fn virt_to_phys(virt: VirtAddr) -> PhysAddr {
171    PhysAddr::new(crate::memory::virt_to_phys(virt.as_u64()))
172}
173
/// A safely mapped memory region
///
/// `MappedPages` represents a contiguous virtual memory mapping to physical frames.
/// When `owned` is set, the mapping is automatically unmapped when the
/// `MappedPages` is dropped.
///
/// This is inspired by Theseus's MappedPages abstraction.
pub struct MappedPages {
    /// Starting virtual address of the mapping
    start_vaddr: VirtAddr,
    /// Size of the mapping in bytes (not necessarily page-aligned)
    size: usize,
    /// Whether this handle owns the underlying frames; only owners unmap on drop
    owned: bool,
    /// Raw-pointer marker: suppresses the auto-derived `Send`/`Sync` impls so
    /// thread transfer must be opted into explicitly (the type remains `!Sync`)
    _marker: PhantomData<*mut ()>,
}
190
// SAFETY: MappedPages holds only an address, a size, and an ownership flag;
// moving the handle to another CPU/thread transfers exclusive ownership of the
// mapping with it. NOTE(review): this assumes the mapping lives in the shared
// kernel address space (Drop unmaps via kernel_address_space()) — confirm no
// CPU-local page tables are involved. The `PhantomData<*mut ()>` marker still
// keeps the type `!Sync`, so concurrent shared access remains disallowed.
unsafe impl Send for MappedPages {}
193
194impl MappedPages {
195    /// Creates a new MappedPages from an existing mapping
196    ///
197    /// # Safety
198    ///
199    /// - The virtual address range must be a valid mapping
200    /// - The caller must ensure the mapping remains valid for the lifetime
201    /// - The size must match the actual mapping size
202    pub unsafe fn new(start_vaddr: VirtAddr, size: usize, owned: bool) -> Self {
203        Self {
204            start_vaddr,
205            size,
206            owned,
207            _marker: PhantomData,
208        }
209    }
210
211    /// Returns the starting virtual address
212    pub fn start_address(&self) -> VirtAddr {
213        self.start_vaddr
214    }
215
216    /// Returns the size in bytes
217    pub fn size(&self) -> usize {
218        self.size
219    }
220
221    /// Returns the ending virtual address (exclusive)
222    pub fn end_address(&self) -> VirtAddr {
223        self.start_vaddr.add(self.size as u64)
224    }
225
226    /// Returns the virtual address range
227    pub fn range(&self) -> Range<VirtAddr> {
228        self.start_vaddr..self.end_address()
229    }
230
231    /// Returns a pointer to the start of the mapping
232    pub fn as_ptr(&self) -> *const u8 {
233        self.start_vaddr.as_usize() as *const u8
234    }
235
236    /// Returns a mutable pointer to the start of the mapping
237    pub fn as_mut_ptr(&mut self) -> *mut u8 {
238        self.start_vaddr.as_usize() as *mut u8
239    }
240
241    /// Reads a value from the mapped memory at the given offset
242    ///
243    /// # Safety
244    ///
245    /// - The offset + size_of::<T>() must be within the mapping
246    /// - The memory must be properly initialized for type T
247    /// - Proper alignment must be ensured
248    pub unsafe fn read<T>(&self, offset: usize) -> Result<T, MapError> {
249        if offset + core::mem::size_of::<T>() > self.size {
250            return Err(MapError::OutOfBounds);
251        }
252        let ptr = self.start_vaddr.as_usize().wrapping_add(offset) as *const T;
253        // SAFETY: Caller guarantees the pointer is valid and properly aligned
254        Ok(ptr.read_volatile())
255    }
256
257    /// Writes a value to the mapped memory at the given offset
258    ///
259    /// # Safety
260    ///
261    /// - The offset + size_of::<T>() must be within the mapping
262    /// - The memory must be writable (not read-only)
263    /// - Proper alignment must be ensured
264    pub unsafe fn write<T>(&mut self, offset: usize, value: T) -> Result<(), MapError> {
265        if offset + core::mem::size_of::<T>() > self.size {
266            return Err(MapError::OutOfBounds);
267        }
268        let ptr = self.start_vaddr.as_usize().wrapping_add(offset) as *mut T;
269        // SAFETY: Caller guarantees the pointer is valid and writable
270        ptr.write_volatile(value);
271        Ok(())
272    }
273
274    /// Returns a slice reference to the mapped memory
275    ///
276    /// # Safety
277    ///
278    /// - The mapping must contain initialized data
279    /// - No other mutable references to this memory can exist
280    pub unsafe fn as_slice(&self, len: usize) -> Result<&[u8], MapError> {
281        if len > self.size {
282            return Err(MapError::OutOfBounds);
283        }
284        Ok(core::slice::from_raw_parts(self.as_ptr(), len))
285    }
286
287    /// Returns a mutable slice reference to the mapped memory
288    ///
289    /// # Safety
290    ///
291    /// - The mapping must be writable
292    /// - No other references to this memory can exist
293    pub unsafe fn as_mut_slice(&mut self, len: usize) -> Result<&mut [u8], MapError> {
294        if len > self.size {
295            return Err(MapError::OutOfBounds);
296        }
297        Ok(core::slice::from_raw_parts_mut(self.as_mut_ptr(), len))
298    }
299
300    /// Converts this MappedPages into an AllocatedPages, consuming the mapping
301    ///
302    /// This transfers ownership of the underlying frames.
303    pub fn into_allocated_pages(self) -> Result<AllocatedPages, MapError> {
304        if !self.owned {
305            return Err(MapError::NotOwner);
306        }
307        let pages = AllocatedPages {
308            start_vaddr: self.start_vaddr,
309            size: self.size,
310        };
311        // Prevent the Drop implementation from running
312        core::mem::forget(self);
313        Ok(pages)
314    }
315}
316
317impl Drop for MappedPages {
318    /// Performs the drop operation.
319    fn drop(&mut self) {
320        if self.owned {
321            // Calculate page count
322            let page_count = (self.size + PAGE_SIZE - 1) / PAGE_SIZE;
323            // We own the mapping and are responsible for unmapping
324            crate::memory::address_space::kernel_address_space()
325                .unmap_region(
326                    self.start_vaddr.as_u64(),
327                    page_count,
328                    crate::memory::address_space::VmaPageSize::Small,
329                )
330                .ok();
331        }
332    }
333}
334
335impl core::fmt::Debug for MappedPages {
336    /// Performs the fmt operation.
337    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
338        f.debug_struct("MappedPages")
339            .field("start", &self.start_vaddr)
340            .field("size", &self.size)
341            .field("owned", &self.owned)
342            .finish()
343    }
344}
345
/// Allocated pages that can be mapped
///
/// Represents virtually allocated pages that own their underlying frames.
/// Obtained by consuming an owned `MappedPages`.
pub struct AllocatedPages {
    /// Starting virtual address of the allocation
    start_vaddr: VirtAddr,
    /// Size of the allocation in bytes
    size: usize,
}
353
354impl AllocatedPages {
355    /// Returns the starting virtual address
356    pub fn start_address(&self) -> VirtAddr {
357        self.start_vaddr
358    }
359
360    /// Returns the size in bytes
361    pub fn size(&self) -> usize {
362        self.size
363    }
364
365    /// Returns the number of pages
366    pub fn page_count(&self) -> usize {
367        (self.size + PAGE_SIZE - 1) / PAGE_SIZE
368    }
369}
370
impl Drop for AllocatedPages {
    /// Intended to release the owned frames; currently a deliberate leak.
    fn drop(&mut self) {
        // Deallocate the frames using the buddy allocator
        // SAFETY: we own these pages and are responsible for deallocation
        let phys_addr = virt_to_phys(self.start_vaddr).as_u64();
        // TODO: implement proper frame deallocation
        // For now, we just leak the frames to avoid double-free issues;
        // the translated physical address is computed but intentionally unused.
        let _ = phys_addr;
        let _ = self.size;
        // crate::memory::frame::deallocate_frames(phys_addr, self.size);
    }
}
384
/// Memory mapping flags
///
/// Architecture-neutral page attributes; each field corresponds to one
/// permission/caching property of a mapping.
#[derive(Debug, Clone, Copy)]
pub struct MapFlags {
    /// Page is present (mapped)
    pub present: bool,
    /// Page is writable
    pub writable: bool,
    /// Page is user-accessible
    pub user: bool,
    /// Write-through caching
    pub write_through: bool,
    /// Cache disabled
    pub cache_disabled: bool,
    /// No-execute (NX)
    pub no_execute: bool,
}

impl MapFlags {
    /// Flags for a read-only kernel mapping (the baseline preset).
    pub const fn read_only() -> Self {
        Self {
            present: true,
            writable: false,
            user: false,
            write_through: false,
            cache_disabled: false,
            no_execute: false,
        }
    }

    /// Flags for a read-write kernel mapping: the read-only preset plus write.
    pub const fn read_write() -> Self {
        Self {
            writable: true,
            ..Self::read_only()
        }
    }

    /// Flags for a user-accessible read-write mapping.
    pub const fn user_read_write() -> Self {
        Self {
            user: true,
            ..Self::read_write()
        }
    }

    /// Flags for MMIO (device memory): writable, uncached, non-executable.
    pub const fn mmio() -> Self {
        Self {
            cache_disabled: true,
            no_execute: true,
            ..Self::read_write()
        }
    }
}
451
/// Errors produced by the memory-mapping APIs in this module.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
pub enum MapError {
    /// A requested offset/length fell outside the mapping or region.
    #[error("address out of bounds")]
    OutOfBounds,
    /// The operation requires frame ownership that this handle lacks.
    #[error("not owner of mapping")]
    NotOwner,
    /// The target address is already mapped.
    #[error("address already mapped")]
    AlreadyMapped,
    /// The address is not valid for the requested operation.
    #[error("invalid address")]
    InvalidAddress,
    /// No physical memory available to satisfy the request.
    #[error("out of memory")]
    OutOfMemory,
    /// Architecture-specific failure with a static description.
    #[error("architecture error: {0}")]
    ArchError(&'static str),
}
467
/// Virtual Memory Address Region (VMAR)
///
/// Manages a region of virtual address space, similar to Asterinas VMAR.
/// Used for process address space management.
pub struct Vmar {
    /// Base virtual address of the managed region
    base: VirtAddr,
    /// Size of the region in bytes
    size: usize,
    /// Child regions; a spinlock provides interior mutability for `&self` APIs
    children: spin::Mutex<alloc::vec::Vec<VmarChild>>,
}
480
/// A child region tracked inside a `Vmar`.
struct VmarChild {
    /// Offset from parent base; also the key used by `Vmar::dealloc`
    offset: usize,
    /// Size of the child region
    #[allow(dead_code)] // recorded now, needed once conflict detection lands
    size: usize,
    /// The actual mapping once the region is backed (currently always `None`)
    #[allow(dead_code)]
    mapping: Option<MappedPages>,
}
491
492impl Vmar {
493    /// Creates a new VMAR
494    pub fn new(base: VirtAddr, size: usize) -> Self {
495        Self {
496            base,
497            size,
498            children: spin::Mutex::new(alloc::vec![]),
499        }
500    }
501
502    /// Returns the base virtual address
503    pub fn base(&self) -> VirtAddr {
504        self.base
505    }
506
507    /// Returns the size of the region
508    pub fn size(&self) -> usize {
509        self.size
510    }
511
512    /// Allocates a new region within this VMAR
513    pub fn alloc(&self, offset: usize, size: usize, flags: MapFlags) -> Result<VirtAddr, MapError> {
514        // TODO: implement proper allocation with conflict detection
515        let vaddr = self.base.add(offset as u64);
516
517        // TODO: map the region with the given flags
518        let _ = flags; // Suppress unused warning
519
520        let mut children = self.children.lock();
521        children.push(VmarChild {
522            offset,
523            size,
524            mapping: None,
525        });
526
527        Ok(vaddr)
528    }
529
530    /// Deallocates a region within this VMAR
531    pub fn dealloc(&self, offset: usize) -> Result<(), MapError> {
532        let mut children = self.children.lock();
533        if let Some(pos) = children.iter().position(|c| c.offset == offset) {
534            children.remove(pos);
535            Ok(())
536        } else {
537            Err(MapError::InvalidAddress)
538        }
539    }
540}
541
542impl core::fmt::Debug for Vmar {
543    /// Performs the fmt operation.
544    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
545        f.debug_struct("Vmar")
546            .field("base", &self.base)
547            .field("size", &self.size)
548            .finish()
549    }
550}
551
/// Flushes the TLB entry for the given virtual address on the current CPU.
///
/// NOTE(review): the previous doc claimed this flushes "all CPUs", but the
/// body only executes a local `invlpg` — no IPI-based shootdown is performed,
/// so remote CPUs may retain stale entries. Confirm callers that need an SMP
/// flush arrange shootdown themselves.
pub fn tlb_flush_virt_addr(vaddr: VirtAddr) {
    // SAFETY: invlpg is a privileged x86-64 instruction that invalidates the
    // TLB entry for one linear address. This is safe to call in kernel mode.
    unsafe {
        core::arch::asm!(
            "invlpg [{}]",
            in(reg) vaddr.as_u64(),
            options(nostack, preserves_flags)
        );
    }
}
566
/// Flushes the entire TLB on the current CPU
///
/// This is more expensive than `tlb_flush_virt_addr` and should be used sparingly.
/// Note: reloading CR3 does not flush entries for global pages (PGE); flushing
/// those would require toggling CR4.PGE instead.
pub fn tlb_flush_all() {
    // SAFETY: writing to CR3 with the same value flushes the TLB (except global pages).
    // This is safe to call in kernel mode.
    // NOTE(review): an interrupt between the read and the write could reload a
    // stale CR3 if the interrupt handler switches address spaces — confirm this
    // is only called where that cannot happen (or with interrupts disabled).
    unsafe {
        let cr3: u64;
        core::arch::asm!(
            "mov {}, cr3",
            out(reg) cr3,
            options(nostack, preserves_flags)
        );
        core::arch::asm!(
            "mov cr3, {}",
            in(reg) cr3,
            options(nostack)
        );
    }
}