// strat9_kernel/memory/frame.rs
1// Physical frame allocator abstraction
2
3use crate::{memory::boot_alloc::BootAllocator, sync::IrqDisabledToken};
4use core::{
5    mem, ptr,
6    sync::atomic::{AtomicU32, AtomicU64, AtomicU8, Ordering},
7};
8use x86_64::PhysAddr;
9
/// Size of a physical page frame in bytes (4 KiB base page).
pub const PAGE_SIZE: u64 = 4096;
/// Required alignment of a [`FrameMeta`] entry (one cache line).
pub const FRAME_META_ALIGN: usize = 64;
/// Size of a [`FrameMeta`] entry (one cache line).
pub const FRAME_META_SIZE: usize = 64;
/// Sentinel for the `next`/`prev` links meaning "no linked frame".
pub const FRAME_META_LINK_NONE: u64 = u64::MAX;
14
/// Persistent flags stored in [`FrameMeta`].
pub mod frame_flags {
    /// The frame is allocated.
    pub const ALLOCATED: u32 = 1 << 8;
    /// The frame is free.
    pub const FREE: u32 = 1 << 9;
    /// The frame is reserved for the kernel.
    pub const KERNEL: u32 = 1 << 10;
    /// The frame belongs to user space.
    pub const USER: u32 = 1 << 11;
    /// The frame is poisoned and must not be recycled as-is.
    pub const POISONED: u32 = 1 << 12;
    /// Frame eligible for copy-on-write.
    pub const COW: u32 = 1 << 0;
    /// Shared DLL-type frame, never COW.
    pub const DLL: u32 = 1 << 1;
    /// Anonymous frame.
    pub const ANONYMOUS: u32 = 1 << 2;
}
34
/// Bytes used by the named fields of [`FrameMeta`] before the padding.
/// Keep in sync with the field list of [`FrameMeta`] below.
const FRAME_META_FIELDS_SIZE: usize = 8 + 8 + 4 + 1 + 3 + 4; // next+prev+flags+order+_reserved0+refcount
37
38
/// Intrinsic metadata for a physical frame.
/// - 64 bytes (one cache line) for efficient atomic access and to avoid false sharing.

#[repr(C, align(64))]
pub struct FrameMeta {
    // Link to the next frame in an allocator list; `FRAME_META_LINK_NONE` if none.
    pub(crate) next: AtomicU64,
    // Link to the previous frame in an allocator list; `FRAME_META_LINK_NONE` if none.
    pub(crate) prev: AtomicU64,
    // Bitmask of `frame_flags` values.
    pub(crate) flags: AtomicU32,
    // Allocation order: the frame belongs to a block of `2^order` frames.
    pub(crate) order: AtomicU8,
    // Explicit padding so `refcount` stays 4-byte aligned under `repr(C)`.
    _reserved0: [u8; 3],
    // Number of live references to this frame (e.g. COW sharers).
    pub(crate) refcount: AtomicU32,
    // Pads the struct out to exactly one cache line (checked below).
    _cacheline_pad: [u8; FRAME_META_SIZE - FRAME_META_FIELDS_SIZE],
}
52
53impl FrameMeta {
54    
55    /// Create emplty metadata ready to be initialized by the boot allocator.
56    pub const fn new() -> Self {
57        Self {
58            next: AtomicU64::new(FRAME_META_LINK_NONE),
59            prev: AtomicU64::new(FRAME_META_LINK_NONE),
60            flags: AtomicU32::new(0),
61            order: AtomicU8::new(0),    /// 
62            _reserved0: [0; 3],
63            refcount: AtomicU32::new(0),
64            _cacheline_pad: [0; FRAME_META_SIZE - FRAME_META_FIELDS_SIZE],
65        }
66    }
67
68    #[inline]
69    pub fn next(&self) -> u64 {
70        self.next.load(Ordering::Acquire)
71    }
72
73    #[inline]
74    pub fn set_next(&self, next: u64) {
75        self.next.store(next, Ordering::Release);
76    }
77
78    #[inline]
79    pub fn prev(&self) -> u64 {
80        self.prev.load(Ordering::Acquire)
81    }
82
83    #[inline]
84    pub fn set_prev(&self, prev: u64) {
85        self.prev.store(prev, Ordering::Release);
86    }
87
88    #[inline]
89    pub fn inc_ref(&self) {
90        self.refcount.fetch_add(1, Ordering::Relaxed);
91    }
92
93    #[inline]
94    pub fn dec_ref(&self) -> u32 {
95        self.refcount.fetch_sub(1, Ordering::Release)
96    }
97
98    #[inline]
99    pub fn get_refcount(&self) -> u32 {
100        self.refcount.load(Ordering::Acquire)
101    }
102
103    #[inline]
104    pub fn set_flags(&self, flags: u32) {
105        self.flags.store(flags, Ordering::Release);
106    }
107
108    #[inline]
109    pub fn get_flags(&self) -> u32 {
110        self.flags.load(Ordering::Acquire)
111    }
112
113    #[inline]
114    pub fn get_order(&self) -> u8 {
115        self.order.load(Ordering::Acquire)
116    }
117
118    #[inline]
119    pub fn set_order(&self, order: u8) {
120        self.order.store(order, Ordering::Release);
121    }
122
123    #[inline]
124    pub fn reset_refcount(&self) {
125        self.refcount.store(0, Ordering::Release);
126    }
127
128    #[inline]
129    pub fn is_cow(&self) -> bool {
130        self.get_flags() & frame_flags::COW != 0
131    }
132
133    #[inline]
134    pub fn is_dll(&self) -> bool {
135        self.get_flags() & frame_flags::DLL != 0
136    }
137}
138
// Compile-time guarantees: FrameMeta stays exactly one 64-byte cache line,
// so the metadata array arithmetic in `metadata_size_for`/`get_meta` is valid.
const _: () = {
    assert!(mem::align_of::<FrameMeta>() == FRAME_META_ALIGN);
    assert!(mem::size_of::<FrameMeta>() == FRAME_META_SIZE);
};
143
144
145/// The metadata array size for `ram_size` bytes, rounded up to the nearest page since each frame 
146/// has a dedicated metadata entry.
147
148/// @param ram_size Total RAM size to be covered by the metadata (in bytes).
149/// 
150pub const fn metadata_size_for(ram_size: u64) -> u64 {
151    let frames = (ram_size / PAGE_SIZE) + if ram_size % PAGE_SIZE == 0 { 0 } else { 1 };
152    frames * FRAME_META_SIZE as u64
153}
154
// Virtual base address of the global FrameMeta array; 0 = not initialized.
static METADATA_BASE_VIRT: AtomicU64 = AtomicU64::new(0);
// Number of FrameMeta entries in the global array.
static METADATA_FRAME_COUNT: AtomicU64 = AtomicU64::new(0);
157
/// Initialize the global metadata array for all physical frames.
///
/// Reserves one [`FrameMeta`] per page frame of `total_ram` from the boot
/// allocator, default-initializes every entry, then publishes the array via
/// the `METADATA_*` statics. With `total_ram == 0` the statics are reset and
/// nothing is allocated.
pub fn init_metadata_array(total_ram: u64, boot_alloc: &mut BootAllocator) {
    // Round up so a trailing partial page still gets a metadata entry.
    let frame_count = (total_ram / PAGE_SIZE) + if total_ram % PAGE_SIZE == 0 { 0 } else { 1 };
    if frame_count == 0 {
        METADATA_BASE_VIRT.store(0, Ordering::Release);
        METADATA_FRAME_COUNT.store(0, Ordering::Release);
        return;
    }

    let bytes = metadata_size_for(total_ram) as usize;
    let phys = boot_alloc.alloc(bytes, FRAME_META_ALIGN);
    let virt = crate::memory::phys_to_virt(phys.as_u64()) as *mut FrameMeta;

    for idx in 0..frame_count as usize {
        // SAFETY: the block was reserved by the boot allocator with an
        // alignment compatible with `FrameMeta` and a size large enough for
        // the whole array.
        unsafe {
            ptr::write(virt.add(idx), FrameMeta::new());
        }
    }

    // Publish the count before the base: readers (`get_meta`) acquire the
    // base and treat non-zero as "initialized", so the count must be visible
    // by then.
    METADATA_FRAME_COUNT.store(frame_count, Ordering::Release);
    METADATA_BASE_VIRT.store(virt as u64, Ordering::Release);
}
182
/// Get the metadata for a given physical frame.
///
/// # Panics
/// Panics if the metadata array has not been initialized, or if `phys` lies
/// beyond the RAM range covered by [`init_metadata_array`].
pub fn get_meta(phys: PhysAddr) -> &'static FrameMeta {
    let base = METADATA_BASE_VIRT.load(Ordering::Acquire);
    let frame_count = METADATA_FRAME_COUNT.load(Ordering::Acquire);
    assert!(base != 0, "frame metadata array is not initialized");

    // Page frame number doubles as the index into the metadata array.
    let pfn = phys.as_u64() / PAGE_SIZE;
    assert!(pfn < frame_count, "frame metadata access out of bounds");

    let byte_offset = pfn as usize * FRAME_META_SIZE;
    // SAFETY: the global array covers at least `frame_count` entries and
    // stays alive for the whole lifetime of the kernel.
    unsafe { &*((base as usize + byte_offset) as *const FrameMeta) }
}
197
/// Physical frame (4KB aligned physical memory)
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct PhysFrame {
    // First byte of the frame. Expected to be 4 KiB aligned; the field is
    // public, so alignment is only guaranteed when built via the constructors.
    pub start_address: PhysAddr,
}
203
204/// Performs the phys frame containing address operation.
205impl PhysFrame {
206    /// Create a PhysFrame containing the given physical address
207    pub fn containing_address(addr: PhysAddr) -> Self {
208        PhysFrame {
209            start_address: PhysAddr::new(addr.as_u64() & !0xFFF),
210        }
211    }
212
213    /// Create a PhysFrame from a 4KB-aligned address
214    pub fn from_start_address(addr: PhysAddr) -> Result<Self, ()> {
215        if addr.is_aligned(4096u64) {
216            Ok(PhysFrame {
217                start_address: addr,
218            })
219        } else {
220            Err(())
221        }
222    }
223
224    /// Create an inclusive range of frames
225    pub fn range_inclusive(start: PhysFrame, end: PhysFrame) -> FrameRangeInclusive {
226        FrameRangeInclusive { start, end }
227    }
228}
229
/// Iterator over an inclusive range of physical frames
pub struct FrameRangeInclusive {
    // Next frame to yield; advances as the iterator is consumed.
    pub start: PhysFrame,
    // Last frame to yield (inclusive).
    pub end: PhysFrame,
}
235
236/// Performs the iterator operation for FrameRangeInclusive.
237impl Iterator for FrameRangeInclusive {
238    type Item = PhysFrame;
239
240    /// Performs the next operation.
241    fn next(&mut self) -> Option<Self::Item> {
242        if self.start <= self.end {
243            let frame = self.start;
244            self.start.start_address += 4096u64;
245            Some(frame)
246        } else {
247            None
248        }
249    }
250}
251
/// Frame allocation errors returned by [`FrameAllocator`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// No memory available
    OutOfMemory,
    /// Invalid order (> MAX_ORDER)
    InvalidOrder,
    /// Invalid address alignment
    InvalidAddress,
}
262
/// Frame allocator trait
pub trait FrameAllocator {
    /// Allocate `2^order` contiguous frames.
    ///
    /// The token forbids calls from a context where the allocator's global
    /// lock could be re-entered through an interrupt.
    fn alloc(&mut self, order: u8, token: &IrqDisabledToken) -> Result<PhysFrame, AllocError>;

    /// Free `2^order` contiguous frames starting at frame.
    fn free(&mut self, frame: PhysFrame, order: u8, token: &IrqDisabledToken);

    /// Allocate a single frame (convenience method)
    fn alloc_frame(&mut self, token: &IrqDisabledToken) -> Result<PhysFrame, AllocError> {
        self.alloc(0, token)
    }
}