// strat9_kernel/memory/frame.rs

use crate::{memory::boot_alloc::BootAllocator, sync::IrqDisabledToken};
use core::{
    mem, ptr,
    sync::atomic::{AtomicU32, AtomicU64, AtomicU8, Ordering},
};
use x86_64::PhysAddr;
/// Size of a physical page frame in bytes (x86-64 4 KiB pages).
pub const PAGE_SIZE: u64 = 4096;
/// Required alignment of each `FrameMeta` entry (one cache line).
pub const FRAME_META_ALIGN: usize = 64;
/// Size of each `FrameMeta` entry in bytes; equal to the alignment so the
/// metadata array is densely packed, one cache line per frame.
pub const FRAME_META_SIZE: usize = 64;
/// Sentinel stored in `FrameMeta::next`/`prev` when a frame is not linked
/// into any list.
pub const FRAME_META_LINK_NONE: u64 = u64::MAX;
14
/// Bit flags stored in `FrameMeta::flags`.
///
/// Bits 8–12 look like lifecycle/state markers and bits 0–2 like
/// frame-kind markers — semantics follow the names; exact usage is
/// defined by the allocator code elsewhere in the crate.
pub mod frame_flags {
    // ---- state bits (byte 1) ----
    /// Bit 8: frame is allocated.
    pub const ALLOCATED: u32 = 0x0100;
    /// Bit 9: frame is on a free list.
    pub const FREE: u32 = 0x0200;
    /// Bit 10: frame belongs to the kernel.
    pub const KERNEL: u32 = 0x0400;
    /// Bit 11: frame belongs to user space.
    pub const USER: u32 = 0x0800;
    /// Bit 12: frame is poisoned.
    pub const POISONED: u32 = 0x1000;

    // ---- kind bits (byte 0) ----
    /// Bit 0: copy-on-write frame.
    pub const COW: u32 = 0x0001;
    /// Bit 1: shared-library (DLL) frame.
    pub const DLL: u32 = 0x0002;
    /// Bit 2: anonymous-mapping frame.
    pub const ANONYMOUS: u32 = 0x0004;
}
34
/// Byte size of the named fields of `FrameMeta`:
/// next (8) + prev (8) + flags (4) + order (1) + _reserved0 (3) + refcount (4).
const FRAME_META_FIELDS_SIZE: usize = 8 + 8 + 4 + 1 + 3 + 4;

/// Per-physical-frame metadata entry.
///
/// `repr(C, align(64))` pins the field order and pads each entry to one
/// 64-byte cache line (`FRAME_META_SIZE`), so entry N lives at byte offset
/// `N * 64` in the metadata array. All fields are atomics, allowing the
/// entry to be shared as `&'static FrameMeta` across CPUs.
#[repr(C, align(64))]
pub struct FrameMeta {
    // Link to another frame (FRAME_META_LINK_NONE when unlinked);
    // presumably a free-list link — exact meaning defined by the allocator.
    pub(crate) next: AtomicU64,
    // Backward link; FRAME_META_LINK_NONE when unlinked.
    pub(crate) prev: AtomicU64,
    // Bit flags from `frame_flags`.
    pub(crate) flags: AtomicU32,
    // Allocation order (cf. `FrameAllocator::alloc`'s `order` parameter).
    pub(crate) order: AtomicU8,
    // Explicit padding to keep `refcount` 4-byte aligned under repr(C).
    _reserved0: [u8; 3],
    // Reference count manipulated via inc_ref/dec_ref/reset_refcount.
    pub(crate) refcount: AtomicU32,
    // Pads the entry up to FRAME_META_SIZE so size == align == 64.
    _cacheline_pad: [u8; FRAME_META_SIZE - FRAME_META_FIELDS_SIZE],
}
52
impl FrameMeta {
    /// Returns a fresh entry: unlinked (`FRAME_META_LINK_NONE` links),
    /// no flags set, order 0, refcount 0.
    pub const fn new() -> Self {
        Self {
            next: AtomicU64::new(FRAME_META_LINK_NONE),
            prev: AtomicU64::new(FRAME_META_LINK_NONE),
            flags: AtomicU32::new(0),
            order: AtomicU8::new(0),
            _reserved0: [0; 3],
            refcount: AtomicU32::new(0),
            _cacheline_pad: [0; FRAME_META_SIZE - FRAME_META_FIELDS_SIZE],
        }
    }

    /// Loads the `next` link (Acquire).
    #[inline]
    pub fn next(&self) -> u64 {
        self.next.load(Ordering::Acquire)
    }

    /// Stores the `next` link (Release).
    #[inline]
    pub fn set_next(&self, next: u64) {
        self.next.store(next, Ordering::Release);
    }

    /// Loads the `prev` link (Acquire).
    #[inline]
    pub fn prev(&self) -> u64 {
        self.prev.load(Ordering::Acquire)
    }

    /// Stores the `prev` link (Release).
    #[inline]
    pub fn set_prev(&self, prev: u64) {
        self.prev.store(prev, Ordering::Release);
    }

    /// Increments the refcount. Relaxed ordering: taking an additional
    /// reference needs no synchronization of its own.
    #[inline]
    pub fn inc_ref(&self) {
        self.refcount.fetch_add(1, Ordering::Relaxed);
    }

    /// Decrements the refcount and returns the PREVIOUS value, so a return
    /// of 1 tells the caller it dropped the last reference.
    ///
    /// NOTE(review): this is Release-only; code that frees the frame after
    /// the final decrement usually also needs an Acquire on that path
    /// (compare `Arc::drop`'s Release decrement + acquire fence) — confirm
    /// at call sites.
    #[inline]
    pub fn dec_ref(&self) -> u32 {
        self.refcount.fetch_sub(1, Ordering::Release)
    }

    /// Loads the current refcount (Acquire).
    #[inline]
    pub fn get_refcount(&self) -> u32 {
        self.refcount.load(Ordering::Acquire)
    }

    /// Overwrites the whole flag word (Release). Not a read-modify-write:
    /// concurrent flag updates can be lost, so callers must serialize.
    #[inline]
    pub fn set_flags(&self, flags: u32) {
        self.flags.store(flags, Ordering::Release);
    }

    /// Loads the flag word (Acquire).
    #[inline]
    pub fn get_flags(&self) -> u32 {
        self.flags.load(Ordering::Acquire)
    }

    /// Loads the stored order (Acquire).
    #[inline]
    pub fn get_order(&self) -> u8 {
        self.order.load(Ordering::Acquire)
    }

    /// Stores the order (Release).
    #[inline]
    pub fn set_order(&self, order: u8) {
        self.order.store(order, Ordering::Release);
    }

    /// Resets the refcount to 0 (Release) — presumably used when a frame is
    /// recycled; verify against the allocator's free path.
    #[inline]
    pub fn reset_refcount(&self) {
        self.refcount.store(0, Ordering::Release);
    }

    /// Returns true when the `COW` flag bit is set.
    #[inline]
    pub fn is_cow(&self) -> bool {
        self.get_flags() & frame_flags::COW != 0
    }

    /// Returns true when the `DLL` flag bit is set.
    #[inline]
    pub fn is_dll(&self) -> bool {
        self.get_flags() & frame_flags::DLL != 0
    }
}
138
// Compile-time layout guards: `get_meta` indexes the metadata array as
// `pfn * FRAME_META_SIZE`, so FrameMeta must be exactly one 64-byte,
// 64-byte-aligned entry. A field change that breaks this fails the build.
const _: () = {
    assert!(mem::align_of::<FrameMeta>() == FRAME_META_ALIGN);
    assert!(mem::size_of::<FrameMeta>() == FRAME_META_SIZE);
};
143
144
145pub const fn metadata_size_for(ram_size: u64) -> u64 {
151 let frames = (ram_size / PAGE_SIZE) + if ram_size % PAGE_SIZE == 0 { 0 } else { 1 };
152 frames * FRAME_META_SIZE as u64
153}
154
// Virtual address of the first FrameMeta entry; 0 until init_metadata_array
// has published the array (get_meta asserts on this).
static METADATA_BASE_VIRT: AtomicU64 = AtomicU64::new(0);
// Number of entries in the metadata array (one per physical frame).
static METADATA_FRAME_COUNT: AtomicU64 = AtomicU64::new(0);
157
/// Allocates and initializes the global frame-metadata array covering
/// `total_ram` bytes of physical memory, using the boot allocator.
///
/// Publication order matters: the frame count is stored before the base
/// pointer, both with Release. `get_meta` gates on `base != 0` with
/// Acquire, so once it observes a non-zero base the count store is
/// guaranteed visible too.
pub fn init_metadata_array(total_ram: u64, boot_alloc: &mut BootAllocator) {
    // One entry per frame; a partial trailing page still needs an entry.
    let frame_count = (total_ram / PAGE_SIZE) + if total_ram % PAGE_SIZE == 0 { 0 } else { 1 };
    if frame_count == 0 {
        // No RAM to describe: publish an explicitly empty array.
        METADATA_BASE_VIRT.store(0, Ordering::Release);
        METADATA_FRAME_COUNT.store(0, Ordering::Release);
        return;
    }

    let bytes = metadata_size_for(total_ram) as usize;
    let phys = boot_alloc.alloc(bytes, FRAME_META_ALIGN);
    // Access the backing memory through the kernel's physical-memory mapping.
    let virt = crate::memory::phys_to_virt(phys.as_u64()) as *mut FrameMeta;

    for idx in 0..frame_count as usize {
        // SAFETY: `virt` spans `frame_count * FRAME_META_SIZE` freshly
        // allocated bytes and `idx` stays within that span; assumes the boot
        // allocator honors the requested size and FRAME_META_ALIGN alignment
        // — TODO confirm BootAllocator::alloc's contract.
        unsafe {
            ptr::write(virt.add(idx), FrameMeta::new());
        }
    }

    METADATA_FRAME_COUNT.store(frame_count, Ordering::Release);
    METADATA_BASE_VIRT.store(virt as u64, Ordering::Release);
}
182
183pub fn get_meta(phys: PhysAddr) -> &'static FrameMeta {
185 let base = METADATA_BASE_VIRT.load(Ordering::Acquire);
186 let frame_count = METADATA_FRAME_COUNT.load(Ordering::Acquire);
187 assert!(base != 0, "frame metadata array is not initialized");
188
189 let pfn = phys.as_u64() / PAGE_SIZE;
190 assert!(pfn < frame_count, "frame metadata access out of bounds");
191
192 let byte_offset = pfn as usize * FRAME_META_SIZE;
193 unsafe { &*((base as usize + byte_offset) as *const FrameMeta) }
196}
197
/// A physical page frame, identified by its (page-aligned) start address.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct PhysFrame {
    // Lowest physical address of the frame; page-aligned when constructed
    // via containing_address/from_start_address.
    pub start_address: PhysAddr,
}
203
204impl PhysFrame {
206 pub fn containing_address(addr: PhysAddr) -> Self {
208 PhysFrame {
209 start_address: PhysAddr::new(addr.as_u64() & !0xFFF),
210 }
211 }
212
213 pub fn from_start_address(addr: PhysAddr) -> Result<Self, ()> {
215 if addr.is_aligned(4096u64) {
216 Ok(PhysFrame {
217 start_address: addr,
218 })
219 } else {
220 Err(())
221 }
222 }
223
224 pub fn range_inclusive(start: PhysFrame, end: PhysFrame) -> FrameRangeInclusive {
226 FrameRangeInclusive { start, end }
227 }
228}
229
/// An inclusive range of physical frames, iterated front to back.
pub struct FrameRangeInclusive {
    // Next frame to yield; advances as the iterator runs.
    pub start: PhysFrame,
    // Last frame to yield, inclusive.
    pub end: PhysFrame,
}
235
236impl Iterator for FrameRangeInclusive {
238 type Item = PhysFrame;
239
240 fn next(&mut self) -> Option<Self::Item> {
242 if self.start <= self.end {
243 let frame = self.start;
244 self.start.start_address += 4096u64;
245 Some(frame)
246 } else {
247 None
248 }
249 }
250}
251
/// Errors returned by [`FrameAllocator`] operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// No block of the requested order is available.
    OutOfMemory,
    /// The requested order is outside the allocator's supported range.
    InvalidOrder,
    /// The supplied address does not map to a managed frame.
    InvalidAddress,
}
262
/// Physical frame allocator interface. The `order` parameter suggests a
/// buddy-style allocator (blocks of 2^order frames) — confirm against the
/// implementing type. Every operation takes an `IrqDisabledToken`,
/// presumably proving interrupts are off for the critical section.
pub trait FrameAllocator {
    /// Allocates a block of the given `order`; returns the first frame.
    fn alloc(&mut self, order: u8, token: &IrqDisabledToken) -> Result<PhysFrame, AllocError>;

    /// Returns a block previously obtained from `alloc` with the same `order`.
    fn free(&mut self, frame: PhysFrame, order: u8, token: &IrqDisabledToken);

    /// Convenience wrapper: allocates a single frame (order 0).
    fn alloc_frame(&mut self, token: &IrqDisabledToken) -> Result<PhysFrame, AllocError> {
        self.alloc(0, token)
    }
}