// strat9_kernel/memory/boot_alloc.rs
use crate::{
    boot::entry::{MemoryKind, MemoryRegion},
    memory::phys_to_virt,
    sync::SpinLock,
};
use x86_64::PhysAddr;
9
/// Granularity of all boot-time allocations: 4 KiB pages.
const PAGE_SIZE: u64 = 4096;
/// Fixed capacity of the boot allocator's free-region table.
pub const MAX_BOOT_ALLOC_REGIONS: usize = 128;
/// Length of the array returned by `protected_module_ranges` — must stay in
/// sync with the number of entries listed there.
pub const MAX_PROTECTED_RANGES: usize = 17;
13
/// Half-open physical address range `[start, end)` tracked by the boot
/// allocator.
#[derive(Clone, Copy)]
struct BootRegion {
    start: u64,
    end: u64,
}

impl BootRegion {
    /// Zero-length placeholder used to fill unused table slots.
    const fn empty() -> Self {
        BootRegion { start: 0, end: 0 }
    }

    /// True when the range covers no bytes (including inverted ranges).
    #[inline]
    const fn is_empty(&self) -> bool {
        self.end <= self.start
    }
}
30
/// Boot-time physical memory allocator backed by a fixed-size table of free
/// regions. Allocation scans the table first-fit and carves pages out of the
/// matching region.
pub struct BootAllocator {
    // Free physical ranges; only the first `len` entries are live.
    regions: [BootRegion; MAX_BOOT_ALLOC_REGIONS],
    // Number of live entries in `regions`.
    len: usize,
}
35
36impl BootAllocator {
37 pub const fn new() -> Self {
38 Self {
39 regions: [BootRegion::empty(); MAX_BOOT_ALLOC_REGIONS],
40 len: 0,
41 }
42 }
43
44 pub fn init(&mut self, regions: &[MemoryRegion]) {
45 self.reset();
46
47 for region in regions {
48 if !matches!(region.kind, MemoryKind::Free | MemoryKind::Reclaim) {
49 continue;
50 }
51
52 let start = align_up(region.base, PAGE_SIZE);
53 let end = align_down(region.base.saturating_add(region.size), PAGE_SIZE);
54 if start >= end {
55 continue;
56 }
57
58 self.push_region(BootRegion { start, end });
59 }
60
61 for (base, size) in protected_module_ranges().into_iter().flatten() {
62 if size == 0 {
63 continue;
64 }
65 self.exclude_range(
66 align_down(base, PAGE_SIZE),
67 align_up(base.saturating_add(size), PAGE_SIZE),
68 );
69 }
70 }
71
72 pub fn alloc(&mut self, size: usize, align: usize) -> PhysAddr {
73 self.try_alloc(size, align).unwrap_or_else(|| {
74 panic!(
75 "boot allocator: out of physical memory for size={} align={}",
76 size, align
77 )
78 })
79 }
80
81 pub fn try_alloc(&mut self, size: usize, align: usize) -> Option<PhysAddr> {
82 if size == 0 {
83 return Some(PhysAddr::new(0));
84 }
85
86 let align = normalize_align(align) as u64;
87 let size = align_up(size as u64, PAGE_SIZE);
88
89 for idx in 0..self.len {
90 let region = self.regions[idx];
91 if region.is_empty() {
92 continue;
93 }
94
95 let alloc_start = align_up(region.start, align);
96 let alloc_end = alloc_start.checked_add(size)?;
97 if alloc_end > region.end {
98 continue;
99 }
100
101 self.consume_region(idx, alloc_start, alloc_end);
102 return Some(PhysAddr::new(alloc_start));
103 }
104
105 None
106 }
107
108 pub fn snapshot_free_regions(&self, out: &mut [MemoryRegion]) -> usize {
109 let count = core::cmp::min(self.len, out.len());
110 for (dst, region) in out.iter_mut().zip(self.regions.iter()).take(count) {
111 *dst = MemoryRegion {
112 base: region.start,
113 size: region.end.saturating_sub(region.start),
114 kind: MemoryKind::Free,
115 };
116 }
117 count
118 }
119
120 fn reset(&mut self) {
121 self.regions = [BootRegion::empty(); MAX_BOOT_ALLOC_REGIONS];
122 self.len = 0;
123 }
124
125 fn push_region(&mut self, region: BootRegion) {
126 if region.is_empty() || self.len >= self.regions.len() {
127 return;
128 }
129 self.regions[self.len] = region;
130 self.len += 1;
131 }
132
133 fn exclude_range(&mut self, exclude_start: u64, exclude_end: u64) {
134 if exclude_start >= exclude_end {
135 return;
136 }
137
138 let mut idx = 0usize;
139 while idx < self.len {
140 let region = self.regions[idx];
141 if exclude_end <= region.start || exclude_start >= region.end {
142 idx += 1;
143 continue;
144 }
145
146 if exclude_start <= region.start && exclude_end >= region.end {
147 self.remove_region(idx);
148 continue;
149 }
150
151 if exclude_start <= region.start {
152 self.regions[idx].start = exclude_end.min(region.end);
153 idx += 1;
154 continue;
155 }
156
157 if exclude_end >= region.end {
158 self.regions[idx].end = exclude_start.max(region.start);
159 idx += 1;
160 continue;
161 }
162
163 let right = BootRegion {
164 start: exclude_end,
165 end: region.end,
166 };
167 self.regions[idx].end = exclude_start;
168 if self.len < self.regions.len() {
169 self.insert_region(idx + 1, right);
170 }
171 idx += 2;
172 }
173 }
174
175 fn consume_region(&mut self, idx: usize, alloc_start: u64, alloc_end: u64) {
176 let region = self.regions[idx];
177
178 if alloc_start <= region.start && alloc_end >= region.end {
179 self.remove_region(idx);
180 return;
181 }
182
183 if alloc_start <= region.start {
184 self.regions[idx].start = alloc_end;
185 return;
186 }
187
188 if alloc_end >= region.end {
189 self.regions[idx].end = alloc_start;
190 return;
191 }
192
193 let right = BootRegion {
194 start: alloc_end,
195 end: region.end,
196 };
197 self.regions[idx].end = alloc_start;
198 if self.len < self.regions.len() {
199 self.insert_region(idx + 1, right);
200 }
201 }
202
203 fn insert_region(&mut self, idx: usize, region: BootRegion) {
204 if region.is_empty() || self.len >= self.regions.len() {
205 return;
206 }
207
208 for slot in (idx..self.len).rev() {
209 self.regions[slot + 1] = self.regions[slot];
210 }
211 self.regions[idx] = region;
212 self.len += 1;
213 }
214
215 fn remove_region(&mut self, idx: usize) {
216 if idx >= self.len {
217 return;
218 }
219 for slot in idx..self.len.saturating_sub(1) {
220 self.regions[slot] = self.regions[slot + 1];
221 }
222 if self.len != 0 {
223 self.len -= 1;
224 self.regions[self.len] = BootRegion::empty();
225 }
226 }
227}
228
/// Global boot-time physical allocator, guarded by a spin lock.
static BOOT_ALLOCATOR: SpinLock<BootAllocator> = SpinLock::new(BootAllocator::new());
230
231pub fn init_boot_allocator(regions: &[MemoryRegion]) {
232 BOOT_ALLOCATOR.lock().init(regions);
233}
234
/// Exposes the global allocator lock for callers that need to hold it across
/// multiple operations.
pub fn get_boot_allocator() -> &'static SpinLock<BootAllocator> {
    &BOOT_ALLOCATOR
}
238
239pub fn alloc_bytes(size: usize, align: usize) -> Option<PhysAddr> {
240 BOOT_ALLOCATOR.lock().try_alloc(size, align)
241}
242
243pub fn snapshot_free_regions(out: &mut [MemoryRegion]) -> usize {
244 BOOT_ALLOCATOR.lock().snapshot_free_regions(out)
245}
246
247pub fn seal() {
254 BOOT_ALLOCATOR.lock().reset();
255}
256
257pub fn alloc_stack(size: usize) -> Option<u64> {
258 let phys = alloc_bytes(size, PAGE_SIZE as usize)?;
259 let span = align_up(size as u64, PAGE_SIZE);
260 Some(phys_to_virt(phys.as_u64()).saturating_add(span))
261}
262
/// Physical `(base, size)` ranges of every boot module that the boot
/// allocator must never hand out. An entry is `None` when the bootloader did
/// not provide that module.
///
/// NOTE: the array length must match `MAX_PROTECTED_RANGES`; adding a module
/// here requires bumping that constant.
pub(crate) fn protected_module_ranges() -> [Option<(u64, u64)>; MAX_PROTECTED_RANGES] {
    [
        crate::boot::limine::fs_ext4_module(),
        crate::boot::limine::strate_fs_ramfs_module(),
        crate::boot::limine::init_module(),
        crate::boot::limine::console_admin_module(),
        crate::boot::limine::strate_net_module(),
        crate::boot::limine::strate_bus_module(),
        crate::boot::limine::dhcp_client_module(),
        crate::boot::limine::ping_module(),
        crate::boot::limine::telnetd_module(),
        crate::boot::limine::udp_tool_module(),
        crate::boot::limine::strate_wasm_module(),
        crate::boot::limine::strate_webrtc_module(),
        crate::boot::limine::hello_wasm_module(),
        crate::boot::limine::wasm_test_toml_module(),
        crate::boot::limine::test_syscalls_module(),
        crate::boot::limine::test_mem_module(),
        crate::boot::limine::test_mem_stressed_module(),
    ]
}
284
285#[inline]
286const fn normalize_align(align: usize) -> usize {
287 let align = if align == 0 { 1 } else { align };
288 let align = if align < PAGE_SIZE as usize {
289 PAGE_SIZE as usize
290 } else {
291 align
292 };
293 if align.is_power_of_two() {
294 align
295 } else {
296 align.next_power_of_two()
297 }
298}
299
/// Rounds `value` up to the next multiple of `align` (which must be a power
/// of two).
///
/// Saturates to `u64::MAX` when the aligned result would not fit in a `u64`.
/// The previous `(value + align - 1)` overflowed in that case — panicking in
/// debug builds and wrapping to 0 in release builds, where a wrapped `start`
/// of 0 could admit a bogus free region. Saturating instead guarantees
/// callers' `start >= end` range checks reject the result.
#[inline]
const fn align_up(value: u64, align: u64) -> u64 {
    let mask = align - 1;
    match value.checked_add(mask) {
        Some(sum) => sum & !mask,
        None => u64::MAX,
    }
}
304
/// Rounds `value` down to the previous multiple of `align` (which must be a
/// power of two).
#[inline]
const fn align_down(value: u64, align: u64) -> u64 {
    let mask = align - 1;
    value & !mask
}