use crate::{
    boot::entry::{MemoryKind, MemoryRegion},
    memory::phys_to_virt,
    serial_println,
    sync::SpinLock,
};
use x86_64::PhysAddr;

const PAGE_SIZE: u64 = 4096;
pub const MAX_BOOT_ALLOC_REGIONS: usize = 512;
pub const MAX_PROTECTED_RANGES: usize = 32;

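/// A half-open `[start, end)` range of physical memory tracked by the boot
/// allocator. A region with `start >= end` is considered empty.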
#[derive(Clone, Copy)]
struct BootRegion {
    start: u64,
    end: u64,
}

impl BootRegion {
    const fn empty() -> Self {
        Self { start: 0, end: 0 }
    }

    #[inline]
    const fn is_empty(&self) -> bool {
        self.start >= self.end
    }
}

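/// Early-boot physical memory allocator that hands out page-granular ranges
/// from a fixed-size list of free regions built from the bootloader memory map.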
pub struct BootAllocator {
    regions: [BootRegion; MAX_BOOT_ALLOC_REGIONS],
    len: usize,
    accessible_limit: u64,
}

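/// Snapshot of the allocator's free-space bookkeeping, used for diagnostics
/// and failure logging.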
#[derive(Clone, Copy, Debug, Default)]
pub struct BootAllocStats {
    pub region_count: usize,
    pub total_free_bytes: u64,
    pub largest_region_bytes: u64,
    pub accessible_limit: u64,
}

impl BootAllocator {
    pub const fn new() -> Self {
        Self {
            regions: [BootRegion::empty(); MAX_BOOT_ALLOC_REGIONS],
            len: 0,
            accessible_limit: 0,
        }
    }

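    /// Rebuilds the free-region list from the bootloader memory map: only
    /// `Free` and `Reclaim` regions are kept, trimmed to page boundaries,
    /// with all registered protected ranges carved out.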
    pub fn init(&mut self, regions: &[MemoryRegion]) {
        self.reset();

        for region in regions {
            if !matches!(region.kind, MemoryKind::Free | MemoryKind::Reclaim) {
                continue;
            }

            let start = align_up(region.base, PAGE_SIZE);
            let end = align_down(region.base.saturating_add(region.size), PAGE_SIZE);
            if start >= end {
                continue;
            }

            self.push_region(BootRegion { start, end });
        }

        self.normalize_regions();

        for (base, size) in protected_ranges_snapshot().into_iter().flatten() {
            if size == 0 {
                continue;
            }
            self.exclude_range(
                align_down(base, PAGE_SIZE),
                align_up(base.saturating_add(size), PAGE_SIZE),
            );
        }

        self.normalize_regions();
        self.rebuild_accessible_limit();
    }

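    /// Allocates physical memory, panicking if no region can satisfy the
    /// request. Prefer [`try_alloc`](Self::try_alloc) when failure is
    /// recoverable.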
    pub fn alloc(&mut self, size: usize, align: usize) -> PhysAddr {
        self.try_alloc(size, align).unwrap_or_else(|| {
            panic!(
                "boot allocator: out of physical memory for size={} align={}",
                size, align
            )
        })
    }

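    /// Attempts a first-fit allocation. The size is rounded up to whole pages
    /// and the alignment is normalized to at least one page; returns `None`
    /// (after logging the allocator state) if no region is large enough.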
    pub fn try_alloc(&mut self, size: usize, align: usize) -> Option<PhysAddr> {
        if size == 0 {
            return Some(PhysAddr::new(0));
        }

        let align = normalize_align(align) as u64;
        let size = align_up(size as u64, PAGE_SIZE);

        for idx in 0..self.len {
            let region = self.regions[idx];
            if region.is_empty() {
                continue;
            }

            let alloc_start = align_up(region.start, align);
            let alloc_end = alloc_start.checked_add(size)?;
            if alloc_end > region.end {
                continue;
            }

            self.consume_region(idx, alloc_start, alloc_end);
            return Some(PhysAddr::new(alloc_start));
        }

        let stats = self.stats();
        serial_println!(
            "[boot_alloc] try_alloc failed: requested={} aligned={} largest_region={} regions={} total_free={}",
            size as usize,
            align as usize,
            stats.largest_region_bytes as usize,
            stats.region_count,
            stats.total_free_bytes as usize
        );
        None
    }

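    /// Like [`try_alloc`](Self::try_alloc), but only returns memory that is
    /// already reachable through the higher-half direct map (HHDM): the
    /// candidate range must lie below `accessible_limit` and be mapped right
    /// now according to `is_hhdm_range_mapped_now`.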
    pub fn try_alloc_accessible(&mut self, size: usize, align: usize) -> Option<PhysAddr> {
        if size == 0 {
            return Some(PhysAddr::new(0));
        }

        let align = normalize_align(align) as u64;
        let size = align_up(size as u64, PAGE_SIZE);

        for idx in 0..self.len {
            let region = self.regions[idx];
            if region.is_empty() {
                continue;
            }

            let alloc_start = align_up(region.start, align);
            let alloc_end = alloc_start.checked_add(size)?;
            if alloc_end > region.end {
                continue;
            }

            if self.accessible_limit != 0 && alloc_end > self.accessible_limit {
                continue;
            }

            if !crate::memory::paging::is_hhdm_range_mapped_now(alloc_start, size) {
                continue;
            }

            self.consume_region(idx, alloc_start, alloc_end);
            return Some(PhysAddr::new(alloc_start));
        }

        let stats = self.stats();
        serial_println!(
            "[boot_alloc] try_alloc_accessible failed: requested={} aligned={} largest_region={} regions={} total_free={} accessible_limit={:#x}",
            size as usize,
            align as usize,
            stats.largest_region_bytes as usize,
            stats.region_count,
            stats.total_free_bytes as usize,
            stats.accessible_limit
        );
        None
    }

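    /// Copies the current free regions into `out` as `MemoryKind::Free`
    /// entries and returns how many were written (at most `out.len()`).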
    pub fn snapshot_free_regions(&self, out: &mut [MemoryRegion]) -> usize {
        let count = core::cmp::min(self.len, out.len());
        for (dst, region) in out.iter_mut().zip(self.regions.iter()).take(count) {
            *dst = MemoryRegion {
                base: region.start,
                size: region.end.saturating_sub(region.start),
                kind: MemoryKind::Free,
            };
        }
        count
    }

    fn reset(&mut self) {
        self.regions = [BootRegion::empty(); MAX_BOOT_ALLOC_REGIONS];
        self.len = 0;
        self.accessible_limit = 0;
    }

    fn push_region(&mut self, region: BootRegion) {
        if region.is_empty() || self.len >= self.regions.len() {
            return;
        }
        self.regions[self.len] = region;
        self.len += 1;
    }

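    /// Removes `[exclude_start, exclude_end)` from every free region,
    /// trimming overlapping regions and splitting a region in two when the
    /// excluded range falls strictly inside it.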
    fn exclude_range(&mut self, exclude_start: u64, exclude_end: u64) {
        if exclude_start >= exclude_end {
            return;
        }

        let mut idx = 0usize;
        while idx < self.len {
            let region = self.regions[idx];
            if exclude_end <= region.start || exclude_start >= region.end {
                idx += 1;
                continue;
            }

            if exclude_start <= region.start && exclude_end >= region.end {
                self.remove_region(idx);
                continue;
            }

            if exclude_start <= region.start {
                self.regions[idx].start = exclude_end.min(region.end);
                idx += 1;
                continue;
            }

            if exclude_end >= region.end {
                self.regions[idx].end = exclude_start.max(region.start);
                idx += 1;
                continue;
            }

            let right = BootRegion {
                start: exclude_end,
                end: region.end,
            };
            self.regions[idx].end = exclude_start;
            if self.len < self.regions.len() {
                self.insert_region(idx + 1, right);
            }
            idx += 2;
        }

        self.normalize_regions();
    }

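    /// Marks `[alloc_start, alloc_end)` within region `idx` as allocated,
    /// shrinking the region or splitting it into two remainders, then
    /// refreshes the HHDM-accessible limit.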
    fn consume_region(&mut self, idx: usize, alloc_start: u64, alloc_end: u64) {
        let region = self.regions[idx];

        if alloc_start <= region.start && alloc_end >= region.end {
            self.remove_region(idx);
            self.rebuild_accessible_limit();
            return;
        }

        if alloc_start <= region.start {
            self.regions[idx].start = alloc_end;
            self.rebuild_accessible_limit();
            return;
        }

        if alloc_end >= region.end {
            self.regions[idx].end = alloc_start;
            self.rebuild_accessible_limit();
            return;
        }

        let right = BootRegion {
            start: alloc_end,
            end: region.end,
        };
        self.regions[idx].end = alloc_start;
        if self.len < self.regions.len() {
            self.insert_region(idx + 1, right);
        }

        self.normalize_regions();
        self.rebuild_accessible_limit();
    }

    fn insert_region(&mut self, idx: usize, region: BootRegion) {
        if region.is_empty() || self.len >= self.regions.len() {
            return;
        }

        for slot in (idx..self.len).rev() {
            self.regions[slot + 1] = self.regions[slot];
        }
        self.regions[idx] = region;
        self.len += 1;
    }

    fn remove_region(&mut self, idx: usize) {
        if idx >= self.len {
            return;
        }
        for slot in idx..self.len.saturating_sub(1) {
            self.regions[slot] = self.regions[slot + 1];
        }
        if self.len != 0 {
            self.len -= 1;
            self.regions[self.len] = BootRegion::empty();
        }
    }

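    /// Sorts the regions by start address (insertion sort, since the array is
    /// small and usually nearly sorted), then merges overlapping or touching
    /// regions and drops empty ones.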
    fn normalize_regions(&mut self) {
        if self.len <= 1 {
            self.rebuild_accessible_limit();
            return;
        }

        for i in 1..self.len {
            let cur = self.regions[i];
            let mut j = i;
            while j > 0 && self.regions[j - 1].start > cur.start {
                self.regions[j] = self.regions[j - 1];
                j -= 1;
            }
            self.regions[j] = cur;
        }

        let mut write = 0usize;
        for read in 0..self.len {
            let cur = self.regions[read];
            if cur.is_empty() {
                continue;
            }
            if write == 0 {
                self.regions[write] = cur;
                write += 1;
                continue;
            }
            let prev = self.regions[write - 1];
            if cur.start <= prev.end {
                self.regions[write - 1].end = prev.end.max(cur.end);
            } else {
                self.regions[write] = cur;
                write += 1;
            }
        }

        for slot in write..self.regions.len() {
            self.regions[slot] = BootRegion::empty();
        }
        self.len = write;
        self.rebuild_accessible_limit();
    }

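    /// Recomputes `accessible_limit` as the highest physical address covered
    /// by an HHDM-mapped prefix of any free region.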
    fn rebuild_accessible_limit(&mut self) {
        let mut limit = 0u64;
        for region in self.regions.iter().take(self.len).copied() {
            limit = limit.max(self.accessible_prefix_end(region));
        }
        self.accessible_limit = limit;
    }

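    /// Binary-searches for the largest page count at the start of `region`
    /// that is currently mapped through the HHDM and returns the physical
    /// address just past that prefix.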
    fn accessible_prefix_end(&self, region: BootRegion) -> u64 {
        if region.is_empty() {
            return 0;
        }

        let total_pages = ((region.end - region.start) / PAGE_SIZE) as usize;
        if total_pages == 0 {
            return 0;
        }

        let mut low = 0usize;
        let mut high = total_pages;
        while low < high {
            let mid = (low + high).div_ceil(2);
            let size = mid as u64 * PAGE_SIZE;
            if crate::memory::paging::is_hhdm_range_mapped_now(region.start, size) {
                low = mid;
            } else {
                high = mid - 1;
            }
        }

        region.start + low as u64 * PAGE_SIZE
    }

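    /// Returns the region count, total and largest free sizes, and the
    /// current HHDM-accessible limit.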
    pub fn stats(&self) -> BootAllocStats {
        let mut total = 0u64;
        let mut largest = 0u64;
        for region in self.regions.iter().take(self.len) {
            let size = region.end.saturating_sub(region.start);
            total = total.saturating_add(size);
            largest = largest.max(size);
        }
        BootAllocStats {
            region_count: self.len,
            total_free_bytes: total,
            largest_region_bytes: largest,
            accessible_limit: self.accessible_limit,
        }
    }
}

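// Global boot allocator plus the physical ranges that `init` must keep out of
// the free list (registered via `set_protected_ranges`).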
static BOOT_ALLOCATOR: SpinLock<BootAllocator> = SpinLock::new(BootAllocator::new());
static PROTECTED_RANGES: SpinLock<[Option<(u64, u64)>; MAX_PROTECTED_RANGES]> =
    SpinLock::new([None; MAX_PROTECTED_RANGES]);

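/// Initializes the global boot allocator from the bootloader memory map.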
pub fn init_boot_allocator(regions: &[MemoryRegion]) {
    BOOT_ALLOCATOR.lock().init(regions);
}

pub fn get_boot_allocator() -> &'static SpinLock<BootAllocator> {
    &BOOT_ALLOCATOR
}

pub fn boot_allocator_stats() -> BootAllocStats {
    BOOT_ALLOCATOR.lock().stats()
}

pub fn alloc_bytes(size: usize, align: usize) -> Option<PhysAddr> {
    BOOT_ALLOCATOR.lock().try_alloc(size, align)
}

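/// Like [`alloc_bytes`], but only returns memory that is currently reachable
/// through the higher-half direct map (HHDM).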
pub fn alloc_bytes_accessible(size: usize, align: usize) -> Option<PhysAddr> {
    BOOT_ALLOCATOR.lock().try_alloc_accessible(size, align)
}

pub fn snapshot_free_regions(out: &mut [MemoryRegion]) -> usize {
    BOOT_ALLOCATOR.lock().snapshot_free_regions(out)
}

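/// Retires the boot allocator by clearing its free-region list, so it no
/// longer hands out physical memory.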
pub fn seal() {
    BOOT_ALLOCATOR.lock().reset();
}

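/// Allocates a page-aligned stack of at least `size` bytes and returns the
/// virtual address of its top (stacks grow downward), or `None` if the
/// allocation fails.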
pub fn alloc_stack(size: usize) -> Option<u64> {
    let phys = alloc_bytes(size, PAGE_SIZE as usize)?;
    let span = align_up(size as u64, PAGE_SIZE);
    Some(phys_to_virt(phys.as_u64()).saturating_add(span))
}

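/// Records `(base, size)` ranges that the allocator must never hand out; they
/// are carved out of the free list the next time `init` runs.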
pub fn set_protected_ranges(ranges: &[Option<(u64, u64)>]) {
    let mut protected = PROTECTED_RANGES.lock();
    *protected = [None; MAX_PROTECTED_RANGES];
    for (dst, src) in protected.iter_mut().zip(ranges.iter().copied()) {
        *dst = src;
    }
}

pub fn reset_protected_ranges() {
    *PROTECTED_RANGES.lock() = [None; MAX_PROTECTED_RANGES];
}

pub(crate) fn protected_ranges_snapshot() -> [Option<(u64, u64)>; MAX_PROTECTED_RANGES] {
    *PROTECTED_RANGES.lock()
}

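/// Clamps the requested alignment to at least one page and rounds it up to a
/// power of two.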
#[inline]
const fn normalize_align(align: usize) -> usize {
    let align = if align == 0 { 1 } else { align };
    let align = if align < PAGE_SIZE as usize {
        PAGE_SIZE as usize
    } else {
        align
    };
    if align.is_power_of_two() {
        align
    } else {
        align.next_power_of_two()
    }
}

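// `align` must be a nonzero power of two for these bit-masking helpers.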
#[inline]
const fn align_up(value: u64, align: u64) -> u64 {
    (value + align - 1) & !(align - 1)
}

#[inline]
const fn align_down(value: u64, align: u64) -> u64 {
    value & !(align - 1)
}