// Source: strat9_kernel/memory/heap.rs
// Heap allocator: slab sub-allocator + buddy fallback for large objects.
//
// Small allocations (effective size ≤ 2048 B) come from per-size-class slab
// free lists.  Each slab class draws whole pages from the buddy allocator and
// carves them into fixed-size blocks.  Freed blocks return to the slab free
// list, so the buddy's page counter stabilises after warm-up instead of
// growing on every tiny allocation.
//
// Large allocations (> 2048 B) go directly to the buddy allocator, exactly
// as before.
//
// Lock ordering: SLAB_ALLOC (outer) may call the frame-allocation helpers.
// Those helpers can hit a CPU-local cache (no global buddy lock) or fall back
// to the global buddy lock as needed.
15
16use crate::{
17    memory::{self, frame::PhysFrame, zone::MAX_ORDER},
18    sync::SpinLock,
19};
20use core::{
21    alloc::{GlobalAlloc, Layout},
22    ptr,
23};
24use x86_64::PhysAddr;
25
// ---------------------------------------------------------------------------
// Slab size classes
// ---------------------------------------------------------------------------

/// Power-of-two block sizes handled by the slab allocator.  A request is
/// served from the smallest class whose size fits the effective size.
const SLAB_SIZES: [usize; 9] = [8, 16, 32, 64, 128, 256, 512, 1024, 2048];
/// Number of slab classes (one free list per entry of `SLAB_SIZES`).
const NUM_SLABS: usize = SLAB_SIZES.len();
/// Allocations with effective size above this threshold bypass the slab and
/// go straight to the buddy allocator.
const MAX_SLAB_SIZE: usize = 2048;
35
// =============================================================================
// CRITICAL: Slab corruption detection
//
// Set HEAP_POISON_ENABLED to true during debugging of heap-corruption crashes.
// When enabled:
//   - Every block carved by refill() is filled with POISON_BYTE in bytes [8..N-4]
//     and stamped with SLAB_CANARY in the last 4 bytes.
//   - dealloc_block() restores the canary and re-poisons before linking.
//   - alloc_block() verifies poison and canary before handing the block out;
//     a mismatch is logged immediately via serial_println! (non-allocating).
//
// This detects:
//   - Use-after-free: a write to a freed slab block overwrites poison bytes.
//   - Buffer overflow: a write past the end overwrites the canary or the next
//     block's free-list pointer.
//
// Cost: one memset + canary write per alloc/dealloc for slab classes.
// =============================================================================
/// Master switch for the poison/canary machinery described above.
const HEAP_POISON_ENABLED: bool = true;
/// Byte pattern written to the body of freed slab blocks.
const POISON_BYTE: u8 = 0xDE;
/// Canary word placed at the last 4 bytes of each slab block
/// (only for classes of at least 12 bytes).
const SLAB_CANARY: u32 = 0xDEAD_BEEF;
59
// ---------------------------------------------------------------------------
// SlabState
// ---------------------------------------------------------------------------

/// Per-size-class free lists.  Each element is the head of an intrusive
/// singly-linked list stored *in* the free blocks themselves (the first
/// word of every free block holds the pointer to the next free block).
struct SlabState {
    /// `free_lists[i]` is the head of the free list for `SLAB_SIZES[i]`;
    /// null means the list is empty and must be refilled from the buddy.
    free_lists: [*mut u8; NUM_SLABS],
}
70
// SAFETY: the raw free-list pointers are only ever read or written while
// holding `SLAB_ALLOC: SpinLock<SlabState>`, which serialises all access.
unsafe impl Send for SlabState {}
unsafe impl Sync for SlabState {}
74
75impl SlabState {
76    /// Creates a new instance.
77    const fn new() -> Self {
78        SlabState {
79            free_lists: [ptr::null_mut(); NUM_SLABS],
80        }
81    }
82
83    /// Return the slab class index for `effective` bytes (already rounded up
84    /// via `max(size, align)`).  Panics if called with `effective > MAX_SLAB_SIZE`.
85    #[inline]
86    fn class_index(effective: usize) -> usize {
87        for (i, &s) in SLAB_SIZES.iter().enumerate() {
88            if effective <= s {
89                return i;
90            }
91        }
92        unreachable!("class_index called with effective > MAX_SLAB_SIZE")
93    }
94
95    /// Carve one buddy page into blocks of size `SLAB_SIZES[ci]` and prepend
96    /// them all to the free list for that class.
97    ///
98    /// Requests one order-0 frame through the memory frame helpers.
99    unsafe fn refill(&mut self, ci: usize) {
100        let slab_size = SLAB_SIZES[ci];
101
102        // Allocate one page (order-0) from the buddy allocator.
103        // SAFETY: refill is called while SLAB_ALLOC is locked; SpinLock::lock()
104        // disables IRQs via save_flags_and_cli before acquiring the lock, so
105        // IRQs are guaranteed to be disabled here.
106        let token = unsafe { crate::sync::IrqDisabledToken::new_unchecked() };
107        let frame = match memory::allocate_frame(&token) {
108            Ok(f) => f,
109            Err(_) => return, // OOM — caller will return null
110        };
111
112        let page_virt = super::phys_to_virt(frame.start_address.as_u64()) as *mut u8;
113        let num_blocks = 4096 / slab_size;
114
115        // Link all blocks into the free list (highest address first so the
116        // first block handed out is at the lowest address — irrelevant for
117        // correctness but tidy).
118        let mut head = self.free_lists[ci];
119        for i in (0..num_blocks).rev() {
120            let block = page_virt.add(i * slab_size);
121            // Store the next-free pointer in the first word of the free block.
122            *(block as *mut *mut u8) = head;
123            if HEAP_POISON_ENABLED {
124                // Poison bytes [8..slab_size-4] and place canary at the tail.
125                let end = slab_size.saturating_sub(4);
126                for off in 8..end {
127                    *block.add(off) = POISON_BYTE;
128                }
129                if slab_size >= 12 {
130                    let cp = block.add(slab_size - 4) as *mut u32;
131                    *cp = SLAB_CANARY;
132                }
133            }
134            head = block;
135        }
136        self.free_lists[ci] = head;
137    }
138
139    /// Pop a block from the free list for class `ci`, refilling from a buddy
140    /// page if the list is empty.  Returns null on OOM.
141    unsafe fn alloc_block(&mut self, ci: usize) -> *mut u8 {
142        if self.free_lists[ci].is_null() {
143            self.refill(ci);
144        }
145        let head = self.free_lists[ci];
146        if head.is_null() {
147            return ptr::null_mut();
148        }
149        // Read the next pointer stored at the start of the block.
150        let next = *(head as *const *mut u8);
151        self.free_lists[ci] = next;
152
153        if HEAP_POISON_ENABLED {
154            let slab_size = SLAB_SIZES[ci];
155            // Bytes [8..slab_size-4] should still hold POISON_BYTE.
156            // Bytes [0..8] held the free-list pointer, exempt from check.
157            let end = slab_size.saturating_sub(4);
158            let mut bad_off: Option<usize> = None;
159            for off in 8..end {
160                if *head.add(off) != POISON_BYTE {
161                    bad_off = Some(off);
162                    break;
163                }
164            }
165            if let Some(off) = bad_off {
166                let b0 = *head.add(off);
167                let b1 = if off + 1 < slab_size {
168                    *head.add(off + 1)
169                } else {
170                    0
171                };
172                let b2 = if off + 2 < slab_size {
173                    *head.add(off + 2)
174                } else {
175                    0
176                };
177                let b3 = if off + 3 < slab_size {
178                    *head.add(off + 3)
179                } else {
180                    0
181                };
182                crate::serial_println!(
183                    "\x1b[1;31m[HEAP] USE-AFTER-FREE: slab[{}] block={:#x} off={} bytes=[{:02x} {:02x} {:02x} {:02x}]\x1b[0m",
184                    slab_size, head as u64, off, b0, b1, b2, b3
185                );
186            }
187            // Verify the canary at the tail of the block.
188            if slab_size >= 12 {
189                let canary = *(head.add(slab_size - 4) as *const u32);
190                if canary != SLAB_CANARY {
191                    crate::serial_println!(
192                        "\x1b[1;31m[HEAP] CANARY OVERFLOW: slab[{}] block={:#x} expected={:#x} got={:#x}\x1b[0m",
193                        slab_size, head as u64, SLAB_CANARY, canary
194                    );
195                }
196            }
197        }
198
199        head
200    }
201
202    /// Push a block back onto the free list for class `ci`.
203    unsafe fn dealloc_block(&mut self, ptr: *mut u8, ci: usize) {
204        if HEAP_POISON_ENABLED {
205            let slab_size = SLAB_SIZES[ci];
206            // Canary at tail, then poison the body (skip the first 8 bytes
207            // which will be overwritten by the free-list pointer below).
208            if slab_size >= 12 {
209                let cp = ptr.add(slab_size - 4) as *mut u32;
210                *cp = SLAB_CANARY;
211            }
212            let end = slab_size.saturating_sub(4);
213            for off in 8..end {
214                *ptr.add(off) = POISON_BYTE;
215            }
216        }
217        // Overwrite the first word of the freed block with the current head.
218        *(ptr as *mut *mut u8) = self.free_lists[ci];
219        self.free_lists[ci] = ptr;
220    }
221}
222
/// Global slab state, serialised by a spin lock.  This is the *outer* lock in
/// the heap lock ordering; holders may call the frame-allocation helpers.
static SLAB_ALLOC: SpinLock<SlabState> = SpinLock::new(SlabState::new());
224
225/// Returns the slab lock address for deadlock tracing.
226pub fn debug_slab_lock_addr() -> usize {
227    &SLAB_ALLOC as *const _ as usize
228}
229
// ---------------------------------------------------------------------------
// GlobalAlloc implementation
// ---------------------------------------------------------------------------

/// Zero-sized front end registered as the `#[global_allocator]`; all real
/// state lives in `SLAB_ALLOC` and the buddy allocator.
pub struct LockedHeap;
235
236unsafe impl GlobalAlloc for LockedHeap {
237    /// Performs the alloc operation.
238    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
239        // Effective size must satisfy both the size and alignment requirements.
240        let effective = layout.size().max(layout.align());
241        let boot_reg = crate::silo::debug_boot_reg_active();
242        if boot_reg {
243            crate::serial_println!(
244                "[trace][heap] alloc enter effective={} size={} align={}",
245                effective,
246                layout.size(),
247                layout.align()
248            );
249        }
250
251        if effective <= MAX_SLAB_SIZE {
252            // --- slab path ---
253            let ci = SlabState::class_index(effective);
254            if boot_reg {
255                crate::serial_println!(
256                    "[trace][heap] alloc slab ci={} slab_size={} lock={:#x}",
257                    ci,
258                    SLAB_SIZES[ci],
259                    &SLAB_ALLOC as *const _ as usize
260                );
261            }
262            let mut slab = SLAB_ALLOC.lock();
263            if boot_reg {
264                crate::serial_println!("[trace][heap] alloc slab lock acquired");
265            }
266            slab.alloc_block(ci)
267        } else {
268            // --- buddy path (large allocation) ---
269            let pages_needed = (effective + 4095) / 4096;
270            let max_pages = 1usize << MAX_ORDER;
271            if pages_needed > max_pages {
272                return ptr::null_mut();
273            }
274            let order = pages_needed.next_power_of_two().trailing_zeros() as u8;
275            if boot_reg {
276                crate::serial_println!(
277                    "[trace][heap] alloc buddy order={} pages_needed={}",
278                    order,
279                    pages_needed
280                );
281            }
282
283            crate::sync::with_irqs_disabled(|token| {
284                match memory::allocate_frames(token, order) {
285                    Ok(frame) => super::phys_to_virt(frame.start_address.as_u64()) as *mut u8,
286                    Err(_) => ptr::null_mut(),
287                }
288            })
289        }
290    }
291
292    /// Performs the dealloc operation.
293    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
294        let effective = layout.size().max(layout.align());
295
296        if effective <= MAX_SLAB_SIZE {
297            // --- slab path: return block to free list ---
298            let ci = SlabState::class_index(effective);
299            let mut slab = SLAB_ALLOC.lock();
300            slab.dealloc_block(ptr, ci);
301        } else {
302            // --- buddy path: return page(s) to buddy ---
303            let pages_needed = (effective + 4095) / 4096;
304            let max_pages = 1usize << MAX_ORDER;
305            if pages_needed > max_pages {
306                return;
307            }
308            let order = pages_needed.next_power_of_two().trailing_zeros() as u8;
309
310            let hhdm = super::HHDM_OFFSET.load(core::sync::atomic::Ordering::Relaxed);
311            let phys_addr = (ptr as u64).wrapping_sub(hhdm);
312
313            if let Ok(frame) = PhysFrame::from_start_address(PhysAddr::new(phys_addr)) {
314                crate::sync::with_irqs_disabled(|token| {
315                    memory::free_frames(token, frame, order);
316                });
317            }
318        }
319    }
320}
321
/// The kernel-wide heap allocator backing all `alloc::*` containers.
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap;
324
/// Called when a heap allocation fails irrecoverably; panics with the
/// offending layout so the failure is visible on the panic path.
#[alloc_error_handler]
fn alloc_error_handler(layout: Layout) -> ! {
    panic!("allocation error: {:?}", layout)
}