use crate::{
    arch::x86_64::tlb::shootdown_range,
    memory::{
        frame::PhysFrame,
        paging::{map_page_kernel, unmap_page_kernel},
        phys_to_virt,
    },
    serial_println,
    sync::{IrqDisabledToken, SpinLock},
};
use core::{
    mem::size_of,
    panic::Location,
    ptr,
    sync::atomic::{AtomicU64, Ordering as AtomicOrdering},
};
use x86_64::{
    structures::paging::{Page, PageTableFlags, PhysFrame as X86PhysFrame},
    VirtAddr,
};

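/// Base virtual address of the kernel vmalloc arena.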
pub const VMALLOC_VIRT_START: u64 = 0xffff_c000_0000_0000;

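/// Total size of the vmalloc virtual range: 1 GiB.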
pub const VMALLOC_SIZE: usize = 1024 * 1024 * 1024;

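/// Exclusive end of the vmalloc virtual range.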
pub const VMALLOC_VIRT_END: u64 = VMALLOC_VIRT_START + VMALLOC_SIZE as u64;

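/// Number of 4 KiB pages spanned by the arena.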
const VMALLOC_PAGES: usize = VMALLOC_SIZE / 4096;

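/// First arena page handed out to callers. Page 0 is reserved for the
/// bootstrap mapping installed by `ensure_kernel_subtree_ready`.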
const ARENA_START_PAGE: usize = 1;

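/// Policy cap on a single request; currently the entire arena.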
const VMALLOC_MAX_ALLOC: usize = VMALLOC_SIZE;

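/// Owned list of the physical frames backing one allocation. The list itself
/// lives in pages obtained from the buddy allocator (`storage_frame` of order
/// `storage_order`), so it does not depend on the kernel heap.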
struct FrameList {
    ptr: *mut PhysFrame,
    len: usize,
    storage_frame: PhysFrame,
    storage_order: u8,
}

impl FrameList {
    fn new(len: usize, token: &IrqDisabledToken) -> Option<Self> {
        let bytes = len.checked_mul(size_of::<PhysFrame>())?;
        let pages_needed = bytes.saturating_add(4095) / 4096;
        // The buddy allocator hands out power-of-two page runs: round the
        // page count up to the next power of two and take its log2 as order.
        let order = if pages_needed <= 1 {
            0
        } else {
            pages_needed.next_power_of_two().trailing_zeros() as u8
        };
        let storage_frame = crate::memory::buddy::alloc(token, order).ok()?;
        let ptr = phys_to_virt(storage_frame.start_address.as_u64()) as *mut PhysFrame;
        Some(Self {
            ptr,
            len,
            storage_frame,
            storage_order: order,
        })
    }

    fn get(&self, index: usize) -> PhysFrame {
        debug_assert!(index < self.len);
        unsafe { *self.ptr.add(index) }
    }

    fn set(&mut self, index: usize, frame: PhysFrame) {
        debug_assert!(index < self.len);
        unsafe { *self.ptr.add(index) = frame };
    }

    fn free_storage(self, token: &IrqDisabledToken) {
        crate::memory::buddy::free(token, self.storage_frame, self.storage_order);
    }
}

// SAFETY: the list owns its backing storage; the raw pointer is the only
// reason the auto impl is not derived.
unsafe impl Send for FrameList {}

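/// Ways a vmalloc request can fail; the most recent failure is also kept in
/// a `VmallocFailureSnapshot` for diagnostics.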
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VmallocError {
    ZeroSize,
    SizeExceedsPolicy {
        requested: usize,
        max_allowed: usize,
    },
    MetadataAllocationFailed,
    PhysicalMemoryExhausted,
    VirtualRangeExhausted,
    KernelMapFailed,
}

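/// Sticky record of the most recent allocation failure.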
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VmallocFailureSnapshot {
    pub size: usize,
    pub pages: usize,
    pub error: VmallocError,
}

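/// Intrusive singly-linked node describing one extent of arena pages. The
/// same node type backs the free list, the allocation list, and the node pool.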
struct VmallocNode {
    start_page: usize,
    page_count: usize,
    next: *mut VmallocNode,
    frames: Option<FrameList>,
    attr: VmallocAttr,
}

// SAFETY: nodes are only reached through the `VMALLOC` lock; the raw `next`
// pointer is what blocks the auto impl.
unsafe impl Send for VmallocNode {}

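/// Allocator state: an address-ordered free list, an address-ordered list of
/// live allocations, and a pool of recycled nodes. All of it sits behind the
/// `VMALLOC` spinlock.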
struct Vmalloc {
    initialized: bool,
    subtree_ready: bool,
    arena_initialized: bool,
    bootstrap_frame: Option<PhysFrame>,
    free_head: *mut VmallocNode,
    alloc_head: *mut VmallocNode,
    node_pool_free: *mut VmallocNode,
    alloc_count: usize,
    allocated_pages: usize,
    metadata_pages: usize,
    fail_count: usize,
    last_failure: Option<VmallocFailureSnapshot>,
}

// SAFETY: the struct is only accessed through the `VMALLOC` spinlock.
unsafe impl Send for Vmalloc {}

impl Vmalloc {
    const fn new() -> Self {
        Self {
            initialized: false,
            subtree_ready: false,
            arena_initialized: false,
            bootstrap_frame: None,
            free_head: ptr::null_mut(),
            alloc_head: ptr::null_mut(),
            node_pool_free: ptr::null_mut(),
            alloc_count: 0,
            allocated_pages: 0,
            metadata_pages: 0,
            fail_count: 0,
            last_failure: None,
        }
    }

    /// Bumps the failure counter, stores a snapshot, and passes the error through.
    fn record_failure(&mut self, size: usize, pages: usize, error: VmallocError) -> VmallocError {
        self.fail_count = self.fail_count.saturating_add(1);
        self.last_failure = Some(VmallocFailureSnapshot { size, pages, error });
        error
    }

    /// Carves a fresh order-0 buddy page into `VmallocNode`s and pushes them
    /// onto the node pool.
    unsafe fn refill_node_pool(&mut self, token: &IrqDisabledToken) -> Result<(), VmallocError> {
        let frame = crate::memory::buddy::alloc(token, 0)
            .map_err(|_| self.record_failure(0, 0, VmallocError::MetadataAllocationFailed))?;
        let base = phys_to_virt(frame.start_address.as_u64()) as *mut VmallocNode;
        const _: () = assert!(
            size_of::<VmallocNode>() < 4096,
            "VmallocNode exceeds one page: refill_node_pool logic must be revised"
        );
        let nodes_per_page = 4096 / size_of::<VmallocNode>();

        for i in 0..nodes_per_page {
            let node = base.add(i);
            ptr::write(
                node,
                VmallocNode {
                    start_page: 0,
                    page_count: 0,
                    next: self.node_pool_free,
                    frames: None,
                    attr: VmallocAttr::default(),
                },
            );
            self.node_pool_free = node;
        }
        self.metadata_pages = self.metadata_pages.saturating_add(1);
        Ok(())
    }

    /// Pops a recycled node from the pool, refilling it from the buddy
    /// allocator when it runs dry, and resets all fields.
    unsafe fn alloc_node(
        &mut self,
        token: &IrqDisabledToken,
    ) -> Result<*mut VmallocNode, VmallocError> {
        if self.node_pool_free.is_null() {
            self.refill_node_pool(token)?;
        }
        let node = self.node_pool_free;
        self.node_pool_free = (*node).next;
        (*node).next = ptr::null_mut();
        (*node).start_page = 0;
        (*node).page_count = 0;
        (*node).frames = None;
        (*node).attr = VmallocAttr::default();
        Ok(node)
    }

    unsafe fn release_node(&mut self, node: *mut VmallocNode) {
        debug_assert!(
            (*node).frames.is_none(),
            "release_node: node at {:p} still has live frames (start_page={}): \
             caller must take() frames before releasing",
            node,
            (*node).start_page,
        );
        (*node).frames = None; // defensive: also clears in release builds
        (*node).start_page = 0;
        (*node).page_count = 0;
        (*node).next = self.node_pool_free;
        self.node_pool_free = node;
    }

    /// Lazily publishes the initial free extent covering the whole arena
    /// (minus the reserved bootstrap page).
    unsafe fn ensure_arena_ready(&mut self, token: &IrqDisabledToken) -> Result<(), VmallocError> {
        if self.arena_initialized {
            return Ok(());
        }
        let node = self.alloc_node(token)?;
        (*node).start_page = ARENA_START_PAGE;
        (*node).page_count = VMALLOC_PAGES - ARENA_START_PAGE;
        (*node).next = ptr::null_mut();
        (*node).frames = None;
        self.free_head = node;
        self.arena_initialized = true;
        Ok(())
    }

    /// Best-fit search over the free list. An exact fit is unlinked and
    /// returned directly; otherwise the chosen extent is split and the
    /// remainder stays on the free list.
    unsafe fn reserve_range(
        &mut self,
        pages: usize,
        token: &IrqDisabledToken,
    ) -> Result<*mut VmallocNode, VmallocError> {
        let mut best_prev = ptr::null_mut();
        let mut best = ptr::null_mut();
        let mut best_size = usize::MAX;

        let mut prev = ptr::null_mut();
        let mut cur = self.free_head;
        while !cur.is_null() {
            if (*cur).page_count >= pages && (*cur).page_count < best_size {
                best = cur;
                best_prev = prev;
                best_size = (*cur).page_count;
                if best_size == pages {
                    break;
                }
            }
            prev = cur;
            cur = (*cur).next;
        }

        if best.is_null() {
            return Err(VmallocError::VirtualRangeExhausted);
        }

        if (*best).page_count == pages {
            // Exact fit: unlink the node and hand it over as-is.
            let next = (*best).next;
            if best_prev.is_null() {
                self.free_head = next;
            } else {
                (*best_prev).next = next;
            }
            (*best).next = ptr::null_mut();
            return Ok(best);
        }

        // Split: carve the allocation off the front of the extent.
        let alloc = self.alloc_node(token)?;
        (*alloc).start_page = (*best).start_page;
        (*alloc).page_count = pages;
        (*alloc).next = ptr::null_mut();
        (*alloc).frames = None;

        (*best).start_page = (*best).start_page.saturating_add(pages);
        (*best).page_count = (*best).page_count.saturating_sub(pages);
        Ok(alloc)
    }

    /// Inserts into the allocation list, keeping it sorted by start page.
    unsafe fn insert_alloc_node(&mut self, node: *mut VmallocNode) {
        let mut prev: *mut VmallocNode = ptr::null_mut();
        let mut cur = self.alloc_head;
        while !cur.is_null() && (*cur).start_page < (*node).start_page {
            prev = cur;
            cur = (*cur).next;
        }
        (*node).next = cur;
        if prev.is_null() {
            self.alloc_head = node;
        } else {
            (*prev).next = node;
        }
    }

    /// Unlinks and returns the allocation node whose base virtual address is
    /// exactly `addr`, or null if none matches. The list is address-sorted,
    /// so the scan stops early once past `addr`.
    unsafe fn take_alloc_node_by_addr(&mut self, addr: u64) -> *mut VmallocNode {
        let mut prev: *mut VmallocNode = ptr::null_mut();
        let mut cur = self.alloc_head;
        while !cur.is_null() {
            let cur_addr = VMALLOC_VIRT_START + ((*cur).start_page as u64 * 4096);
            if cur_addr == addr {
                let next = (*cur).next;
                if prev.is_null() {
                    self.alloc_head = next;
                } else {
                    (*prev).next = next;
                }
                (*cur).next = ptr::null_mut();
                return cur;
            }
            if cur_addr > addr {
                break;
            }
            prev = cur;
            cur = (*cur).next;
        }
        ptr::null_mut()
    }

    /// Inserts a node into the address-ordered free list and coalesces it
    /// with its neighbors wherever the extents are contiguous.
    unsafe fn insert_free_node_merge(&mut self, node: *mut VmallocNode) {
        debug_assert!((*node).frames.is_none());

        let mut prev: *mut VmallocNode = ptr::null_mut();
        let mut cur = self.free_head;
        while !cur.is_null() && (*cur).start_page < (*node).start_page {
            prev = cur;
            cur = (*cur).next;
        }

        (*node).next = cur;
        if prev.is_null() {
            self.free_head = node;
        } else {
            (*prev).next = node;
        }

        // Merge backward into `prev`, then keep merging forward while the
        // next extent is adjacent.
        let mut merged = node;
        if !prev.is_null() && (*prev).start_page + (*prev).page_count == (*node).start_page {
            (*prev).page_count = (*prev).page_count.saturating_add((*node).page_count);
            (*prev).next = (*node).next;
            self.release_node(node);
            merged = prev;
        }

        while !(*merged).next.is_null() {
            let next = (*merged).next;
            if (*merged).start_page + (*merged).page_count != (*next).start_page {
                break;
            }
            (*merged).page_count = (*merged).page_count.saturating_add((*next).page_count);
            (*merged).next = (*next).next;
            self.release_node(next);
        }
    }

    unsafe fn free_extent_count(&self) -> usize {
        let mut count = 0usize;
        let mut cur = self.free_head;
        while !cur.is_null() {
            count = count.saturating_add(1);
            cur = (*cur).next;
        }
        count
    }

    unsafe fn largest_free_extent_pages(&self) -> usize {
        let mut largest = 0usize;
        let mut cur = self.free_head;
        while !cur.is_null() {
            largest = largest.max((*cur).page_count);
            cur = (*cur).next;
        }
        largest
    }

    unsafe fn node_pool_free_count(&self) -> usize {
        let mut count = 0usize;
        let mut cur = self.node_pool_free;
        while !cur.is_null() {
            count = count.saturating_add(1);
            cur = (*cur).next;
        }
        count
    }
}

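/// Global allocator instance; all mutable state lives behind this lock.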
static VMALLOC: SpinLock<Vmalloc> = SpinLock::new(Vmalloc::new());

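/// Requests rejected by policy (zero size or over the cap) before any
/// allocation work is attempted.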
pub static VMALLOC_POLICY_REJECT_COUNT: AtomicU64 = AtomicU64::new(0);

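/// Monotonically increasing sequence number stamped on each successful allocation.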
pub static VMALLOC_ALLOC_SEQ: AtomicU64 = AtomicU64::new(0);

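/// High-water mark of simultaneously allocated pages.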
pub static VMALLOC_PEAK_PAGES: AtomicU64 = AtomicU64::new(0);

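/// Provenance recorded with each allocation: the owning task (when known)
/// and the `#[track_caller]` call site.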
#[derive(Clone, Copy, Debug, Default)]
pub struct VmallocAttr {
    pub task_id: u64,
    pub pid: u32,
    pub tid: u32,
    pub silo_id: u32,
    pub size: usize,
    pub alloc_seq: u64,
    pub caller_file: &'static str,
    pub caller_line: u32,
    pub caller_column: u32,
}

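/// Snapshots the current task's identity, falling back to zeros when no task
/// context is available, and records the caller location.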
fn capture_attr(size: usize, caller: &'static Location<'static>) -> VmallocAttr {
    let (task_id, pid, tid, silo_id) = match crate::process::current_task_clone_try() {
        Some(task) => {
            let task_id = task.id.as_u64();
            let pid = task.pid;
            let tid = task.tid;
            let silo_id = crate::silo::try_silo_id_for_task(task.id).unwrap_or(0);
            (task_id, pid, tid, silo_id)
        }
        None => (0, 0, 0, 0),
    };

    VmallocAttr {
        task_id,
        pid,
        tid,
        silo_id,
        size,
        alloc_seq: 0,
        caller_file: caller.file(),
        caller_line: caller.line(),
        caller_column: caller.column(),
    }
}

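/// Backend that produced an allocation; only kernel virtual memory exists today.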
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VmallocAllocBackend {
    KernelVirtual,
}

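/// Copyable view of one live allocation, as reported by `live_allocations_snapshot`.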
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VmallocLiveAllocationSnapshot {
    pub seq: u64,
    pub task_id: u64,
    pub pid: u32,
    pub tid: u32,
    pub silo_id: u32,
    pub size: usize,
    pub pages: usize,
    pub vaddr: u64,
    pub backend: VmallocAllocBackend,
    pub caller_file: &'static str,
    pub caller_line: u32,
    pub caller_column: u32,
}

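/// Point-in-time counters for the whole allocator, returned by `diag_snapshot`.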
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VmallocDiagSnapshot {
    pub arena_start: u64,
    pub arena_end: u64,
    pub alloc_count: usize,
    pub allocated_pages: usize,
    pub free_pages: usize,
    pub peak_pages: u64,
    pub total_seq: u64,
    pub fail_count: usize,
    pub policy_rejects: u64,
    pub free_extent_count: usize,
    pub largest_free_pages: usize,
    pub metadata_pages: usize,
    pub node_pool_free: usize,
    pub last_failure: Option<VmallocFailureSnapshot>,
}

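/// Maps a single bootstrap page at `VMALLOC_VIRT_START` so the kernel
/// page-table subtree covering the arena exists before first use. This is
/// the page that `ARENA_START_PAGE` skips.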
fn ensure_kernel_subtree_ready(token: &IrqDisabledToken) {
    let mut guard = VMALLOC.lock();
    if guard.subtree_ready {
        return;
    }

    let Ok(frame) = crate::memory::allocate_frame(token) else {
        serial_println!("[vmalloc] bootstrap: failed to allocate bootstrap frame");
        return;
    };

    let page = Page::containing_address(VirtAddr::new(VMALLOC_VIRT_START));
    let x86_frame = X86PhysFrame::containing_address(frame.start_address);
    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;

    if map_page_kernel(page, x86_frame, flags).is_ok() {
        guard.bootstrap_frame = Some(frame);
        guard.subtree_ready = true;
    } else {
        crate::memory::free_frame(token, frame);
        serial_println!("[vmalloc] bootstrap: failed to map bootstrap page");
    }
}

fn ensure_init() {
    let mut guard = VMALLOC.lock();
    if guard.initialized {
        return;
    }
    guard.initialized = true;
    serial_println!(
        "[vmalloc] initialized: VA=0x{:x}..0x{:x} ({} pages, {} MiB)",
        VMALLOC_VIRT_START,
        VMALLOC_VIRT_END,
        VMALLOC_PAGES,
        VMALLOC_SIZE / (1024 * 1024)
    );
}

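/// One-time setup: logs the arena layout, installs the bootstrap mapping,
/// and seeds the free list. Every step is idempotent, so calling this more
/// than once is harmless.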
pub fn init() {
    ensure_init();
    crate::sync::with_irqs_disabled(|token| {
        ensure_kernel_subtree_ready(token);
        let mut guard = VMALLOC.lock();
        if let Err(e) = unsafe { guard.ensure_arena_ready(token) } {
            serial_println!(
                "[vmalloc] init: ensure_arena_ready failed ({:?}); vmalloc will retry on first use",
                e
            );
        }
    });
}

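/// Returns a snapshot of the most recent allocation failure, if any.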
pub fn last_failure_snapshot() -> Option<VmallocFailureSnapshot> {
    let guard = VMALLOC.lock();
    guard.last_failure
}

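/// Builds a point-in-time `VmallocDiagSnapshot`; returns `None` before the
/// allocator has been initialized.
///
/// Illustrative usage (a sketch; which fields to report is the caller's choice):
///
/// ```ignore
/// if let Some(diag) = diag_snapshot() {
///     serial_println!("[vmalloc] {} live pages", diag.allocated_pages);
/// }
/// ```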
pub fn diag_snapshot() -> Option<VmallocDiagSnapshot> {
    let guard = VMALLOC.lock();
    let vm = &*guard;
    if !vm.initialized {
        return None;
    }

    let (free_extents, largest_free, node_pool_free) = unsafe {
        (
            vm.free_extent_count(),
            vm.largest_free_extent_pages(),
            vm.node_pool_free_count(),
        )
    };

    Some(VmallocDiagSnapshot {
        arena_start: VMALLOC_VIRT_START,
        arena_end: VMALLOC_VIRT_END,
        alloc_count: vm.alloc_count,
        allocated_pages: vm.allocated_pages,
        free_pages: (VMALLOC_PAGES - ARENA_START_PAGE).saturating_sub(vm.allocated_pages),
        peak_pages: VMALLOC_PEAK_PAGES.load(AtomicOrdering::Relaxed),
        total_seq: VMALLOC_ALLOC_SEQ.load(AtomicOrdering::Relaxed),
        fail_count: vm.fail_count,
        policy_rejects: VMALLOC_POLICY_REJECT_COUNT.load(AtomicOrdering::Relaxed),
        free_extent_count: free_extents,
        largest_free_pages: largest_free,
        metadata_pages: vm.metadata_pages,
        node_pool_free,
        last_failure: vm.last_failure,
    })
}

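/// Allocates `size` bytes of page-granular kernel virtual memory backed by
/// individually allocated physical frames, mapped writable and non-executable.
/// Frames are acquired before the allocator lock is taken, and every failure
/// path releases everything it acquired.
///
/// Illustrative usage (a sketch, not a doctest; assumes a kernel context
/// where interrupts can be disabled):
///
/// ```ignore
/// crate::sync::with_irqs_disabled(|token| {
///     let ptr = vmalloc(2 * 4096, token).expect("vmalloc failed");
///     // ... use the two-page mapping ...
///     assert!(vfree(ptr, token));
/// });
/// ```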
#[track_caller]
pub(crate) fn vmalloc(size: usize, token: &IrqDisabledToken) -> Result<*mut u8, VmallocError> {
    if size == 0 {
        VMALLOC_POLICY_REJECT_COUNT.fetch_add(1, AtomicOrdering::Relaxed);
        return Err(VmallocError::ZeroSize);
    }
    if size > VMALLOC_MAX_ALLOC {
        VMALLOC_POLICY_REJECT_COUNT.fetch_add(1, AtomicOrdering::Relaxed);
        return Err(VmallocError::SizeExceedsPolicy {
            requested: size,
            max_allowed: VMALLOC_MAX_ALLOC,
        });
    }

    let mut attr = capture_attr(size, Location::caller());

    ensure_init();

    let pages = (size + 4095) / 4096;
    let page_flags =
        PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;

    // Allocate the frame-list metadata and all backing frames before taking
    // the allocator lock; each failure path below rolls back what was done.
    let mut frames = match FrameList::new(pages, token) {
        Some(frames) => frames,
        None => {
            let mut guard = VMALLOC.lock();
            return Err(guard.record_failure(size, pages, VmallocError::MetadataAllocationFailed));
        }
    };

    for i in 0..pages {
        match crate::memory::allocate_frame(token) {
            Ok(frame) => frames.set(i, frame),
            Err(_) => {
                for j in 0..i {
                    crate::memory::free_frame(token, frames.get(j));
                }
                frames.free_storage(token);
                let mut guard = VMALLOC.lock();
                return Err(guard.record_failure(
                    size,
                    pages,
                    VmallocError::PhysicalMemoryExhausted,
                ));
            }
        }
    }

    let mut guard = VMALLOC.lock();
    let vm = &mut *guard;
    unsafe {
        if let Err(error) = vm.ensure_arena_ready(token) {
            for i in 0..pages {
                crate::memory::free_frame(token, frames.get(i));
            }
            frames.free_storage(token);
            return Err(vm.record_failure(size, pages, error));
        }

        let alloc_node = match vm.reserve_range(pages, token) {
            Ok(node) => node,
            Err(error) => {
                for i in 0..pages {
                    crate::memory::free_frame(token, frames.get(i));
                }
                frames.free_storage(token);
                return Err(vm.record_failure(size, pages, error));
            }
        };

        let virt_base = VMALLOC_VIRT_START + ((*alloc_node).start_page as u64 * 4096);

        for i in 0..pages {
            let frame = frames.get(i);
            let page_virt = virt_base + (i as u64 * 4096);
            let page = Page::containing_address(VirtAddr::new(page_virt));
            let x86_frame = X86PhysFrame::containing_address(frame.start_address);
            if map_page_kernel(page, x86_frame, page_flags).is_err() {
                // Unmap the pages mapped so far. No TLB shootdown is issued
                // here: the mapping never escaped this function.
                for j in 0..i {
                    let pv = virt_base + (j as u64 * 4096);
                    let pg = Page::containing_address(VirtAddr::new(pv));
                    let _ = unmap_page_kernel(pg);
                }
                (*alloc_node).frames = None;
                vm.insert_free_node_merge(alloc_node);
                for j in 0..pages {
                    crate::memory::free_frame(token, frames.get(j));
                }
                frames.free_storage(token);
                return Err(vm.record_failure(size, pages, VmallocError::KernelMapFailed));
            }
        }

        (*alloc_node).frames = Some(frames);
        attr.alloc_seq = VMALLOC_ALLOC_SEQ.fetch_add(1, AtomicOrdering::Relaxed);
        (*alloc_node).attr = attr;
        vm.insert_alloc_node(alloc_node);
        vm.alloc_count = vm.alloc_count.saturating_add(1);
        vm.allocated_pages = vm.allocated_pages.saturating_add(pages);
        vm.last_failure = None;

        // Publish a new high-water mark with a CAS loop; Relaxed ordering
        // suffices for a statistics-only counter.
        let current_pages = vm.allocated_pages as u64;
        let mut peak = VMALLOC_PEAK_PAGES.load(AtomicOrdering::Relaxed);
        while current_pages > peak {
            match VMALLOC_PEAK_PAGES.compare_exchange_weak(
                peak,
                current_pages,
                AtomicOrdering::Relaxed,
                AtomicOrdering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => peak = p,
            }
        }

        Ok(virt_base as *mut u8)
    }
}

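/// Releases a mapping previously returned by `vmalloc`. Accepts null as a
/// no-op and returns `false` for addresses outside the arena or not matching
/// an allocation's base address.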
pub fn vfree(ptr: *mut u8, token: &IrqDisabledToken) -> bool {
    if ptr.is_null() {
        return true;
    }

    let addr = ptr as u64;
    if addr < VMALLOC_VIRT_START || addr >= VMALLOC_VIRT_END {
        return false;
    }

    let (frames, range_start, range_end) = {
        let mut guard = VMALLOC.lock();
        let vm = &mut *guard;

        unsafe {
            let node = vm.take_alloc_node_by_addr(addr);
            if node.is_null() {
                serial_println!("[vmalloc] vfree: no allocation record for 0x{:x}", addr);
                return false;
            }

            let page_count = (*node).page_count;
            let virt_start = VMALLOC_VIRT_START + ((*node).start_page as u64 * 4096);
            let frames = (*node)
                .frames
                .take()
                .expect("vfree: allocation node missing its frame list");

            for i in 0..page_count {
                let page_start = virt_start + (i as u64 * 4096);
                let page = Page::containing_address(VirtAddr::new(page_start));
                let _ = unmap_page_kernel(page);
            }

            let range_start = VirtAddr::new(virt_start);
            let range_end = VirtAddr::new(virt_start + (page_count as u64 * 4096));

            vm.alloc_count = vm.alloc_count.saturating_sub(1);
            vm.allocated_pages = vm.allocated_pages.saturating_sub(page_count);
            vm.insert_free_node_merge(node);
            (frames, range_start, range_end)
        }
    };

    // Shoot down stale TLB entries after the allocator lock is released,
    // then return the backing frames.
    shootdown_range(range_start, range_end);

    for i in 0..frames.len {
        crate::memory::free_frame(token, frames.get(i));
    }
    frames.free_storage(token);
    true
}

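/// Logs every live allocation (up to an internal cap of 256 snapshot
/// entries) followed by aggregate totals.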
pub fn dump_live_allocations() {
    const MAX_SNAPSHOT: usize = 256;
    let mut snapshot = [VmallocLiveAllocationSnapshot {
        seq: 0,
        task_id: 0,
        pid: 0,
        tid: 0,
        silo_id: 0,
        size: 0,
        pages: 0,
        vaddr: 0,
        backend: VmallocAllocBackend::KernelVirtual,
        caller_file: "",
        caller_line: 0,
        caller_column: 0,
    }; MAX_SNAPSHOT];
    let count = live_allocations_snapshot(&mut snapshot);
    if count == 0 {
        let guard = VMALLOC.lock();
        if !guard.initialized {
            serial_println!("[vmalloc][live] not initialized");
            return;
        }
    }

    for entry in snapshot.iter().take(count) {
        serial_println!(
            "[vmalloc][live] seq={} backend={:?} task={} pid={} tid={} silo={} size={} pages={} vaddr=0x{:x} caller={}:{}:{}",
            entry.seq,
            entry.backend,
            entry.task_id,
            entry.pid,
            entry.tid,
            entry.silo_id,
            entry.size,
            entry.pages,
            entry.vaddr,
            entry.caller_file,
            entry.caller_line,
            entry.caller_column,
        );
    }

    let peak = VMALLOC_PEAK_PAGES.load(AtomicOrdering::Relaxed);
    let guard = VMALLOC.lock();
    let live_count = guard.alloc_count;
    let live_pages = guard.allocated_pages;
    serial_println!(
        "[vmalloc][live] total: {} allocs, {} pages ({} KiB), peak_pages={}",
        live_count,
        live_pages,
        live_pages.saturating_mul(4),
        peak,
    );
    if live_count > count {
        serial_println!(
            "[vmalloc][live] snapshot truncated: {} additional allocations not shown",
            live_count - count,
        );
    }
}

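/// Copies live-allocation records into `out`, ordered by base address, and
/// returns how many were written; returns 0 before initialization.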
pub fn live_allocations_snapshot(out: &mut [VmallocLiveAllocationSnapshot]) -> usize {
    let guard = VMALLOC.lock();
    let vm = &*guard;
    if !vm.initialized {
        return 0;
    }

    let mut count = 0usize;
    let mut cur = vm.alloc_head;
    while !cur.is_null() && count < out.len() {
        let node = unsafe { &*cur };
        out[count] = VmallocLiveAllocationSnapshot {
            seq: node.attr.alloc_seq,
            task_id: node.attr.task_id,
            pid: node.attr.pid,
            tid: node.attr.tid,
            silo_id: node.attr.silo_id,
            size: node.attr.size,
            pages: node.page_count,
            vaddr: VMALLOC_VIRT_START + (node.start_page as u64 * 4096),
            backend: VmallocAllocBackend::KernelVirtual,
            caller_file: node.attr.caller_file,
            caller_line: node.attr.caller_line,
            caller_column: node.attr.caller_column,
        };
        count += 1;
        cur = node.next;
    }
    count
}

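/// Reports whether `ptr` is the base address of a live allocation. Uses
/// `try_lock`, so it never blocks; returns `None` when the lock is contended
/// and `Some(false)` for addresses outside the arena.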
pub fn is_live_allocation(ptr: *mut u8) -> Option<bool> {
    let addr = ptr as u64;
    if addr < VMALLOC_VIRT_START || addr >= VMALLOC_VIRT_END {
        return Some(false);
    }

    let guard = VMALLOC.try_lock()?;
    let vm = &*guard;
    if !vm.initialized {
        return Some(false);
    }

    let start_page = ((addr - VMALLOC_VIRT_START) / 4096) as usize;
    let mut cur = vm.alloc_head;
    while !cur.is_null() {
        let node = unsafe { &*cur };
        if node.start_page == start_page {
            return Some(true);
        }
        if node.start_page > start_page {
            break;
        }
        cur = node.next;
    }

    Some(false)
}

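/// Logs arena-wide counters and the last failure, then dumps the live
/// allocation list when anything is outstanding.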
pub fn dump_diagnostics() {
    let guard = VMALLOC.lock();
    let vm = &*guard;
    if !vm.initialized {
        serial_println!("[vmalloc][diag] not initialized");
        return;
    }

    let policy_rejects = VMALLOC_POLICY_REJECT_COUNT.load(AtomicOrdering::Relaxed);
    let peak_pages = VMALLOC_PEAK_PAGES.load(AtomicOrdering::Relaxed);
    let total_seq = VMALLOC_ALLOC_SEQ.load(AtomicOrdering::Relaxed);
    let (free_extents, largest_free, node_pool_free) = unsafe {
        (
            vm.free_extent_count(),
            vm.largest_free_extent_pages(),
            vm.node_pool_free_count(),
        )
    };
    serial_println!(
        "[vmalloc][diag] arena=0x{:x}..0x{:x} allocs={} alloc_pages={} free_pages={} \
         peak_pages={} total_seq={} fails={} policy_rejects={}",
        VMALLOC_VIRT_START,
        VMALLOC_VIRT_END,
        vm.alloc_count,
        vm.allocated_pages,
        (VMALLOC_PAGES - ARENA_START_PAGE).saturating_sub(vm.allocated_pages),
        peak_pages,
        total_seq,
        vm.fail_count,
        policy_rejects
    );
    serial_println!(
        "[vmalloc][diag] extents={} largest_free_pages={} metadata_pages={} node_pool_free={}",
        free_extents,
        largest_free,
        vm.metadata_pages,
        node_pool_free
    );
    if let Some(last) = vm.last_failure {
        serial_println!(
            "[vmalloc][diag] last_failure: size={} pages={} error={:?}",
            last.size,
            last.pages,
            last.error
        );
    }
    if vm.alloc_count > 0 {
        // Release the lock first: dump_live_allocations re-acquires it.
        drop(guard);
        dump_live_allocations();
    }
}