1use super::{pic, tss};
7use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
8use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
9
10#[allow(dead_code)]
12pub mod irq {
13 pub const TIMER: u8 = super::pic::PIC1_OFFSET; pub const KEYBOARD: u8 = super::pic::PIC1_OFFSET + 1; pub const CASCADE: u8 = super::pic::PIC1_OFFSET + 2; pub const MOUSE: u8 = super::pic::PIC1_OFFSET + 12; pub const COM2: u8 = super::pic::PIC1_OFFSET + 3; pub const COM1: u8 = super::pic::PIC1_OFFSET + 4; pub const FLOPPY: u8 = super::pic::PIC1_OFFSET + 6; pub const ATA_PRIMARY: u8 = super::pic::PIC1_OFFSET + 14; pub const ATA_SECONDARY: u8 = super::pic::PIC1_OFFSET + 15; }
23
// The single system-wide IDT. Mutation of this `static mut` is only sound
// while IDT_STORAGE_LOCK is held (see lock_idt_storage/unlock_idt_storage).
static mut IDT_STORAGE: InterruptDescriptorTable = InterruptDescriptorTable::new();
// Spinlock flag serializing IDT mutation and IDTR reloads across CPUs.
static IDT_STORAGE_LOCK: AtomicBool = AtomicBool::new(false);
// Remaining user-mode page faults that will emit a trace event; decremented
// per fault in page_fault_handler to rate-limit the trace stream.
static USER_PF_TRACE_BUDGET: AtomicU32 = AtomicU32::new(64);
28
29#[inline]
30fn lock_idt_storage() {
31 while IDT_STORAGE_LOCK
32 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
33 .is_err()
34 {
35 core::hint::spin_loop();
36 }
37}
38
#[inline]
fn unlock_idt_storage() {
    // Release pairs with the Acquire in lock_idt_storage(), publishing any
    // IDT modifications made while the lock was held.
    IDT_STORAGE_LOCK.store(false, Ordering::Release);
}
43
/// Build the IDT — exception handlers, legacy-IRQ handlers, the spurious
/// vector and IPI vectors — then load it on the calling CPU.
///
/// The double-fault handler is the only entry given a dedicated IST stack,
/// so it can still run after a kernel stack overflow.
pub fn init() {
    lock_idt_storage();
    unsafe {
        // Raw pointer avoids materializing a `&mut` to a `static mut`.
        let idt = &raw mut IDT_STORAGE;

        // CPU exception vectors.
        (*idt).breakpoint.set_handler_fn(breakpoint_handler);
        (*idt).page_fault.set_handler_fn(page_fault_handler);
        (*idt)
            .general_protection_fault
            .set_handler_fn(general_protection_fault_handler);
        (*idt)
            .stack_segment_fault
            .set_handler_fn(stack_segment_fault_handler);
        (*idt)
            .non_maskable_interrupt
            .set_handler_fn(non_maskable_interrupt_handler);
        (*idt).invalid_opcode.set_handler_fn(invalid_opcode_handler);
        // Double fault runs on its own IST stack (survives stack overflow).
        (*idt)
            .double_fault
            .set_handler_fn(double_fault_handler)
            .set_stack_index(tss::DOUBLE_FAULT_IST_INDEX);

        // Hardware-interrupt vectors (PIC-remapped legacy IRQ lines).
        let idt_ref = &mut *idt;
        idt_ref[irq::TIMER as u8].set_handler_fn(legacy_timer_handler);
        idt_ref[irq::KEYBOARD as u8].set_handler_fn(keyboard_handler);
        idt_ref[irq::MOUSE as u8].set_handler_fn(mouse_handler);

        // APIC spurious-interrupt vector (0xFF).
        idt_ref[0xFF_u8].set_handler_fn(spurious_handler);

        // Inter-processor interrupts: reschedule + TLB shootdown.
        idt_ref[super::apic::IPI_RESCHED_VECTOR as u8].set_handler_fn(resched_ipi_handler);

        idt_ref[super::apic::IPI_TLB_SHOOTDOWN_VECTOR as u8].set_handler_fn(tlb_shootdown_handler);

        (*idt).load_unsafe();
    }
    unlock_idt_storage();

    log::debug!("IDT initialized with {} entries", 256);
}
88
89pub fn load() {
90 lock_idt_storage();
91 unsafe {
92 let idt = &raw const IDT_STORAGE;
93 (*idt).load_unsafe();
94 }
95 unlock_idt_storage();
96}
97
/// Install the LAPIC timer handler on `vector` and reload the IDT so the
/// change takes effect on the calling CPU immediately.
pub fn register_lapic_timer_vector(vector: u8) {
    lock_idt_storage();
    unsafe {
        let idt = &raw mut IDT_STORAGE;
        (&mut *idt)[vector].set_handler_fn(lapic_timer_handler);
        (*idt).load_unsafe();
    }
    unlock_idt_storage();
}
108
109pub fn register_ahci_irq(irq: u8) {
113 let vector = if irq < 16 {
114 super::pic::PIC1_OFFSET + irq
115 } else {
116 irq
117 };
118
119 lock_idt_storage();
120 unsafe {
121 let idt = &raw mut IDT_STORAGE;
122 (&mut *idt)[vector].set_handler_fn(ahci_handler);
123 (*idt).load_unsafe();
124 }
125 unlock_idt_storage();
126 log::info!("AHCI IRQ {} registered on vector {:#x}", irq, vector);
127}
128
129pub fn register_virtio_block_irq(irq: u8) {
134 let vector = if irq < 16 {
137 super::pic::PIC1_OFFSET + irq
138 } else {
139 irq
140 };
141
142 lock_idt_storage();
143 unsafe {
144 let idt = &raw mut IDT_STORAGE;
145 (&mut *idt)[vector].set_handler_fn(virtio_block_handler);
146 (*idt).load_unsafe();
147 }
148 unlock_idt_storage();
149 log::info!("VirtIO-blk IRQ {} registered on vector {:#x}", irq, vector);
150}
151
/// #BP (int3): logged and execution resumes — used for debugging, never fatal.
extern "x86-interrupt" fn breakpoint_handler(stack_frame: InterruptStackFrame) {
    log::warn!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
160
161extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: InterruptStackFrame) {
163 let is_user = (stack_frame.code_segment.0 & 3) == 3;
164 if is_user {
165 if let Some(tid) = crate::process::current_task_id() {
166 crate::silo::handle_user_fault(
167 tid,
168 crate::silo::SiloFaultReason::InvalidOpcode,
169 stack_frame.instruction_pointer.as_u64(),
170 0,
171 stack_frame.instruction_pointer.as_u64(),
172 );
173 return;
174 }
175 }
176 log::error!("EXCEPTION: INVALID OPCODE\n{:#?}", stack_frame);
177 panic!("Invalid opcode");
178}
179
/// NMI: the CPU is parked permanently with interrupts disabled.
/// NOTE(review): no diagnostics are captured before halting — consider
/// logging the stack frame here; confirm this is the intended policy.
extern "x86-interrupt" fn non_maskable_interrupt_handler(_stack_frame: InterruptStackFrame) {
    crate::arch::x86_64::cli();
    loop {
        crate::arch::x86_64::hlt();
    }
}
186
/// CPU #PF (vector 14) handler.
///
/// Resolution pipeline, in order:
///   1. Emit a page-fault trace event (user faults are rate-limited by
///      `USER_PF_TRACE_BUDGET`).
///   2. User write-protection faults: try copy-on-write resolution.
///   3. Remaining user faults: try demand paging via
///      `AddressSpace::handle_fault`.
///   4. Still-unresolved user faults: hand the thread to the silo fault
///      handler (kills/handles the offending task) and return.
///   5. Everything else (kernel faults, or no current task): full
///      diagnostic dump followed by a panic.
extern "x86-interrupt" fn page_fault_handler(
    stack_frame: InterruptStackFrame,
    error_code: PageFaultErrorCode,
) {
    use x86_64::registers::control::{Cr2, Cr3};
    // CS RPL == 3 → the fault came from ring 3 (user mode).
    let is_user = (stack_frame.code_segment.0 & 3) == 3;

    // CR2 holds the faulting linear address; Err for non-canonical values.
    let fault_addr = Cr2::read();
    let fault_vaddr = fault_addr.as_ref().map(|v| v.as_u64()).unwrap_or(0);
    let rip = stack_frame.instruction_pointer.as_u64();
    let user_rsp = stack_frame.stack_pointer.as_u64();

    // Capture task identity for trace events (user faults only).
    let mut trace_ctx = crate::trace::TraceTaskCtx::empty();
    if is_user {
        if let Some(task) = crate::process::current_task_clone() {
            let as_ref = unsafe { &*task.process.address_space.get() };
            trace_ctx = crate::trace::TraceTaskCtx {
                task_id: task.id.as_u64(),
                pid: task.pid,
                tid: task.tid,
                cr3: as_ref.cr3().as_u64(),
            };
        }
    }

    // Rate-limit user-fault tracing: decrement the budget until it reaches
    // zero, after which user faults are no longer traced. Kernel faults are
    // always traced.
    let do_pf_trace = if is_user {
        USER_PF_TRACE_BUDGET
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
                if v > 0 {
                    Some(v - 1)
                } else {
                    None
                }
            })
            .is_ok()
    } else {
        true
    };
    if do_pf_trace {
        crate::trace_mem!(
            crate::trace::category::MEM_PF,
            crate::trace::TraceKind::MemPageFault,
            error_code.bits() as u64,
            trace_ctx,
            rip,
            fault_vaddr,
            user_rsp,
            0
        );
    }

    // Stage 2: user write to a present-but-protected page — candidate for
    // copy-on-write resolution.
    if error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION)
        && error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE)
        && is_user
    {
        if let Some(task) = crate::process::current_task_clone() {
            let address_space = unsafe { &*task.process.address_space.get() };
            if let Ok(vaddr) = fault_addr {
                match crate::syscall::fork::handle_cow_fault(vaddr.as_u64(), address_space) {
                    Ok(()) => {
                        // COW resolved; trace success and resume the task.
                        crate::trace_mem!(
                            crate::trace::category::MEM_COW,
                            crate::trace::TraceKind::MemCow,
                            1,
                            trace_ctx,
                            rip,
                            vaddr.as_u64(),
                            0,
                            0
                        );
                        return;
                    }
                    Err(reason) => {
                        // COW failed; trace failure and fall through to the
                        // demand-paging / silo stages below.
                        crate::trace_mem!(
                            crate::trace::category::MEM_COW,
                            crate::trace::TraceKind::MemCow,
                            0,
                            trace_ctx,
                            rip,
                            vaddr.as_u64(),
                            0,
                            0
                        );
                        crate::serial_println!(
                            "\x1b[31m[pagefault] COW resolve failed\x1b[0m: task={} \x1b[36mpid={}\x1b[0m tid={} \x1b[35maddr={:#x}\x1b[0m \x1b[35mrip={:#x}\x1b[0m err={}",
                            task.id.as_u64(),
                            task.pid,
                            task.tid,
                            vaddr.as_u64(),
                            stack_frame.instruction_pointer.as_u64(),
                            reason
                        );
                    }
                }
            }
        }
    }

    // Stage 3: demand paging — let the task's AddressSpace try to map the
    // faulting page (lazy allocation, etc.).
    if is_user {
        if let Some(task) = crate::process::current_task_clone() {
            let address_space = unsafe { &*task.process.address_space.get() };
            if let Ok(vaddr) = fault_addr {
                crate::serial_force_println!(
                    "\x1b[33m[pagefault] USER fault\x1b[0m: tid={} rip={:#x} addr={:#x} err={:#x}",
                    task.tid,
                    rip,
                    vaddr.as_u64(),
                    error_code.bits()
                );

                match address_space.handle_fault(vaddr.as_u64()) {
                    Ok(()) => {
                        crate::serial_force_println!(
                            "\x1b[32m[pagefault] USER fault resolved\x1b[0m: tid={} addr={:#x}",
                            task.tid,
                            vaddr.as_u64()
                        );
                        return;
                    }
                    Err(e) => {
                        crate::serial_force_println!(
                            "\x1b[31m[pagefault] USER fault resolution FAILED\x1b[0m: tid={} addr={:#x} err={:?}",
                            task.tid,
                            vaddr.as_u64(),
                            e
                        );
                        dump_user_pf_context(address_space, rip, user_rsp);
                    }
                }
            }
        }
    }

    // Stage 4: unresolved user fault — delegate to the silo fault handler
    // rather than panicking the kernel.
    if is_user {
        if let Some(tid) = crate::process::current_task_id() {
            crate::silo::handle_user_fault(
                tid,
                crate::silo::SiloFaultReason::PageFault,
                fault_addr.map(|v| v.as_u64()).unwrap_or(0),
                error_code.bits() as u64,
                stack_frame.instruction_pointer.as_u64(),
            );
            return;
        }
    }

    // Stage 5: kernel fault (or user fault with no current task) — dump
    // everything and panic. dump_page_fault_full never returns.
    let task_snap = crate::process::scheduler::current_task_clone_try();
    dump_page_fault_full(&stack_frame, error_code, fault_addr, &task_snap);
}
342
343fn decode_error_code(ec: PageFaultErrorCode) -> &'static str {
359 let p = ec.contains(PageFaultErrorCode::PROTECTION_VIOLATION);
360 let w = ec.contains(PageFaultErrorCode::CAUSED_BY_WRITE);
361 let u = ec.contains(PageFaultErrorCode::USER_MODE);
362 match (p, w, u) {
363 (false, false, false) => "kernel read of non-present page",
364 (false, true, false) => "kernel write to non-present page",
365 (false, false, true) => "user read of non-present page",
366 (false, true, true) => "user write to non-present page",
367 (true, false, false) => "kernel read protection violation",
368 (true, true, false) => "kernel write protection violation (COW / RO page)",
369 (true, false, true) => "user read protection violation (NX / supervisor-only)",
370 (true, true, true) => "user write protection violation (COW / RO page)",
371 }
372}
373
/// Render the architectural flag bits of a page-table entry as a
/// '|'-separated list (e.g. "P|RW|US") into a fixed, space-padded buffer.
/// The trailing separator, if any, is blanked out.
fn format_pte_flags(entry: u64) -> [u8; 32] {
    const FLAGS: [(&str, u64); 10] = [
        ("P", 1 << 0),
        ("RW", 1 << 1),
        ("US", 1 << 2),
        ("PWT", 1 << 3),
        ("PCD", 1 << 4),
        ("A", 1 << 5),
        ("D", 1 << 6),
        ("PS", 1 << 7),
        ("G", 1 << 8),
        ("NX", 1 << 63),
    ];
    let mut out = [b' '; 32];
    let mut n = 0usize;
    for (name, bit) in FLAGS {
        if entry & bit == 0 {
            continue;
        }
        // Append the flag name, then a separator; silently truncate at 32.
        for &ch in name.as_bytes() {
            if n < out.len() {
                out[n] = ch;
                n += 1;
            }
        }
        if n < out.len() {
            out[n] = b'|';
            n += 1;
        }
    }
    // Blank the dangling separator after the last flag.
    if n > 0 && out[n - 1] == b'|' {
        out[n - 1] = b' ';
    }
    out
}
409
/// Walk the 4-level page tables rooted at `cr3_phys` by hand, reading each
/// table through the HHDM (`hhdm` = higher-half direct-map offset), and
/// return the physical address backing `vaddr`, or `None` if any level is
/// not present. Handles 1 GiB (PDPT.PS) and 2 MiB (PD.PS) huge pages.
///
/// Used by the fault-dump path so translation works even when the normal
/// AddressSpace machinery cannot be trusted.
fn translate_via_raw_pt(vaddr: u64, cr3_phys: u64, hhdm: u64) -> Option<u64> {
    // SAFETY (review): assumes cr3_phys points at a valid PML4 and that the
    // HHDM covers all page-table frames — holds for this kernel's layout.
    unsafe {
        // Level 4 (PML4): bits 47..39 of the virtual address.
        let l4_ptr = (cr3_phys + hhdm) as *const u64;
        let l4e = *l4_ptr.add(((vaddr >> 39) & 0x1FF) as usize);
        if l4e & 1 == 0 {
            return None;
        }

        // Level 3 (PDPT): bits 38..30. Bit 7 (PS) => 1 GiB huge page.
        let l3_ptr = ((l4e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l3e = *l3_ptr.add(((vaddr >> 30) & 0x1FF) as usize);
        if l3e & 1 == 0 {
            return None;
        }
        if l3e & 0x80 != 0 {
            return Some((l3e & 0x000F_FFFF_C000_0000) + (vaddr & 0x3FFF_FFFF));
        }

        // Level 2 (PD): bits 29..21. Bit 7 (PS) => 2 MiB huge page.
        let l2_ptr = ((l3e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l2e = *l2_ptr.add(((vaddr >> 21) & 0x1FF) as usize);
        if l2e & 1 == 0 {
            return None;
        }
        if l2e & 0x80 != 0 {
            return Some((l2e & 0x000F_FFFF_FFE0_0000) + (vaddr & 0x1F_FFFF));
        }

        // Level 1 (PT): bits 20..12; 4 KiB page.
        let l1_ptr = ((l2e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l1e = *l1_ptr.add(((vaddr >> 12) & 0x1FF) as usize);
        if l1e & 1 == 0 {
            return None;
        }
        Some((l1e & 0x000F_FFFF_FFFF_F000) + (vaddr & 0xFFF))
    }
}
451
/// Hex+ASCII dump of `count` bytes starting at virtual address `vaddr`,
/// translated page-by-page through the raw page tables rooted at
/// `cr3_phys`. Unmapped pages are reported and skipped rather than faulting.
/// `prefix` is prepended to every output line.
fn dump_memory_bytes(vaddr: u64, cr3_phys: u64, count: usize, prefix: &str) {
    let hhdm = crate::memory::hhdm_offset();
    let mut offset = 0usize;
    while offset < count {
        let cur_va = vaddr.wrapping_add(offset as u64);
        let page_off = (cur_va & 0xFFF) as usize;
        // Never let a chunk cross a page boundary — each page needs its own
        // translation.
        let chunk = core::cmp::min(count - offset, 0x1000 - page_off);
        let Some(phys) = translate_via_raw_pt(cur_va, cr3_phys, hhdm) else {
            crate::serial_println!("{}(page {:#x} not mapped)", prefix, cur_va);
            offset += chunk;
            continue;
        };
        // `src` is the HHDM alias of the page base; byte i of the chunk is
        // at src[page_off + line_off + i].
        let src = (phys - (cur_va & 0xFFF) + hhdm) as *const u8;
        let mut line_off = 0usize;
        while line_off < chunk {
            // Classic 16-bytes-per-line hexdump with printable-ASCII column.
            let ll = core::cmp::min(16, chunk - line_off);
            let line_va = cur_va.wrapping_add(line_off as u64);
            let mut hex = [0u8; 48];
            let mut asc = [b'.'; 16];
            for i in 0..ll {
                let byte = unsafe { *src.add(page_off + line_off + i) };
                let hi = byte >> 4;
                let lo = byte & 0xF;
                hex[i * 3] = if hi < 10 { b'0' + hi } else { b'a' + hi - 10 };
                hex[i * 3 + 1] = if lo < 10 { b'0' + lo } else { b'a' + lo - 10 };
                hex[i * 3 + 2] = b' ';
                if byte >= 0x20 && byte < 0x7F {
                    asc[i] = byte;
                }
            }
            // Pad a short final line so the ASCII column stays aligned.
            for i in ll..16 {
                hex[i * 3] = b' ';
                hex[i * 3 + 1] = b' ';
                hex[i * 3 + 2] = b' ';
            }
            crate::serial_println!(
                "{}{:#018x}: {} |{}|",
                prefix,
                line_va,
                core::str::from_utf8(&hex[..48]).unwrap_or("???"),
                core::str::from_utf8(&asc[..ll]).unwrap_or("???")
            );
            line_off += ll;
        }
        offset += chunk;
    }
}
502
/// Print each level of the page-table walk for `vaddr` under `cr3_phys`
/// (PML4 → PDPT → PD → PT), stopping at the first non-present entry or
/// huge-page mapping, then show the neighbouring PT entries around the
/// final-level index for context.
fn dump_page_table_walk(vaddr: u64, cr3_phys: u64) {
    let hhdm = crate::memory::hhdm_offset();
    // Per-level indices: 9 bits each, from bit 39 down to bit 12.
    let l4_idx = ((vaddr >> 39) & 0x1FF) as usize;
    let l3_idx = ((vaddr >> 30) & 0x1FF) as usize;
    let l2_idx = ((vaddr >> 21) & 0x1FF) as usize;
    let l1_idx = ((vaddr >> 12) & 0x1FF) as usize;

    // SAFETY (review): same assumption as translate_via_raw_pt — page-table
    // frames are reachable through the HHDM.
    unsafe {
        let l4_ptr = (cr3_phys + hhdm) as *const u64;
        let l4e = *l4_ptr.add(l4_idx);
        let f = format_pte_flags(l4e);
        crate::serial_println!(
            "  PML4[{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l4_idx,
            l4e,
            l4e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l4e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PML4 not present\x1b[0m");
            return;
        }

        let l3_ptr = ((l4e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l3e = *l3_ptr.add(l3_idx);
        let f = format_pte_flags(l3e);
        crate::serial_println!(
            "  PDPT[{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l3_idx,
            l3e,
            l3e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l3e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PDPT not present\x1b[0m");
            return;
        }
        // PS bit at PDPT level => 1 GiB page; walk ends here.
        if l3e & 0x80 != 0 {
            crate::serial_println!(
                "  ╰→ 1 GiB huge page → phys {:#x}",
                l3e & 0x000F_FFFF_C000_0000
            );
            return;
        } let l2_ptr = ((l3e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l2e = *l2_ptr.add(l2_idx);
        let f = format_pte_flags(l2e);
        crate::serial_println!(
            "  PD  [{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l2_idx,
            l2e,
            l2e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l2e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PD not present\x1b[0m");
            return;
        }
        // PS bit at PD level => 2 MiB page; walk ends here.
        if l2e & 0x80 != 0 {
            crate::serial_println!(
                "  ╰→ 2 MiB huge page → phys {:#x}",
                l2e & 0x000F_FFFF_FFE0_0000
            );
            return;
        } let l1_ptr = ((l2e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l1e = *l1_ptr.add(l1_idx);
        let f = format_pte_flags(l1e);
        crate::serial_println!(
            "  PT  [{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l1_idx,
            l1e,
            l1e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l1e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PT not present\x1b[0m");
        } else {
            crate::serial_println!(
                "  \x1b[1;32m╰→ PAGE PRESENT\x1b[0m → phys {:#x} (check RW/US/NX flags)",
                l1e & 0x000F_FFFF_FFFF_F000
            );
        }
        // Show a small window of PT entries around the faulting index.
        crate::serial_println!("  --- Neighbouring PT entries ---");
        let start = if l1_idx >= 2 { l1_idx - 2 } else { 0 };
        for i in start..core::cmp::min(l1_idx + 3, 512) {
            let e = *l1_ptr.add(i);
            if e != 0 {
                let f = format_pte_flags(e);
                crate::serial_println!(
                    "    PT[{:>3}] = {:#018x} [{}]{}",
                    i,
                    e,
                    core::str::from_utf8(&f).unwrap_or("?").trim(),
                    if i == l1_idx { "  <<<" } else { "" }
                );
            }
        }
    }
}
608
/// Probe the address space's VMA table at a handful of addresses derived
/// from (and near) the faulting address, and print any regions found,
/// flagging the one that actually contains the fault. Purely best-effort
/// diagnostics — region_by_start only matches exact region starts, so the
/// probe list also includes a few common fixed bases.
fn dump_nearby_vma_regions(as_ref: &crate::memory::AddressSpace, fault_vaddr: u64) {
    let page_start = fault_vaddr & !0xFFF;
    // 4 KiB / 2 MiB / 1 GiB alignments of the fault, plus well-known bases.
    let probes = [
        page_start,
        fault_vaddr & !0x1F_FFFF,
        fault_vaddr & !0x3FFF_FFFF,
        0x0000_0001_0000_0000,
        0x0000_0000_0040_0000,
        0x0000_7FFF_F000_0000,
    ];
    let mut found_any = false;
    for &p in &probes {
        if let Some(vma) = as_ref.region_by_start(p) {
            let end = vma.start + (vma.page_count as u64) * vma.page_size.bytes();
            let hit = fault_vaddr >= vma.start && fault_vaddr < end;
            crate::serial_println!(
                "  VMA {:#014x}..{:#014x} pages={:<5} type={:?} flags={:?} pgsz={:?}{}",
                vma.start,
                end,
                vma.page_count,
                vma.vma_type,
                vma.flags,
                vma.page_size,
                if hit {
                    "  \x1b[1;32m<<< FAULT\x1b[0m"
                } else {
                    ""
                }
            );
            found_any = true;
        }
    }
    // Independent check: is the fault page inside ANY tracked mapping range?
    if as_ref.has_mapping_in_range(page_start, 0x1000) {
        crate::serial_println!(
            "  Note: fault page {:#x} IS within a tracked mapping range",
            page_start
        );
    } else {
        crate::serial_println!(
            "  Note: fault page {:#x} is NOT within any tracked mapping range",
            page_start
        );
    }
    if !found_any {
        crate::serial_println!("  (no VMA regions found at probed addresses)");
    }
}
657
/// Last-resort page-fault post-mortem: print everything we can about the
/// fault (error code, CPU state, control registers, task identity, memory
/// stats, code/stack bytes, raw page-table walk and nearby VMAs), then
/// panic. Never returns.
///
/// All memory reads go through translate_via_raw_pt / the HHDM so this
/// path does not itself fault on unmapped addresses.
fn dump_page_fault_full(
    stack_frame: &InterruptStackFrame,
    error_code: PageFaultErrorCode,
    fault_addr: Result<x86_64::VirtAddr, x86_64::addr::VirtAddrNotValid>,
    task: &Option<alloc::sync::Arc<crate::process::task::Task>>,
) -> ! {
    use x86_64::registers::control::{Cr0, Cr3, Cr4};

    let rip = stack_frame.instruction_pointer.as_u64();
    let rsp = stack_frame.stack_pointer.as_u64();
    let cs = stack_frame.code_segment.0;
    let ss = stack_frame.stack_segment.0;
    let rflags = stack_frame.cpu_flags.bits();
    let fault_vaddr = fault_addr.as_ref().map(|v| v.as_u64()).unwrap_or(0);
    let is_user = (cs & 3) == 3;

    crate::serial_println!("\x1b[1;31m");
    crate::serial_println!("╔══════════════════════════════════════════════════════════════════╗");
    crate::serial_println!("║ KERNEL PAGE FAULT EXCEPTION ║");
    crate::serial_println!(
        "╚══════════════════════════════════════════════════════════════════╝\x1b[0m"
    );

    // Decoded #PF error code.
    crate::serial_println!("\x1b[1;33m--- Error Code ---\x1b[0m");
    crate::serial_println!("  Raw        : {:#06x}", error_code.bits());
    crate::serial_println!(
        "  Diagnostic : \x1b[1;31m{}\x1b[0m",
        decode_error_code(error_code)
    );
    crate::serial_println!(
        "  PRESENT : {} | WRITE : {} | USER : {} | RSVD : {} | FETCH : {}",
        error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) as u8,
        error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE) as u8,
        error_code.contains(PageFaultErrorCode::USER_MODE) as u8,
        (error_code.bits() >> 3) & 1,
        (error_code.bits() >> 4) & 1
    );

    // Interrupted CPU context from the exception stack frame.
    crate::serial_println!("\x1b[1;33m--- Faulting Context ---\x1b[0m");
    crate::serial_println!("  CR2 (addr) : \x1b[1;35m{:#018x}\x1b[0m", fault_vaddr);
    crate::serial_println!("  RIP        : \x1b[1;36m{:#018x}\x1b[0m", rip);
    crate::serial_println!("  RSP        : {:#018x}", rsp);
    crate::serial_println!(
        "  CS         : {:#06x} (ring={}{}) | SS : {:#06x}",
        cs,
        cs & 3,
        if is_user { " USER" } else { " KERNEL" },
        ss
    );

    // Render the set RFLAGS bits as mnemonics.
    let mut rf_str = [0u8; 64];
    let mut rfp = 0usize;
    for &(name, bit) in &[
        ("CF", 1u64),
        ("PF", 4),
        ("AF", 16),
        ("ZF", 64),
        ("SF", 128),
        ("TF", 256),
        ("IF", 512),
        ("DF", 1024),
        ("OF", 2048),
    ] {
        if rflags & bit != 0 {
            for &b in name.as_bytes() {
                if rfp < rf_str.len() {
                    rf_str[rfp] = b;
                    rfp += 1;
                }
            }
            if rfp < rf_str.len() {
                rf_str[rfp] = b' ';
                rfp += 1;
            }
        }
    }
    crate::serial_println!(
        "  RFLAGS     : {:#018x} [{}]",
        rflags,
        core::str::from_utf8(&rf_str[..rfp]).unwrap_or("?")
    );

    // Paging-related control registers.
    crate::serial_println!("\x1b[1;33m--- Control Registers ---\x1b[0m");
    let cr0 = Cr0::read_raw();
    let (cr3_frame, cr3_flags) = Cr3::read();
    let cr3_phys = cr3_frame.start_address().as_u64();
    let cr4 = Cr4::read_raw();
    let efer: u64 = unsafe { x86_64::registers::model_specific::Efer::read_raw() };
    crate::serial_println!("  CR0  : {:#018x}", cr0);
    crate::serial_println!(
        "  CR3  : {:#018x} (flags={:#x})",
        cr3_phys,
        cr3_flags.bits()
    );
    crate::serial_println!("  CR4  : {:#018x}", cr4);
    crate::serial_println!(
        "  EFER : {:#018x} [{}{}{}]",
        efer,
        if efer & 1 != 0 { "SCE " } else { "" },
        if efer & (1 << 8) != 0 { "LME " } else { "" },
        if efer & (1 << 11) != 0 { "NXE" } else { "" }
    );

    crate::serial_println!("\x1b[1;33m--- CPU Context ---\x1b[0m");
    crate::serial_println!("  LAPIC ID    : {}", super::apic::lapic_id());
    crate::serial_println!("  Ticks sched : {}", crate::process::scheduler::ticks());
    crate::serial_println!("  HHDM offset : {:#x}", crate::memory::hhdm_offset());

    crate::serial_println!("\x1b[1;33m--- Task Context ---\x1b[0m");
    if let Some(ref t) = *task {
        crate::serial_println!(
            "  ID={} PID={} TID={} TGID={} name=\"{}\" prio={:?} ticks={}",
            t.id.as_u64(),
            t.pid,
            t.tid,
            t.tgid,
            t.name,
            t.priority,
            t.ticks.load(core::sync::atomic::Ordering::Relaxed)
        );
        // Recover the task's CR3 without calling into AddressSpace: read the
        // Arc<AddressSpace> pointer cell, then the cr3 field, via raw
        // page-table translation so a corrupt pointer cannot re-fault.
        // NOTE(review): the `2 * size_of::<usize>()` skips the Arc refcount
        // header; this hard-codes Arc's layout — verify on toolchain bumps.
        let task_cr3: u64 = {
            let hhdm = crate::memory::hhdm_offset();
            let proc_ptr: u64 = alloc::sync::Arc::as_ptr(&t.process) as u64;
            let as_cell_addr: u64 =
                unsafe { (*alloc::sync::Arc::as_ptr(&t.process)).address_space.get() as u64 };
            let as_inner_u64: u64 = match translate_via_raw_pt(as_cell_addr, cr3_phys, hhdm) {
                Some(phys) => unsafe { *((phys + hhdm) as *const u64) },
                None => 0,
            };
            if as_inner_u64 == 0 {
                0u64
            } else {
                let as_data_ptr: u64 = as_inner_u64 + 2 * core::mem::size_of::<usize>() as u64;
                match translate_via_raw_pt(as_data_ptr, cr3_phys, hhdm) {
                    Some(phys) => unsafe { *((phys + hhdm) as *const u64) },
                    None => 0,
                }
            }
        };
        if task_cr3 == 0 {
            crate::serial_println!(
                "  Task CR3    : <unreadable — null/unmapped Arc<AddressSpace>>"
            );
        } else {
            crate::serial_println!(
                "  Task CR3    : {:#018x}{}",
                task_cr3,
                if task_cr3 != cr3_phys {
                    "  *** DIFFERS from hardware CR3! ***"
                } else {
                    "  (matches hardware CR3)"
                }
            );
        }
    } else {
        crate::serial_println!("  (no current task — scheduler idle or unavailable)");
    }

    // Physical-memory allocator snapshot (try_lock so we never deadlock here).
    crate::serial_println!("\x1b[1;33m--- Memory Stats ---\x1b[0m");
    if let Some(guard) = crate::memory::get_allocator().try_lock() {
        if let Some(ref alloc) = *guard {
            let (total, allocated) = alloc.page_totals();
            let free = total.saturating_sub(allocated);
            crate::serial_println!(
                "  Total={} pages ({} MiB)  Alloc={} ({} MiB)  Free={} ({} MiB)",
                total,
                total * 4 / 1024,
                allocated,
                allocated * 4 / 1024,
                free,
                free * 4 / 1024
            );
            let mut zones = [(0u8, 0u64, 0usize, 0usize); 4];
            let n = alloc.zone_snapshot(&mut zones);
            for i in 0..n {
                let (zt, base, pages, ap) = zones[i];
                crate::serial_println!(
                    "  Zone {} ({}): base={:#x} pages={} alloc={} free={}",
                    i,
                    match zt {
                        0 => "DMA",
                        1 => "Normal",
                        2 => "High",
                        _ => "?",
                    },
                    base,
                    pages,
                    ap,
                    pages.saturating_sub(ap)
                );
            }
        } else {
            crate::serial_println!("  (allocator not initialized)");
        }
    } else {
        crate::serial_println!("  (allocator lock contended — skipping)");
    }

    // Raw bytes around RIP and RSP (fault-safe hexdumps).
    crate::serial_println!("\x1b[1;33m--- Code at RIP ({:#x}) ---\x1b[0m", rip);
    dump_memory_bytes(rip, cr3_phys, 32, "  ");

    crate::serial_println!("\x1b[1;33m--- Stack Dump (RSP={:#x}) ---\x1b[0m", rsp);
    dump_memory_bytes(rsp, cr3_phys, 128, "  ");

    crate::serial_println!(
        "\x1b[1;33m--- Page Table Walk (CR2={:#x}, CR3={:#x}) ---\x1b[0m",
        fault_vaddr,
        cr3_phys
    );
    if fault_addr.is_ok() {
        dump_page_table_walk(fault_vaddr, cr3_phys);
    } else {
        crate::serial_println!("  (CR2 is a non-canonical address: {:#x})", fault_vaddr);
    }

    if let Some(ref t) = *task {
        crate::serial_println!("\x1b[1;33m--- VMA Regions Near Fault ---\x1b[0m");
        let hhdm_vma = crate::memory::hhdm_offset();
        // Re-derive a readable &AddressSpace the same careful way as the
        // task_cr3 recovery above; skip the dump if any hop is unmapped.
        let safe_as: Option<*const crate::memory::AddressSpace> = unsafe {
            let as_cell_addr: u64 =
                (*alloc::sync::Arc::as_ptr(&t.process)).address_space.get() as u64;
            match translate_via_raw_pt(as_cell_addr, cr3_phys, hhdm_vma) {
                Some(phys) => {
                    let as_inner_u64 = *((phys + hhdm_vma) as *const u64);
                    if as_inner_u64 == 0 {
                        None
                    } else {
                        let as_data_ptr = (as_inner_u64 + 2 * core::mem::size_of::<usize>() as u64)
                            as *const crate::memory::AddressSpace;
                        if translate_via_raw_pt(as_data_ptr as u64, cr3_phys, hhdm_vma).is_some() {
                            Some(as_data_ptr)
                        } else {
                            None
                        }
                    }
                }
                None => None,
            }
        };
        if let Some(as_ptr) = safe_as {
            let as_ref = unsafe { &*as_ptr };
            dump_nearby_vma_regions(as_ref, fault_vaddr);
        } else {
            crate::serial_println!("  (AddressSpace unreadable — skipping VMA dump)");
        }
    }

    crate::serial_println!(
        "\x1b[1;31m╔══════════════════════════════════════════════════════════════════╗"
    );
    crate::serial_println!("║ END OF PAGE FAULT DUMP ║");
    crate::serial_println!(
        "╚══════════════════════════════════════════════════════════════════╝\x1b[0m"
    );

    panic!(
        "PAGE FAULT: {} at {:#x}, RIP={:#x}, CR3={:#x}, err={:#x}",
        decode_error_code(error_code),
        fault_vaddr,
        rip,
        cr3_phys,
        error_code.bits()
    );
}
969
970fn dump_user_pf_context(as_ref: &crate::memory::AddressSpace, rip: u64, rsp: u64) {
972 use x86_64::VirtAddr;
973
974 let hhdm = crate::memory::hhdm_offset();
975
976 if let Some(phys) = as_ref.translate(VirtAddr::new(rip)) {
977 let off = (rip & 0xfff) as usize;
978 let mut bytes = [0u8; 8];
979 unsafe {
981 let src = (phys.as_u64() - (rip & 0xfff) + hhdm + off as u64) as *const u8;
982 core::ptr::copy_nonoverlapping(src, bytes.as_mut_ptr(), bytes.len());
983 }
984 crate::serial_println!(
985 "[pagefault] ctx: rsp={:#x} rip-bytes={:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x}",
986 rsp,
987 bytes[0],
988 bytes[1],
989 bytes[2],
990 bytes[3],
991 bytes[4],
992 bytes[5],
993 bytes[6],
994 bytes[7],
995 );
996 } else {
997 crate::serial_println!("[pagefault] ctx: rsp={:#x} rip page unmapped", rsp);
998 }
999
1000 if let Some(phys) = as_ref.translate(VirtAddr::new(rsp)) {
1001 crate::serial_println!(
1002 "[pagefault] stack-top: rsp mapped (phys={:#x})",
1003 phys.as_u64()
1004 );
1005 } else {
1006 crate::serial_println!("[pagefault] stack-top: rsp unmapped");
1007 }
1008}
1009
1010extern "x86-interrupt" fn general_protection_fault_handler(
1012 stack_frame: InterruptStackFrame,
1013 error_code: u64,
1014) {
1015 let is_user = (stack_frame.code_segment.0 & 3) == 3;
1016 if is_user {
1017 if let Some(tid) = crate::process::current_task_id() {
1018 crate::serial_force_println!(
1019 "\x1b[31;1m[GPF]\x1b[0m USER tid={} rip={:#x} err={:#x}",
1020 tid,
1021 stack_frame.instruction_pointer.as_u64(),
1022 error_code
1023 );
1024 crate::silo::handle_user_fault(
1025 tid,
1026 crate::silo::SiloFaultReason::GeneralProtection,
1027 stack_frame.instruction_pointer.as_u64(),
1028 error_code,
1029 stack_frame.instruction_pointer.as_u64(),
1030 );
1031 return;
1032 }
1033 }
1034 crate::serial_force_println!(
1035 "\x1b[31;1m[GPF]\x1b[0m KERNEL rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1036 stack_frame.instruction_pointer.as_u64(),
1037 error_code,
1038 stack_frame.code_segment.0,
1039 stack_frame.stack_pointer.as_u64()
1040 );
1041 panic!("General protection fault");
1042}
1043
1044extern "x86-interrupt" fn stack_segment_fault_handler(
1046 stack_frame: InterruptStackFrame,
1047 error_code: u64,
1048) {
1049 crate::serial_force_println!(
1050 "\x1b[31;1m[STACK_FAULT]\x1b[0m rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1051 stack_frame.instruction_pointer.as_u64(),
1052 error_code,
1053 stack_frame.code_segment.0,
1054 stack_frame.stack_pointer.as_u64()
1055 );
1056 panic!("Stack segment fault");
1057}
1058
1059extern "x86-interrupt" fn double_fault_handler(
1061 stack_frame: InterruptStackFrame,
1062 error_code: u64,
1063) -> ! {
1064 crate::serial_force_println!(
1065 "\x1b[31;1m[DOUBLE_FAULT]\x1b[0m rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1066 stack_frame.instruction_pointer.as_u64(),
1067 error_code,
1068 stack_frame.code_segment.0,
1069 stack_frame.stack_pointer.as_u64()
1070 );
1071 panic!(
1072 "EXCEPTION: DOUBLE FAULT (error code: {:#x})\n{:#?}",
1073 error_code, stack_frame
1074 );
1075}
1076
/// Legacy PIT timer tick (PIC IRQ0).
///
/// Once the LAPIC timer has taken over, PIT ticks are acknowledged but
/// otherwise ignored so the scheduler is not double-ticked. EOI is sent
/// BEFORE maybe_preempt() because a preemptive context switch may not
/// return to this frame.
extern "x86-interrupt" fn legacy_timer_handler(_stack_frame: InterruptStackFrame) {
    if crate::arch::x86_64::timer::is_apic_timer_active() {
        // LAPIC timer owns scheduling now — just acknowledge and bail.
        if super::apic::is_initialized() {
            super::apic::eoi();
        } else {
            pic::end_of_interrupt(0);
        }
        return;
    }

    crate::process::scheduler::timer_tick();
    // Acknowledge before any potential context switch below.
    if super::apic::is_initialized() {
        super::apic::eoi();
    } else {
        pic::end_of_interrupt(0);
    }

    crate::process::scheduler::maybe_preempt();
}
1111
/// LAPIC timer tick. EOI is sent before maybe_preempt() because a
/// preemptive context switch may not return to this frame.
extern "x86-interrupt" fn lapic_timer_handler(_stack_frame: InterruptStackFrame) {
    crate::process::scheduler::timer_tick();
    super::apic::eoi();
    crate::process::scheduler::maybe_preempt();
}
1118
1119extern "x86-interrupt" fn mouse_handler(_stack_frame: InterruptStackFrame) {
1121 crate::arch::x86_64::mouse::handle_irq();
1122 if super::apic::is_initialized() {
1123 super::apic::eoi();
1124 } else {
1125 pic::end_of_interrupt(12);
1126 }
1127}
1128
1129extern "x86-interrupt" fn keyboard_handler(_stack_frame: InterruptStackFrame) {
1131 let raw = unsafe { super::io::inb(0x60) };
1132 if let Some(ch) = super::keyboard_layout::handle_scancode_raw(raw) {
1134 crate::arch::x86_64::keyboard::add_to_buffer(ch);
1135 }
1136
1137 if super::apic::is_initialized() {
1138 super::apic::eoi();
1139 } else {
1140 pic::end_of_interrupt(1);
1141 }
1142}
1143
/// Handler for the APIC spurious-interrupt vector (0xFF, set in init()).
/// Intentionally empty: no EOI is issued here — NOTE(review): per the APIC
/// architecture a spurious interrupt must not be acknowledged; confirm
/// against the Intel SDM if this vector is ever repurposed.
extern "x86-interrupt" fn spurious_handler(_stack_frame: InterruptStackFrame) {
    }
1149
/// AHCI controller interrupt (vector installed by register_ahci_irq):
/// service the controller, then acknowledge the interrupt.
extern "x86-interrupt" fn ahci_handler(_stack_frame: InterruptStackFrame) {
    crate::hardware::storage::ahci::handle_interrupt();

    if super::apic::is_initialized() {
        super::apic::eoi();
    } else {
        // Legacy PIC path: ack the IRQ line recorded at registration time.
        let irq = crate::hardware::storage::ahci::AHCI_IRQ_LINE
            .load(core::sync::atomic::Ordering::Relaxed);
        pic::end_of_interrupt(irq);
    }
}
1165
/// VirtIO block device interrupt (vector installed by
/// register_virtio_block_irq): service the device, then acknowledge.
extern "x86-interrupt" fn virtio_block_handler(_stack_frame: InterruptStackFrame) {
    crate::hardware::storage::virtio_block::handle_interrupt();

    if super::apic::is_initialized() {
        super::apic::eoi();
    } else {
        // Legacy PIC path: ack the device's registered IRQ line.
        let irq = crate::hardware::storage::virtio_block::get_irq();
        pic::end_of_interrupt(irq);
    }
}
1183
/// Reschedule IPI (IPI_RESCHED_VECTOR): another CPU asked us to re-evaluate
/// scheduling. EOI first — maybe_preempt() may context-switch away.
extern "x86-interrupt" fn resched_ipi_handler(_stack_frame: InterruptStackFrame) {
    super::apic::eoi();
    crate::process::scheduler::maybe_preempt();
}
1197
/// TLB-shootdown IPI (IPI_TLB_SHOOTDOWN_VECTOR). All work is delegated.
/// NOTE(review): no EOI is sent in this frame — presumably
/// tlb_shootdown_ipi_handler acknowledges the interrupt itself; verify.
extern "x86-interrupt" fn tlb_shootdown_handler(_stack_frame: InterruptStackFrame) {
    super::tlb::tlb_shootdown_ipi_handler();
}