// strat9_kernel/arch/x86_64/idt.rs
//! Interrupt Descriptor Table (IDT) for Strat9-OS
//!
//! Handles CPU exceptions and hardware IRQs.
//! Inspired by MaestroOS `idt.rs` and Redox-OS kernel.

use super::{pic, tss};
use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};

10/// IRQ interrupt vector numbers (PIC1_OFFSET + IRQ number)
11#[allow(dead_code)]
12pub mod irq {
13    pub const TIMER: u8 = super::pic::PIC1_OFFSET; // IRQ0 = 0x20
14    pub const KEYBOARD: u8 = super::pic::PIC1_OFFSET + 1; // IRQ1 = 0x21
15    pub const CASCADE: u8 = super::pic::PIC1_OFFSET + 2; // IRQ2 = 0x22
16    pub const MOUSE: u8 = super::pic::PIC1_OFFSET + 12; // IRQ12 = 0x2C
17    pub const COM2: u8 = super::pic::PIC1_OFFSET + 3; // IRQ3 = 0x23
18    pub const COM1: u8 = super::pic::PIC1_OFFSET + 4; // IRQ4 = 0x24
19    pub const FLOPPY: u8 = super::pic::PIC1_OFFSET + 6; // IRQ6 = 0x26
20    pub const ATA_PRIMARY: u8 = super::pic::PIC1_OFFSET + 14; // IRQ14 = 0x2E
21    pub const ATA_SECONDARY: u8 = super::pic::PIC1_OFFSET + 15; // IRQ15 = 0x2F
22}
23
/// Static IDT storage (must be 'static for load())
static mut IDT_STORAGE: InterruptDescriptorTable = InterruptDescriptorTable::new();
/// Spinlock flag guarding all mutation of `IDT_STORAGE` (false = unlocked).
static IDT_STORAGE_LOCK: AtomicBool = AtomicBool::new(false);
/// Remaining number of user-mode page faults that will still emit trace
/// events — a rate limit so a fault-looping user task cannot flood tracing.
static USER_PF_TRACE_BUDGET: AtomicU32 = AtomicU32::new(64);
28
29#[inline]
30fn lock_idt_storage() {
31    while IDT_STORAGE_LOCK
32        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
33        .is_err()
34    {
35        core::hint::spin_loop();
36    }
37}
38
39#[inline]
40fn unlock_idt_storage() {
41    IDT_STORAGE_LOCK.store(false, Ordering::Release);
42}
43
/// Populate the shared static IDT with exception and IRQ handlers, then
/// load it on the calling (bootstrap) CPU.
///
/// Must run before interrupts are enabled; secondary CPUs call `load()`
/// afterwards to point their IDTR at the same table.
pub fn init() {
    lock_idt_storage();
    // SAFETY: mutation of the static table is serialised by the storage
    // lock, and the table is 'static so the descriptor loaded into IDTR
    // stays valid forever.
    unsafe {
        let idt = &raw mut IDT_STORAGE;

        // CPU exceptions
        (*idt).breakpoint.set_handler_fn(breakpoint_handler);
        (*idt).page_fault.set_handler_fn(page_fault_handler);
        (*idt)
            .general_protection_fault
            .set_handler_fn(general_protection_fault_handler);
        (*idt)
            .stack_segment_fault
            .set_handler_fn(stack_segment_fault_handler);
        (*idt)
            .non_maskable_interrupt
            .set_handler_fn(non_maskable_interrupt_handler);
        (*idt).invalid_opcode.set_handler_fn(invalid_opcode_handler);
        // Double faults run on a dedicated IST stack so a kernel stack
        // overflow cannot cascade into a triple fault / reset.
        (*idt)
            .double_fault
            .set_handler_fn(double_fault_handler)
            .set_stack_index(tss::DOUBLE_FAULT_IST_INDEX);

        // Hardware IRQs (PIC remapped to 0x20+)
        let idt_ref = &mut *idt;
        idt_ref[irq::TIMER as u8].set_handler_fn(legacy_timer_handler);
        idt_ref[irq::KEYBOARD as u8].set_handler_fn(keyboard_handler);
        idt_ref[irq::MOUSE as u8].set_handler_fn(mouse_handler);

        // Spurious interrupt handler at vector 0xFF (APIC spurious vector)
        idt_ref[0xFF_u8].set_handler_fn(spurious_handler);

        // Cross-CPU reschedule IPI (vector 0xE0)
        idt_ref[super::apic::IPI_RESCHED_VECTOR as u8].set_handler_fn(resched_ipi_handler);

        // Cross-CPU TLB shootdown IPI (vector 0xF0)
        idt_ref[super::apic::IPI_TLB_SHOOTDOWN_VECTOR as u8].set_handler_fn(tlb_shootdown_handler);

        (*idt).load_unsafe();
    }
    unlock_idt_storage();

    log::debug!("IDT initialized with {} entries", 256);
}
88
89pub fn load() {
90    lock_idt_storage();
91    unsafe {
92        let idt = &raw const IDT_STORAGE;
93        (*idt).load_unsafe();
94    }
95    unlock_idt_storage();
96}
97
98/// Register the Local APIC timer IRQ vector to use the timer handler.
99pub fn register_lapic_timer_vector(vector: u8) {
100    lock_idt_storage();
101    unsafe {
102        let idt = &raw mut IDT_STORAGE;
103        (&mut *idt)[vector].set_handler_fn(lapic_timer_handler);
104        (*idt).load_unsafe();
105    }
106    unlock_idt_storage();
107}
108
109/// Register the AHCI storage controller IRQ handler.
110///
111/// Called after AHCI initialisation once the PCI interrupt line is known.
112pub fn register_ahci_irq(irq: u8) {
113    let vector = if irq < 16 {
114        super::pic::PIC1_OFFSET + irq
115    } else {
116        irq
117    };
118
119    lock_idt_storage();
120    unsafe {
121        let idt = &raw mut IDT_STORAGE;
122        (&mut *idt)[vector].set_handler_fn(ahci_handler);
123        (*idt).load_unsafe();
124    }
125    unlock_idt_storage();
126    log::info!("AHCI IRQ {} registered on vector {:#x}", irq, vector);
127}
128
129/// Register the VirtIO block device IRQ handler
130///
131/// Called after VirtIO block device initialization to route the device's
132/// IRQ to the correct handler.
133pub fn register_virtio_block_irq(irq: u8) {
134    // PCI INTx gives an IRQ line number (typically 0..15), while IDT expects
135    // a vector number. Map legacy IRQ lines to the remapped interrupt vectors.
136    let vector = if irq < 16 {
137        super::pic::PIC1_OFFSET + irq
138    } else {
139        irq
140    };
141
142    lock_idt_storage();
143    unsafe {
144        let idt = &raw mut IDT_STORAGE;
145        (&mut *idt)[vector].set_handler_fn(virtio_block_handler);
146        (*idt).load_unsafe();
147    }
148    unlock_idt_storage();
149    log::info!("VirtIO-blk IRQ {} registered on vector {:#x}", irq, vector);
150}
151
// =============================================
// CPU Exception Handlers
// =============================================
155
156/// Performs the breakpoint handler operation.
157extern "x86-interrupt" fn breakpoint_handler(stack_frame: InterruptStackFrame) {
158    log::warn!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
159}
160
161/// Performs the invalid opcode handler operation.
162extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: InterruptStackFrame) {
163    let is_user = (stack_frame.code_segment.0 & 3) == 3;
164    if is_user {
165        if let Some(tid) = crate::process::current_task_id() {
166            crate::silo::handle_user_fault(
167                tid,
168                crate::silo::SiloFaultReason::InvalidOpcode,
169                stack_frame.instruction_pointer.as_u64(),
170                0,
171                stack_frame.instruction_pointer.as_u64(),
172            );
173            return;
174        }
175    }
176    log::error!("EXCEPTION: INVALID OPCODE\n{:#?}", stack_frame);
177    panic!("Invalid opcode");
178}
179
/// Non-maskable interrupt handler.
///
/// Parks the CPU permanently: interrupts are disabled and the core spins
/// in `hlt`. NOTE(review): no recovery or logging is attempted here —
/// presumably NMIs indicate a fatal hardware/watchdog condition or a
/// deliberate halt-all-CPUs broadcast; confirm this is the intended policy.
extern "x86-interrupt" fn non_maskable_interrupt_handler(_stack_frame: InterruptStackFrame) {
    crate::arch::x86_64::cli();
    loop {
        crate::arch::x86_64::hlt();
    }
}
186
187/// Performs the page fault handler operation.
188extern "x86-interrupt" fn page_fault_handler(
189    stack_frame: InterruptStackFrame,
190    error_code: PageFaultErrorCode,
191) {
192    use x86_64::registers::control::{Cr2, Cr3};
193    let is_user = (stack_frame.code_segment.0 & 3) == 3;
194
195    // Get the faulting address
196    let fault_addr = Cr2::read();
197    let fault_vaddr = fault_addr.as_ref().map(|v| v.as_u64()).unwrap_or(0);
198    let rip = stack_frame.instruction_pointer.as_u64();
199    let user_rsp = stack_frame.stack_pointer.as_u64();
200
201    let mut trace_ctx = crate::trace::TraceTaskCtx::empty();
202    if is_user {
203        if let Some(task) = crate::process::current_task_clone() {
204            let as_ref = unsafe { &*task.process.address_space.get() };
205            trace_ctx = crate::trace::TraceTaskCtx {
206                task_id: task.id.as_u64(),
207                pid: task.pid,
208                tid: task.tid,
209                cr3: as_ref.cr3().as_u64(),
210            };
211        }
212    }
213
214    let do_pf_trace = if is_user {
215        USER_PF_TRACE_BUDGET
216            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
217                if v > 0 {
218                    Some(v - 1)
219                } else {
220                    None
221                }
222            })
223            .is_ok()
224    } else {
225        true
226    };
227    if do_pf_trace {
228        crate::trace_mem!(
229            crate::trace::category::MEM_PF,
230            crate::trace::TraceKind::MemPageFault,
231            error_code.bits() as u64,
232            trace_ctx,
233            rip,
234            fault_vaddr,
235            user_rsp,
236            0
237        );
238    }
239
240    // Try COW only for write-protection faults on already-present pages.
241    // For not-present faults, demand paging should run first.
242    if error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION)
243        && error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE)
244        && is_user
245    {
246        if let Some(task) = crate::process::current_task_clone() {
247            let address_space = unsafe { &*task.process.address_space.get() };
248            if let Ok(vaddr) = fault_addr {
249                match crate::syscall::fork::handle_cow_fault(vaddr.as_u64(), address_space) {
250                    Ok(()) => {
251                        crate::trace_mem!(
252                            crate::trace::category::MEM_COW,
253                            crate::trace::TraceKind::MemCow,
254                            1,
255                            trace_ctx,
256                            rip,
257                            vaddr.as_u64(),
258                            0,
259                            0
260                        );
261                        return;
262                    }
263                    Err(reason) => {
264                        crate::trace_mem!(
265                            crate::trace::category::MEM_COW,
266                            crate::trace::TraceKind::MemCow,
267                            0,
268                            trace_ctx,
269                            rip,
270                            vaddr.as_u64(),
271                            0,
272                            0
273                        );
274                        crate::serial_println!(
275                            "\x1b[31m[pagefault] COW resolve failed\x1b[0m: task={} \x1b[36mpid={}\x1b[0m tid={} \x1b[35maddr={:#x}\x1b[0m \x1b[35mrip={:#x}\x1b[0m err={}",
276                            task.id.as_u64(),
277                            task.pid,
278                            task.tid,
279                            vaddr.as_u64(),
280                            stack_frame.instruction_pointer.as_u64(),
281                            reason
282                        );
283                    }
284                }
285            }
286        }
287    }
288
289    if is_user {
290        if let Some(task) = crate::process::current_task_clone() {
291            let address_space = unsafe { &*task.process.address_space.get() };
292            if let Ok(vaddr) = fault_addr {
293                // FORCE OUTPUT for user fault - bypasses normal logging mutexes
294                crate::serial_force_println!(
295                    "\x1b[33m[pagefault] USER fault\x1b[0m: tid={} rip={:#x} addr={:#x} err={:#x}",
296                    task.tid,
297                    rip,
298                    vaddr.as_u64(),
299                    error_code.bits()
300                );
301
302                match address_space.handle_fault(vaddr.as_u64()) {
303                    Ok(()) => {
304                        crate::serial_force_println!(
305                            "\x1b[32m[pagefault] USER fault resolved\x1b[0m: tid={} addr={:#x}",
306                            task.tid,
307                            vaddr.as_u64()
308                        );
309                        return;
310                    }
311                    Err(e) => {
312                        crate::serial_force_println!(
313                            "\x1b[31m[pagefault] USER fault resolution FAILED\x1b[0m: tid={} addr={:#x} err={:?}",
314                            task.tid,
315                            vaddr.as_u64(),
316                            e
317                        );
318                        dump_user_pf_context(address_space, rip, user_rsp);
319                    }
320                }
321            }
322        }
323    }
324
325    if is_user {
326        if let Some(tid) = crate::process::current_task_id() {
327            crate::silo::handle_user_fault(
328                tid,
329                crate::silo::SiloFaultReason::PageFault,
330                fault_addr.map(|v| v.as_u64()).unwrap_or(0),
331                error_code.bits() as u64,
332                stack_frame.instruction_pointer.as_u64(),
333            );
334            return;
335        }
336    }
337
338    // Capture current task (non-blocking, safe from IRQ context) for the diagnostic dump.
339    let task_snap = crate::process::scheduler::current_task_clone_try();
340    dump_page_fault_full(&stack_frame, error_code, fault_addr, &task_snap);
341}
342
// =============================================================================
// CRITICAL: Full page fault diagnostic dump
//
// Invoked for every non-recoverable page fault (kernel or unhandled user).
// Designed to be deadlock-safe:
//   - Uses serial_println! (direct UART) instead of the log framework, which
//     may itself allocate or acquire locks.
//   - All memory reads go through translate_via_raw_pt so no unmapped address
//     is ever dereferenced.
//   - The buddy allocator lock is acquired with try_lock (non-blocking) for
//     memory statistics.
//   - Uses current_task_clone_try (non-blocking) instead of current_task_clone.
// =============================================================================
356
357/// Decodes `PageFaultErrorCode` bits into a human-readable string.
358fn decode_error_code(ec: PageFaultErrorCode) -> &'static str {
359    let p = ec.contains(PageFaultErrorCode::PROTECTION_VIOLATION);
360    let w = ec.contains(PageFaultErrorCode::CAUSED_BY_WRITE);
361    let u = ec.contains(PageFaultErrorCode::USER_MODE);
362    match (p, w, u) {
363        (false, false, false) => "kernel read of non-present page",
364        (false, true, false) => "kernel write to non-present page",
365        (false, false, true) => "user read of non-present page",
366        (false, true, true) => "user write to non-present page",
367        (true, false, false) => "kernel read protection violation",
368        (true, true, false) => "kernel write protection violation (COW / RO page)",
369        (true, false, true) => "user read protection violation (NX / supervisor-only)",
370        (true, true, true) => "user write protection violation (COW / RO page)",
371    }
372}
373
/// Formats page table entry flags into a short human-readable byte string.
///
/// Set flags are rendered as `|`-separated names (e.g. `P|RW|US`) into a
/// fixed 32-byte, space-padded buffer; the trailing separator is blanked.
fn format_pte_flags(entry: u64) -> [u8; 32] {
    let mut out = [b' '; 32];
    let mut len = 0usize;
    for (name, mask) in [
        ("P", 1u64 << 0),
        ("RW", 1 << 1),
        ("US", 1 << 2),
        ("PWT", 1 << 3),
        ("PCD", 1 << 4),
        ("A", 1 << 5),
        ("D", 1 << 6),
        ("PS", 1 << 7),
        ("G", 1 << 8),
        ("NX", 1 << 63),
    ] {
        if entry & mask == 0 {
            continue;
        }
        // Emit the flag name followed by its separator, truncating safely
        // should the buffer ever fill up.
        for &ch in name.as_bytes().iter().chain(b"|") {
            if len < out.len() {
                out[len] = ch;
                len += 1;
            }
        }
    }
    // Blank the dangling '|' after the last flag.
    if len > 0 && out[len - 1] == b'|' {
        out[len - 1] = b' ';
    }
    out
}
409
/// Translates a virtual address to a physical address via a manual 4-level
/// page table walk.  Returns `Some(phys)` or `None` if any level is absent.
///
/// Honours 1 GiB (PDPT) and 2 MiB (PD) huge pages: when the PS bit (bit 7)
/// is set at those levels the remaining virtual-address bits are treated as
/// the in-page offset.
///
/// # SAFETY
/// Read-only access to page tables through the HHDM mapping.
/// All intermediate addresses are derived from table entries — no pointer
/// originating from user-controlled data is ever dereferenced.
fn translate_via_raw_pt(vaddr: u64, cr3_phys: u64, hhdm: u64) -> Option<u64> {
    unsafe {
        // Level 4 (PML4): index = vaddr bits 47..39.
        let l4_ptr = (cr3_phys + hhdm) as *const u64;
        let l4e = *l4_ptr.add(((vaddr >> 39) & 0x1FF) as usize);
        // Bit 0 = present.
        if l4e & 1 == 0 {
            return None;
        }

        // Level 3 (PDPT): mask keeps the 52-bit physical frame address.
        let l3_ptr = ((l4e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l3e = *l3_ptr.add(((vaddr >> 30) & 0x1FF) as usize);
        if l3e & 1 == 0 {
            return None;
        }
        // PS bit set at PDPT level => 1 GiB huge page; low 30 bits are offset.
        if l3e & 0x80 != 0 {
            return Some((l3e & 0x000F_FFFF_C000_0000) + (vaddr & 0x3FFF_FFFF));
        }

        // Level 2 (PD).
        let l2_ptr = ((l3e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l2e = *l2_ptr.add(((vaddr >> 21) & 0x1FF) as usize);
        if l2e & 1 == 0 {
            return None;
        }
        // PS bit set at PD level => 2 MiB huge page; low 21 bits are offset.
        if l2e & 0x80 != 0 {
            return Some((l2e & 0x000F_FFFF_FFE0_0000) + (vaddr & 0x1F_FFFF));
        }

        // Level 1 (PT): 4 KiB leaf; low 12 bits are the page offset.
        let l1_ptr = ((l2e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l1e = *l1_ptr.add(((vaddr >> 12) & 0x1FF) as usize);
        if l1e & 1 == 0 {
            return None;
        }
        Some((l1e & 0x000F_FFFF_FFFF_F000) + (vaddr & 0xFFF))
    }
}
451
/// Hex + ASCII dump of `count` bytes at virtual address `vaddr`.
/// Each page boundary is translated through the raw page tables.
///
/// Unmapped pages are reported and skipped instead of being dereferenced,
/// so this is safe to call from the page-fault dump path.
fn dump_memory_bytes(vaddr: u64, cr3_phys: u64, count: usize, prefix: &str) {
    let hhdm = crate::memory::hhdm_offset();
    let mut offset = 0usize;
    while offset < count {
        let cur_va = vaddr.wrapping_add(offset as u64);
        let page_off = (cur_va & 0xFFF) as usize;
        // Clamp the chunk so it never crosses a 4 KiB page boundary —
        // each page needs its own translation.
        let chunk = core::cmp::min(count - offset, 0x1000 - page_off);
        let Some(phys) = translate_via_raw_pt(cur_va, cr3_phys, hhdm) else {
            crate::serial_println!("{}(page {:#x} not mapped)", prefix, cur_va);
            offset += chunk;
            continue;
        };
        // SAFETY: read-only access to a valid physical page through the HHDM mapping.
        // `src` points at the *start* of the page; per-byte reads below add
        // `page_off` back in.
        let src = (phys - (cur_va & 0xFFF) + hhdm) as *const u8;
        let mut line_off = 0usize;
        while line_off < chunk {
            // Classic 16-bytes-per-line hexdump: hex column + ASCII column.
            let ll = core::cmp::min(16, chunk - line_off);
            let line_va = cur_va.wrapping_add(line_off as u64);
            let mut hex = [0u8; 48];
            let mut asc = [b'.'; 16];
            for i in 0..ll {
                let byte = unsafe { *src.add(page_off + line_off + i) };
                let hi = byte >> 4;
                let lo = byte & 0xF;
                hex[i * 3] = if hi < 10 { b'0' + hi } else { b'a' + hi - 10 };
                hex[i * 3 + 1] = if lo < 10 { b'0' + lo } else { b'a' + lo - 10 };
                hex[i * 3 + 2] = b' ';
                // Printable ASCII range only; everything else stays '.'.
                if byte >= 0x20 && byte < 0x7F {
                    asc[i] = byte;
                }
            }
            // Pad a short final line so the ASCII column stays aligned.
            for i in ll..16 {
                hex[i * 3] = b' ';
                hex[i * 3 + 1] = b' ';
                hex[i * 3 + 2] = b' ';
            }
            crate::serial_println!(
                "{}{:#018x}: {} |{}|",
                prefix,
                line_va,
                core::str::from_utf8(&hex[..48]).unwrap_or("???"),
                core::str::from_utf8(&asc[..ll]).unwrap_or("???")
            );
            line_off += ll;
        }
        offset += chunk;
    }
}
502
/// Detailed page table walk with flag decoding at every level.
///
/// Prints one line per level (PML4 → PDPT → PD → PT), stopping at the
/// first non-present entry or huge-page leaf, and finishes with the PT
/// entries surrounding the faulting index for context.
fn dump_page_table_walk(vaddr: u64, cr3_phys: u64) {
    let hhdm = crate::memory::hhdm_offset();
    // Split the canonical virtual address into its four 9-bit indices.
    let l4_idx = ((vaddr >> 39) & 0x1FF) as usize;
    let l3_idx = ((vaddr >> 30) & 0x1FF) as usize;
    let l2_idx = ((vaddr >> 21) & 0x1FF) as usize;
    let l1_idx = ((vaddr >> 12) & 0x1FF) as usize;

    // SAFETY: read-only access through the HHDM mapping for diagnostic purposes.
    unsafe {
        let l4_ptr = (cr3_phys + hhdm) as *const u64;
        let l4e = *l4_ptr.add(l4_idx);
        let f = format_pte_flags(l4e);
        crate::serial_println!(
            "  PML4[{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l4_idx,
            l4e,
            l4e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l4e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PML4 not present\x1b[0m");
            return;
        }

        let l3_ptr = ((l4e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l3e = *l3_ptr.add(l3_idx);
        let f = format_pte_flags(l3e);
        crate::serial_println!(
            "  PDPT[{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l3_idx,
            l3e,
            l3e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l3e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PDPT not present\x1b[0m");
            return;
        }
        // PS bit at PDPT level: the walk terminates at a 1 GiB mapping.
        if l3e & 0x80 != 0 {
            crate::serial_println!(
                "  ╰→ 1 GiB huge page → phys {:#x}",
                l3e & 0x000F_FFFF_C000_0000
            );
            return;
        } // 1 GiB

        let l2_ptr = ((l3e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l2e = *l2_ptr.add(l2_idx);
        let f = format_pte_flags(l2e);
        crate::serial_println!(
            "  PD  [{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l2_idx,
            l2e,
            l2e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l2e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PD not present\x1b[0m");
            return;
        }
        // PS bit at PD level: the walk terminates at a 2 MiB mapping.
        if l2e & 0x80 != 0 {
            crate::serial_println!(
                "  ╰→ 2 MiB huge page → phys {:#x}",
                l2e & 0x000F_FFFF_FFE0_0000
            );
            return;
        } // 2 MiB

        let l1_ptr = ((l2e & 0x000F_FFFF_FFFF_F000) + hhdm) as *const u64;
        let l1e = *l1_ptr.add(l1_idx);
        let f = format_pte_flags(l1e);
        crate::serial_println!(
            "  PT  [{:>3}] = {:#018x}  phys={:#014x}  [{}]",
            l1_idx,
            l1e,
            l1e & 0x000F_FFFF_FFFF_F000,
            core::str::from_utf8(&f).unwrap_or("?").trim()
        );
        if l1e & 1 == 0 {
            crate::serial_println!("  \x1b[1;31m╰→ STOP: PT not present\x1b[0m");
        } else {
            // Page is mapped: the fault must then be a permission issue.
            crate::serial_println!(
                "  \x1b[1;32m╰→ PAGE PRESENT\x1b[0m → phys {:#x} (check RW/US/NX flags)",
                l1e & 0x000F_FFFF_FFFF_F000
            );
        }
        // Neighbouring PT entries for context
        crate::serial_println!("  --- Neighbouring PT entries ---");
        // Window of up to two entries either side of the faulting index,
        // clamped to the 512-entry table.
        let start = if l1_idx >= 2 { l1_idx - 2 } else { 0 };
        for i in start..core::cmp::min(l1_idx + 3, 512) {
            let e = *l1_ptr.add(i);
            if e != 0 {
                let f = format_pte_flags(e);
                crate::serial_println!(
                    "    PT[{:>3}] = {:#018x}  [{}]{}",
                    i,
                    e,
                    core::str::from_utf8(&f).unwrap_or("?").trim(),
                    if i == l1_idx { " <<<" } else { "" }
                );
            }
        }
    }
}
608
/// Dumps VMA regions near the faulting address.
///
/// `region_by_start` only matches exact region start addresses, so this
/// probes a handful of plausible starts (the fault's 4 KiB / 2 MiB / 1 GiB
/// alignments plus well-known fixed bases — presumably the user heap,
/// binary load address and stack top; confirm against the memory layout).
fn dump_nearby_vma_regions(as_ref: &crate::memory::AddressSpace, fault_vaddr: u64) {
    let page_start = fault_vaddr & !0xFFF;
    let probes = [
        page_start,
        fault_vaddr & !0x1F_FFFF,
        fault_vaddr & !0x3FFF_FFFF,
        0x0000_0001_0000_0000,
        0x0000_0000_0040_0000,
        0x0000_7FFF_F000_0000,
    ];
    let mut found_any = false;
    for &p in &probes {
        if let Some(vma) = as_ref.region_by_start(p) {
            // Region end = start + page_count * page_size.
            let end = vma.start + (vma.page_count as u64) * vma.page_size.bytes();
            // Mark the region that actually contains the faulting address.
            let hit = fault_vaddr >= vma.start && fault_vaddr < end;
            crate::serial_println!(
                "  VMA {:#014x}..{:#014x}  pages={:<5}  type={:?}  flags={:?}  pgsz={:?}{}",
                vma.start,
                end,
                vma.page_count,
                vma.vma_type,
                vma.flags,
                vma.page_size,
                if hit {
                    "  \x1b[1;32m<<< FAULT\x1b[0m"
                } else {
                    ""
                }
            );
            found_any = true;
        }
    }
    // Independently report whether the faulting page lies inside any
    // tracked mapping, even if no probe matched a region start above.
    if as_ref.has_mapping_in_range(page_start, 0x1000) {
        crate::serial_println!(
            "  Note: fault page {:#x} IS within a tracked mapping range",
            page_start
        );
    } else {
        crate::serial_println!(
            "  Note: fault page {:#x} is NOT within any tracked mapping range",
            page_start
        );
    }
    if !found_any {
        crate::serial_println!("  (no VMA regions found at probed addresses)");
    }
}
657
658/// Full diagnostic dump for a non-recoverable page fault.
659///
660/// Uses `serial_println!` directly (lock-free UART) to avoid any deadlock
661/// with the log framework or the heap allocator.
662fn dump_page_fault_full(
663    stack_frame: &InterruptStackFrame,
664    error_code: PageFaultErrorCode,
665    fault_addr: Result<x86_64::VirtAddr, x86_64::addr::VirtAddrNotValid>,
666    task: &Option<alloc::sync::Arc<crate::process::task::Task>>,
667) -> ! {
668    use x86_64::registers::control::{Cr0, Cr3, Cr4};
669
670    let rip = stack_frame.instruction_pointer.as_u64();
671    let rsp = stack_frame.stack_pointer.as_u64();
672    let cs = stack_frame.code_segment.0;
673    let ss = stack_frame.stack_segment.0;
674    let rflags = stack_frame.cpu_flags.bits();
675    let fault_vaddr = fault_addr.as_ref().map(|v| v.as_u64()).unwrap_or(0);
676    let is_user = (cs & 3) == 3;
677
678    crate::serial_println!("\x1b[1;31m");
679    crate::serial_println!("╔══════════════════════════════════════════════════════════════════╗");
680    crate::serial_println!("║                  KERNEL PAGE FAULT EXCEPTION                    ║");
681    crate::serial_println!(
682        "╚══════════════════════════════════════════════════════════════════╝\x1b[0m"
683    );
684
685    // --- Error code ---
686    crate::serial_println!("\x1b[1;33m--- Error Code ---\x1b[0m");
687    crate::serial_println!("  Raw         : {:#06x}", error_code.bits());
688    crate::serial_println!(
689        "  Diagnostic  : \x1b[1;31m{}\x1b[0m",
690        decode_error_code(error_code)
691    );
692    crate::serial_println!(
693        "  PRESENT     : {} | WRITE : {} | USER : {} | RSVD : {} | FETCH : {}",
694        error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) as u8,
695        error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE) as u8,
696        error_code.contains(PageFaultErrorCode::USER_MODE) as u8,
697        (error_code.bits() >> 3) & 1,
698        (error_code.bits() >> 4) & 1
699    );
700
701    // --- Faulting context ---
702    crate::serial_println!("\x1b[1;33m--- Faulting Context ---\x1b[0m");
703    crate::serial_println!("  CR2 (addr)  : \x1b[1;35m{:#018x}\x1b[0m", fault_vaddr);
704    crate::serial_println!("  RIP         : \x1b[1;36m{:#018x}\x1b[0m", rip);
705    crate::serial_println!("  RSP         : {:#018x}", rsp);
706    crate::serial_println!(
707        "  CS          : {:#06x}  (ring={}{}) | SS : {:#06x}",
708        cs,
709        cs & 3,
710        if is_user { " USER" } else { " KERNEL" },
711        ss
712    );
713
714    // RFLAGS décodé
715    let mut rf_str = [0u8; 64];
716    let mut rfp = 0usize;
717    for &(name, bit) in &[
718        ("CF", 1u64),
719        ("PF", 4),
720        ("AF", 16),
721        ("ZF", 64),
722        ("SF", 128),
723        ("TF", 256),
724        ("IF", 512),
725        ("DF", 1024),
726        ("OF", 2048),
727    ] {
728        if rflags & bit != 0 {
729            for &b in name.as_bytes() {
730                if rfp < rf_str.len() {
731                    rf_str[rfp] = b;
732                    rfp += 1;
733                }
734            }
735            if rfp < rf_str.len() {
736                rf_str[rfp] = b' ';
737                rfp += 1;
738            }
739        }
740    }
741    crate::serial_println!(
742        "  RFLAGS      : {:#018x}  [{}]",
743        rflags,
744        core::str::from_utf8(&rf_str[..rfp]).unwrap_or("?")
745    );
746
747    // --- Control registers ---
748    crate::serial_println!("\x1b[1;33m--- Control Registers ---\x1b[0m");
749    let cr0 = Cr0::read_raw();
750    let (cr3_frame, cr3_flags) = Cr3::read();
751    let cr3_phys = cr3_frame.start_address().as_u64();
752    let cr4 = Cr4::read_raw();
753    let efer: u64 = unsafe { x86_64::registers::model_specific::Efer::read_raw() };
754    crate::serial_println!("  CR0         : {:#018x}", cr0);
755    crate::serial_println!(
756        "  CR3         : {:#018x}  (flags={:#x})",
757        cr3_phys,
758        cr3_flags.bits()
759    );
760    crate::serial_println!("  CR4         : {:#018x}", cr4);
761    crate::serial_println!(
762        "  EFER        : {:#018x}  [{}{}{}]",
763        efer,
764        if efer & 1 != 0 { "SCE " } else { "" },
765        if efer & (1 << 8) != 0 { "LME " } else { "" },
766        if efer & (1 << 11) != 0 { "NXE" } else { "" }
767    );
768
769    // --- CPU context ---
770    crate::serial_println!("\x1b[1;33m--- CPU Context ---\x1b[0m");
771    crate::serial_println!("  LAPIC ID    : {}", super::apic::lapic_id());
772    crate::serial_println!("  Ticks sched : {}", crate::process::scheduler::ticks());
773    crate::serial_println!("  HHDM offset : {:#x}", crate::memory::hhdm_offset());
774
775    // --- Task context ---
776    crate::serial_println!("\x1b[1;33m--- Task Context ---\x1b[0m");
777    if let Some(ref t) = *task {
778        crate::serial_println!(
779            "  ID={} PID={} TID={} TGID={} name=\"{}\" prio={:?} ticks={}",
780            t.id.as_u64(),
781            t.pid,
782            t.tid,
783            t.tgid,
784            t.name,
785            t.priority,
786            t.ticks.load(core::sync::atomic::Ordering::Relaxed)
787        );
788        // SAFETY: Read task CR3 safely using the hardware page-table walker
789        // (translate_via_raw_pt) to prevent recursive page faults if the
790        // process's Arc<AddressSpace> is partially initialized or corrupted.
791        //
792        // Chain: &t.process → Arc<Process> data ptr (Arc::as_ptr)
793        //      → (*process).address_space.get() → *mut Arc<AddressSpace>
794        //      → Arc::as_ptr(arc_as) → *const AddressSpace
795        //      → (*addr_space).cr3_phys
796        //
797        // Each step uses translate_via_raw_pt to verify the pointer is mapped
798        // before dereferencing, using the hardware CR3 (cr3_phys) which always
799        // maps the kernel's HHDM region.
800        let task_cr3: u64 = {
801            let hhdm = crate::memory::hhdm_offset();
802            // Step 1: Arc<Process> data (Arc::as_ptr is always valid for a live Arc)
803            let proc_ptr: u64 = alloc::sync::Arc::as_ptr(&t.process) as u64;
804            // Step 2: address_space field in Process = SyncUnsafeCell whose .get()
805            // returns a raw ptr into the Process data — always valid for a live Process.
806            // However, reading the Arc<AddressSpace> *value* from that pointer may
807            // fault if the memory is unmapped, so we use translate_via_raw_pt.
808            let as_cell_addr: u64 =
809                unsafe { (*alloc::sync::Arc::as_ptr(&t.process)).address_space.get() as u64 };
810            // Step 3: read the 8-byte Arc<AddressSpace> inner pointer from as_cell_addr
811            // via raw page table walk with current hardware CR3.
812            let as_inner_u64: u64 = match translate_via_raw_pt(as_cell_addr, cr3_phys, hhdm) {
813                Some(phys) => unsafe { *((phys + hhdm) as *const u64) },
814                None => 0,
815            };
816            if as_inner_u64 == 0 {
817                0u64
818            } else {
819                // as_inner_u64 is the NonNull ptr inside Arc<AddressSpace>
820                // = pointer to ArcInner<AddressSpace>.
821                // ArcInner = strong(8) + weak(8) + data(AddressSpace).
822                // So AddressSpace data is at as_inner_u64 + 16.
823                let as_data_ptr: u64 = as_inner_u64 + 2 * core::mem::size_of::<usize>() as u64;
824                // cr3_phys is the first field of AddressSpace (PhysAddr = u64, 8 bytes).
825                match translate_via_raw_pt(as_data_ptr, cr3_phys, hhdm) {
826                    Some(phys) => unsafe { *((phys + hhdm) as *const u64) },
827                    None => 0,
828                }
829            }
830        };
831        if task_cr3 == 0 {
832            crate::serial_println!(
833                "  Task CR3    : <unreadable — null/unmapped Arc<AddressSpace>>"
834            );
835        } else {
836            crate::serial_println!(
837                "  Task CR3    : {:#018x}{}",
838                task_cr3,
839                if task_cr3 != cr3_phys {
840                    " *** DIFFERS from hardware CR3! ***"
841                } else {
842                    " (matches hardware CR3)"
843                }
844            );
845        }
846    } else {
847        crate::serial_println!("  (no current task — scheduler idle or unavailable)");
848    }
849
850    // --- Memory statistics ---
851    crate::serial_println!("\x1b[1;33m--- Memory Stats ---\x1b[0m");
852    if let Some(guard) = crate::memory::get_allocator().try_lock() {
853        if let Some(ref alloc) = *guard {
854            let (total, allocated) = alloc.page_totals();
855            let free = total.saturating_sub(allocated);
856            crate::serial_println!(
857                "  Total={} pages ({} MiB)  Alloc={} ({} MiB)  Free={} ({} MiB)",
858                total,
859                total * 4 / 1024,
860                allocated,
861                allocated * 4 / 1024,
862                free,
863                free * 4 / 1024
864            );
865            let mut zones = [(0u8, 0u64, 0usize, 0usize); 4];
866            let n = alloc.zone_snapshot(&mut zones);
867            for i in 0..n {
868                let (zt, base, pages, ap) = zones[i];
869                crate::serial_println!(
870                    "    Zone {} ({}): base={:#x} pages={} alloc={} free={}",
871                    i,
872                    match zt {
873                        0 => "DMA",
874                        1 => "Normal",
875                        2 => "High",
876                        _ => "?",
877                    },
878                    base,
879                    pages,
880                    ap,
881                    pages.saturating_sub(ap)
882                );
883            }
884        } else {
885            crate::serial_println!("  (allocator not initialized)");
886        }
887    } else {
888        crate::serial_println!("  (allocator lock contended — skipping)");
889    }
890
891    // --- Code bytes at RIP ---
892    crate::serial_println!("\x1b[1;33m--- Code at RIP ({:#x}) ---\x1b[0m", rip);
893    dump_memory_bytes(rip, cr3_phys, 32, "  ");
894
895    // --- Stack dump ---
896    crate::serial_println!("\x1b[1;33m--- Stack Dump (RSP={:#x}) ---\x1b[0m", rsp);
897    dump_memory_bytes(rsp, cr3_phys, 128, "  ");
898
899    // --- Page table walk ---
900    crate::serial_println!(
901        "\x1b[1;33m--- Page Table Walk (CR2={:#x}, CR3={:#x}) ---\x1b[0m",
902        fault_vaddr,
903        cr3_phys
904    );
905    if fault_addr.is_ok() {
906        dump_page_table_walk(fault_vaddr, cr3_phys);
907    } else {
908        crate::serial_println!("  (CR2 is a non-canonical address: {:#x})", fault_vaddr);
909    }
910
911    // --- VMA regions near fault ---
912    if let Some(ref t) = *task {
913        crate::serial_println!("\x1b[1;33m--- VMA Regions Near Fault ---\x1b[0m");
914        // SAFETY: Use the same safe ptr-chain read strategy as the Task CR3 section above:
915        // Arc::as_ptr gives a valid *const AddressSpace if the Arc is alive, but the
916        // Arc<AddressSpace> stored inside the SyncUnsafeCell might be corrupted.
917        // We validate via translate_via_raw_pt before reading the inner ptr.
918        let hhdm_vma = crate::memory::hhdm_offset();
919        let safe_as: Option<*const crate::memory::AddressSpace> = unsafe {
920            let as_cell_addr: u64 =
921                (*alloc::sync::Arc::as_ptr(&t.process)).address_space.get() as u64;
922            match translate_via_raw_pt(as_cell_addr, cr3_phys, hhdm_vma) {
923                Some(phys) => {
924                    // Read the Arc<AddressSpace> inner pointer (a NonNull ptr stored at this phys)
925                    let as_inner_u64 = *((phys + hhdm_vma) as *const u64);
926                    if as_inner_u64 == 0 {
927                        None
928                    } else {
929                        // ArcInner<AddressSpace>.data at +16
930                        let as_data_ptr = (as_inner_u64 + 2 * core::mem::size_of::<usize>() as u64)
931                            as *const crate::memory::AddressSpace;
932                        // Validate the AddressSpace pointer is mapped before returning it
933                        if translate_via_raw_pt(as_data_ptr as u64, cr3_phys, hhdm_vma).is_some() {
934                            Some(as_data_ptr)
935                        } else {
936                            None
937                        }
938                    }
939                }
940                None => None,
941            }
942        };
943        if let Some(as_ptr) = safe_as {
944            // SAFETY: We verified above that as_ptr is mapped and readable.
945            let as_ref = unsafe { &*as_ptr };
946            dump_nearby_vma_regions(as_ref, fault_vaddr);
947        } else {
948            crate::serial_println!("  (AddressSpace unreadable — skipping VMA dump)");
949        }
950    }
951
952    crate::serial_println!(
953        "\x1b[1;31m╔══════════════════════════════════════════════════════════════════╗"
954    );
955    crate::serial_println!("║                     END OF PAGE FAULT DUMP                      ║");
956    crate::serial_println!(
957        "╚══════════════════════════════════════════════════════════════════╝\x1b[0m"
958    );
959
960    panic!(
961        "PAGE FAULT: {} at {:#x}, RIP={:#x}, CR3={:#x}, err={:#x}",
962        decode_error_code(error_code),
963        fault_vaddr,
964        rip,
965        cr3_phys,
966        error_code.bits()
967    );
968}
969
970/// Performs the dump user pf context operation.
971fn dump_user_pf_context(as_ref: &crate::memory::AddressSpace, rip: u64, rsp: u64) {
972    use x86_64::VirtAddr;
973
974    let hhdm = crate::memory::hhdm_offset();
975
976    if let Some(phys) = as_ref.translate(VirtAddr::new(rip)) {
977        let off = (rip & 0xfff) as usize;
978        let mut bytes = [0u8; 8];
979        // SAFETY: We read at most 8 bytes from a mapped user instruction page via HHDM.
980        unsafe {
981            let src = (phys.as_u64() - (rip & 0xfff) + hhdm + off as u64) as *const u8;
982            core::ptr::copy_nonoverlapping(src, bytes.as_mut_ptr(), bytes.len());
983        }
984        crate::serial_println!(
985            "[pagefault] ctx: rsp={:#x} rip-bytes={:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x} {:02x}",
986            rsp,
987            bytes[0],
988            bytes[1],
989            bytes[2],
990            bytes[3],
991            bytes[4],
992            bytes[5],
993            bytes[6],
994            bytes[7],
995        );
996    } else {
997        crate::serial_println!("[pagefault] ctx: rsp={:#x} rip page unmapped", rsp);
998    }
999
1000    if let Some(phys) = as_ref.translate(VirtAddr::new(rsp)) {
1001        crate::serial_println!(
1002            "[pagefault] stack-top: rsp mapped (phys={:#x})",
1003            phys.as_u64()
1004        );
1005    } else {
1006        crate::serial_println!("[pagefault] stack-top: rsp unmapped");
1007    }
1008}
1009
1010/// Performs the general protection fault handler operation.
1011extern "x86-interrupt" fn general_protection_fault_handler(
1012    stack_frame: InterruptStackFrame,
1013    error_code: u64,
1014) {
1015    let is_user = (stack_frame.code_segment.0 & 3) == 3;
1016    if is_user {
1017        if let Some(tid) = crate::process::current_task_id() {
1018            crate::serial_force_println!(
1019                "\x1b[31;1m[GPF]\x1b[0m USER tid={} rip={:#x} err={:#x}",
1020                tid,
1021                stack_frame.instruction_pointer.as_u64(),
1022                error_code
1023            );
1024            crate::silo::handle_user_fault(
1025                tid,
1026                crate::silo::SiloFaultReason::GeneralProtection,
1027                stack_frame.instruction_pointer.as_u64(),
1028                error_code,
1029                stack_frame.instruction_pointer.as_u64(),
1030            );
1031            return;
1032        }
1033    }
1034    crate::serial_force_println!(
1035        "\x1b[31;1m[GPF]\x1b[0m KERNEL rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1036        stack_frame.instruction_pointer.as_u64(),
1037        error_code,
1038        stack_frame.code_segment.0,
1039        stack_frame.stack_pointer.as_u64()
1040    );
1041    panic!("General protection fault");
1042}
1043
1044/// Performs the stack segment fault handler operation.
1045extern "x86-interrupt" fn stack_segment_fault_handler(
1046    stack_frame: InterruptStackFrame,
1047    error_code: u64,
1048) {
1049    crate::serial_force_println!(
1050        "\x1b[31;1m[STACK_FAULT]\x1b[0m rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1051        stack_frame.instruction_pointer.as_u64(),
1052        error_code,
1053        stack_frame.code_segment.0,
1054        stack_frame.stack_pointer.as_u64()
1055    );
1056    panic!("Stack segment fault");
1057}
1058
1059/// Performs the double fault handler operation.
1060extern "x86-interrupt" fn double_fault_handler(
1061    stack_frame: InterruptStackFrame,
1062    error_code: u64,
1063) -> ! {
1064    crate::serial_force_println!(
1065        "\x1b[31;1m[DOUBLE_FAULT]\x1b[0m rip={:#x} err={:#x} cs={:#x} rsp={:#x}",
1066        stack_frame.instruction_pointer.as_u64(),
1067        error_code,
1068        stack_frame.code_segment.0,
1069        stack_frame.stack_pointer.as_u64()
1070    );
1071    panic!(
1072        "EXCEPTION: DOUBLE FAULT (error code: {:#x})\n{:#?}",
1073        error_code, stack_frame
1074    );
1075}
1076
1077// =============================================
1078// Hardware IRQ handlers
1079// =============================================
1080
1081/// Legacy external timer IRQ handler (PIC/IOAPIC IRQ0 path, vector 0x20).
1082///
1083/// When the LAPIC timer is active, we ignore this source to avoid double-ticking.
1084extern "x86-interrupt" fn legacy_timer_handler(_stack_frame: InterruptStackFrame) {
1085    if crate::arch::x86_64::timer::is_apic_timer_active() {
1086        // Ignore legacy timer source once LAPIC timer is running.
1087        if super::apic::is_initialized() {
1088            super::apic::eoi();
1089        } else {
1090            pic::end_of_interrupt(0);
1091        }
1092        return;
1093    }
1094
1095    // Increment tick counter
1096    crate::process::scheduler::timer_tick();
1097    // NOTE: avoid complex rendering/allocation work in IRQ context.
1098    // Status bar refresh is currently done from non-IRQ paths.
1099
1100    // Send EOI first so the timer can fire again on the new task
1101    if super::apic::is_initialized() {
1102        super::apic::eoi();
1103    } else {
1104        pic::end_of_interrupt(0);
1105    }
1106
1107    // Try to preempt the current task (no-op if scheduler lock is held
1108    // or no task is running yet)
1109    crate::process::scheduler::maybe_preempt();
1110}
1111
/// Local APIC timer handler (dedicated vector, e.g. 0xD2).
///
/// Order matters: account the tick first, send EOI second (so the LAPIC can
/// deliver the next timer interrupt even across a context switch), and only
/// then attempt preemption.
extern "x86-interrupt" fn lapic_timer_handler(_stack_frame: InterruptStackFrame) {
    crate::process::scheduler::timer_tick();
    super::apic::eoi();
    crate::process::scheduler::maybe_preempt();
}
1118
1119/// PS/2 Mouse IRQ12 handler.
1120extern "x86-interrupt" fn mouse_handler(_stack_frame: InterruptStackFrame) {
1121    crate::arch::x86_64::mouse::handle_irq();
1122    if super::apic::is_initialized() {
1123        super::apic::eoi();
1124    } else {
1125        pic::end_of_interrupt(12);
1126    }
1127}
1128
1129/// Performs the keyboard handler operation.
1130extern "x86-interrupt" fn keyboard_handler(_stack_frame: InterruptStackFrame) {
1131    let raw = unsafe { super::io::inb(0x60) };
1132    // Port 0x60 is consumed on read: feed the raw scancode directly.
1133    if let Some(ch) = super::keyboard_layout::handle_scancode_raw(raw) {
1134        crate::arch::x86_64::keyboard::add_to_buffer(ch);
1135    }
1136
1137    if super::apic::is_initialized() {
1138        super::apic::eoi();
1139    } else {
1140        pic::end_of_interrupt(1);
1141    }
1142}
1143
/// Spurious interrupt handler (APIC vector 0xFF).
/// Per Intel SDM: do NOT send EOI for spurious interrupts — a spurious
/// vector has no corresponding ISR bit set, so an EOI here would wrongly
/// acknowledge some other in-service interrupt.
extern "x86-interrupt" fn spurious_handler(_stack_frame: InterruptStackFrame) {
    // Intentionally empty — no EOI per Intel SDM
}
1149
1150/// AHCI storage controller IRQ handler.
1151///
1152/// Reads `HBA_IS`, processes per-port completions, wakes waiting tasks, then
1153/// sends EOI.  Must not call any function that may block or allocate.
1154extern "x86-interrupt" fn ahci_handler(_stack_frame: InterruptStackFrame) {
1155    crate::hardware::storage::ahci::handle_interrupt();
1156
1157    if super::apic::is_initialized() {
1158        super::apic::eoi();
1159    } else {
1160        let irq = crate::hardware::storage::ahci::AHCI_IRQ_LINE
1161            .load(core::sync::atomic::Ordering::Relaxed);
1162        pic::end_of_interrupt(irq);
1163    }
1164}
1165
1166/// VirtIO Block device IRQ handler
1167///
1168/// Handles interrupts from the VirtIO block device.
1169/// The IRQ line is determined at runtime from PCI config.
1170extern "x86-interrupt" fn virtio_block_handler(_stack_frame: InterruptStackFrame) {
1171    // Handle the VirtIO block interrupt
1172    crate::hardware::storage::virtio_block::handle_interrupt();
1173
1174    // Send EOI
1175    if super::apic::is_initialized() {
1176        super::apic::eoi();
1177    } else {
1178        // Get the IRQ number from the device
1179        let irq = crate::hardware::storage::virtio_block::get_irq();
1180        pic::end_of_interrupt(irq);
1181    }
1182}
1183
/// Cross-CPU reschedule IPI handler (vector 0xF0).
///
/// Sent by another CPU (via `apic::send_resched_ipi`) to request that this
/// CPU preempts its current task immediately rather than waiting for the next
/// timer tick. This is used when a task running on this CPU is killed or
/// suspended by a different CPU.
///
/// EOI is sent ***before*** `maybe_preempt()` so the APIC can accept further
/// IPIs before the potentially long context-switch path runs.
extern "x86-interrupt" fn resched_ipi_handler(_stack_frame: InterruptStackFrame) {
    super::apic::eoi();
    crate::process::scheduler::maybe_preempt();
}
1197
/// Cross-CPU TLB shootdown IPI handler.
///
/// NOTE(review): this comment previously claimed vector 0xF0, which is the
/// same vector documented for the reschedule IPI above — at most one can be
/// right. Confirm the actual vector against where this handler is registered
/// in the IDT.
extern "x86-interrupt" fn tlb_shootdown_handler(_stack_frame: InterruptStackFrame) {
    // Note: EOI is sent by the architecture-independent handler.
    super::tlb::tlb_shootdown_ipi_handler();
}