// strat9_kernel/arch/x86_64/tss.rs

//! Task State Segment (TSS) for Strat9-OS
//!
//! The TSS is required for:
//! - Interrupt Stack Table (IST) entries for safe exception handling
//! - Ring 3 -> Ring 0 stack switching (privilege_stack_table[0] = rsp0)

use core::{
    mem::MaybeUninit,
    sync::atomic::{AtomicBool, Ordering},
};
use x86_64::{structures::tss::TaskStateSegment, VirtAddr};
/// IST index used for the double fault handler
pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;

/// IST stack size (20 KB)
const IST_STACK_SIZE: usize = 4096 * 5;

/// Static IST stacks for double fault handler (per-CPU)
///
/// NOTE(review): plain static arrays have no guard page, so an overflow of
/// one CPU's IST stack silently corrupts the adjacent CPU's stack — confirm
/// whether guarded/mapped stacks are planned.
static mut IST_STACKS: [[u8; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS] =
    [[0; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS];

/// Per-CPU TSS storage
///
/// Each slot is written exactly once by `init_cpu`, then published through
/// the matching `TSS_INIT` flag; afterwards only `set_kernel_stack_for`
/// mutates it (rsp0 updates).
static mut TSS: [MaybeUninit<TaskStateSegment>; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { MaybeUninit::uninit() }; crate::arch::x86_64::percpu::MAX_CPUS];

// Per-CPU "TSS slot is initialized" flags. The Release store in `init_cpu`
// pairs with the Acquire loads in `tss_for` / `set_kernel_stack_for` so the
// TSS contents are visible before the flag reads true.
static TSS_INIT: [AtomicBool; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { AtomicBool::new(false) }; crate::arch::x86_64::percpu::MAX_CPUS];

30/// Initialize the TSS with IST entries
31///
32/// Must be called before `gdt::init()` since the GDT references the TSS.
33pub fn init() {
34    init_cpu(0);
35}
36
37/// Initialize the TSS for a given CPU index.
38pub fn init_cpu(cpu_index: usize) {
39    // Bounds check: prevent OOB access into static arrays before any unsafe.
40    assert!(
41        cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
42        "TSS init_cpu: cpu_index {} >= MAX_CPUS {}",
43        cpu_index,
44        crate::arch::x86_64::percpu::MAX_CPUS,
45    );
46    // SAFETY: Called during init (BSP) or AP bring-up before interrupts are enabled on that CPU.
47    unsafe {
48        let stack_ptr = &raw const IST_STACKS[cpu_index] as *const u8;
49        let stack_end = VirtAddr::from_ptr(stack_ptr) + IST_STACK_SIZE as u64;
50        let mut tss = TaskStateSegment::new();
51        tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = stack_end;
52
53        TSS[cpu_index].write(tss);
54        TSS_INIT[cpu_index].store(true, Ordering::Release);
55
56        let ist_addr = VirtAddr::from_ptr(stack_ptr);
57        log::info!(
58            "TSS[CPU{}] initialized: IST[{}] stack @ {:#x}..{:#x} ({} KB)",
59            cpu_index,
60            DOUBLE_FAULT_IST_INDEX,
61            ist_addr.as_u64(),
62            ist_addr.as_u64() + IST_STACK_SIZE as u64,
63            IST_STACK_SIZE / 1024,
64        );
65    }
66}
67
68/// Get a reference to the TSS for a given CPU index (for GDT descriptor creation).
69pub fn tss_for(cpu_index: usize) -> &'static TaskStateSegment {
70    assert!(
71        cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
72        "tss_for: cpu_index {} >= MAX_CPUS",
73        cpu_index,
74    );
75    if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
76        panic!("TSS for CPU{} not initialized", cpu_index);
77    }
78    // SAFETY: TSS was initialized in init_cpu and lives for 'static.
79    unsafe { &*TSS[cpu_index].as_ptr() }
80}
81
82/// Update TSS.rsp0 — the kernel stack pointer used when transitioning
83/// from Ring 3 to Ring 0 on interrupt/syscall.
84///
85/// Called on every context switch to point to the new task's kernel stack top.
86pub fn set_kernel_stack(stack_top: VirtAddr) {
87    let cpu_index = crate::arch::x86_64::percpu::current_cpu_index();
88    set_kernel_stack_for(cpu_index, stack_top);
89}
90
/// Update TSS.rsp0 for a specific CPU index.
///
/// Best-effort by design: an out-of-range index logs a warning and returns,
/// and a not-yet-initialized TSS slot returns silently, so context switches
/// that happen during early boot do not panic.
pub fn set_kernel_stack_for(cpu_index: usize, stack_top: VirtAddr) {
    if cpu_index >= crate::arch::x86_64::percpu::MAX_CPUS {
        log::warn!("set_kernel_stack_for: cpu_index {} out of range", cpu_index);
        return;
    }
    // Acquire pairs with the Release store in init_cpu; skip quietly if this
    // CPU's TSS has not been published yet (deliberate best-effort).
    if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
        return;
    }
    // SAFETY: cpu_index is bounds-checked and the slot is initialized (checks
    // above). The write goes through a raw pointer, so no reference to a field
    // of the (packed) TaskStateSegment is ever formed. Per the original
    // contract, callers run with interrupts disabled or hold the scheduler
    // lock, so no concurrent writer races on this CPU's rsp0.
    // NOTE(review): the original comment claimed this u64 store "is atomic on
    // x86_64"; TaskStateSegment's layout may align this field to only 4 bytes,
    // in which case tearing is theoretically possible — confirm the alignment
    // or rely solely on the interrupts-disabled contract.
    unsafe {
        let tss = &raw mut *TSS[cpu_index].as_mut_ptr();
        (*tss).privilege_stack_table[0] = stack_top;
    }
}
106}