// strat9_kernel/arch/x86_64/tss.rs
1//! Task State Segment (TSS) for Strat9-OS
2//!
3//! The TSS is required for:
4//! - Interrupt Stack Table (IST) entries for safe exception handling
5//! - Ring 3 -> Ring 0 stack switching (privilege_stack_table[0] = rsp0)
6
7use core::{
8    mem::MaybeUninit,
9    panic::Location,
10    sync::atomic::{AtomicBool, Ordering},
11};
12use x86_64::{structures::tss::TaskStateSegment, VirtAddr};
13
/// In-memory image of the GDTR as stored by the `sgdt` instruction:
/// a 16-bit limit immediately followed by a 64-bit linear base.
///
/// `repr(C, packed)` matches the 10-byte layout the CPU writes. Because the
/// struct is packed, copy fields out by value; never take references to them.
#[repr(C, packed)]
struct DescriptorTableRegister {
    // Size of the GDT in bytes, minus one (descriptor-table limit semantics).
    limit: u16,
    // Linear base address of the GDT.
    base: u64,
}
19
/// Snapshot of the TSS currently loaded in the task register (TR),
/// decoded from the live GDT by [`loaded_tss_info`].
#[derive(Clone, Copy, Debug)]
pub struct LoadedTssInfo {
    /// Raw selector value read from TR via the `str` instruction.
    pub tr_selector: u16,
    /// Linear base address reassembled from the TSS descriptor's base fields.
    pub tss_base: u64,
    /// `privilege_stack_table[0]` (rsp0) read from that live TSS.
    pub rsp0: u64,
}
26
/// IST index used for the double fault handler
pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;

/// IST stack size (20 KB)
const IST_STACK_SIZE: usize = 4096 * 5;

/// Static IST stacks for double fault handler (per-CPU)
// NOTE(review): there are no guard pages between per-CPU stacks — an overflow
// of one IST stack runs silently into its neighbor. TODO confirm acceptable.
static mut IST_STACKS: [[u8; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS] =
    [[0; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS];

/// Per-CPU TSS storage
// Written exactly once per CPU by `init_cpu`; readiness is published through
// `TSS_INIT`, so readers must check that flag before dereferencing.
static mut TSS: [MaybeUninit<TaskStateSegment>; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { MaybeUninit::uninit() }; crate::arch::x86_64::percpu::MAX_CPUS];

// Per-CPU "TSS is initialized" flags. The `Release` store in `init_cpu`
// pairs with the `Acquire` loads in the accessors below, publishing the
// corresponding `TSS` entry.
static TSS_INIT: [AtomicBool; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { AtomicBool::new(false) }; crate::arch::x86_64::percpu::MAX_CPUS];
43
/// Initialize the TSS with IST entries
///
/// Must be called before `gdt::init()` since the GDT references the TSS.
/// Convenience wrapper for the bootstrap processor (CPU 0); application
/// processors call [`init_cpu`] directly with their own index.
pub fn init() {
    init_cpu(0);
}
50
/// Initialize the TSS for a given CPU index.
///
/// Points the double-fault IST entry at the top of this CPU's static IST
/// stack, writes the TSS into per-CPU storage, and publishes it via
/// `TSS_INIT` so `tss_for` / `kernel_stack_for` may safely read it.
///
/// # Panics
/// Panics if `cpu_index >= MAX_CPUS`.
pub fn init_cpu(cpu_index: usize) {
    // Bounds check: prevent OOB access into static arrays before any unsafe.
    assert!(
        cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
        "TSS init_cpu: cpu_index {} >= MAX_CPUS {}",
        cpu_index,
        crate::arch::x86_64::percpu::MAX_CPUS,
    );
    // SAFETY: Called during init (BSP) or AP bring-up before interrupts are enabled on that CPU.
    unsafe {
        // `&raw const` forms a raw pointer into the `static mut` array without
        // ever materializing a (UB-prone) shared reference to it.
        let stack_ptr = &raw const IST_STACKS[cpu_index] as *const u8;
        // Stacks grow downward, so the IST entry holds the one-past-the-end address.
        let stack_end = VirtAddr::from_ptr(stack_ptr) + IST_STACK_SIZE as u64;
        let mut tss = TaskStateSegment::new();
        tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = stack_end;

        // NOTE(review): re-running init_cpu for an already-initialized CPU writes
        // a fresh TSS, resetting any rsp0 previously set via set_kernel_stack_for.
        TSS[cpu_index].write(tss);
        // Release store pairs with the Acquire loads in the accessor functions.
        TSS_INIT[cpu_index].store(true, Ordering::Release);

        let ist_addr = VirtAddr::from_ptr(stack_ptr);
        log::info!(
            "TSS[CPU{}] initialized: IST[{}] stack @ {:#x}..{:#x} ({} KB)",
            cpu_index,
            DOUBLE_FAULT_IST_INDEX,
            ist_addr.as_u64(),
            ist_addr.as_u64() + IST_STACK_SIZE as u64,
            IST_STACK_SIZE / 1024,
        );
    }
}
81
82/// Get a reference to the TSS for a given CPU index (for GDT descriptor creation).
83pub fn tss_for(cpu_index: usize) -> &'static TaskStateSegment {
84    assert!(
85        cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
86        "tss_for: cpu_index {} >= MAX_CPUS",
87        cpu_index,
88    );
89    if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
90        panic!("TSS for CPU{} not initialized", cpu_index);
91    }
92    // SAFETY: TSS was initialized in init_cpu and lives for 'static.
93    unsafe { &*TSS[cpu_index].as_ptr() }
94}
95
96/// Return TSS.rsp0 for a specific CPU, if its TSS is initialized.
97pub fn kernel_stack_for(cpu_index: usize) -> Option<VirtAddr> {
98    if cpu_index >= crate::arch::x86_64::percpu::MAX_CPUS {
99        return None;
100    }
101    if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
102        return None;
103    }
104    // SAFETY: The TSS for this CPU is initialized and lives for the whole kernel lifetime.
105    unsafe {
106        let tss = &*TSS[cpu_index].as_ptr();
107        Some(tss.privilege_stack_table[0])
108    }
109}
110
/// Read the TSS currently loaded in TR by decoding the live GDT entry.
///
/// Returns `None` when TR holds the null selector, when the selector's
/// descriptor does not fit inside the GDT limit, or when the decoded TSS
/// base is zero.
pub fn loaded_tss_info() -> Option<LoadedTssInfo> {
    let mut gdtr = DescriptorTableRegister { limit: 0, base: 0 };
    let tr_selector: u16;
    // SAFETY: `sgdt`/`str` are privileged register reads with no side effect.
    unsafe {
        core::arch::asm!(
            "sgdt [{}]",
            in(reg) &mut gdtr,
            options(nostack, preserves_flags),
        );
        // `{0:x}` selects the 16-bit form of the register for `str`.
        core::arch::asm!(
            "str {0:x}",
            out(reg) tr_selector,
            options(nostack, nomem, preserves_flags),
        );
    }

    // Null selector: no TSS loaded in TR.
    if tr_selector == 0 {
        return None;
    }

    // Strip the RPL/TI bits (low 3) to get the byte offset into the GDT.
    let entry_offset = (tr_selector & !0x7) as usize;
    // A 64-bit TSS descriptor is 16 bytes; `limit` is size-minus-one, so the
    // entry fits iff offset + 16 <= limit + 1.
    if entry_offset + 16 > gdtr.limit as usize + 1 {
        return None;
    }

    // SAFETY: The GDTR base/limit were read from the CPU and bounds-checked above.
    let (low, high) = unsafe {
        let entry_ptr = (gdtr.base + entry_offset as u64) as *const u64;
        (
            core::ptr::read_unaligned(entry_ptr),
            core::ptr::read_unaligned(entry_ptr.add(1)),
        )
    };

    // Reassemble base[31:0] from the descriptor's scattered fields:
    // bits 16..32 -> base[15:0], bits 32..40 -> base[23:16], bits 56..64 -> base[31:24].
    let base_low =
        ((low >> 16) & 0xFFFF) | (((low >> 32) & 0xFF) << 16) | (((low >> 56) & 0xFF) << 24);
    // The second qword's low 32 bits hold base[63:32]; its reserved upper
    // bits shift out harmlessly.
    let tss_base = base_low | (high << 32);
    if tss_base == 0 {
        return None;
    }

    // SAFETY: The live TSS base comes from the CPU's TSS descriptor.
    let rsp0 = unsafe {
        let tss = &*(tss_base as *const TaskStateSegment);
        tss.privilege_stack_table[0].as_u64()
    };

    Some(LoadedTssInfo {
        tr_selector,
        tss_base,
        rsp0,
    })
}
166
167/// Update TSS.rsp0 : the kernel stack pointer used when transitioning
168/// from Ring 3 to Ring 0 on interrupt/syscall.
169///
170/// Called on every context switch to point to the new task's kernel stack top.
171#[track_caller]
172pub fn set_kernel_stack(stack_top: VirtAddr) {
173    let cpu_index = crate::arch::x86_64::percpu::current_cpu_index();
174    set_kernel_stack_for(cpu_index, stack_top);
175}
176
177/// Update TSS.rsp0 for a specific CPU index.
178#[track_caller]
179pub fn set_kernel_stack_for(cpu_index: usize, stack_top: VirtAddr) {
180    if cpu_index >= crate::arch::x86_64::percpu::MAX_CPUS {
181        log::warn!("set_kernel_stack_for: cpu_index {} out of range", cpu_index);
182        return;
183    }
184    // SAFETY: privilege_stack_table[0] is a VirtAddr (u64), writes are atomic on x86_64.
185    // Called with interrupts disabled or from the scheduler with lock held.
186    if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
187        return;
188    }
189    let caller = Location::caller();
190    unsafe {
191        let tss = &raw mut *TSS[cpu_index].as_mut_ptr();
192        let old_stack_top = (*tss).privilege_stack_table[0];
193        (*tss).privilege_stack_table[0] = stack_top;
194        crate::e9_println!(
195            "[tss-set] cpu={} old={:#x} new={:#x} caller={}:{}",
196            cpu_index,
197            old_stack_top.as_u64(),
198            stack_top.as_u64(),
199            caller.file(),
200            caller.line()
201        );
202    }
203}