strat9_kernel/arch/x86_64/
tss.rs1use core::{
8 mem::MaybeUninit,
9 panic::Location,
10 sync::atomic::{AtomicBool, Ordering},
11};
12use x86_64::{structures::tss::TaskStateSegment, VirtAddr};
13
/// In-memory layout produced by the `sgdt` instruction: a 16-bit table
/// limit followed by the 64-bit linear base address.
///
/// `#[repr(C, packed)]` matches the 10-byte format the CPU stores —
/// no padding between `limit` and `base`.
#[repr(C, packed)]
struct DescriptorTableRegister {
    // Size of the descriptor table in bytes, minus one.
    limit: u16,
    // Linear base address of the descriptor table.
    base: u64,
}
19
/// Snapshot of the TSS currently loaded on this CPU as reported by the
/// hardware (`str` + `sgdt`), independent of this module's own
/// bookkeeping. Produced by [`loaded_tss_info`].
#[derive(Clone, Copy, Debug)]
pub struct LoadedTssInfo {
    /// Segment selector held in the task register (TR).
    pub tr_selector: u16,
    /// Linear base address of the loaded TSS, decoded from its GDT entry.
    pub tss_base: u64,
    /// Ring-0 stack pointer (RSP0) stored in that TSS.
    pub rsp0: u64,
}
26
/// Index into the TSS interrupt stack table reserved for the
/// double-fault handler's dedicated stack (see `init_cpu`).
pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;

/// Size of each per-CPU IST stack: 5 pages (20 KiB).
const IST_STACK_SIZE: usize = 4096 * 5;
32
// Backing storage for the per-CPU IST stacks, one per possible CPU.
// Only ever accessed through raw pointers (see `init_cpu`).
static mut IST_STACKS: [[u8; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS] =
    [[0; IST_STACK_SIZE]; crate::arch::x86_64::percpu::MAX_CPUS];

// Per-CPU TSS storage. An entry is valid only after the matching
// `TSS_INIT` flag has been observed `true` with Acquire ordering.
static mut TSS: [MaybeUninit<TaskStateSegment>; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { MaybeUninit::uninit() }; crate::arch::x86_64::percpu::MAX_CPUS];

// Initialization flags guarding `TSS`: the initializing CPU stores
// `true` with Release after writing the slot; readers load with
// Acquire before dereferencing it.
static TSS_INIT: [AtomicBool; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { AtomicBool::new(false) }; crate::arch::x86_64::percpu::MAX_CPUS];
43
44pub fn init() {
48 init_cpu(0);
49}
50
51pub fn init_cpu(cpu_index: usize) {
53 assert!(
55 cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
56 "TSS init_cpu: cpu_index {} >= MAX_CPUS {}",
57 cpu_index,
58 crate::arch::x86_64::percpu::MAX_CPUS,
59 );
60 unsafe {
62 let stack_ptr = &raw const IST_STACKS[cpu_index] as *const u8;
63 let stack_end = VirtAddr::from_ptr(stack_ptr) + IST_STACK_SIZE as u64;
64 let mut tss = TaskStateSegment::new();
65 tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = stack_end;
66
67 TSS[cpu_index].write(tss);
68 TSS_INIT[cpu_index].store(true, Ordering::Release);
69
70 let ist_addr = VirtAddr::from_ptr(stack_ptr);
71 log::info!(
72 "TSS[CPU{}] initialized: IST[{}] stack @ {:#x}..{:#x} ({} KB)",
73 cpu_index,
74 DOUBLE_FAULT_IST_INDEX,
75 ist_addr.as_u64(),
76 ist_addr.as_u64() + IST_STACK_SIZE as u64,
77 IST_STACK_SIZE / 1024,
78 );
79 }
80}
81
82pub fn tss_for(cpu_index: usize) -> &'static TaskStateSegment {
84 assert!(
85 cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
86 "tss_for: cpu_index {} >= MAX_CPUS",
87 cpu_index,
88 );
89 if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
90 panic!("TSS for CPU{} not initialized", cpu_index);
91 }
92 unsafe { &*TSS[cpu_index].as_ptr() }
94}
95
96pub fn kernel_stack_for(cpu_index: usize) -> Option<VirtAddr> {
98 if cpu_index >= crate::arch::x86_64::percpu::MAX_CPUS {
99 return None;
100 }
101 if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
102 return None;
103 }
104 unsafe {
106 let tss = &*TSS[cpu_index].as_ptr();
107 Some(tss.privilege_stack_table[0])
108 }
109}
110
/// Query the hardware for the TSS currently loaded on this CPU.
///
/// Reads TR with `str` and the GDT location with `sgdt`, then decodes
/// the 16-byte system-segment descriptor TR selects to recover the TSS
/// base and its RSP0 field. Returns `None` when TR holds the null
/// selector, the selector lies outside the GDT limit, or the decoded
/// base is zero. Useful for cross-checking this module's bookkeeping
/// against what the CPU actually has loaded.
pub fn loaded_tss_info() -> Option<LoadedTssInfo> {
    let mut gdtr = DescriptorTableRegister { limit: 0, base: 0 };
    let tr_selector: u16;
    unsafe {
        // SAFETY: `sgdt` stores 10 bytes into `gdtr` (whose packed
        // layout matches the stored format); `str` only reads TR.
        core::arch::asm!(
            "sgdt [{}]",
            in(reg) &mut gdtr,
            options(nostack, preserves_flags),
        );
        core::arch::asm!(
            "str {0:x}",
            out(reg) tr_selector,
            options(nostack, nomem, preserves_flags),
        );
    }

    // Null selector: no TSS has been loaded with `ltr` yet.
    if tr_selector == 0 {
        return None;
    }

    // Mask off the RPL/TI bits (low 3) to get the GDT byte offset.
    let entry_offset = (tr_selector & !0x7) as usize;
    // A 64-bit TSS descriptor spans 16 bytes; `limit` is the table
    // size minus one, so reject entries that would run past the end.
    if entry_offset + 16 > gdtr.limit as usize + 1 {
        return None;
    }

    let (low, high) = unaligned reads below {
        let entry_ptr = (gdtr.base + entry_offset as u64) as *const u64;
        (
            core::ptr::read_unaligned(entry_ptr),
            core::ptr::read_unaligned(entry_ptr.add(1)),
        )
    };

    // Reassemble base[31:0] from the scattered legacy descriptor
    // fields: bits 16..31 -> base[15:0], bits 32..39 -> base[23:16],
    // bits 56..63 -> base[31:24].
    let base_low =
        ((low >> 16) & 0xFFFF) | (((low >> 32) & 0xFF) << 16) | (((low >> 56) & 0xFF) << 24);
    // base[63:32] lives in the low half of the high quadword; the
    // shift discards the reserved upper half.
    let tss_base = base_low | (high << 32);
    if tss_base == 0 {
        return None;
    }

    let rsp0 = unsafe {
        // SAFETY: `tss_base` was decoded from the live GDT entry that
        // TR currently references, so it points at the loaded TSS.
        let tss = &*(tss_base as *const TaskStateSegment);
        tss.privilege_stack_table[0].as_u64()
    };

    Some(LoadedTssInfo {
        tr_selector,
        tss_base,
        rsp0,
    })
}
166
167#[track_caller]
172pub fn set_kernel_stack(stack_top: VirtAddr) {
173 let cpu_index = crate::arch::x86_64::percpu::current_cpu_index();
174 set_kernel_stack_for(cpu_index, stack_top);
175}
176
177#[track_caller]
179pub fn set_kernel_stack_for(cpu_index: usize, stack_top: VirtAddr) {
180 if cpu_index >= crate::arch::x86_64::percpu::MAX_CPUS {
181 log::warn!("set_kernel_stack_for: cpu_index {} out of range", cpu_index);
182 return;
183 }
184 if !TSS_INIT[cpu_index].load(Ordering::Acquire) {
187 return;
188 }
189 let caller = Location::caller();
190 unsafe {
191 let tss = &raw mut *TSS[cpu_index].as_mut_ptr();
192 let old_stack_top = (*tss).privilege_stack_table[0];
193 (*tss).privilege_stack_table[0] = stack_top;
194 crate::e9_println!(
195 "[tss-set] cpu={} old={:#x} new={:#x} caller={}:{}",
196 cpu_index,
197 old_stack_top.as_u64(),
198 stack_top.as_u64(),
199 caller.file(),
200 caller.line()
201 );
202 }
203}