// strat9_kernel/arch/x86_64/gdt.rs
use core::{
22 mem::MaybeUninit,
23 sync::atomic::{AtomicBool, Ordering},
24};
25use x86_64::{
26 instructions::{
27 segmentation::{Segment, CS, DS, SS},
28 tables::load_tss,
29 },
30 structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector},
31};
32
// Per-CPU GDT storage. Each CPU gets its own table so each can carry its
// own TSS descriptor. A slot is written only by that CPU's `init_cpu`
// call during bring-up; `static mut` access relies on that ordering.
static mut GDT: [MaybeUninit<GlobalDescriptorTable>; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { MaybeUninit::uninit() }; crate::arch::x86_64::percpu::MAX_CPUS];

// Per-CPU snapshot of the selectors produced while building each GDT.
// Read via `selectors_for` only after the matching flag below is set.
static mut SELECTORS: [MaybeUninit<Selectors>; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { MaybeUninit::uninit() }; crate::arch::x86_64::percpu::MAX_CPUS];
// Publication flags: `SELECTORS[i]` may be read only after observing
// `SELECTORS_INIT[i] == true` (Release store in `init_cpu`, Acquire load
// in `selectors_for`).
static SELECTORS_INIT: [AtomicBool; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { AtomicBool::new(false) }; crate::arch::x86_64::percpu::MAX_CPUS];
42
/// Segment selectors recorded when a CPU's GDT was built.
/// `Copy` so `selectors_for` can hand out an owned snapshot.
#[derive(Copy, Clone)]
struct Selectors {
    // Ring-0 code segment (loaded into CS in `init_cpu`).
    kernel_code: SegmentSelector,
    // Ring-0 data segment (loaded into DS/SS in `init_cpu`).
    kernel_data: SegmentSelector,
    // 32-bit (compatibility-mode) user code segment; also the base
    // selector encoded into the STAR MSR (see `star_msr_value`).
    #[allow(dead_code)]
    user_code32: SegmentSelector,
    // Ring-3 data segment.
    user_data: SegmentSelector,
    // 64-bit ring-3 code segment.
    user_code64: SegmentSelector,
    // Task State Segment selector, loaded via `ltr` in `init_cpu`.
    #[allow(dead_code)]
    tss: SegmentSelector,
}
54
55pub fn init() {
63 init_cpu(0);
64}
65
66pub fn init_cpu(cpu_index: usize) {
68 assert!(
70 cpu_index < crate::arch::x86_64::percpu::MAX_CPUS,
71 "GDT init_cpu: cpu_index {} >= MAX_CPUS {}",
72 cpu_index,
73 crate::arch::x86_64::percpu::MAX_CPUS,
74 );
75 unsafe {
77 let gdt = &mut *GDT[cpu_index].as_mut_ptr();
78 *gdt = GlobalDescriptorTable::new();
79
80 let kernel_code = gdt.append(Descriptor::kernel_code_segment());
82 let kernel_data = gdt.append(Descriptor::kernel_data_segment());
84
85 let user_code32_bits: u64 = (1 << 47) | (3 << 45) | (1 << 44) | (1 << 43) | (1 << 41) | (1 << 54); let user_code32 = gdt.append(Descriptor::UserSegment(user_code32_bits));
96
97 let user_data = gdt.append(Descriptor::user_data_segment());
99 let user_code64 = gdt.append(Descriptor::user_code_segment());
101
102 let tss_sel = gdt.append(Descriptor::tss_segment(super::tss::tss_for(cpu_index)));
104
105 gdt.load_unsafe();
106
107 CS::set_reg(kernel_code);
109 DS::set_reg(kernel_data);
110 SS::set_reg(kernel_data);
111
112 load_tss(tss_sel);
114
115 SELECTORS[cpu_index].write(Selectors {
116 kernel_code,
117 kernel_data,
118 user_code32,
119 user_data,
120 user_code64,
121 tss: tss_sel,
122 });
123 SELECTORS_INIT[cpu_index].store(true, Ordering::Release);
124
125 log::info!(
126 "GDT[CPU{}] loaded: CS={:#x} DS/SS={:#x} user32={:#x} user_data={:#x} user64={:#x} TSS={:#x}",
127 cpu_index,
128 kernel_code.0,
129 kernel_data.0,
130 user_code32.0,
131 user_data.0,
132 user_code64.0,
133 tss_sel.0,
134 );
135 }
136}
137
138pub fn kernel_code_selector() -> SegmentSelector {
140 selectors_for(current_cpu_index()).kernel_code
141}
142
143pub fn kernel_data_selector() -> SegmentSelector {
145 selectors_for(current_cpu_index()).kernel_data
146}
147
148pub fn user_code_selector() -> SegmentSelector {
150 let sel = selectors_for(current_cpu_index()).user_code64;
151 SegmentSelector(sel.0 | 3) }
153
154pub fn user_data_selector() -> SegmentSelector {
156 let sel = selectors_for(current_cpu_index()).user_data;
157 SegmentSelector(sel.0 | 3) }
159
160pub fn star_msr_value() -> u64 {
165 let sels = selectors_for(current_cpu_index());
166 let kernel_cs = sels.kernel_code.0 as u64;
167 let user_base = sels.user_code32.0 as u64; (kernel_cs << 32) | (user_base << 48)
169}
170
171fn selectors_for(cpu_index: usize) -> Selectors {
173 if !SELECTORS_INIT[cpu_index].load(Ordering::Acquire) {
174 panic!("GDT selectors for CPU{} not initialized", cpu_index);
175 }
176 unsafe { *SELECTORS[cpu_index].as_ptr() }
178}
179
180fn current_cpu_index() -> usize {
182 crate::arch::x86_64::percpu::current_cpu_index()
183}