strat9_kernel/arch/x86_64/
apic.rs1use crate::memory;
8use core::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
9
/// Set once `init()` has completed on the BSP; gates the register-access
/// fast path in `lapic_id()` and is checked by `init_ap()`.
static APIC_INITIALIZED: AtomicBool = AtomicBool::new(false);

/// True when the LAPIC runs in x2APIC (MSR-based) mode, false for xAPIC (MMIO).
static APIC_X2_MODE: AtomicBool = AtomicBool::new(false);

/// Virtual address of the xAPIC MMIO register page (0 until `init()` maps it).
static APIC_BASE_VIRT: AtomicU64 = AtomicU64::new(0);

/// Physical LAPIC base taken from the IA32_APIC_BASE MSR during `init()`.
static APIC_BASE_PHYS: AtomicU64 = AtomicU64::new(0);

/// Per-CPU shadow of the most recent ICR_HIGH write. Used to emulate the
/// xAPIC two-register ICR protocol on top of the single 64-bit x2APIC ICR MSR
/// (see `write_reg`); one slot per CPU so concurrent senders don't race.
static ICR_HIGH_SHADOW: [AtomicU32; crate::arch::x86_64::percpu::MAX_CPUS] =
    [const { AtomicU32::new(0) }; crate::arch::x86_64::percpu::MAX_CPUS];
23
24pub fn lapic_phys() -> u64 {
26 APIC_BASE_PHYS.load(Ordering::Relaxed)
27}
28
// xAPIC register offsets (from the MMIO base). In x2APIC mode these are
// translated to MSR numbers by `x2apic_msr_for_reg`.
const REG_ID: u32 = 0x020; // local APIC ID
#[allow(dead_code)]
const REG_VERSION: u32 = 0x030; // version / max LVT entries
const REG_TPR: u32 = 0x080; // task priority register
const REG_EOI: u32 = 0x0B0; // end-of-interrupt (write-only)
const REG_SPURIOUS: u32 = 0x0F0; // spurious-interrupt vector + software enable
pub const REG_ESR: u32 = 0x280; // error status register
pub const REG_ICR_LOW: u32 = 0x300; // interrupt command, low dword (triggers IPI)
pub const REG_ICR_HIGH: u32 = 0x310; // interrupt command, high dword (destination)
pub const REG_LVT_TIMER: u32 = 0x320; // LVT timer entry
pub const REG_TIMER_INIT: u32 = 0x380; // timer initial count
pub const REG_TIMER_CURRENT: u32 = 0x390; // timer current count
pub const REG_TIMER_DIVIDE: u32 = 0x3E0; // timer divide configuration

// LVT timer entry bits.
pub const LVT_TIMER_PERIODIC: u32 = 1 << 17; // periodic (vs one-shot) mode
#[allow(dead_code)]
pub const LVT_TIMER_MASKED: u32 = 1 << 16; // mask the timer interrupt
pub const LVT_TIMER_VECTOR: u8 = 0xD2; // IDT vector used for the LAPIC timer

// IA32_APIC_BASE MSR layout.
const IA32_APIC_BASE_MSR: u32 = 0x1B;
const APIC_BASE_ADDR_MASK: u64 = 0x000F_FFFF_FFFF_F000; // physical base field
const APIC_BASE_ENABLE: u64 = 1 << 11; // global APIC enable
const APIC_BASE_EXTD: u64 = 1 << 10; // x2APIC mode enable

// Vector delivered for spurious interrupts (no EOI required).
const SPURIOUS_VECTOR: u8 = 0xFF;

// IPI vector used to trigger a reschedule on a remote CPU.
pub const IPI_RESCHED_VECTOR: u8 = 0xE0;

// IPI vector used for TLB-shootdown requests.
pub const IPI_TLB_SHOOTDOWN_VECTOR: u8 = 0xF0;
84
85pub fn is_present() -> bool {
87 let (_eax, _ebx, _ecx, edx) = super::cpuid(1, 0);
88 edx & (1 << 9) != 0
90}
91
92pub fn is_x2apic_supported() -> bool {
94 let (_eax, _ebx, ecx, _edx) = super::cpuid(1, 0);
95 ecx & (1 << 21) != 0
97}
98
99pub fn lapic_id() -> u32 {
104 if APIC_INITIALIZED.load(Ordering::Relaxed) {
105 let raw = unsafe { read_reg(REG_ID) };
107 if APIC_X2_MODE.load(Ordering::Relaxed) {
108 return raw;
109 }
110 return raw >> 24;
111 }
112
113 let (_eax, ebx, _ecx, _edx) = super::cpuid(1, 0);
114 (ebx >> 24) & 0xFF
115}
116
117pub fn is_initialized() -> bool {
119 APIC_INITIALIZED.load(Ordering::Relaxed)
120}
121
122pub fn is_x2apic_enabled() -> bool {
124 APIC_X2_MODE.load(Ordering::Relaxed)
125}
126
/// Map an xAPIC MMIO register offset to its x2APIC MSR number, or `None`
/// for offsets this driver does not route through MSR space (the ICR is
/// handled specially in `read_reg`/`write_reg`).
#[inline]
fn x2apic_msr_for_reg(offset: u32) -> Option<u32> {
    let msr = match offset {
        REG_ID => 0x802,
        REG_VERSION => 0x803,
        REG_TPR => 0x808,
        REG_EOI => 0x80B,
        REG_SPURIOUS => 0x80F,
        REG_ESR => 0x828,
        REG_LVT_TIMER => 0x832,
        REG_TIMER_INIT => 0x838,
        REG_TIMER_CURRENT => 0x839,
        REG_TIMER_DIVIDE => 0x83E,
        _ => return None,
    };
    Some(msr)
}
143
144#[inline]
145fn current_cpu_slot() -> usize {
146 let idx = crate::arch::x86_64::percpu::current_cpu_index();
147 if idx < crate::arch::x86_64::percpu::MAX_CPUS {
148 idx
149 } else {
150 0
151 }
152}
153
154pub unsafe fn read_reg(offset: u32) -> u32 {
159 if APIC_X2_MODE.load(Ordering::Relaxed) {
160 return match offset {
161 REG_ICR_LOW => super::rdmsr(0x830) as u32,
162 REG_ICR_HIGH => ((super::rdmsr(0x830) >> 32) as u32) << 24,
163 _ => {
164 let Some(msr) = x2apic_msr_for_reg(offset) else {
165 return 0;
166 };
167 super::rdmsr(msr) as u32
168 }
169 };
170 }
171
172 let addr = APIC_BASE_VIRT.load(Ordering::Relaxed) + offset as u64;
173 unsafe { core::ptr::read_volatile(addr as *const u32) }
175}
176
177pub unsafe fn write_reg(offset: u32, value: u32) {
182 if APIC_X2_MODE.load(Ordering::Relaxed) {
183 match offset {
184 REG_ICR_HIGH => {
185 let cpu = current_cpu_slot();
186 let dest = if value & 0x00FF_FFFF == 0 {
189 value >> 24
190 } else {
191 value
192 };
193 ICR_HIGH_SHADOW[cpu].store(dest, Ordering::Relaxed);
194 }
195 REG_ICR_LOW => {
196 let cpu = current_cpu_slot();
197 let dest = ICR_HIGH_SHADOW[cpu].load(Ordering::Relaxed) as u64;
198 super::wrmsr(0x830, (dest << 32) | value as u64);
199 }
200 _ => {
201 if let Some(msr) = x2apic_msr_for_reg(offset) {
202 super::wrmsr(msr, value as u64);
203 }
204 }
205 }
206 return;
207 }
208
209 let addr = APIC_BASE_VIRT.load(Ordering::Relaxed) + offset as u64;
210 unsafe { core::ptr::write_volatile(addr as *mut u32, value) }
212}
213
214pub fn init(madt_lapic_addr: u32) {
219 let apic_base_msr = super::rdmsr(IA32_APIC_BASE_MSR);
221 let apic_phys = apic_base_msr & APIC_BASE_ADDR_MASK;
222
223 if apic_phys != madt_lapic_addr as u64 {
224 log::warn!(
225 "LAPIC: MSR base 0x{:X} differs from MADT 0x{:X}, using MSR",
226 apic_phys,
227 madt_lapic_addr
228 );
229 }
230
231 let use_x2apic = if is_x2apic_supported() {
232 if apic_base_msr & APIC_BASE_ENABLE == 0 {
233 super::wrmsr(IA32_APIC_BASE_MSR, apic_base_msr | APIC_BASE_ENABLE);
234 }
235 super::wrmsr(
236 IA32_APIC_BASE_MSR,
237 apic_base_msr | APIC_BASE_ENABLE | APIC_BASE_EXTD,
238 );
239 let verify = super::rdmsr(IA32_APIC_BASE_MSR);
240 verify & (APIC_BASE_ENABLE | APIC_BASE_EXTD) == (APIC_BASE_ENABLE | APIC_BASE_EXTD)
241 } else {
242 if apic_base_msr & APIC_BASE_ENABLE == 0 {
243 super::wrmsr(IA32_APIC_BASE_MSR, apic_base_msr | APIC_BASE_ENABLE);
244 }
245 false
246 };
247 APIC_X2_MODE.store(use_x2apic, Ordering::Release);
248
249 let apic_virt = memory::phys_to_virt(apic_phys);
251 APIC_BASE_VIRT.store(apic_virt, Ordering::Relaxed);
252 APIC_BASE_PHYS.store(apic_phys, Ordering::Relaxed);
253
254 crate::serial_println!(
256 "[apic] init: hhdm={:#x} lapic_phys={:#x} lapic_virt={:#x}",
257 memory::hhdm_offset(),
258 apic_phys,
259 apic_virt
260 );
261 if apic_virt == apic_phys {
262 crate::serial_println!(
263 "[apic] WARN: lapic_virt == lapic_phys (HHDM offset is 0!) \
264 The LAPIC MMIO is identity-mapped at a low address. \
265 Kernel MMIO entries will be propagated to user page tables."
266 );
267 }
268
269 unsafe {
271 write_reg(REG_ESR, 0);
273 write_reg(REG_ESR, 0);
274
275 write_reg(REG_TPR, 0);
277
278 write_reg(REG_SPURIOUS, 0x100 | SPURIOUS_VECTOR as u32);
280 }
281
282 APIC_INITIALIZED.store(true, Ordering::Relaxed);
283
284 let id = lapic_id();
285 log::info!(
286 "LAPIC: initialized at phys=0x{:X} virt=0x{:X} (ID={}, mode={})",
287 apic_phys,
288 apic_virt,
289 id,
290 if use_x2apic { "x2APIC" } else { "xAPIC" }
291 );
292}
293
294pub fn init_ap() {
299 if !APIC_INITIALIZED.load(Ordering::Relaxed) {
300 log::warn!("LAPIC: init_ap called before init");
301 return;
302 }
303
304 if APIC_X2_MODE.load(Ordering::Acquire) {
305 let base = super::rdmsr(IA32_APIC_BASE_MSR);
306 if base & (APIC_BASE_ENABLE | APIC_BASE_EXTD) != (APIC_BASE_ENABLE | APIC_BASE_EXTD) {
307 if base & APIC_BASE_ENABLE == 0 {
308 super::wrmsr(IA32_APIC_BASE_MSR, base | APIC_BASE_ENABLE);
309 }
310 super::wrmsr(
311 IA32_APIC_BASE_MSR,
312 base | APIC_BASE_ENABLE | APIC_BASE_EXTD,
313 );
314 }
315 }
316
317 unsafe {
319 write_reg(REG_ESR, 0);
320 write_reg(REG_ESR, 0);
321 write_reg(REG_TPR, 0);
322 write_reg(REG_SPURIOUS, 0x100 | SPURIOUS_VECTOR as u32);
323 }
324}
325
326#[inline]
328pub fn eoi() {
329 unsafe {
331 write_reg(REG_EOI, 0);
332 }
333}
334
335pub fn send_ipi_raw(target_apic_id: u32, icr_low: u32) {
339 unsafe {
340 write_reg(REG_ESR, 0);
341 if APIC_X2_MODE.load(Ordering::Relaxed) {
342 super::wrmsr(0x830, ((target_apic_id as u64) << 32) | icr_low as u64);
343 } else {
344 write_reg(REG_ICR_HIGH, target_apic_id << 24);
345 write_reg(REG_ICR_LOW, icr_low);
346 }
347 }
348}
349
350pub fn send_resched_ipi(target_apic_id: u32) {
359 send_ipi_raw(target_apic_id, IPI_RESCHED_VECTOR as u32 | (1 << 14));
360}