1#![allow(dead_code)]
13
14use crate::{
15 hardware::pci_client::{self as pci, Bar, ProbeCriteria},
16 memory::{allocate_dma_frame, paging, phys_to_virt},
17};
18use alloc::{sync::Arc, vec::Vec};
19use core::sync::atomic::{AtomicBool, AtomicU8, Ordering};
20use spin::Mutex;
21
// Size of the BAR0 MMIO window to map for the controller.
const XHCI_MMIO_SIZE: usize = 0x10000;
// Port register sets start 0x400 bytes after the operational registers,
// one 0x10-byte set per root-hub port.
const XHCI_PORT_REG_BASE: usize = 0x400;
const XHCI_PORT_REG_STRIDE: usize = 0x10;

// USBCMD (USB Command register) bits.
const USBCMD_RUN_STOP: u32 = 1 << 0; // Run/Stop
const USBCMD_HCRST: u32 = 1 << 1; // Host Controller Reset (self-clearing)
const USBCMD_INTE: u32 = 1 << 2; // Interrupter Enable

// USBSTS (USB Status register) bits.
const USBSTS_HCH: u32 = 1 << 0; // HCHalted
const USBSTS_CNR: u32 = 1 << 11; // Controller Not Ready

// When true, `init` only resets the controller and snapshots port state;
// no rings, device contexts or slots are set up.
const XHCI_SAFE_PROBE_ONLY: bool = true;

// PORTSC (port status/control) bits.
const PORTSC_CCS: u32 = 1 << 0; // Current Connect Status
const PORTSC_PED: u32 = 1 << 1; // Port Enabled/Disabled
const PORTSC_PR: u32 = 1 << 4; // Port Reset
const PORTSC_PP: u32 = 1 << 9; // Port Power
const PORTSC_SPEED_SHIFT: u32 = 10; // port speed field starts at bit 10
// Write-1-to-clear status bits; presumably for masking on read-modify-write
// of PORTSC (not yet referenced in this chunk).
const PORTSC_W1C_MASK: u32 = 0xFE0000;

// TRB type codes, placed in TRB dword 3 at TRB_TYPE_SHIFT.
const TRB_TYPE_NORMAL: u32 = 1;
const TRB_TYPE_SETUP_STAGE: u32 = 2;
const TRB_TYPE_DATA_STAGE: u32 = 3;
const TRB_TYPE_STATUS_STAGE: u32 = 4;
const TRB_TYPE_ENABLE_SLOT: u32 = 9;
const TRB_TYPE_ADDRESS_DEVICE: u32 = 11;
const TRB_TYPE_CONFIGURE_ENDPOINT: u32 = 12;
const TRB_TYPE_TRANSFER_EVENT: u32 = 32;

// TRB dword-3 flag bits.
const TRB_CYCLE: u32 = 1 << 0; // Cycle bit (ring ownership)
const TRB_IOC: u32 = 1 << 5; // Interrupt On Completion
const TRB_DIR_IN: u32 = 1 << 16; // data direction IN

// Endpoint context EP Type field values.
const EP_TYPE_CONTROL: u32 = 4;
const EP_TYPE_INTR_IN: u32 = 7;
/// xHCI capability register block at the start of the mapped MMIO window.
/// Pure layout description; fields are accessed through raw pointers.
#[repr(C)]
struct CapRegisters {
    caplength: u8, // byte offset of the operational register block
    _reserved: u8,
    _hciversion: u16,
    hcsparams1: u32, // bits 31:24 = MaxPorts, bits 7:0 = MaxSlots (see init)
    _hcsparams2: u32,
    _hcsparams3: u32,
    _hccparams1: u32,
    dboff: u32,  // doorbell array offset from MMIO base
    rtsoff: u32, // runtime register space offset from MMIO base
    _hccparams2: u32,
}
72
/// xHCI operational registers, located CAPLENGTH bytes into the MMIO window.
#[repr(C)]
struct OpRegisters {
    usbcmd: u32, // USB Command
    usbsts: u32, // USB Status
    _pagesize: u32,
    _reserved0: [u32; 2],
    _dnctrl: u32,
    crcr: u64, // Command Ring Control Register
    _reserved1: [u32; 4],
    dcbaap: u64, // Device Context Base Address Array Pointer
    config: u32, // number of enabled device slots is written here in init
}
85
/// xHCI runtime registers (RTSOFF bytes into the MMIO window).
#[repr(C)]
struct RuntimeRegisters {
    _mfindex: u32,
    _reserved: [u32; 7],
    // Only interrupter 0 is modelled; hardware may expose more after it.
    ir: [InterrupterRegisters; 1],
}
92
/// Register set for a single xHCI interrupter.
#[repr(C)]
struct InterrupterRegisters {
    iman: u32, // Interrupter Management (pending/enable)
    _imod: u32,
    erstsz: u32, // Event Ring Segment Table Size (entry count)
    _reserved: u32,
    erstba: u64, // Event Ring Segment Table Base Address
    erdp: u64,   // Event Ring Dequeue Pointer
}
102
/// A 16-byte Transfer Request Block: four little-endian dwords whose meaning
/// depends on the TRB type encoded in `d3` at `TRB_TYPE_SHIFT`.
#[repr(C)]
#[derive(Clone, Copy)]
struct Trb {
    d0: u32,
    d1: u32,
    d2: u32,
    d3: u32,
}
111
112impl Trb {
113 fn link(addr: u64, toggle_cycle: bool) -> Self {
115 Self {
116 d0: (addr & 0xFFFFFFFF) as u32,
117 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
118 d2: 0,
119 d3: ((TRB_TYPE_LINK << TRB_TYPE_SHIFT) as u32)
120 | TRB_CYCLE
121 | (if toggle_cycle { TRB_TC } else { 0 }),
122 }
123 }
124
125 fn normal(addr: u64, len: u32, cycle: bool, ioc: bool) -> Self {
127 let mut d3 = (TRB_TYPE_NORMAL << TRB_TYPE_SHIFT) as u32 | if cycle { TRB_CYCLE } else { 0 };
128 if ioc {
129 d3 |= TRB_IOC;
130 }
131 Self {
132 d0: (addr & 0xFFFFFFFF) as u32,
133 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
134 d2: len,
135 d3,
136 }
137 }
138}
139
// Link TRB type code and the bit position of the type field in TRB dword 3.
const TRB_TYPE_LINK: u32 = 6;
const TRB_TYPE_SHIFT: u32 = 10;
// Toggle Cycle flag of a Link TRB.
const TRB_TC: u32 = 1 << 1;
143
/// xHCI Slot Context: eight raw dwords. Declared for future slot management
/// commands; not referenced in this chunk (file allows dead_code).
#[repr(C, packed)]
struct SlotContext {
    d0: u32,
    d1: u32,
    d2: u32,
    d3: u32,
    d4: u32,
    d5: u32,
    d6: u32,
    d7: u32,
}
155
/// xHCI Endpoint Context: eight raw dwords. Declared for future endpoint
/// configuration; not referenced in this chunk.
#[repr(C, packed)]
struct EndpointContext {
    d0: u32,
    d1: u32,
    d2: u32,
    d3: u32,
    d4: u32,
    d5: u32,
    d6: u32,
    d7: u32,
}
167
/// xHCI Input Control Context (drop/add flags plus reserved dwords).
/// Not referenced in this chunk.
#[repr(C, packed)]
struct InputControlContext {
    d0: u32,
    d1: u32,
    d2: [u32; 30],
}
174
/// xHCI Input Context: control context followed by slot and 31 endpoint
/// contexts. Not referenced in this chunk.
#[repr(C, packed)]
struct InputContext {
    ctrl: InputControlContext,
    slot: SlotContext,
    eps: [EndpointContext; 31],
}
181
/// Snapshot of one root-hub port's PORTSC state taken during `init`.
struct XhciPort {
    port_num: usize, // zero-based port index
    enabled: bool,   // PORTSC.PED at probe time
    connected: bool, // PORTSC.CCS at probe time
    speed: u8,       // PORTSC speed field (4 bits starting at bit 10)
}
188
/// Driver state for one xHCI host controller.
///
/// The raw pointers reference the mapped MMIO window and DMA pages obtained
/// from the frame allocator.
pub struct XhciController {
    mmio_base: usize, // virtual base address of the mapped BAR0 window
    cap_regs: *const CapRegisters,
    op_regs: *mut OpRegisters,
    rt_regs: *mut RuntimeRegisters,
    db_regs: *mut u32, // doorbell array
    caplength: u8,
    max_ports: usize,
    ports: Vec<XhciPort>, // per-port snapshot captured during init
    device_ctx: *mut u8, // device context array page (virtual)
    device_ctx_phys: u64,
    cmd_ring: *mut Trb,
    cmd_ring_phys: u64,
    cmd_ring_deq: usize, // NOTE: used as the *enqueue* index by cmd_ring_enqueue
    cmd_ring_cycle: bool, // producer cycle state of the command ring
    event_ring: *mut Trb,
    event_ring_phys: u64,
    event_ring_deq: usize, // consumer dequeue index into the event ring
    event_ring_cycle: bool, // consumer cycle state of the event ring
    slot_id: AtomicU8, // slot granted by Enable Slot; 0 = none yet
}
210
// SAFETY: the raw pointers are to device MMIO and DMA pages that remain
// mapped for the controller's lifetime. NOTE(review): `&self` methods write
// hardware registers (e.g. write_portsc) without internal locking — confirm
// that all cross-thread use goes through external synchronization (the
// controllers live behind a Mutex'd Vec below) before relying on Sync.
unsafe impl Send for XhciController {}
unsafe impl Sync for XhciController {}
213
214impl XhciController {
215 pub unsafe fn new(pci_dev: pci::PciDevice) -> Result<Arc<Self>, &'static str> {
217 let bar = match pci_dev.read_bar(0) {
218 Some(Bar::Memory64 { addr, .. }) => addr,
219 _ => return Err("Invalid BAR"),
220 };
221 paging::ensure_identity_map_range(bar, XHCI_MMIO_SIZE as u64);
222
223 let mmio_base = phys_to_virt(bar) as usize;
224 let cap_regs = mmio_base as *const CapRegisters;
225 let caplength = (*cap_regs).caplength;
226 let op_regs = (mmio_base + caplength as usize) as *mut OpRegisters;
227
228 let dboff = (*cap_regs).dboff;
229 let db_regs = (mmio_base + dboff as usize) as *mut u32;
230
231 let rtsoff = (*cap_regs).rtsoff;
232 let rt_regs = (mmio_base + rtsoff as usize) as *mut RuntimeRegisters;
233
234 let max_ports = (((*cap_regs).hcsparams1 >> 24) & 0xFF) as usize;
235
236 let mut controller = Self {
237 mmio_base,
238 cap_regs,
239 op_regs,
240 rt_regs,
241 db_regs,
242 caplength,
243 max_ports,
244 ports: Vec::new(),
245 device_ctx: core::ptr::null_mut(),
246 device_ctx_phys: 0,
247 cmd_ring: core::ptr::null_mut(),
248 cmd_ring_phys: 0,
249 cmd_ring_deq: 0,
250 cmd_ring_cycle: true,
251 event_ring: core::ptr::null_mut(),
252 event_ring_phys: 0,
253 event_ring_deq: 0,
254 event_ring_cycle: true,
255 slot_id: AtomicU8::new(0),
256 };
257
258 controller.init()?;
259 Ok(Arc::new(controller))
260 }
261
262 fn init(&mut self) -> Result<(), &'static str> {
264 unsafe {
265 let op = &mut *self.op_regs;
266
267 for _ in 0..100_000 {
268 if op.usbsts & USBSTS_CNR == 0 {
269 break;
270 }
271 core::hint::spin_loop();
272 }
273 if op.usbsts & USBSTS_CNR != 0 {
274 return Err("xHCI: controller not ready (CNR)");
275 }
276
277 op.usbcmd &= !USBCMD_RUN_STOP;
278 for _ in 0..100_000 {
279 if op.usbsts & USBSTS_HCH != 0 {
280 break;
281 }
282 core::hint::spin_loop();
283 }
284 if op.usbsts & USBSTS_HCH == 0 {
285 return Err("xHCI: controller did not halt");
286 }
287
288 op.usbcmd |= USBCMD_HCRST;
289 for _ in 0..100_000 {
290 if op.usbcmd & USBCMD_HCRST == 0 {
291 break;
292 }
293 core::hint::spin_loop();
294 }
295 if op.usbcmd & USBCMD_HCRST != 0 {
296 return Err("xHCI: controller reset timed out");
297 }
298 let mut cnr_timeout = 1_000_000u32;
299 while op.usbsts & USBSTS_CNR != 0 {
300 if cnr_timeout == 0 {
301 return Err("xHCI: CNR did not clear after reset");
302 }
303 cnr_timeout -= 1;
304 core::hint::spin_loop();
305 }
306
307 for i in 0..self.max_ports {
308 let portsc = self.read_portsc(i);
309 self.ports.push(XhciPort {
310 port_num: i,
311 enabled: (portsc & PORTSC_PED) != 0,
312 connected: (portsc & PORTSC_CCS) != 0,
313 speed: ((portsc >> PORTSC_SPEED_SHIFT) & 0xF) as u8,
314 });
315 }
316
317 if XHCI_SAFE_PROBE_ONLY {
318 op.usbcmd |= USBCMD_RUN_STOP;
319 op.usbcmd &= !USBCMD_INTE;
320 return Ok(());
321 }
322
323 self.init_rings()?;
324 self.init_interrupter()?;
325
326 op.usbcmd |= USBCMD_RUN_STOP;
327 op.usbcmd &= !USBCMD_INTE;
328 let max_slots = (*self.cap_regs).hcsparams1 & 0xFF;
329 op.config = max_slots;
330
331 self.enable_slot()?;
332 }
333 Ok(())
334 }
335
336 unsafe fn init_rings(&mut self) -> Result<(), &'static str> {
338 let cmd_frame = allocate_dma_frame().ok_or("Failed to allocate cmd ring")?;
339 self.cmd_ring_phys = cmd_frame.start_address.as_u64();
340 self.cmd_ring = phys_to_virt(self.cmd_ring_phys) as *mut Trb;
341 core::ptr::write_bytes(self.cmd_ring as *mut u8, 0, 4096);
342
343 let event_frame = allocate_dma_frame().ok_or("Failed to allocate event ring")?;
344 self.event_ring_phys = event_frame.start_address.as_u64();
345 self.event_ring = phys_to_virt(self.event_ring_phys) as *mut Trb;
346 core::ptr::write_bytes(self.event_ring as *mut u8, 0, 4096);
347
348 let dev_frame = allocate_dma_frame().ok_or("Failed to allocate device context")?;
349 self.device_ctx_phys = dev_frame.start_address.as_u64();
350 self.device_ctx = phys_to_virt(self.device_ctx_phys) as *mut u8;
351 core::ptr::write_bytes(self.device_ctx, 0, 4096);
352
353 let dcbaap = &mut (*self.op_regs).dcbaap;
354 *dcbaap = self.device_ctx_phys;
355
356 Ok(())
357 }
358
359 unsafe fn init_interrupter(&mut self) -> Result<(), &'static str> {
361 let erst_frame = allocate_dma_frame().ok_or("Failed to allocate ERST")?;
362 let erst_phys = erst_frame.start_address.as_u64();
363 let erst_virt = phys_to_virt(erst_phys) as *mut u64;
364 core::ptr::write_bytes(erst_virt as *mut u8, 0, 4096);
365
366 let erst_entry = erst_virt as *mut u8;
367 let addr_bytes = self.event_ring_phys.to_le_bytes();
368 core::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), erst_entry, 8);
369 let seg_size: u32 = 64;
370 let size_bytes = seg_size.to_le_bytes();
371 core::ptr::copy_nonoverlapping(size_bytes.as_ptr(), erst_entry.add(8), 4);
372
373 let ir = &mut (*self.rt_regs).ir[0];
374 ir.erstsz = 1;
375 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
376 ir.erstba = erst_phys;
377 ir.iman = 3;
378 ir.erdp = self.event_ring_phys;
379
380 Ok(())
381 }
382
383 unsafe fn read_portsc(&self, port: usize) -> u32 {
385 let port_offset = XHCI_PORT_REG_BASE + (port * XHCI_PORT_REG_STRIDE);
386 let portsc_ptr = (self.op_regs as *const u8).add(port_offset) as *const u32;
387 portsc_ptr.read_volatile()
388 }
389
390 unsafe fn write_portsc(&self, port: usize, val: u32) {
392 let port_offset = XHCI_PORT_REG_BASE + (port * XHCI_PORT_REG_STRIDE);
393 let portsc_ptr = (self.op_regs as *const u8).add(port_offset) as *mut u32;
394 portsc_ptr.write_volatile(val);
395 }
396
397 unsafe fn enable_slot(&mut self) -> Result<(), &'static str> {
399 self.cmd_ring_enqueue(Trb {
400 d0: 0,
401 d1: 0,
402 d2: 0,
403 d3: (TRB_TYPE_ENABLE_SLOT << TRB_TYPE_SHIFT) as u32 | TRB_CYCLE,
404 });
405
406 let event = self.wait_for_event()?;
407 let completion_code = (event.d2 >> 24) & 0xFF;
408 if completion_code != 1 {
409 return Err("Enable slot failed");
410 }
411 let slot_id = ((event.d3 >> 24) & 0xFF) as u8;
412 if slot_id == 0 {
413 return Err("No slot available");
414 }
415 self.slot_id.store(slot_id, Ordering::SeqCst);
416
417 Ok(())
418 }
419
420 unsafe fn cmd_ring_enqueue(&mut self, trb: Trb) {
422 let idx = self.cmd_ring_deq;
423 let mut trb = trb;
424 if self.cmd_ring_cycle {
425 trb.d3 |= TRB_CYCLE;
426 } else {
427 trb.d3 &= !TRB_CYCLE;
428 }
429 core::ptr::write_volatile(self.cmd_ring.add(idx), trb);
430 self.cmd_ring_deq = idx + 1;
431
432 if self.cmd_ring_deq >= 63 {
433 let link = Trb::link(self.cmd_ring_phys, true);
434 let mut link_trb = link;
435 if self.cmd_ring_cycle {
436 link_trb.d3 |= TRB_CYCLE;
437 } else {
438 link_trb.d3 &= !TRB_CYCLE;
439 }
440 core::ptr::write_volatile(self.cmd_ring.add(63), link_trb);
441 self.cmd_ring_deq = 0;
442 self.cmd_ring_cycle = !self.cmd_ring_cycle;
443 }
444
445 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
446 core::ptr::write_volatile(self.db_regs.add(0), 0);
447 }
448
449 unsafe fn wait_for_event(&mut self) -> Result<Trb, &'static str> {
451 for _ in 0..1000000 {
452 let idx = self.event_ring_deq;
453 let trb = core::ptr::read_volatile(self.event_ring.add(idx));
454
455 let expected_c = if self.event_ring_cycle { TRB_CYCLE } else { 0 };
456 if (trb.d3 & TRB_CYCLE) == expected_c {
457 self.event_ring_deq = (idx + 1) % 64;
458 if self.event_ring_deq == 0 {
459 self.event_ring_cycle = !self.event_ring_cycle;
460 }
461 let ir = &mut (*self.rt_regs).ir[0];
462 ir.erdp = (self.event_ring_phys + (self.event_ring_deq as u64) * 16) | (1 << 3);
463 return Ok(trb);
464 }
465 core::hint::spin_loop();
466 }
467 Err("Event timeout")
468 }
469
470 pub fn port_count(&self) -> usize {
472 self.max_ports
473 }
474
475 pub fn is_port_connected(&self, port: usize) -> bool {
477 if port >= self.ports.len() {
478 return false;
479 }
480 self.ports[port].connected
481 }
482}
483
// All successfully initialised controllers, in probe order.
static XHCI_CONTROLLERS: Mutex<Vec<Arc<XhciController>>> = Mutex::new(Vec::new());
// Set once `init` has finished scanning, even if no controller was found.
static XHCI_INITIALIZED: AtomicBool = AtomicBool::new(false);
486
487pub fn init() {
489 log::info!("[xHCI] Scanning for xHCI controllers...");
490
491 let candidates = pci::probe_all(ProbeCriteria {
492 vendor_id: None,
493 device_id: None,
494 class_code: Some(0x0C),
495 subclass: Some(0x03),
496 prog_if: Some(0x30),
497 });
498
499 for pci_dev in candidates.into_iter() {
500 log::info!(
501 "xHCI: Found controller at {:?} (VEN:{:04x} DEV:{:04x})",
502 pci_dev.address,
503 pci_dev.vendor_id,
504 pci_dev.device_id
505 );
506
507 pci_dev.enable_bus_master();
508
509 match unsafe { XhciController::new(pci_dev) } {
510 Ok(controller) => {
511 log::info!("[xHCI] Initialized with {} ports", controller.port_count());
512 XHCI_CONTROLLERS.lock().push(controller);
513 }
514 Err(e) => {
515 log::warn!("xHCI: Failed to initialize controller: {}", e);
516 }
517 }
518 }
519
520 XHCI_INITIALIZED.store(true, Ordering::SeqCst);
521 log::info!(
522 "[xHCI] Found {} controller(s)",
523 XHCI_CONTROLLERS.lock().len()
524 );
525}
526
527pub fn get_controller(index: usize) -> Option<Arc<XhciController>> {
529 XHCI_CONTROLLERS.lock().get(index).cloned()
530}
531
/// Whether `init` has completed its scan (true even if zero controllers
/// were found — check `get_controller` for actual instances).
pub fn is_available() -> bool {
    XHCI_INITIALIZED.load(Ordering::Relaxed)
}