1#![allow(dead_code)]
13
14use crate::{
15 hardware::pci_client::{self as pci, Bar, ProbeCriteria},
16 memory::{allocate_zeroed_frame, paging, phys_to_virt},
17};
18use alloc::{sync::Arc, vec::Vec};
19use core::{
20 ptr::{read_volatile, write_volatile},
21 sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering},
22};
23use spin::Mutex;
24
// Size of the MMIO window mapped for each controller (covers the capability,
// operational, runtime and doorbell register blocks).
const XHCI_MMIO_SIZE: usize = 0x10000;
// PORTSC register array begins 0x400 bytes past the operational register base.
const XHCI_PORT_REG_BASE: usize = 0x400;
// Each port register set (PORTSC/PORTPMSC/PORTLI/PORTHLPMC) spans 0x10 bytes.
const XHCI_PORT_REG_STRIDE: usize = 0x10;
// TRBs per ring; the last slot of producer rings holds a Link TRB.
const XHCI_RING_TRBS: usize = 64;

// USBCMD register bits.
const USBCMD_RUN_STOP: u32 = 1 << 0; // start (1) / stop (0) the controller
const USBCMD_HCRST: u32 = 1 << 1; // host controller reset; self-clearing
const USBCMD_INTE: u32 = 1 << 2; // interrupter enable

// USBSTS register bits.
const USBSTS_HCH: u32 = 1 << 0; // host controller halted
const USBSTS_CNR: u32 = 1 << 11; // controller not ready

// PORTSC register bits.
const PORTSC_CCS: u32 = 1 << 0; // current connect status
const PORTSC_PED: u32 = 1 << 1; // port enabled/disabled
const PORTSC_PR: u32 = 1 << 4; // port reset
const PORTSC_PP: u32 = 1 << 9; // port power
const PORTSC_SPEED_SHIFT: u32 = 10; // port speed field lives in bits [13:10]
const PORTSC_W1C_MASK: u32 = 0xFE0000; // write-1-to-clear change bits [23:17]

// TRB Type values (encoded in TRB dword 3, bits [15:10]).
const TRB_TYPE_NORMAL: u32 = 1;
const TRB_TYPE_SETUP_STAGE: u32 = 2;
const TRB_TYPE_DATA_STAGE: u32 = 3;
const TRB_TYPE_STATUS_STAGE: u32 = 4;
const TRB_TYPE_ENABLE_SLOT: u32 = 9;
const TRB_TYPE_ADDRESS_DEVICE: u32 = 11;
const TRB_TYPE_CONFIGURE_ENDPOINT: u32 = 12;
const TRB_TYPE_TRANSFER_EVENT: u32 = 32;
const TRB_TYPE_LINK: u32 = 6;

// TRB control-word (dword 3) flag bits.
const TRB_CYCLE: u32 = 1 << 0; // producer cycle state
const TRB_IOC: u32 = 1 << 5; // interrupt on completion
const TRB_DIR_IN: u32 = 1 << 16; // data/status stage direction: device-to-host
const TRB_DIR_OUT: u32 = 0;
const TRB_TC: u32 = 1 << 1; // toggle cycle (meaningful on Link TRBs)

const TRB_TYPE_SHIFT: u32 = 10; // position of the TRB Type field in dword 3
const TRB_IDT: u32 = 1 << 6; // immediate data: parameter holds data, not a pointer
const TRB_TD_SIZE_SHIFT: u32 = 17; // TD Size field position within dword 2
const TRB_TD_SIZE_MASK: u32 = 0x1F; // TD Size is a 5-bit field

// Slot Context slot-state values (currently unused by this driver).
const SLOT_STATE_DISABLED: u32 = 0;
const SLOT_STATE_ENABLED: u32 = 1;
const SLOT_STATE_ADDRESSED: u32 = 2;
const SLOT_STATE_CONFIGURED: u32 = 3;

// Endpoint Context EP Type values (currently unused by this driver).
const EP_TYPE_CONTROL: u32 = 4;
const EP_TYPE_INTR_IN: u32 = 7;
73const fn TRB_GET_TYPE(d3: u32) -> u32 {
74 (d3 >> TRB_TYPE_SHIFT) & 0xFF
75}
76
/// xHCI Capability Registers, memory-mapped at the BAR0 base (xHCI spec §5.3).
/// Layout is fixed by hardware; underscore-prefixed fields are unread padding.
#[repr(C)]
struct CapRegisters {
    caplength: u8,    // byte offset of the operational registers from the MMIO base
    _reserved: u8,
    _hciversion: u16, // BCD interface version number
    hcsparams1: u32,  // MaxSlots [7:0], MaxIntrs [18:8], MaxPorts [31:24]
    _hcsparams2: u32,
    _hcsparams3: u32,
    _hccparams1: u32,
    dboff: u32,  // doorbell array offset from the MMIO base
    rtsoff: u32, // runtime register block offset from the MMIO base
    _hccparams2: u32,
}
90
/// xHCI Operational Registers, at MMIO base + CAPLENGTH (xHCI spec §5.4).
/// Layout is fixed by hardware; `crcr`/`dcbaap` are naturally 8-byte aligned
/// at offsets 0x18 and 0x30 under `repr(C)`.
#[repr(C)]
struct OpRegisters {
    usbcmd: u32, // USB Command: Run/Stop, HCRST, INTE
    usbsts: u32, // USB Status: HCH, CNR, ...
    _pagesize: u32,
    _reserved0: [u32; 2],
    _dnctrl: u32,
    crcr: u64, // Command Ring Control: ring base | ring cycle state
    _reserved1: [u32; 4],
    dcbaap: u64, // Device Context Base Address Array Pointer (physical)
    config: u32, // Configure: number of device slots enabled [7:0]
}
103
/// xHCI Runtime Registers, at MMIO base + RTSOFF (xHCI spec §5.5).
#[repr(C)]
struct RuntimeRegisters {
    _mfindex: u32,
    _reserved: [u32; 7],
    ir: [InterrupterRegisters; 1], // only interrupter 0 is used by this driver
}
110
/// One interrupter register set (xHCI spec §5.5.2).
#[repr(C)]
struct InterrupterRegisters {
    iman: u32, // Interrupter Management: IP (bit 0, write-1-to-clear) | IE (bit 1)
    _imod: u32,
    erstsz: u32, // Event Ring Segment Table size (number of entries)
    _reserved: u32,
    erstba: u64, // Event Ring Segment Table base address (physical)
    erdp: u64,   // Event Ring Dequeue Pointer; bit 3 = EHB (write-1-to-clear)
}
120
/// Generic 16-byte Transfer Request Block. Field meanings depend on the TRB
/// type encoded in bits [15:10] of `d3`.
#[repr(C)]
#[derive(Clone, Copy)]
struct Trb {
    d0: u32, // dword 0: parameter low (pointer or immediate data)
    d1: u32, // dword 1: parameter high
    d2: u32, // dword 2: status (transfer length, TD size / completion code)
    d3: u32, // dword 3: control (cycle bit, flags, TRB type, slot/endpoint)
}
129
130impl Trb {
131 fn link(addr: u64, toggle_cycle: bool) -> Self {
132 Self {
133 d0: (addr & 0xFFFFFFFF) as u32,
134 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
135 d2: 0,
136 d3: ((TRB_TYPE_LINK << TRB_TYPE_SHIFT) as u32)
137 | TRB_CYCLE
138 | (if toggle_cycle { TRB_TC } else { 0 }),
139 }
140 }
141
142 fn normal(addr: u64, len: u32, cycle: bool, ioc: bool) -> Self {
143 let mut d3 = (TRB_TYPE_NORMAL << TRB_TYPE_SHIFT) as u32 | if cycle { TRB_CYCLE } else { 0 };
144 if ioc {
145 d3 |= TRB_IOC;
146 }
147 Self {
148 d0: (addr & 0xFFFFFFFF) as u32,
149 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
150 d2: len,
151 d3,
152 }
153 }
154
155 fn setup_stage(addr: u64, cycle: bool) -> Self {
156 let mut d3 =
157 (TRB_TYPE_SETUP_STAGE << TRB_TYPE_SHIFT) as u32 | if cycle { TRB_CYCLE } else { 0 };
158 d3 |= TRB_IDT;
159 Self {
160 d0: (addr & 0xFFFFFFFF) as u32,
161 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
162 d2: 8,
163 d3,
164 }
165 }
166
167 fn data_stage(addr: u64, len: u32, dir_in: bool, cycle: bool, ioc: bool) -> Self {
168 let mut d3 =
169 (TRB_TYPE_DATA_STAGE << TRB_TYPE_SHIFT) as u32 | if cycle { TRB_CYCLE } else { 0 };
170 if dir_in {
171 d3 |= TRB_DIR_IN;
172 }
173 if ioc {
174 d3 |= TRB_IOC;
175 }
176 let td_size = ((len + TRB_TD_SIZE_MASK) / (TRB_TD_SIZE_MASK + 1)) & TRB_TD_SIZE_MASK;
177 let d2 = (td_size << TRB_TD_SIZE_SHIFT) | len;
178 Self {
179 d0: (addr & 0xFFFFFFFF) as u32,
180 d1: ((addr >> 32) & 0xFFFFFFFF) as u32,
181 d2,
182 d3,
183 }
184 }
185
186 fn status_stage(cycle: bool, dir_in: bool) -> Self {
187 let mut d3 =
188 (TRB_TYPE_STATUS_STAGE << TRB_TYPE_SHIFT) as u32 | if cycle { TRB_CYCLE } else { 0 };
189 if dir_in {
190 d3 |= TRB_DIR_IN;
191 }
192 d3 |= TRB_IOC;
193 Self {
194 d0: 0,
195 d1: 0,
196 d2: 0,
197 d3,
198 }
199 }
200}
201
/// xHCI Slot Context (spec §6.2.2): 8 dwords describing a device slot.
/// Individual fields are not yet decoded by this driver; kept packed for the
/// exact DMA layout the controller expects.
#[repr(C, packed)]
struct SlotContext {
    d0: u32,
    d1: u32,
    d2: u32,
    d3: u32,
    d4: u32,
    d5: u32,
    d6: u32,
    d7: u32,
}
213
/// xHCI Endpoint Context (spec §6.2.3): 8 dwords describing one endpoint.
/// Fields are not yet decoded by this driver; kept packed for DMA layout.
#[repr(C, packed)]
struct EndpointContext {
    d0: u32,
    d1: u32,
    d2: u32,
    d3: u32,
    d4: u32,
    d5: u32,
    d6: u32,
    d7: u32,
}
225
/// xHCI Input Control Context (spec §6.2.5.1): drop-context flags (`d0`),
/// add-context flags (`d1`), and reserved dwords.
#[repr(C, packed)]
struct InputControlContext {
    d0: u32,
    d1: u32,
    d2: [u32; 30],
}
232
/// xHCI Input Context (spec §6.2.5): control context, then the slot context,
/// then one endpoint context per device context index (31 total).
#[repr(C, packed)]
struct InputContext {
    ctrl: InputControlContext,
    slot: SlotContext,
    eps: [EndpointContext; 31],
}
239
/// Cached snapshot of one root-hub port, sampled once during `init`
/// (not refreshed on hotplug).
struct XhciPort {
    port_num: usize,  // zero-based port index
    enabled: bool,    // PORTSC.PED at scan time
    connected: bool,  // PORTSC.CCS at scan time
    speed: u8,        // PORTSC speed field (bits [13:10]) at scan time
}
246
/// Driver state for one xHCI host controller.
///
/// Raw pointers reference the controller's MMIO window and DMA buffers;
/// nothing in the visible code ever unmaps or frees them, so they are treated
/// as valid for the controller's lifetime.
pub struct XhciController {
    mmio_base: usize,              // virtual base of the mapped BAR0 window
    cap_regs: *const CapRegisters, // capability registers (read-only)
    op_regs: *mut OpRegisters,     // operational registers
    rt_regs: *mut RuntimeRegisters, // runtime registers (interrupter 0)
    db_regs: *mut u32,             // doorbell array
    caplength: u8,                 // offset of op regs from the MMIO base
    max_ports: usize,              // from HCSPARAMS1[31:24]
    ports: Vec<XhciPort>,          // port snapshot taken during init
    device_ctx: *mut u8,           // DCBAA, virtual
    device_ctx_phys: u64,          // DCBAA, physical (programmed into DCBAAP)
    cmd_ring: *mut Trb,            // command ring, virtual
    cmd_ring_phys: u64,
    cmd_ring_deq: usize,           // producer (enqueue) index, despite the name
    cmd_ring_cycle: bool,          // command-ring producer cycle state
    event_ring: *mut Trb,          // event ring, virtual
    event_ring_phys: u64,
    event_ring_deq: AtomicUsize,   // consumer index; shared with the IRQ path
    event_ring_cycle: AtomicBool,  // consumer cycle state; shared with the IRQ path
    slot_id: AtomicU8,             // slot assigned by Enable Slot; 0 = none
    ctrl_transfer_buf: *mut u8,    // DMA scratch buffer for EP0 transfers
    ctrl_transfer_buf_phys: u64,
    ctrl_ring: *mut Trb,           // EP0 transfer ring, virtual
    ctrl_ring_phys: u64,
    ctrl_ring_deq: usize,          // producer (enqueue) index, despite the name
    ctrl_ring_cycle: bool,
    enumeration_ready: bool,       // gates the public control-transfer API
}
275
// SAFETY(review): the raw MMIO/DMA pointers are only ever reached through the
// global `Mutex` wrapping each controller, which serializes cross-thread
// access — confirm no path dereferences them without holding that lock.
unsafe impl Send for XhciController {}
unsafe impl Sync for XhciController {}
278
279impl XhciController {
280 pub unsafe fn new(pci_dev: pci::PciDevice) -> Result<Self, &'static str> {
282 let bar = match pci_dev.read_bar(0) {
283 Some(Bar::Memory64 { addr, .. }) => addr,
284 _ => return Err("Invalid BAR"),
285 };
286 paging::ensure_identity_map_range(bar, XHCI_MMIO_SIZE as u64);
287
288 let mmio_base = phys_to_virt(bar) as usize;
289 let cap_regs = mmio_base as *const CapRegisters;
290 let caplength = (*cap_regs).caplength;
291 let op_regs = (mmio_base + caplength as usize) as *mut OpRegisters;
292
293 let dboff = (*cap_regs).dboff;
294 let db_regs = (mmio_base + dboff as usize) as *mut u32;
295
296 let rtsoff = (*cap_regs).rtsoff;
297 let rt_regs = (mmio_base + rtsoff as usize) as *mut RuntimeRegisters;
298
299 let max_ports = (((*cap_regs).hcsparams1 >> 24) & 0xFF) as usize;
300
301 let mut controller = Self {
302 mmio_base,
303 cap_regs,
304 op_regs,
305 rt_regs,
306 db_regs,
307 caplength,
308 max_ports,
309 ports: Vec::new(),
310 device_ctx: core::ptr::null_mut(),
311 device_ctx_phys: 0,
312 cmd_ring: core::ptr::null_mut(),
313 cmd_ring_phys: 0,
314 cmd_ring_deq: 0,
315 cmd_ring_cycle: true,
316 event_ring: core::ptr::null_mut(),
317 event_ring_phys: 0,
318 event_ring_deq: AtomicUsize::new(0),
319 event_ring_cycle: AtomicBool::new(true),
320 slot_id: AtomicU8::new(0),
321 ctrl_transfer_buf: core::ptr::null_mut(),
322 ctrl_transfer_buf_phys: 0,
323 ctrl_ring: core::ptr::null_mut(),
324 ctrl_ring_phys: 0,
325 ctrl_ring_deq: 0,
326 ctrl_ring_cycle: true,
327 enumeration_ready: false,
328 };
329
330 controller.init()?;
331 Ok(controller)
332 }
333
334 fn init(&mut self) -> Result<(), &'static str> {
336 unsafe {
337 for _ in 0..100_000 {
338 if self.read_usbsts() & USBSTS_CNR == 0 {
339 break;
340 }
341 core::hint::spin_loop();
342 }
343 if self.read_usbsts() & USBSTS_CNR != 0 {
344 return Err("xHCI: controller not ready (CNR)");
345 }
346
347 let mut usbcmd = self.read_usbcmd();
348 usbcmd &= !USBCMD_RUN_STOP;
349 self.write_usbcmd(usbcmd);
350 for _ in 0..100_000 {
351 if self.read_usbsts() & USBSTS_HCH != 0 {
352 break;
353 }
354 core::hint::spin_loop();
355 }
356 if self.read_usbsts() & USBSTS_HCH == 0 {
357 return Err("xHCI: controller did not halt");
358 }
359
360 self.write_usbcmd(self.read_usbcmd() | USBCMD_HCRST);
361 for _ in 0..100_000 {
362 if self.read_usbcmd() & USBCMD_HCRST == 0 {
363 break;
364 }
365 core::hint::spin_loop();
366 }
367 if self.read_usbcmd() & USBCMD_HCRST != 0 {
368 return Err("xHCI: controller reset timed out");
369 }
370 let mut cnr_timeout = 1_000_000u32;
371 while self.read_usbsts() & USBSTS_CNR != 0 {
372 if cnr_timeout == 0 {
373 return Err("xHCI: CNR did not clear after reset");
374 }
375 cnr_timeout -= 1;
376 core::hint::spin_loop();
377 }
378
379 for i in 0..self.max_ports {
380 let portsc = self.read_portsc(i);
381 self.ports.push(XhciPort {
382 port_num: i,
383 enabled: (portsc & PORTSC_PED) != 0,
384 connected: (portsc & PORTSC_CCS) != 0,
385 speed: ((portsc >> PORTSC_SPEED_SHIFT) & 0xF) as u8,
386 });
387 }
388
389 self.init_rings()?;
390 self.init_interrupter()?;
391 self.init_ctrl_ring()?;
392
393 let max_slots = self.max_device_slots();
394 self.write_config(max_slots);
395 self.write_usbcmd(self.read_usbcmd() | USBCMD_RUN_STOP | USBCMD_INTE);
396
397 self.enumeration_ready = false;
401 }
402 Ok(())
403 }
404
405 unsafe fn init_rings(&mut self) -> Result<(), &'static str> {
407 let cmd_frame = allocate_zeroed_frame().ok_or("Failed to allocate cmd ring")?;
408 self.cmd_ring_phys = cmd_frame.start_address.as_u64();
409 self.cmd_ring = phys_to_virt(self.cmd_ring_phys) as *mut Trb;
410 core::ptr::write_bytes(self.cmd_ring as *mut u8, 0, 4096);
411 core::ptr::write(
412 self.cmd_ring.add(XHCI_RING_TRBS - 1),
413 Trb::link(self.cmd_ring_phys, true),
414 );
415 self.write_crcr(self.cmd_ring_phys | 1);
416
417 let event_frame = allocate_zeroed_frame().ok_or("Failed to allocate event ring")?;
418 self.event_ring_phys = event_frame.start_address.as_u64();
419 self.event_ring = phys_to_virt(self.event_ring_phys) as *mut Trb;
420 core::ptr::write_bytes(self.event_ring as *mut u8, 0, 4096);
421
422 let dev_frame = allocate_zeroed_frame().ok_or("Failed to allocate DCBAA")?;
423 self.device_ctx_phys = dev_frame.start_address.as_u64();
424 self.device_ctx = phys_to_virt(self.device_ctx_phys) as *mut u8;
425 core::ptr::write_bytes(self.device_ctx, 0, 4096);
426 self.write_dcbaap(self.device_ctx_phys);
427
428 Ok(())
429 }
430
431 unsafe fn init_interrupter(&mut self) -> Result<(), &'static str> {
433 let erst_frame = allocate_zeroed_frame().ok_or("Failed to allocate ERST")?;
434 let erst_phys = erst_frame.start_address.as_u64();
435 let erst_virt = phys_to_virt(erst_phys) as *mut u64;
436 core::ptr::write_bytes(erst_virt as *mut u8, 0, 4096);
437
438 let erst_entry = erst_virt as *mut u8;
439 let addr_bytes = self.event_ring_phys.to_le_bytes();
440 core::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), erst_entry, 8);
441 let seg_size: u32 = XHCI_RING_TRBS as u32;
442 let size_bytes = seg_size.to_le_bytes();
443 core::ptr::copy_nonoverlapping(size_bytes.as_ptr(), erst_entry.add(8), 4);
444
445 let ir = &mut (*self.rt_regs).ir[0];
446 write_volatile(core::ptr::addr_of_mut!(ir.erstsz), 1);
447 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
448 write_volatile(core::ptr::addr_of_mut!(ir.erstba), erst_phys);
449 write_volatile(core::ptr::addr_of_mut!(ir.erdp), self.event_ring_phys);
450 write_volatile(core::ptr::addr_of_mut!(ir.iman), 3);
451
452 Ok(())
453 }
454
455 unsafe fn init_ctrl_ring(&mut self) -> Result<(), &'static str> {
457 let buf_frame = allocate_zeroed_frame().ok_or("Failed to allocate ctrl buf")?;
458 self.ctrl_transfer_buf_phys = buf_frame.start_address.as_u64();
459 self.ctrl_transfer_buf = phys_to_virt(self.ctrl_transfer_buf_phys) as *mut u8;
460 core::ptr::write_bytes(self.ctrl_transfer_buf, 0, 4096);
461
462 let ring_frame = allocate_zeroed_frame().ok_or("Failed to allocate ctrl ring")?;
463 self.ctrl_ring_phys = ring_frame.start_address.as_u64();
464 self.ctrl_ring = phys_to_virt(self.ctrl_ring_phys) as *mut Trb;
465 core::ptr::write_bytes(self.ctrl_ring as *mut u8, 0, 4096);
466
467 Ok(())
468 }
469
470 unsafe fn read_portsc(&self, port: usize) -> u32 {
472 let port_offset = XHCI_PORT_REG_BASE + (port * XHCI_PORT_REG_STRIDE);
473 let portsc_ptr = (self.op_regs as *const u8).add(port_offset) as *const u32;
474 portsc_ptr.read_volatile()
475 }
476
477 unsafe fn write_portsc(&self, port: usize, val: u32) {
479 let port_offset = XHCI_PORT_REG_BASE + (port * XHCI_PORT_REG_STRIDE);
480 let portsc_ptr = (self.op_regs as *const u8).add(port_offset) as *mut u32;
481 portsc_ptr.write_volatile(val);
482 }
483
484 unsafe fn read_usbcmd(&self) -> u32 {
485 read_volatile(core::ptr::addr_of!((*self.op_regs).usbcmd))
486 }
487
488 unsafe fn write_usbcmd(&self, value: u32) {
489 write_volatile(core::ptr::addr_of_mut!((*self.op_regs).usbcmd), value);
490 }
491
492 unsafe fn read_usbsts(&self) -> u32 {
493 read_volatile(core::ptr::addr_of!((*self.op_regs).usbsts))
494 }
495
496 unsafe fn write_crcr(&self, value: u64) {
497 write_volatile(core::ptr::addr_of_mut!((*self.op_regs).crcr), value);
498 }
499
500 unsafe fn write_dcbaap(&self, value: u64) {
501 write_volatile(core::ptr::addr_of_mut!((*self.op_regs).dcbaap), value);
502 }
503
504 unsafe fn write_config(&self, value: u32) {
505 write_volatile(core::ptr::addr_of_mut!((*self.op_regs).config), value);
506 }
507
508 fn max_device_slots(&self) -> u32 {
509 unsafe { read_volatile(core::ptr::addr_of!((*self.cap_regs).hcsparams1)) & 0xFF }
510 }
511
512 unsafe fn enable_slot(&mut self) -> Result<(), &'static str> {
514 self.cmd_ring_enqueue(Trb {
515 d0: 0,
516 d1: 0,
517 d2: 0,
518 d3: (TRB_TYPE_ENABLE_SLOT << TRB_TYPE_SHIFT) as u32 | TRB_CYCLE,
519 });
520
521 let event = self.wait_for_event()?;
522 let completion_code = (event.d2 >> 24) & 0xFF;
523 if completion_code != 1 {
524 return Err("Enable slot failed");
525 }
526 let slot_id = ((event.d3 >> 24) & 0xFF) as u8;
527 if slot_id == 0 {
528 return Err("No slot available");
529 }
530 self.slot_id.store(slot_id, Ordering::SeqCst);
531
532 Ok(())
533 }
534
535 unsafe fn cmd_ring_enqueue(&mut self, trb: Trb) {
537 let idx = self.cmd_ring_deq;
538 let mut trb = trb;
539 if self.cmd_ring_cycle {
540 trb.d3 |= TRB_CYCLE;
541 } else {
542 trb.d3 &= !TRB_CYCLE;
543 }
544 core::ptr::write_volatile(self.cmd_ring.add(idx), trb);
545 self.cmd_ring_deq = idx + 1;
546
547 if self.cmd_ring_deq >= 63 {
548 let link = Trb::link(self.cmd_ring_phys, true);
549 let mut link_trb = link;
550 if self.cmd_ring_cycle {
551 link_trb.d3 |= TRB_CYCLE;
552 } else {
553 link_trb.d3 &= !TRB_CYCLE;
554 }
555 core::ptr::write_volatile(self.cmd_ring.add(63), link_trb);
556 self.cmd_ring_deq = 0;
557 self.cmd_ring_cycle = !self.cmd_ring_cycle;
558 }
559
560 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
561 core::ptr::write_volatile(self.db_regs.add(0), 0);
562 }
563
564 unsafe fn wait_for_event(&mut self) -> Result<Trb, &'static str> {
566 for _ in 0..1000000 {
567 let idx = self.event_ring_deq.load(Ordering::Acquire);
568 let trb = core::ptr::read_volatile(self.event_ring.add(idx));
569
570 let expected_c = if self.event_ring_cycle.load(Ordering::Acquire) {
571 TRB_CYCLE
572 } else {
573 0
574 };
575 if (trb.d3 & TRB_CYCLE) == expected_c {
576 let new_deq = (idx + 1) % 64;
577 self.event_ring_deq.store(new_deq, Ordering::Release);
578 if new_deq == 0 {
579 self.event_ring_cycle.store(
580 !self.event_ring_cycle.load(Ordering::Acquire),
581 Ordering::Release,
582 );
583 }
584 let ir = &mut (*self.rt_regs).ir[0];
585 ir.erdp = (self.event_ring_phys + (new_deq as u64) * 16) | (1 << 3);
586 return Ok(trb);
587 }
588 core::hint::spin_loop();
589 }
590 Err("Event timeout")
591 }
592
593 unsafe fn ctrl_ring_enqueue(&mut self, trb: Trb) {
594 let idx = self.ctrl_ring_deq;
595 let mut trb = trb;
596 if self.ctrl_ring_cycle {
597 trb.d3 |= TRB_CYCLE;
598 } else {
599 trb.d3 &= !TRB_CYCLE;
600 }
601 core::ptr::write_volatile(self.ctrl_ring.add(idx), trb);
602 self.ctrl_ring_deq = idx + 1;
603
604 if self.ctrl_ring_deq >= 63 {
605 let link = Trb::link(self.ctrl_ring_phys, true);
606 let mut link_trb = link;
607 if self.ctrl_ring_cycle {
608 link_trb.d3 |= TRB_CYCLE;
609 } else {
610 link_trb.d3 &= !TRB_CYCLE;
611 }
612 core::ptr::write_volatile(self.ctrl_ring.add(63), link_trb);
613 self.ctrl_ring_deq = 0;
614 self.ctrl_ring_cycle = !self.ctrl_ring_cycle;
615 }
616
617 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
618 core::ptr::write_volatile(self.db_regs.add(0), 0);
619 }
620
621 unsafe fn ctrl_wait_for_event(&mut self) -> Result<Trb, &'static str> {
622 for _ in 0..1000000 {
623 let idx = self.ctrl_ring_deq;
624 let trb = core::ptr::read_volatile(self.ctrl_ring.add(idx));
625
626 let expected_c = if self.ctrl_ring_cycle { TRB_CYCLE } else { 0 };
627 if (trb.d3 & TRB_CYCLE) == expected_c {
628 self.ctrl_ring_deq = (idx + 1) % 64;
629 if self.ctrl_ring_deq == 0 {
630 self.ctrl_ring_cycle = !self.ctrl_ring_cycle;
631 }
632 return Ok(trb);
633 }
634 core::hint::spin_loop();
635 }
636 Err("Control transfer timeout")
637 }
638
639 fn ctrl_transfer(
640 &mut self,
641 slot_id: u8,
642 setup_data: &[u8; 8],
643 data_buf: Option<&mut [u8]>,
644 data_len: usize,
645 ) -> Result<usize, &'static str> {
646 unsafe {
647 self.ctrl_ring_deq = 0;
648 self.ctrl_ring_cycle = true;
649 core::ptr::write_bytes(self.ctrl_ring as *mut u8, 0, 4096);
650
651 let setup_phys = self.ctrl_transfer_buf_phys;
652 let setup_virt = self.ctrl_transfer_buf;
653 core::ptr::copy_nonoverlapping(setup_data.as_ptr(), setup_virt, 8);
654
655 self.ctrl_ring_enqueue(Trb::setup_stage(setup_phys, true));
656
657 let mut transferred = 0;
658 let has_data = data_buf.is_some();
659 let dir_in = if has_data {
660 (setup_data[0] & 0x80) != 0
661 } else {
662 false
663 };
664
665 if let Some(buf) = &data_buf {
666 let data_phys = self.ctrl_transfer_buf_phys + 8;
667 let data_virt = self.ctrl_transfer_buf.add(8);
668
669 if dir_in && data_len > 0 {
670 core::ptr::write_bytes(data_virt, 0, data_len);
671 } else if !dir_in && data_len > 0 {
672 core::ptr::copy_nonoverlapping(buf.as_ptr(), data_virt, data_len);
673 }
674
675 self.ctrl_ring_enqueue(Trb::data_stage(
676 data_phys,
677 data_len as u32,
678 dir_in,
679 true,
680 false,
681 ));
682
683 self.ctrl_ring_enqueue(Trb::status_stage(true, !dir_in));
684 } else {
685 self.ctrl_ring_enqueue(Trb::status_stage(true, true));
686 }
687
688 for _ in 0..3 {
689 let event = self.ctrl_wait_for_event()?;
690 let trb_type = TRB_GET_TYPE(event.d3);
691 let completion = (event.d2 >> 24) & 0xFF;
692
693 if trb_type == TRB_TYPE_STATUS_STAGE && completion == 1 {
694 if has_data && data_len > 0 {
695 if dir_in {
696 let data_virt = self.ctrl_transfer_buf.add(8);
697 if let Some(buf) = data_buf {
698 core::ptr::copy_nonoverlapping(
699 data_virt,
700 buf.as_mut_ptr(),
701 data_len,
702 );
703 }
704 }
705 transferred = data_len;
706 }
707 return Ok(transferred);
708 }
709 }
710 Err("Control transfer failed")
711 }
712 }
713
714 pub fn port_count(&self) -> usize {
716 self.max_ports
717 }
718
719 pub fn is_port_connected(&self, port: usize) -> bool {
721 if port >= self.ports.len() {
722 return false;
723 }
724 self.ports[port].connected
725 }
726
727 pub fn get_device_descriptor(
728 &mut self,
729 slot_id: u8,
730 buf: &mut [u8; 18],
731 ) -> Result<usize, &'static str> {
732 if !self.enumeration_ready {
733 let _ = slot_id;
734 return Err("xHCI enumeration is not ready");
735 }
736 let setup = [0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 18, 0x00];
737 self.ctrl_transfer(slot_id, &setup, Some(buf), 18)
738 }
739
740 pub fn get_configuration_descriptor(
741 &mut self,
742 slot_id: u8,
743 config_idx: u8,
744 buf: &mut [u8],
745 len: usize,
746 ) -> Result<usize, &'static str> {
747 if !self.enumeration_ready {
748 return Err("xHCI enumeration is not ready");
749 }
750 let setup = [
751 0x80,
752 0x06,
753 config_idx,
754 0x02,
755 0x00,
756 0x00,
757 (len & 0xFF) as u8,
758 ((len >> 8) & 0xFF) as u8,
759 ];
760 self.ctrl_transfer(slot_id, &setup, Some(buf), len)
761 }
762
763 pub fn set_address(&mut self, slot_id: u8, address: u8) -> Result<(), &'static str> {
764 if !self.enumeration_ready {
765 let _ = (slot_id, address);
766 return Err("xHCI enumeration is not ready");
767 }
768 unsafe {
769 self.cmd_ring_enqueue(Trb {
770 d0: 0,
771 d1: 0,
772 d2: (slot_id as u32) << 24,
773 d3: (TRB_TYPE_ADDRESS_DEVICE << TRB_TYPE_SHIFT) as u32 | TRB_CYCLE,
774 });
775 let event = self.wait_for_event()?;
776 let completion = (event.d2 >> 24) & 0xFF;
777 if completion != 1 {
778 return Err("Set address failed");
779 }
780 }
781 Ok(())
782 }
783
784 pub fn set_configuration(&mut self, slot_id: u8, config_value: u8) -> Result<(), &'static str> {
785 if !self.enumeration_ready {
786 let _ = (slot_id, config_value);
787 return Err("xHCI enumeration is not ready");
788 }
789 let setup = [0x00, 0x09, config_value, 0x00, 0x00, 0x00, 0x00, 0x00];
790 self.ctrl_transfer(slot_id, &setup, None, 0)?;
791 Ok(())
792 }
793
794 pub fn get_port_speed(&self, port: usize) -> u8 {
795 if port >= self.ports.len() {
796 return 0;
797 }
798 self.ports[port].speed
799 }
800}
801
// All controllers brought up by `init`, in PCI discovery order.
static XHCI_CONTROLLERS: Mutex<Vec<Arc<Mutex<XhciController>>>> = Mutex::new(Vec::new());
// Set once `init` has completed, even if no controller was found.
static XHCI_INITIALIZED: AtomicBool = AtomicBool::new(false);
// Legacy PCI interrupt line of the most recently initialized controller.
pub static XHCI_IRQ_LINE: AtomicU8 = AtomicU8::new(0);
805
806pub fn init() {
808 log::info!("[xHCI] Scanning for xHCI controllers...");
809
810 let candidates = pci::probe_all(ProbeCriteria {
811 vendor_id: None,
812 device_id: None,
813 class_code: Some(0x0C),
814 subclass: Some(0x03),
815 prog_if: Some(0x30),
816 });
817
818 for pci_dev in candidates.into_iter() {
819 log::info!(
820 "xHCI: Found controller at {:?} (VEN:{:04x} DEV:{:04x})",
821 pci_dev.address,
822 pci_dev.vendor_id,
823 pci_dev.device_id
824 );
825
826 let irq = pci_dev.interrupt_line;
827 pci_dev.enable_memory_space();
828 pci_dev.enable_bus_master();
829
830 match unsafe { XhciController::new(pci_dev) } {
831 Ok(controller) => {
832 log::info!("[xHCI] Initialized with {} ports", controller.port_count());
833 XHCI_IRQ_LINE.store(irq, Ordering::Relaxed);
834 XHCI_CONTROLLERS
835 .lock()
836 .push(Arc::new(Mutex::new(controller)));
837 crate::arch::x86_64::idt::register_xhci_irq(irq);
838 }
839 Err(e) => {
840 log::warn!("xHCI: Failed to initialize controller: {}", e);
841 }
842 }
843 }
844
845 XHCI_INITIALIZED.store(true, Ordering::SeqCst);
846 log::info!(
847 "[xHCI] Found {} controller(s)",
848 XHCI_CONTROLLERS.lock().len()
849 );
850}
851
852pub fn get_controller(index: usize) -> Option<Arc<Mutex<XhciController>>> {
854 XHCI_CONTROLLERS.lock().get(index).cloned()
855}
856
857pub fn is_available() -> bool {
859 XHCI_INITIALIZED.load(Ordering::Relaxed) && !XHCI_CONTROLLERS.lock().is_empty()
860}
861
862pub fn handle_interrupt() {
866 if let Some(controller_arc) = get_controller(0) {
867 let mut controller = controller_arc.lock();
868 unsafe {
869 let ir = &mut (*controller.rt_regs).ir[0];
870 if (ir.iman & 1) != 0 {
871 let mut processed = 0;
872 while processed < 16 {
873 let idx = controller.event_ring_deq.load(Ordering::Acquire);
874 let trb = core::ptr::read_volatile(controller.event_ring.add(idx));
875
876 let expected_c = if controller.event_ring_cycle.load(Ordering::Acquire) {
877 TRB_CYCLE
878 } else {
879 0
880 };
881 if (trb.d3 & TRB_CYCLE) != expected_c {
882 break;
883 }
884
885 let trb_type = TRB_GET_TYPE(trb.d3);
886 match trb_type {
887 TRB_TYPE_TRANSFER_EVENT => {
888 let slot_id = ((trb.d3 >> 24) & 0xFF) as u8;
889 let ep_id = ((trb.d2 >> 16) & 0x1F) as u8;
890 crate::hardware::usb::hid::notify_transfer_complete(slot_id, ep_id);
891 }
892 _ => {}
893 }
894
895 let new_deq = (idx + 1) % 64;
896 controller.event_ring_deq.store(new_deq, Ordering::Release);
897 if new_deq == 0 {
898 controller.event_ring_cycle.store(
899 !controller.event_ring_cycle.load(Ordering::Acquire),
900 Ordering::Release,
901 );
902 }
903
904 let new_erdp = controller.event_ring_phys + (new_deq as u64) * 16;
905 ir.erdp = new_erdp | (1 << 3);
906
907 processed += 1;
908 }
909 }
910 }
911 }
912}