use crate::{
    arch::x86_64::pci::{self, Bar, ProbeCriteria},
    memory::{self, allocate_dma_frame, phys_to_virt, PhysFrame},
};
use alloc::{sync::Arc, vec::Vec};
use core::sync::atomic::{fence, AtomicBool, Ordering};
use spin::{Mutex, Once};

const VIRTIO_RING_SIZE: usize = 64;
const PAGE_SIZE: usize = 4096;
const VIRTQ_PAYLOAD_ORDER: u8 = 6;
const FLUSH_OPS_THRESHOLD: u32 = 64;
/// Resource id of the single 2D resource backing scanout 0.
const FRAMEBUFFER_RESOURCE_ID: u32 = 1;

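/// Driver state for a single virtio-gpu device: the control virtqueue, the
/// negotiated display mode, and the page-granular guest memory that backs
/// the scanout resource.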
pub struct VirtioGpu {
    ctrl_queue: Mutex<Virtqueue>,
    _cursor_queue: Mutex<Option<Virtqueue>>,
    info: GpuInfo,
    _framebuffer_pages: Vec<PhysFrame>,
    framebuffer_segments: Vec<FramebufferSegment>,
    framebuffer_size: usize,
    dirty: Mutex<DirtyRect>,
}

struct VirtioDevice {
    mmio: usize,
    queue_notify_addr: usize,
}

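/// A virtio split virtqueue together with the DMA buffers used for the one
/// command this driver keeps in flight at a time: a command header page, an
/// optional payload buffer, and a device-written response page.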
struct Virtqueue {
    desc: *mut VirtqDesc,
    avail: *mut VirtqAvail,
    used: *mut VirtqUsed,
    queue_idx: u16,
    queue_size: u16,
    notify_addr: usize,
    free_stack: [u16; VIRTIO_RING_SIZE],
    free_len: usize,
    last_used_idx: u16,
    cmd_phys: u64,
    cmd_virt: *mut u8,
    payload_phys: u64,
    payload_virt: *mut u8,
    payload_capacity: usize,
    resp_phys: u64,
    resp_virt: *mut u8,
}

unsafe impl Send for Virtqueue {}

#[derive(Clone, Copy)]
struct FramebufferSegment {
    virt: *mut u8,
    len: usize,
}

unsafe impl Send for FramebufferSegment {}
unsafe impl Sync for FramebufferSegment {}

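/// Bounding box of all pixels touched since the last flush, plus a count of
/// draw operations used to decide when the batched region is pushed out.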
#[derive(Clone, Copy)]
struct DirtyRect {
    valid: bool,
    x0: u32,
    y0: u32,
    x1: u32,
    y1: u32,
    pending_ops: u32,
}

impl DirtyRect {
    const fn empty() -> Self {
        Self {
            valid: false,
            x0: 0,
            y0: 0,
            x1: 0,
            y1: 0,
            pending_ops: 0,
        }
    }

    fn include(&mut self, x: u32, y: u32, width: u32, height: u32) {
        if width == 0 || height == 0 {
            return;
        }
        let x1 = x.saturating_add(width);
        let y1 = y.saturating_add(height);
        if !self.valid {
            self.valid = true;
            self.x0 = x;
            self.y0 = y;
            self.x1 = x1;
            self.y1 = y1;
        } else {
            self.x0 = self.x0.min(x);
            self.y0 = self.y0.min(y);
            self.x1 = self.x1.max(x1);
            self.y1 = self.y1.max(y1);
        }
        self.pending_ops = self.pending_ops.saturating_add(1);
    }
}

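// Split-virtqueue ring structures as laid out in guest memory (virtio 1.x
// split virtqueues). Each part gets its own DMA page in this driver, which
// trivially satisfies the spec's alignment requirements.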
#[repr(C)]
#[derive(Clone, Copy)]
struct VirtqDesc {
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
}

#[repr(C)]
struct VirtqAvail {
    flags: u16,
    idx: u16,
    ring: [u16; VIRTIO_RING_SIZE],
}

#[repr(C)]
struct VirtqUsed {
    flags: u16,
    idx: u16,
    ring: [VirtqUsedElem; VIRTIO_RING_SIZE],
}

#[repr(C)]
#[derive(Clone, Copy)]
struct VirtqUsedElem {
    id: u32,
    len: u32,
}

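/// Display mode and framebuffer description handed to rendering code.
/// `framebuffer_virt` points at the first backing page only; the backing is
/// allocated page-by-page and is not guaranteed to be contiguous, so writes
/// should go through `VirtioGpu::present_from_linear` instead of straight
/// through this pointer.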
#[derive(Clone, Copy)]
pub struct GpuInfo {
    pub width: u32,
    pub height: u32,
    pub stride: u32,
    pub framebuffer_phys: u64,
    pub framebuffer_virt: *mut u8,
}

unsafe impl Send for GpuInfo {}
unsafe impl Sync for GpuInfo {}

const VIRTIO_F_VERSION_1: u64 = 1 << 32;
const VIRTIO_GPU_F_EDID: u32 = 1;

const VIRTIO_STATUS_ACKNOWLEDGE: u8 = 1;
const VIRTIO_STATUS_DRIVER: u8 = 2;
const VIRTIO_STATUS_DRIVER_OK: u8 = 4;
const VIRTIO_STATUS_FEATURES_OK: u8 = 8;

const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x0100;
const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: u32 = 0x0101;
const VIRTIO_GPU_CMD_SET_SCANOUT: u32 = 0x0103;
const VIRTIO_GPU_CMD_RESOURCE_FLUSH: u32 = 0x0104;
const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: u32 = 0x0105;
const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: u32 = 0x0106;

const VIRTIO_GPU_RESP_OK_NODATA: u32 = 0x1100;
const VIRTIO_GPU_RESP_OK_DISPLAY_INFO: u32 = 0x1101;

const VIRTQ_DESC_F_NEXT: u16 = 1;
const VIRTQ_DESC_F_WRITE: u16 = 2;

// Format 1 in the virtio-gpu format enum is B8G8R8A8_UNORM, which as a
// little-endian u32 is 0xAARRGGBB.
const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1;

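// Wire-format structures of the virtio-gpu 2D control protocol; layouts
// mirror the spec's struct definitions (all fields little-endian).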
#[repr(C)]
#[derive(Clone, Copy, Default)]
struct GpuRect {
    x: u32,
    y: u32,
    width: u32,
    height: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CtrlHeader {
    cmd: u32,
    flags: u32,
    fence_id: u64,
    ctx_id: u32,
    _padding: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdGetDisplayInfo {
    hdr: CtrlHeader,
    scanout_id: u32,
    _padding: [u32; 3],
}

const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;

#[repr(C)]
#[derive(Clone, Copy)]
struct DisplayOne {
    rect: GpuRect,
    enabled: u32,
    flags: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct RespDisplayInfo {
    hdr: CtrlHeader,
    pmodes: [DisplayOne; VIRTIO_GPU_MAX_SCANOUTS],
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdResourceCreate2d {
    hdr: CtrlHeader,
    resource_id: u32,
    format: u32,
    width: u32,
    height: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdResourceAttachBacking {
    hdr: CtrlHeader,
    resource_id: u32,
    nr_entries: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct MemEntry {
    addr: u64,
    length: u32,
    _padding: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdSetScanout {
    hdr: CtrlHeader,
    rect: GpuRect,
    scanout_id: u32,
    resource_id: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdResourceFlush {
    hdr: CtrlHeader,
    rect: GpuRect,
    resource_id: u32,
    _padding: u32,
}

#[repr(C)]
#[derive(Clone, Copy)]
struct CmdTransferToHost2d {
    hdr: CtrlHeader,
    rect: GpuRect,
    offset: u64,
    resource_id: u32,
    _padding: u32,
}

impl VirtioGpu {
    pub unsafe fn new(pci_dev: pci::PciDevice) -> Result<Self, &'static str> {
        let bar = match pci_dev.read_bar(0) {
            Some(Bar::Memory64 { addr, .. }) => addr,
            _ => return Err("Invalid BAR"),
        };

        let mmio = phys_to_virt(bar) as usize;
        // NOTE: the notify address is derived from a fixed offset past the
        // common configuration instead of parsing the virtio-pci notify
        // capability; this only holds for the device layout this kernel
        // targets.
        let notify_mult = unsafe { ((mmio + 0x20) as *const u16).read_volatile() as usize };
        let queue_notify_addr = mmio + 0x50 + notify_mult * 4;
        let mut device = VirtioDevice {
            mmio,
            queue_notify_addr,
        };

        device.reset();
        device.add_status(VIRTIO_STATUS_ACKNOWLEDGE);
        device.add_status(VIRTIO_STATUS_DRIVER);

        let features = device.read_features();
        let mut guest_features = VIRTIO_F_VERSION_1;
        if (features & (1 << VIRTIO_GPU_F_EDID)) != 0 {
            guest_features |= 1 << VIRTIO_GPU_F_EDID;
        }
        device.write_features(guest_features);
        device.add_status(VIRTIO_STATUS_FEATURES_OK);

        if (device.read_status() & VIRTIO_STATUS_FEATURES_OK) == 0 {
            return Err("Feature negotiation failed");
        }

        let ctrl_queue = Virtqueue::new(&mut device, 0)?;

        device.add_status(VIRTIO_STATUS_DRIVER_OK);

        let mut gpu = Self {
            ctrl_queue: Mutex::new(ctrl_queue),
            _cursor_queue: Mutex::new(None),
            info: GpuInfo {
                width: 1024,
                height: 768,
                stride: 1024 * 4,
                framebuffer_phys: 0,
                framebuffer_virt: core::ptr::null_mut(),
            },
            _framebuffer_pages: Vec::new(),
            framebuffer_segments: Vec::new(),
            framebuffer_size: 0,
            dirty: Mutex::new(DirtyRect::empty()),
        };

        gpu.init_display()?;
        Ok(gpu)
    }

    fn init_display(&mut self) -> Result<(), &'static str> {
        self.get_display_info()?;

        let framebuffer_size = self.info.stride as usize * self.info.height as usize;
        if framebuffer_size == 0 {
            return Err("Display reports zero-sized framebuffer");
        }
        let page_count = (framebuffer_size + PAGE_SIZE - 1) / PAGE_SIZE;
        let mut pages = Vec::with_capacity(page_count);
        let mut entries = Vec::with_capacity(page_count);
        let mut segments = Vec::with_capacity(page_count);

        for _ in 0..page_count {
            let frame = allocate_dma_frame().ok_or("Failed to allocate framebuffer page")?;
            let phys = frame.start_address.as_u64();
            pages.push(frame);
            entries.push(MemEntry {
                addr: phys,
                length: PAGE_SIZE as u32,
                _padding: 0,
            });
            segments.push(FramebufferSegment {
                virt: phys_to_virt(phys) as *mut u8,
                len: PAGE_SIZE,
            });
        }

        // Trim the final page to the exact framebuffer size.
        let rem = framebuffer_size % PAGE_SIZE;
        if rem != 0 {
            if let Some(last) = entries.last_mut() {
                last.length = rem as u32;
            }
            if let Some(last) = segments.last_mut() {
                last.len = rem;
            }
        }

        self.info.framebuffer_phys = entries.first().map(|e| e.addr).unwrap_or(0);
        self.info.framebuffer_virt = segments
            .first()
            .map(|s| s.virt)
            .unwrap_or(core::ptr::null_mut());
        self.framebuffer_size = framebuffer_size;
        self._framebuffer_pages = pages;
        self.framebuffer_segments = segments;

        self.resource_create_2d(FRAMEBUFFER_RESOURCE_ID, self.info.width, self.info.height)?;
        self.resource_attach_backing(FRAMEBUFFER_RESOURCE_ID, &entries)?;
        self.set_scanout(0, FRAMEBUFFER_RESOURCE_ID)?;
        self.transfer_to_host_2d(FRAMEBUFFER_RESOURCE_ID, 0, 0, self.info.width, self.info.height)?;
        self.resource_flush(FRAMEBUFFER_RESOURCE_ID, 0, 0, self.info.width, self.info.height)?;

        log::info!(
            "[VirtIO-GPU] {}x{} @ 32 bpp, framebuffer {} pages",
            self.info.width,
            self.info.height,
            page_count
        );

        Ok(())
    }

    fn get_display_info(&mut self) -> Result<(), &'static str> {
        let cmd = CmdGetDisplayInfo {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_GET_DISPLAY_INFO,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            scanout_id: 0,
            _padding: [0; 3],
        };

        let resp: RespDisplayInfo = self.send_command(&cmd)?;
        if resp.hdr.cmd != VIRTIO_GPU_RESP_OK_DISPLAY_INFO {
            return Err("GET_DISPLAY_INFO failed");
        }

        // Only scanout 0 is driven; keep the default mode if the device
        // reports it disabled.
        let pmode = &resp.pmodes[0];
        if pmode.enabled != 0 {
            self.info.width = pmode.rect.width;
            self.info.height = pmode.rect.height;
            self.info.stride = pmode.rect.width * 4;
        }

        Ok(())
    }

    fn resource_create_2d(
        &self,
        resource_id: u32,
        width: u32,
        height: u32,
    ) -> Result<(), &'static str> {
        let cmd = CmdResourceCreate2d {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            resource_id,
            format: VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM,
            width,
            height,
        };

        let resp: CtrlHeader = self.send_command(&cmd)?;
        if resp.cmd != VIRTIO_GPU_RESP_OK_NODATA {
            return Err("RESOURCE_CREATE_2D failed");
        }
        Ok(())
    }

    fn resource_attach_backing(
        &self,
        resource_id: u32,
        entries: &[MemEntry],
    ) -> Result<(), &'static str> {
        if entries.is_empty() {
            return Err("No backing entries");
        }
        let cmd = CmdResourceAttachBacking {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            resource_id,
            nr_entries: entries.len() as u32,
        };

        // The entry table travels as a second device-readable descriptor
        // right after the command header.
        let payload = unsafe {
            core::slice::from_raw_parts(
                entries.as_ptr() as *const u8,
                entries.len() * core::mem::size_of::<MemEntry>(),
            )
        };
        let resp: CtrlHeader = self.send_command_with_payload(&cmd, Some(payload))?;
        if resp.cmd != VIRTIO_GPU_RESP_OK_NODATA {
            return Err("RESOURCE_ATTACH_BACKING failed");
        }
        Ok(())
    }

    fn set_scanout(&self, scanout_id: u32, resource_id: u32) -> Result<(), &'static str> {
        let cmd = CmdSetScanout {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_SET_SCANOUT,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            rect: GpuRect {
                x: 0,
                y: 0,
                width: self.info.width,
                height: self.info.height,
            },
            scanout_id,
            resource_id,
        };

        let resp: CtrlHeader = self.send_command(&cmd)?;
        if resp.cmd != VIRTIO_GPU_RESP_OK_NODATA {
            return Err("SET_SCANOUT failed");
        }
        Ok(())
    }

    fn transfer_to_host_2d(
        &self,
        resource_id: u32,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Result<(), &'static str> {
        let cmd = CmdTransferToHost2d {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            rect: GpuRect { x, y, width, height },
            // The device reads each row starting at this byte offset into
            // the backing, so for a sub-rectangle it must point at (x, y).
            offset: y as u64 * self.info.stride as u64 + x as u64 * 4,
            resource_id,
            _padding: 0,
        };

        let resp: CtrlHeader = self.send_command(&cmd)?;
        if resp.cmd != VIRTIO_GPU_RESP_OK_NODATA {
            return Err("TRANSFER_TO_HOST_2D failed");
        }
        Ok(())
    }

    fn resource_flush(
        &self,
        resource_id: u32,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Result<(), &'static str> {
        let cmd = CmdResourceFlush {
            hdr: CtrlHeader {
                cmd: VIRTIO_GPU_CMD_RESOURCE_FLUSH,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                _padding: 0,
            },
            rect: GpuRect { x, y, width, height },
            resource_id,
            _padding: 0,
        };

        let resp: CtrlHeader = self.send_command(&cmd)?;
        if resp.cmd != VIRTIO_GPU_RESP_OK_NODATA {
            return Err("RESOURCE_FLUSH failed");
        }
        Ok(())
    }

    fn send_command<T: Copy, R: Copy>(&self, cmd: &T) -> Result<R, &'static str> {
        self.send_command_with_payload::<T, R>(cmd, None)
    }

    fn send_command_with_payload<T: Copy, R: Copy>(
        &self,
        cmd: &T,
        payload: Option<&[u8]>,
    ) -> Result<R, &'static str> {
        let cmd_size = core::mem::size_of::<T>();
        let resp_size = core::mem::size_of::<R>();
        if cmd_size > PAGE_SIZE || resp_size > PAGE_SIZE {
            return Err("Command or response too large");
        }

        let payload_len = payload.map_or(0, |p| p.len());
        let mut ctrl_queue = self.ctrl_queue.lock();
        if payload_len > ctrl_queue.payload_capacity {
            return Err("Payload too large");
        }

        let needed_desc = if payload_len > 0 { 3 } else { 2 };
        if ctrl_queue.free_len < needed_desc {
            return Err("Not enough free descriptors");
        }

        let head_idx = ctrl_queue.pop_desc().ok_or("Missing descriptor")?;
        let middle_idx = if payload_len > 0 {
            Some(ctrl_queue.pop_desc().ok_or("Missing payload descriptor")?)
        } else {
            None
        };
        let resp_idx = ctrl_queue.pop_desc().ok_or("Missing response descriptor")?;

        unsafe {
            core::ptr::copy_nonoverlapping(
                cmd as *const _ as *const u8,
                ctrl_queue.cmd_virt,
                cmd_size,
            );
            if let Some(data) = payload {
                core::ptr::copy_nonoverlapping(data.as_ptr(), ctrl_queue.payload_virt, data.len());
            }

            // Chain: device-readable command [+ payload], then a
            // device-writable response buffer.
            let head_desc = &mut *ctrl_queue.desc.add(head_idx as usize);
            head_desc.addr = ctrl_queue.cmd_phys;
            head_desc.len = cmd_size as u32;
            head_desc.flags = VIRTQ_DESC_F_NEXT;
            head_desc.next = middle_idx.unwrap_or(resp_idx);

            if let Some(mid) = middle_idx {
                let data_desc = &mut *ctrl_queue.desc.add(mid as usize);
                data_desc.addr = ctrl_queue.payload_phys;
                data_desc.len = payload_len as u32;
                data_desc.flags = VIRTQ_DESC_F_NEXT;
                data_desc.next = resp_idx;
            }

            let resp_desc = &mut *ctrl_queue.desc.add(resp_idx as usize);
            resp_desc.addr = ctrl_queue.resp_phys;
            resp_desc.len = resp_size as u32;
            resp_desc.flags = VIRTQ_DESC_F_WRITE;
            resp_desc.next = 0;

            let avail = ctrl_queue.avail;
            let avail_idx = core::ptr::addr_of!((*avail).idx).read_volatile();
            let ring_idx = (avail_idx as usize) % (ctrl_queue.queue_size as usize);
            core::ptr::addr_of_mut!((*avail).ring[ring_idx]).write_volatile(head_idx);
            // The device must see the descriptor and ring writes before the
            // new avail index is published.
            fence(Ordering::SeqCst);
            core::ptr::addr_of_mut!((*avail).idx).write_volatile(avail_idx.wrapping_add(1));
        }

        unsafe {
            (ctrl_queue.notify_addr as *mut u32).write_volatile(ctrl_queue.queue_idx as u32);
        }

        // Poll the used ring; the control queue mutex keeps at most one
        // command in flight, so the next used element is ours.
        let mut spins: u32 = 0;
        loop {
            unsafe {
                let used_idx = core::ptr::addr_of!((*ctrl_queue.used).idx).read_volatile();
                if ctrl_queue.last_used_idx != used_idx {
                    fence(Ordering::SeqCst);
                    let idx =
                        (ctrl_queue.last_used_idx as usize) % (ctrl_queue.queue_size as usize);
                    let elem = core::ptr::addr_of!((*ctrl_queue.used).ring[idx]).read_volatile();
                    ctrl_queue.last_used_idx = ctrl_queue.last_used_idx.wrapping_add(1);
                    if elem.id as u16 == head_idx {
                        break;
                    }
                }
            }

            spins = spins.wrapping_add(1);
            if (spins & 0x3ff) == 0 {
                crate::process::yield_task();
            } else {
                core::hint::spin_loop();
            }
        }

        let response = unsafe { core::ptr::read_unaligned(ctrl_queue.resp_virt as *const R) };

        ctrl_queue.push_desc(head_idx);
        if let Some(mid) = middle_idx {
            ctrl_queue.push_desc(mid);
        }
        ctrl_queue.push_desc(resp_idx);

        Ok(response)
    }

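    /// Copies `len` bytes into the page-granular framebuffer backing,
    /// splitting the copy at page boundaries because the backing pages are
    /// not guaranteed to be virtually contiguous.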
    fn copy_to_backing(
        &self,
        mut src: *const u8,
        mut dst_offset: usize,
        mut len: usize,
    ) -> Result<(), &'static str> {
        let end = dst_offset.checked_add(len).ok_or("Copy overflow")?;
        if end > self.framebuffer_size {
            return Err("Copy out of bounds");
        }
        while len > 0 {
            let seg_idx = dst_offset / PAGE_SIZE;
            let seg_off = dst_offset % PAGE_SIZE;
            let seg = self
                .framebuffer_segments
                .get(seg_idx)
                .ok_or("Segment out of bounds")?;
            if seg_off >= seg.len {
                return Err("Segment offset out of bounds");
            }
            let chunk = core::cmp::min(len, seg.len - seg_off);
            unsafe {
                core::ptr::copy_nonoverlapping(src, seg.virt.add(seg_off), chunk);
                src = src.add(chunk);
            }
            dst_offset += chunk;
            len -= chunk;
        }
        Ok(())
    }

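    /// Copies a rectangle from a linear 32-bit-per-pixel source surface into
    /// the backing store, then transfers and flushes exactly that rectangle.
    /// `src` is assumed to be a full-screen surface addressed with
    /// `src_stride`: the rectangle is read from the same (x, y) it occupies
    /// on screen.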
    pub fn present_from_linear(
        &self,
        src: *const u8,
        src_stride: u32,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Result<(), &'static str> {
        if src.is_null() {
            return Err("Invalid source pointer");
        }
        if width == 0 || height == 0 {
            return Ok(());
        }
        if x >= self.info.width || y >= self.info.height {
            return Ok(());
        }

        // Clamp the rectangle to the screen and copy it row by row.
        let width = width.min(self.info.width - x);
        let height = height.min(self.info.height - y);
        let src_stride = src_stride as usize;
        let dst_stride = self.info.stride as usize;
        let row_bytes = (width as usize).checked_mul(4).ok_or("Row overflow")?;

        for row in 0..height as usize {
            let src_off = (y as usize + row)
                .checked_mul(src_stride)
                .and_then(|o| o.checked_add(x as usize * 4))
                .ok_or("Source offset overflow")?;
            let dst_off = (y as usize + row)
                .checked_mul(dst_stride)
                .and_then(|o| o.checked_add(x as usize * 4))
                .ok_or("Destination offset overflow")?;
            let src_row = unsafe { src.add(src_off) };
            self.copy_to_backing(src_row, dst_off, row_bytes)?;
        }

        self.transfer_to_host_2d(FRAMEBUFFER_RESOURCE_ID, x, y, width, height)?;
        self.resource_flush(FRAMEBUFFER_RESOURCE_ID, x, y, width, height)?;
        Ok(())
    }

    pub fn info(&self) -> GpuInfo {
        self.info
    }

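    /// Records a dirty rectangle and only pushes the accumulated union to
    /// the host after `FLUSH_OPS_THRESHOLD` draw operations, batching many
    /// small writes into one transfer; `flush_now` forces out the rest.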
    pub fn flush(&self, x: u32, y: u32, width: u32, height: u32) {
        if width == 0 || height == 0 {
            return;
        }
        let mut dirty = self.dirty.lock();
        dirty.include(x, y, width, height);
        if dirty.pending_ops < FLUSH_OPS_THRESHOLD {
            return;
        }
        let x0 = dirty.x0;
        let y0 = dirty.y0;
        let w = dirty.x1.saturating_sub(dirty.x0);
        let h = dirty.y1.saturating_sub(dirty.y0);
        *dirty = DirtyRect::empty();
        drop(dirty);
        let _ = self.transfer_to_host_2d(FRAMEBUFFER_RESOURCE_ID, x0, y0, w, h);
        let _ = self.resource_flush(FRAMEBUFFER_RESOURCE_ID, x0, y0, w, h);
    }

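    /// Immediately transfers and flushes any pending dirty region,
    /// regardless of the batching threshold.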
    pub fn flush_now(&self) {
        let (x0, y0, w, h) = {
            let mut dirty = self.dirty.lock();
            if !dirty.valid {
                return;
            }
            let x0 = dirty.x0;
            let y0 = dirty.y0;
            let w = dirty.x1.saturating_sub(dirty.x0);
            let h = dirty.y1.saturating_sub(dirty.y0);
            *dirty = DirtyRect::empty();
            (x0, y0, w, h)
        };
        let _ = self.transfer_to_host_2d(FRAMEBUFFER_RESOURCE_ID, x0, y0, w, h);
        let _ = self.resource_flush(FRAMEBUFFER_RESOURCE_ID, x0, y0, w, h);
    }
}

impl VirtioDevice {
    // Register offsets follow the virtio-pci common configuration layout
    // (virtio 1.x): feature select/window pairs at 0x00-0x0C, device status
    // at 0x14, per-queue fields from 0x16.
    fn reset(&mut self) {
        unsafe {
            // Writing 0 to device_status resets the device; the reset is
            // complete once the status reads back as 0.
            ((self.mmio + 0x14) as *mut u8).write_volatile(0);
            while ((self.mmio + 0x14) as *const u8).read_volatile() != 0 {
                core::hint::spin_loop();
            }
        }
    }

    fn add_status(&mut self, status: u8) {
        unsafe {
            let current = ((self.mmio + 0x14) as *const u8).read_volatile();
            ((self.mmio + 0x14) as *mut u8).write_volatile(current | status);
        }
    }

    fn read_status(&self) -> u8 {
        unsafe { ((self.mmio + 0x14) as *const u8).read_volatile() }
    }

    fn read_features(&self) -> u64 {
        unsafe {
            // device_feature is a 32-bit window selected via
            // device_feature_select.
            (self.mmio as *mut u32).write_volatile(0);
            let lo = ((self.mmio + 0x04) as *const u32).read_volatile() as u64;
            (self.mmio as *mut u32).write_volatile(1);
            let hi = ((self.mmio + 0x04) as *const u32).read_volatile() as u64;
            (hi << 32) | lo
        }
    }

    fn write_features(&mut self, features: u64) {
        unsafe {
            // driver_feature is likewise a selected 32-bit window.
            ((self.mmio + 0x08) as *mut u32).write_volatile(0);
            ((self.mmio + 0x0C) as *mut u32).write_volatile(features as u32);
            ((self.mmio + 0x08) as *mut u32).write_volatile(1);
            ((self.mmio + 0x0C) as *mut u32).write_volatile((features >> 32) as u32);
        }
    }
}

impl Virtqueue {
    fn new(device: &mut VirtioDevice, queue_idx: u16) -> Result<Self, &'static str> {
        unsafe {
            ((device.mmio + 0x16) as *mut u16).write_volatile(queue_idx);
            let max_size = ((device.mmio + 0x18) as *const u16).read_volatile() as usize;
            if max_size == 0 {
                return Err("Queue size is zero");
            }

            // Negotiate a smaller ring if the device offers more entries
            // than we track.
            let queue_size = core::cmp::min(max_size, VIRTIO_RING_SIZE) as u16;
            ((device.mmio + 0x18) as *mut u16).write_volatile(queue_size);

            let desc_frame = allocate_dma_frame().ok_or("Failed to allocate desc")?;
            let avail_frame = allocate_dma_frame().ok_or("Failed to allocate avail")?;
            let used_frame = allocate_dma_frame().ok_or("Failed to allocate used")?;
            let cmd_frame = allocate_dma_frame().ok_or("Failed to allocate command buffer")?;
            let resp_frame = allocate_dma_frame().ok_or("Failed to allocate response buffer")?;

            let payload_frame = crate::sync::with_irqs_disabled(|token| {
                memory::allocate_frames(token, VIRTQ_PAYLOAD_ORDER)
            })
            .map_err(|_| "Failed to allocate payload buffer")?;

            let desc_phys = desc_frame.start_address.as_u64();
            let avail_phys = avail_frame.start_address.as_u64();
            let used_phys = used_frame.start_address.as_u64();
            let cmd_phys = cmd_frame.start_address.as_u64();
            let payload_phys = payload_frame.start_address.as_u64();
            let resp_phys = resp_frame.start_address.as_u64();

            let desc_virt = phys_to_virt(desc_phys) as *mut VirtqDesc;
            let avail_virt = phys_to_virt(avail_phys) as *mut VirtqAvail;
            let used_virt = phys_to_virt(used_phys) as *mut VirtqUsed;
            let cmd_virt = phys_to_virt(cmd_phys) as *mut u8;
            let payload_virt = phys_to_virt(payload_phys) as *mut u8;
            let resp_virt = phys_to_virt(resp_phys) as *mut u8;

            core::ptr::write_bytes(
                desc_virt as *mut u8,
                0,
                core::mem::size_of::<VirtqDesc>() * VIRTIO_RING_SIZE,
            );
            core::ptr::write_bytes(avail_virt as *mut u8, 0, core::mem::size_of::<VirtqAvail>());
            core::ptr::write_bytes(used_virt as *mut u8, 0, core::mem::size_of::<VirtqUsed>());
            core::ptr::write_bytes(payload_virt, 0, PAGE_SIZE << (VIRTQ_PAYLOAD_ORDER as usize));

            // Program the ring addresses (queue_desc / queue_driver /
            // queue_device are 64-bit fields at 0x20 / 0x28 / 0x30, written
            // as two 32-bit halves), route the queue away from MSI-X, and
            // enable it.
            ((device.mmio + 0x20) as *mut u32).write_volatile(desc_phys as u32);
            ((device.mmio + 0x24) as *mut u32).write_volatile((desc_phys >> 32) as u32);
            ((device.mmio + 0x28) as *mut u32).write_volatile(avail_phys as u32);
            ((device.mmio + 0x2C) as *mut u32).write_volatile((avail_phys >> 32) as u32);
            ((device.mmio + 0x30) as *mut u32).write_volatile(used_phys as u32);
            ((device.mmio + 0x34) as *mut u32).write_volatile((used_phys >> 32) as u32);
            ((device.mmio + 0x1A) as *mut u16).write_volatile(0xFFFF); // VIRTIO_MSI_NO_VECTOR
            ((device.mmio + 0x1C) as *mut u16).write_volatile(1); // queue_enable

            // Every descriptor starts out free.
            let mut free_stack = [0u16; VIRTIO_RING_SIZE];
            for i in 0..(queue_size as usize) {
                free_stack[i] = i as u16;
            }

            Ok(Self {
                desc: desc_virt,
                avail: avail_virt,
                used: used_virt,
                queue_idx,
                queue_size,
                notify_addr: device.queue_notify_addr,
                free_stack,
                free_len: queue_size as usize,
                last_used_idx: 0,
                cmd_phys,
                cmd_virt,
                payload_phys,
                payload_virt,
                payload_capacity: PAGE_SIZE << (VIRTQ_PAYLOAD_ORDER as usize),
                resp_phys,
                resp_virt,
            })
        }
    }

    fn pop_desc(&mut self) -> Option<u16> {
        if self.free_len == 0 {
            None
        } else {
            self.free_len -= 1;
            Some(self.free_stack[self.free_len])
        }
    }

    fn push_desc(&mut self, idx: u16) {
        if self.free_len < self.free_stack.len() {
            self.free_stack[self.free_len] = idx;
            self.free_len += 1;
        }
    }
}

static GPU_INSTANCE: Once<Arc<VirtioGpu>> = Once::new();
static GPU_INITIALIZED: AtomicBool = AtomicBool::new(false);

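/// Scans the PCI bus for virtio-gpu functions and brings up the first one
/// that initializes successfully; the resulting handle is shared through
/// `get_gpu`.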
pub fn init() {
    log::info!("[VirtIO-GPU] Scanning for VirtIO GPU devices...");

    let candidates = pci::probe_all(ProbeCriteria {
        vendor_id: Some(pci::vendor::VIRTIO),
        device_id: Some(pci::device::VIRTIO_GPU),
        class_code: None,
        subclass: None,
        prog_if: None,
    });

    for pci_dev in candidates {
        log::info!(
            "[VirtIO-GPU] Found device at {:?} (VEN:{:04x} DEV:{:04x})",
            pci_dev.address,
            pci_dev.vendor_id,
            pci_dev.device_id
        );

        pci_dev.enable_bus_master();

        match unsafe { VirtioGpu::new(pci_dev) } {
            Ok(gpu) => {
                let arc = Arc::new(gpu);
                GPU_INSTANCE.call_once(|| arc.clone());
                GPU_INITIALIZED.store(true, Ordering::SeqCst);

                let info = arc.info();
                log::info!(
                    "[VirtIO-GPU] Initialized: {}x{} @ 32bpp",
                    info.width,
                    info.height
                );
                return;
            }
            Err(e) => {
                log::warn!("[VirtIO-GPU] Failed to initialize device: {}", e);
            }
        }
    }

    log::info!("[VirtIO-GPU] No device found");
}

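/// Returns the shared GPU handle, or `None` if `init` has not found a
/// working device.
///
/// A minimal usage sketch (assuming the caller owns a full-screen linear
/// 32-bit pixel buffer named `buf`, which is not part of this module):
///
/// ```ignore
/// if let Some(gpu) = get_gpu() {
///     let info = gpu.info();
///     // Copy the whole surface into the backing pages and present it.
///     gpu.present_from_linear(buf.as_ptr(), info.stride, 0, 0, info.width, info.height)
///         .expect("present failed");
/// }
/// ```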
pub fn get_gpu() -> Option<Arc<VirtioGpu>> {
    GPU_INSTANCE.get().cloned()
}

pub fn is_available() -> bool {
    GPU_INITIALIZED.load(Ordering::Relaxed)
}

pub fn get_framebuffer_info() -> Option<GpuInfo> {
    GPU_INSTANCE.get().map(|gpu| gpu.info())
}