// strat9_kernel/hardware/virtio/common.rs
use super::{vring_flags, VirtqDesc};
10use crate::{
11 arch::x86_64::pci::{Bar, PciDevice},
12 memory::{self, PhysFrame},
13};
14use alloc::vec::Vec;
15use core::{
16 ptr::{read_volatile, write_volatile},
17 sync::atomic::{fence, AtomicU16, Ordering},
18};
19
/// Device-independent virtio feature bits (virtio spec, "Reserved Feature
/// Bits"). Expressed as `u64` masks over the combined 64-bit feature space.
pub mod features {
    /// Device supports indirect descriptor tables.
    pub const VIRTIO_F_RING_INDIRECT_DESC: u64 = 1 << 28;
    /// avail_event/used_event notification-suppression mechanism.
    pub const VIRTIO_F_RING_EVENT_IDX: u64 = 1 << 29;
    /// Device complies with the virtio 1.0+ ("modern") interface.
    pub const VIRTIO_F_VERSION_1: u64 = 1 << 32;
    /// Device may sit behind an IOMMU / requires platform DMA translation.
    pub const VIRTIO_F_ACCESS_PLATFORM: u64 = 1 << 33;
    /// Packed ring layout supported (instead of the split ring used here).
    pub const VIRTIO_F_RING_PACKED: u64 = 1 << 34;
    /// Device uses buffers in the order they were made available.
    pub const VIRTIO_F_IN_ORDER: u64 = 1 << 35;
    /// Platform (not virtio-default) memory ordering applies to ring access.
    pub const VIRTIO_F_ORDER_PLATFORM: u64 = 1 << 36;
    /// Device supports SR-IOV virtual functions.
    pub const VIRTIO_F_SR_IOV: u64 = 1 << 37;
    /// Driver passes extra data in device notifications.
    pub const VIRTIO_F_NOTIFICATION_DATA: u64 = 1 << 38;
}
32
/// Header of the split-ring available ring (driver → device).
///
/// Only the fixed header is declared here; the `ring[queue_size]` array of
/// `u16` descriptor heads lives immediately after it in memory and is
/// reached through `Virtqueue::avail_ring_ptr`.
#[repr(C)]
pub struct VirtqAvail {
    /// Driver flags (notification-suppression hints).
    pub flags: AtomicU16,
    /// Free-running index of the next ring slot the driver will fill.
    pub idx: AtomicU16,
}
41
/// One used-ring entry: a descriptor chain the device has completed.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VirtqUsedElem {
    /// Head index of the completed descriptor chain (widened to 32 bits).
    pub id: u32,
    /// Number of bytes the device wrote into the chain's writable buffers.
    pub len: u32,
}
51
/// Header of the split-ring used ring (device → driver).
///
/// The `ring[queue_size]` array of `VirtqUsedElem` follows this header in
/// memory and is reached through `Virtqueue::used_ring_ptr`.
#[repr(C)]
pub struct VirtqUsed {
    /// Device flags (bit 0 is the legacy VIRTQ_USED_F_NO_NOTIFY hint).
    pub flags: AtomicU16,
    /// Free-running index of the next ring slot the device will fill.
    pub idx: AtomicU16,
}
60
/// A legacy split virtqueue: descriptor table, available ring and used ring
/// in one physically-contiguous allocation, plus driver-side bookkeeping.
pub struct Virtqueue {
    /// Number of descriptors; always a power of two (checked in `new`).
    queue_size: u16,

    /// Keeps the backing physical frames alive for the queue's lifetime.
    _ring_area: PhysFrame,

    /// Physical address of the descriptor table (programmed into the device).
    desc_area: u64,

    /// Physical address of the available ring.
    avail_area: u64,

    /// Physical address of the used ring.
    used_area: u64,

    /// Kernel-virtual pointer to the descriptor table.
    desc_ptr: *mut VirtqDesc,

    /// Kernel-virtual pointer to the available-ring header (flags, idx).
    avail_ptr: *mut VirtqAvail,

    /// Kernel-virtual pointer to the available ring's entry array.
    avail_ring_ptr: *mut u16,

    /// Kernel-virtual pointer to the used-ring header (flags, idx).
    used_ptr: *mut VirtqUsed,

    /// Kernel-virtual pointer to the used ring's entry array.
    used_ring_ptr: *mut VirtqUsedElem,

    /// Stack of descriptor indices not currently posted to the device.
    free_descriptors: Vec<u16>,

    /// Last used-ring index the driver has already consumed.
    last_used_idx: u16,

    /// Next available-ring index the driver will publish (mirror of avail.idx).
    next_avail_idx: u16,
}
105
// SAFETY: the raw pointers all point into the ring allocation owned by
// `_ring_area`, which lives as long as the `Virtqueue` itself; mutation goes
// through `&mut self`, so the caller provides the synchronization needed to
// move the queue to another thread.
unsafe impl Send for Virtqueue {}
108
109impl Virtqueue {
110 #[inline]
111 fn align_up(value: usize, align: usize) -> usize {
112 debug_assert!(align.is_power_of_two());
113 (value + align - 1) & !(align - 1)
114 }
115
    /// Allocates and zeroes a legacy split virtqueue with `queue_size`
    /// descriptors.
    ///
    /// Layout (legacy PFN interface): descriptor table at offset 0, the
    /// available ring immediately after it, and the used ring starting on
    /// the next 4 KiB boundary — all inside one physically-contiguous
    /// allocation.
    ///
    /// # Errors
    /// Fails when `queue_size` is not a power of two or when the frame
    /// allocation fails.
    ///
    /// # Safety
    /// Once the queue is programmed into a device, its rings are shared
    /// memory; the caller must not let the device observe them before this
    /// function has returned.
    pub unsafe fn new(queue_size: u16) -> Result<Self, &'static str> {
        if !queue_size.is_power_of_two() {
            return Err("Queue size must be power of 2");
        }

        // Split-ring sizes: avail = flags(2) + idx(2) + ring(2*N) + used_event(2),
        // used = flags(2) + idx(2) + ring(8*N) + avail_event(2).
        let desc_size = queue_size as usize * core::mem::size_of::<VirtqDesc>();
        let avail_size = 6 + queue_size as usize * 2;
        let used_size = 6 + queue_size as usize * core::mem::size_of::<VirtqUsedElem>();
        let avail_offset = desc_size;
        // The legacy layout places the used ring on a page boundary.
        let used_offset = Self::align_up(avail_offset + avail_size, 4096);
        let total_size = used_offset + used_size;

        // The frame allocator hands out power-of-two page counts (orders).
        let ring_pages = (total_size + 4095) / 4096;
        let ring_order = ring_pages.next_power_of_two().trailing_zeros() as u8;
        let ring_area = crate::sync::with_irqs_disabled(|token| {
            memory::allocate_frames(token, ring_order)
        })
        .map_err(|_| "Failed to allocate virtqueue ring")?;
        let ring_phys = ring_area.start_address.as_u64();
        let desc_phys = ring_phys;
        let avail_phys = ring_phys + avail_offset as u64;
        let used_phys = ring_phys + used_offset as u64;

        // Translate the physical ring addresses to kernel-virtual pointers.
        let desc_virt = crate::memory::phys_to_virt(desc_phys);
        let avail_virt = crate::memory::phys_to_virt(avail_phys);
        let used_virt = crate::memory::phys_to_virt(used_phys);

        let desc_ptr = desc_virt as *mut VirtqDesc;
        let avail_ptr = avail_virt as *mut VirtqAvail;
        // Ring entries start right after the 4-byte (flags, idx) headers.
        let avail_ring_ptr = (avail_virt + 4) as *mut u16;
        let used_ptr = used_virt as *mut VirtqUsed;
        let used_ring_ptr = (used_virt + 4) as *mut VirtqUsedElem;

        // Zero the whole ring area before the device ever sees it.
        core::ptr::write_bytes(desc_ptr as *mut u8, 0, total_size);

        // Build the free list in reverse so descriptor 0 is popped first.
        let mut free_descriptors = Vec::with_capacity(queue_size as usize);
        for i in (0..queue_size).rev() {
            free_descriptors.push(i);
        }

        Ok(Self {
            queue_size,
            _ring_area: ring_area,
            desc_area: desc_phys,
            avail_area: avail_phys,
            used_area: used_phys,
            desc_ptr,
            avail_ptr,
            avail_ring_ptr,
            used_ptr,
            used_ring_ptr,
            free_descriptors,
            last_used_idx: 0,
            next_avail_idx: 0,
        })
    }
186
    /// Physical address of the descriptor table.
    pub fn desc_area(&self) -> u64 {
        self.desc_area
    }

    /// Physical address of the available ring.
    pub fn avail_area(&self) -> u64 {
        self.avail_area
    }

    /// Physical address of the used ring.
    pub fn used_area(&self) -> u64 {
        self.used_area
    }

    /// Number of descriptors in the queue (power of two).
    pub fn size(&self) -> u16 {
        self.queue_size
    }
206
    /// Takes one descriptor index from the free list, or `None` when the
    /// table is exhausted.
    pub fn alloc_descriptor(&mut self) -> Option<u16> {
        self.free_descriptors.pop()
    }
213
214 pub fn free_descriptor(&mut self, head: u16) {
218 let mut current = head;
219
220 loop {
221 let desc = unsafe { &*self.desc_ptr.add(current as usize) };
223 let has_next = desc.flags & vring_flags::NEXT != 0;
224 let next = desc.next;
225
226 self.free_descriptors.push(current);
227
228 if !has_next {
229 break;
230 }
231 current = next;
232 }
233 }
234
    /// Posts a chain of buffers to the device.
    ///
    /// Each entry is `(physical_address, length, device_writable)`; `true`
    /// marks a buffer the device writes into. Returns the head descriptor
    /// index — the token the device echoes back through the used ring.
    ///
    /// # Errors
    /// Fails on an empty buffer list or when fewer free descriptors remain
    /// than buffers requested.
    pub fn add_buffer(&mut self, buffers: &[(u64, u32, bool)]) -> Result<u16, &'static str> {
        if buffers.is_empty() {
            return Err("Empty buffer list");
        }

        // Checked up front so the chain build below can never run dry.
        if buffers.len() > self.free_descriptors.len() {
            return Err("Not enough free descriptors");
        }

        let head = self.alloc_descriptor().ok_or("No free descriptors")?;
        let mut current = head;

        for (i, &(addr, len, write)) in buffers.iter().enumerate() {
            let is_last = i == buffers.len() - 1;

            let desc = unsafe { &mut *self.desc_ptr.add(current as usize) };
            desc.addr = addr;
            desc.len = len;
            desc.flags = if write { vring_flags::WRITE } else { 0 };

            // Link all but the final descriptor into a NEXT chain.
            if !is_last {
                let next = self.alloc_descriptor().ok_or("No free descriptors")?;
                desc.flags |= vring_flags::NEXT;
                desc.next = next;
                current = next;
            }
        }

        let avail_idx = unsafe { (*self.avail_ptr).idx.load(Ordering::Acquire) };
        // Ring slots wrap modulo the (power-of-two) queue size.
        let ring_idx = (avail_idx % self.queue_size) as usize;

        unsafe {
            // Publish the head in the available-ring slot first...
            write_volatile(self.avail_ring_ptr.add(ring_idx), head);
        }

        // ...then ensure the descriptors and ring entry are visible before
        // the index update tells the device to look at them.
        fence(Ordering::Release);

        unsafe {
            (*self.avail_ptr)
                .idx
                .store(avail_idx.wrapping_add(1), Ordering::Release);
        }

        // Driver-side mirror of avail.idx.
        self.next_avail_idx = avail_idx.wrapping_add(1);

        Ok(head)
    }
296
    /// True when the device has published completions that the driver has
    /// not yet consumed via `get_used`.
    pub fn has_used(&self) -> bool {
        // Acquire pairs with the device's publication of the used index.
        let used_idx = unsafe { (*self.used_ptr).idx.load(Ordering::Acquire) };
        self.last_used_idx != used_idx
    }
303
    /// Pops the next completion from the used ring, if one is pending.
    ///
    /// Returns `(head_descriptor_index, bytes_written)` as reported by the
    /// device, and recycles the whole descriptor chain onto the free list.
    pub fn get_used(&mut self) -> Option<(u16, u32)> {
        // Acquire pairs with the device's write of the used index.
        let used_idx = unsafe { (*self.used_ptr).idx.load(Ordering::Acquire) };

        if self.last_used_idx == used_idx {
            return None;
        }

        let ring_idx = (self.last_used_idx % self.queue_size) as usize;

        let elem = unsafe { read_volatile(self.used_ring_ptr.add(ring_idx)) };

        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        // `elem.id` is the head descriptor originally returned by
        // `add_buffer`; hand the whole chain back to the free list.
        self.free_descriptor(elem.id as u16);

        Some((elem.id as u16, elem.len))
    }
332
333 pub fn should_notify(&self) -> bool {
335 true
338 }
339}
340
/// A legacy (pre-1.0 / transitional) virtio PCI device, accessed through
/// the port-I/O register window in BAR0.
pub struct VirtioDevice {
    /// Underlying PCI function (config-space access, enable bits).
    pub pci_dev: PciDevice,

    /// Base port of the legacy virtio I/O register window (from BAR0).
    pub io_base: u16,
}
351
352impl VirtioDevice {
    /// Claims a PCI function as a legacy virtio device.
    ///
    /// Requires BAR0 to be an I/O-space BAR (the legacy register window),
    /// then enables I/O decoding and bus mastering on the function.
    ///
    /// # Errors
    /// Fails when BAR0 is absent or is a memory BAR.
    ///
    /// # Safety
    /// The caller must ensure `pci_dev` really is a virtio device and that
    /// no other driver owns it.
    pub unsafe fn new(pci_dev: PciDevice) -> Result<Self, &'static str> {
        let bar0 = pci_dev.read_bar(0).ok_or("BAR0 not present")?;

        let io_base = match bar0 {
            Bar::Io { port } => port,
            _ => return Err("BAR0 is not I/O space (legacy VirtIO required)"),
        };

        // Let the device decode port accesses and DMA into the rings.
        pci_dev.enable_io_space();
        pci_dev.enable_bus_master();

        Ok(Self { pci_dev, io_base })
    }
372
    /// Reads an 8-bit register at `offset` within the legacy I/O window.
    pub fn read_reg_u8(&self, offset: u16) -> u8 {
        unsafe { crate::arch::x86_64::io::inb(self.io_base + offset) }
    }

    /// Reads a 16-bit register at `offset` within the legacy I/O window.
    pub fn read_reg_u16(&self, offset: u16) -> u16 {
        unsafe { crate::arch::x86_64::io::inw(self.io_base + offset) }
    }

    /// Reads a 32-bit register at `offset` within the legacy I/O window.
    pub fn read_reg_u32(&self, offset: u16) -> u32 {
        unsafe { crate::arch::x86_64::io::inl(self.io_base + offset) }
    }

    /// Writes an 8-bit register at `offset` within the legacy I/O window.
    pub fn write_reg_u8(&self, offset: u16, value: u8) {
        unsafe { crate::arch::x86_64::io::outb(self.io_base + offset, value) }
    }

    /// Writes a 16-bit register at `offset` within the legacy I/O window.
    pub fn write_reg_u16(&self, offset: u16, value: u16) {
        unsafe { crate::arch::x86_64::io::outw(self.io_base + offset, value) }
    }

    /// Writes a 32-bit register at `offset` within the legacy I/O window.
    pub fn write_reg_u32(&self, offset: u16, value: u32) {
        unsafe { crate::arch::x86_64::io::outl(self.io_base + offset, value) }
    }
408
409 pub fn read_device_features(&self) -> u32 {
411 self.read_reg_u32(0) }
413
414 pub fn write_guest_features(&self, features: u32) {
416 self.write_reg_u32(4, features); }
418
419 pub fn get_status(&self) -> u8 {
421 self.read_reg_u8(18) }
423
424 pub fn set_status(&self, status: u8) {
426 self.write_reg_u8(18, status); }
428
429 pub fn add_status(&self, status: u8) {
431 let current = self.get_status();
432 self.set_status(current | status);
433 }
434
435 pub fn reset(&self) {
437 self.set_status(0);
438 }
439
440 pub fn read_isr_status(&self) -> u8 {
442 self.read_reg_u8(19) }
444
445 pub fn ack_interrupt(&self) {
447 let _ = self.read_reg_u8(19); }
450
451 pub fn setup_queue(&self, queue_index: u16, queue: &Virtqueue) {
453 self.write_reg_u16(14, queue_index); self.write_reg_u16(12, queue.size()); let desc_pfn = (queue.desc_area() >> 12) as u32;
461 self.write_reg_u32(8, desc_pfn); }
463
464 pub fn notify_queue(&self, queue_index: u16) {
466 self.write_reg_u16(16, queue_index); }
468}