strat9_kernel/hardware/storage/
virtio_block.rs1use crate::{
9 arch::x86_64::pci::{self, PciDevice},
10 hardware::virtio::{
11 common::{VirtioDevice, Virtqueue},
12 status,
13 },
14 memory,
15 sync::SpinLock,
16};
17use alloc::{boxed::Box, vec::Vec};
18use core::{mem, ptr};
19
/// Size in bytes of one logical sector. virtio-blk requests always address
/// the disk in 512-byte sector units (see `BlockRequestHeader::sector`).
pub const SECTOR_SIZE: usize = 512;
22
/// virtio-blk feature bits (`VIRTIO_BLK_F_*`), as defined by the VirtIO
/// specification's block-device section. None of these are currently
/// negotiated by this driver (see `new`, which writes guest features = 0);
/// they are kept for future feature negotiation.
pub mod features {
    /// Maximum segment size is given in config field `size_max`.
    pub const VIRTIO_BLK_F_SIZE_MAX: u32 = 1 << 1;
    /// Maximum number of segments per request is given in `seg_max`.
    pub const VIRTIO_BLK_F_SEG_MAX: u32 = 1 << 2;
    /// Legacy C/H/S geometry is available in config space.
    pub const VIRTIO_BLK_F_GEOMETRY: u32 = 1 << 4;
    /// Device is read-only.
    pub const VIRTIO_BLK_F_RO: u32 = 1 << 5;
    /// Device block size is given in config field `blk_size`.
    pub const VIRTIO_BLK_F_BLK_SIZE: u32 = 1 << 6;
    /// Device supports the cache-flush command.
    pub const VIRTIO_BLK_F_FLUSH: u32 = 1 << 9;
    /// Device exposes topology information (alignment, I/O sizes).
    pub const VIRTIO_BLK_F_TOPOLOGY: u32 = 1 << 10;
    /// Writeback-cache mode is toggleable via config space.
    pub const VIRTIO_BLK_F_CONFIG_WCE: u32 = 1 << 11;
    /// Device supports the DISCARD (TRIM) command.
    pub const VIRTIO_BLK_F_DISCARD: u32 = 1 << 13;
    /// Device supports the WRITE ZEROES command.
    pub const VIRTIO_BLK_F_WRITE_ZEROES: u32 = 1 << 14;
}
36
/// Command code written into `BlockRequestHeader::request_type`
/// (the virtio-blk `VIRTIO_BLK_T_*` values).
///
/// Derives `Copy`/`Eq` so request types can be compared and logged, matching
/// the derive set already used on `BlockStatus`.
#[allow(dead_code)]
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestType {
    /// Device-to-driver transfer (read).
    In = 0,
    /// Driver-to-device transfer (write).
    Out = 1,
    /// Flush the device's volatile write cache.
    Flush = 4,
    /// Retrieve the device identifier string.
    GetId = 8,
    /// Discard (TRIM) a sector range.
    Discard = 11,
    /// Write zeroes over a sector range.
    WriteZeroes = 13,
}
54
/// 16-byte header that begins every virtio-blk request, placed first in the
/// descriptor chain. `#[repr(C)]` keeps the field order/layout exactly as the
/// device expects.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct BlockRequestHeader {
    /// One of the `RequestType` command codes (as u32).
    pub request_type: u32,
    /// Reserved field; this driver always writes 0.
    pub reserved: u32,
    /// Starting sector of the transfer, in 512-byte units.
    pub sector: u64,
}
63
/// Completion status byte written by the device into the final (1-byte,
/// device-writable) descriptor of each request chain.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlockStatus {
    /// Request completed successfully.
    Ok = 0,
    /// Device-side I/O failure.
    IoError = 1,
    /// Device does not support the requested operation.
    Unsupported = 2,
}
72
/// Mirror of the leading fields of the virtio-blk device-specific
/// configuration space.
///
/// Currently unused (`dead_code`): `new` reads the capacity directly via
/// `read_reg_u32` instead of mapping this struct over config space.
#[repr(C)]
#[allow(dead_code)]
struct BlockConfig {
    capacity: u64,            // total sectors (512-byte units)
    size_max: u32,            // max segment size (if VIRTIO_BLK_F_SIZE_MAX)
    seg_max: u32,             // max segments per request (if VIRTIO_BLK_F_SEG_MAX)
    geometry_cylinders: u16,  // legacy geometry (if VIRTIO_BLK_F_GEOMETRY)
    geometry_heads: u8,
    geometry_sectors: u8,
    blk_size: u32,            // device block size (if VIRTIO_BLK_F_BLK_SIZE)
}
86
/// Minimal sector-granular block-device interface implemented by storage
/// drivers (here: `VirtioBlockDevice`).
pub trait BlockDevice {
    /// Reads sector `sector` into `buf`; `buf` must be at least
    /// `SECTOR_SIZE` bytes.
    fn read_sector(&self, sector: u64, buf: &mut [u8]) -> Result<(), BlockError>;
    /// Writes the first `SECTOR_SIZE` bytes of `buf` to sector `sector`.
    fn write_sector(&self, sector: u64, buf: &[u8]) -> Result<(), BlockError>;
    /// Total number of addressable 512-byte sectors.
    fn sector_count(&self) -> u64;
}
96
/// Errors returned by `BlockDevice` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
pub enum BlockError {
    /// The device reported a failure, or the request could not be queued.
    #[error("device I/O error")]
    IoError,
    /// The requested sector is at or beyond the device capacity.
    #[error("invalid sector number")]
    InvalidSector,
    /// The caller's buffer is smaller than `SECTOR_SIZE`.
    #[error("buffer too small")]
    BufferTooSmall,
    /// A required DMA frame allocation failed; the device is unusable
    /// for this request.
    #[error("device not ready")]
    NotReady,
}
108
/// Driver state for one virtio-blk PCI device.
pub struct VirtioBlockDevice {
    /// Transport handle (status, queue setup, notifications, ISR).
    device: VirtioDevice,
    /// Request virtqueue (queue 0); the lock serializes all submissions.
    queue: SpinLock<Virtqueue>,
    /// Device capacity in 512-byte sectors, read once at init.
    capacity: u64,
}
115
// SAFETY: the only interior mutable state is the virtqueue, which is guarded
// by the `SpinLock` field; `VirtioDevice` is accessed through `&self` methods.
// NOTE(review): this assumes `VirtioDevice`'s register accesses are safe to
// issue from any CPU — confirm in the virtio common layer.
unsafe impl Send for VirtioBlockDevice {}
unsafe impl Sync for VirtioBlockDevice {}
119
impl VirtioBlockDevice {
    /// Initializes a virtio-blk device and brings it to the DRIVER_OK state:
    /// reset → ACKNOWLEDGE → DRIVER → feature negotiation (no features
    /// accepted) → FEATURES_OK check → queue 0 setup → DRIVER_OK, then reads
    /// the capacity from device config space.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pci_dev` really is a virtio block
    /// device and that no other code drives it concurrently.
    pub unsafe fn new(pci_dev: PciDevice) -> Result<Self, &'static str> {
        log::info!("VirtIO-blk: Initializing device at {:?}", pci_dev.address);

        let device = VirtioDevice::new(pci_dev)?;

        // Standard virtio initialization handshake.
        device.reset();

        device.add_status(status::ACKNOWLEDGE as u8);

        device.add_status(status::DRIVER as u8);

        let device_features = device.read_device_features();
        log::debug!("VirtIO-blk: Device features: 0x{:08x}", device_features);

        // Accept no optional features: this driver only uses the baseline
        // read/write request format.
        let guest_features = 0;
        device.write_guest_features(guest_features);

        device.add_status(status::FEATURES_OK as u8);

        // The device clears FEATURES_OK if it rejects our feature selection.
        if device.get_status() & (status::FEATURES_OK as u8) == 0 {
            return Err("Device doesn't support our feature set");
        }

        // Queue 0 is the (only) request queue; size 0 means "not present".
        let queue_size = device.queue_max_size(0);
        if queue_size == 0 {
            return Err("VirtIO-blk queue 0 is unavailable");
        }
        log::info!("VirtIO-blk: queue 0 size = {}", queue_size);

        let queue = Virtqueue::new(queue_size)?;

        device.setup_queue(0, &queue);

        device.add_status(status::DRIVER_OK as u8);

        // Capacity (in 512-byte sectors) from device-specific config space.
        // NOTE(review): offsets 20/24 presumably correspond to the legacy
        // virtio-PCI layout where device config follows the 20-byte common
        // header (no MSI-X) — confirm against `read_reg_u32`'s base offset.
        let capacity_low = device.read_reg_u32(20);
        let capacity_high = device.read_reg_u32(24);
        let capacity = ((capacity_high as u64) << 32) | (capacity_low as u64);

        log::info!(
            "VirtIO-blk: Capacity: {} sectors ({} MB)",
            capacity,
            (capacity * SECTOR_SIZE as u64) / (1024 * 1024)
        );

        log::info!("VirtIO-blk: Device initialized successfully");

        Ok(Self {
            device,
            queue: SpinLock::new(queue),
            capacity,
        })
    }

    /// Submits one request and busy-polls until the device retires it.
    ///
    /// Builds the standard virtio-blk descriptor chain:
    /// header (device-readable) [+ data] + status byte (device-writable).
    /// Data is staged through a freshly allocated physically-contiguous
    /// bounce buffer; for reads (`is_write == false`) the result is copied
    /// back into `buf` on success. All DMA frames are freed on every exit
    /// path (success, queue-full, allocation failure, timeout).
    ///
    /// `data_buf` is `Some((buffer, is_write))`, or `None` for data-less
    /// requests (e.g. flush).
    fn do_request(
        &self,
        request_type: RequestType,
        sector: u64,
        mut data_buf: Option<(&mut [u8], bool)>, // (buffer, is_write)
    ) -> Result<(), BlockError> {
        // One frame holds both the 16-byte request header and the trailing
        // status byte the device writes back.
        let metadata_frame = crate::sync::with_irqs_disabled(|token| memory::allocate_frame(token))
            .map_err(|_| BlockError::NotReady)?;

        let metadata_phys = metadata_frame.start_address.as_u64();
        let metadata_virt = crate::memory::phys_to_virt(metadata_phys);
        let status_offset = mem::size_of::<BlockRequestHeader>() as u64;

        let header_ptr = metadata_virt as *mut BlockRequestHeader;
        let status_ptr = (metadata_virt + status_offset) as *mut u8;

        // SAFETY: `metadata_virt` maps the frame just allocated, which is
        // large enough for the header plus one status byte.
        unsafe {
            ptr::write(
                header_ptr,
                BlockRequestHeader {
                    request_type: request_type as u32,
                    reserved: 0,
                    sector,
                },
            );
            ptr::write(status_ptr, 0xFF); // poison value; device overwrites it
        }

        // Descriptor chain entries: (phys addr, len, device_writable).
        let mut buffers = Vec::with_capacity(3);

        // 1) Header — device reads it.
        buffers.push((
            metadata_phys,
            mem::size_of::<BlockRequestHeader>() as u32,
            false,
        ));

        // Bounce-buffer bookkeeping so the allocation can be freed later.
        let mut data_frame_info = None;

        if let Some((buf, is_write)) = data_buf.as_mut() {
            // Round the buffer up to whole pages, then to a power-of-two
            // order for the contiguous-frame allocator.
            let buf_size = buf.len();
            let buf_pages = (buf_size + 4095) / 4096;
            let buf_order = buf_pages.next_power_of_two().trailing_zeros() as u8;

            let buf_frame = crate::sync::with_irqs_disabled(|token| {
                memory::allocate_phys_contiguous(token, buf_order)
            })
            .map_err(|_| {
                // Data allocation failed: return the metadata frame first.
                crate::sync::with_irqs_disabled(|token| {
                    memory::free_frame(token, metadata_frame);
                });
                BlockError::NotReady
            })?;

            let buf_phys = buf_frame.start_address.as_u64();
            let buf_virt = crate::memory::phys_to_virt(buf_phys);
            data_frame_info = Some((buf_frame, buf_order));

            if *is_write {
                // Stage outgoing data into the bounce buffer.
                // SAFETY: the contiguous allocation covers at least
                // `buf_size` bytes at `buf_virt`.
                unsafe {
                    ptr::copy_nonoverlapping(buf.as_ptr(), buf_virt as *mut u8, buf_size);
                }
            }

            // For reads the DEVICE writes the buffer; for writes it only reads.
            let device_writable = !*is_write;

            // 2) Data stage.
            buffers.push((buf_phys, buf_size as u32, device_writable));
        }

        // 3) Status byte — always device-writable, always last in the chain.
        buffers.push((metadata_phys + status_offset, 1, true));

        let mut queue = self.queue.lock();
        let token = match queue.add_buffer(&buffers) {
            Ok(t) => t,
            Err(_) => {
                // Queue full (or similar): release the lock, free both DMA
                // allocations, then report the failure.
                drop(queue);
                crate::sync::with_irqs_disabled(|token| {
                    memory::free_frame(token, metadata_frame);
                    if let Some((f, o)) = data_frame_info {
                        memory::free_phys_contiguous(token, f, o);
                    }
                });
                return Err(BlockError::IoError);
            }
        };

        if queue.should_notify() {
            self.device.notify_queue(0);
        }
        drop(queue);

        // Busy-poll for completion (no IRQ-driven wakeup yet), bailing out
        // after a fixed number of spins. The queue lock is re-taken per
        // iteration so concurrent submitters are not starved.
        let mut spins = 0u32;
        loop {
            let mut queue = self.queue.lock();
            if queue.has_used() {
                if let Some((used_token, _len)) = queue.get_used() {
                    if used_token == token {
                        break;
                    } else {
                        // A completion belonging to some other request was
                        // consumed here; its used entry is dropped.
                        log::warn!("VirtIO-blk: Received unexpected token {}", used_token);
                    }
                }
            }
            let (used_idx, last_used_idx) = queue.used_indices();
            drop(queue);
            spins = spins.saturating_add(1);
            if spins == 5_000_000 {
                // Timeout: dump diagnostics, reclaim DMA memory, fail.
                let isr = self.device.read_isr_status();
                log::error!(
                    "VirtIO-blk: request timeout sector={} token={} used_idx={} last_used_idx={} isr={}",
                    sector,
                    token,
                    used_idx,
                    last_used_idx,
                    isr
                );
                crate::serial_println!(
                    "[virtio-blk] timeout sector={} token={} used_idx={} last_used_idx={} isr={}",
                    sector,
                    token,
                    used_idx,
                    last_used_idx,
                    isr
                );
                crate::sync::with_irqs_disabled(|token| {
                    memory::free_frame(token, metadata_frame);
                    if let Some((f, o)) = data_frame_info {
                        memory::free_phys_contiguous(token, f, o);
                    }
                });
                return Err(BlockError::IoError);
            }
            core::hint::spin_loop();
        }

        // SAFETY: the device has retired our descriptor chain, so the status
        // byte it wrote is stable. NOTE(review): a volatile read may be more
        // appropriate for DMA-written memory — verify compiler assumptions.
        let status_byte = unsafe { ptr::read(status_ptr) };

        if let Some((buf, is_write)) = data_buf {
            if let Some((buf_frame, buf_order)) = data_frame_info {
                let buf_virt = crate::memory::phys_to_virt(buf_frame.start_address.as_u64());

                // Copy read data back to the caller only on success.
                if !is_write && status_byte == BlockStatus::Ok as u8 {
                    // SAFETY: the bounce buffer was sized from `buf.len()`.
                    unsafe {
                        ptr::copy_nonoverlapping(
                            buf_virt as *const u8,
                            buf.as_mut_ptr(),
                            buf.len(),
                        );
                    }
                }

                crate::sync::with_irqs_disabled(|token| {
                    memory::free_phys_contiguous(token, buf_frame, buf_order);
                });
            }
        }

        crate::sync::with_irqs_disabled(|token| {
            memory::free_frame(token, metadata_frame);
        });

        if status_byte == BlockStatus::Ok as u8 {
            Ok(())
        } else {
            log::error!("VirtIO-blk: Request failed with status {}", status_byte);
            Err(BlockError::IoError)
        }
    }
}
408
409impl BlockDevice for VirtioBlockDevice {
410 fn read_sector(&self, sector: u64, buf: &mut [u8]) -> Result<(), BlockError> {
412 if sector >= self.capacity {
413 return Err(BlockError::InvalidSector);
414 }
415
416 if buf.len() < SECTOR_SIZE {
417 return Err(BlockError::BufferTooSmall);
418 }
419
420 self.do_request(RequestType::In, sector, Some((buf, false)))
421 }
422
423 fn write_sector(&self, sector: u64, buf: &[u8]) -> Result<(), BlockError> {
425 if sector >= self.capacity {
426 return Err(BlockError::InvalidSector);
427 }
428
429 if buf.len() < SECTOR_SIZE {
430 return Err(BlockError::BufferTooSmall);
431 }
432
433 let mut buf_copy = [0u8; SECTOR_SIZE];
442 buf_copy[..SECTOR_SIZE].copy_from_slice(&buf[..SECTOR_SIZE]);
443
444 self.do_request(RequestType::Out, sector, Some((&mut buf_copy, true)))
445 }
446
447 fn sector_count(&self) -> u64 {
449 self.capacity
450 }
451}
452
/// Global handle to the (single) virtio-blk device; set once by `init`.
static VIRTIO_BLOCK: SpinLock<Option<Box<VirtioBlockDevice>>> = SpinLock::new(None);

/// PCI interrupt line of the device. Written once in `init` (before the IRQ
/// handler is registered) and read via `get_irq`.
/// NOTE(review): `static mut` — consider `AtomicU8` to remove the unsafe reads.
static mut VIRTIO_BLOCK_IRQ: u8 = 0;
458
459pub fn init() {
463 log::info!("VirtIO-blk: Scanning for devices...");
464
465 let pci_dev = match pci::probe_first(pci::ProbeCriteria {
468 vendor_id: Some(pci::vendor::VIRTIO),
469 device_id: Some(pci::device::VIRTIO_BLOCK),
470 class_code: Some(pci::class::MASS_STORAGE),
471 subclass: None,
472 prog_if: None,
473 })
474 .or_else(|| pci::find_virtio_device(pci::device::VIRTIO_BLOCK))
475 {
476 Some(dev) => dev,
477 None => {
478 log::warn!("VirtIO-blk: No block device found");
479 return;
480 }
481 };
482
483 let irq_line = pci_dev.read_config_u8(pci::config::INTERRUPT_LINE);
485
486 match unsafe { VirtioBlockDevice::new(pci_dev) } {
488 Ok(device) => {
489 unsafe {
491 VIRTIO_BLOCK_IRQ = irq_line;
492 }
493
494 *VIRTIO_BLOCK.lock() = Some(Box::new(device));
496
497 crate::arch::x86_64::idt::register_virtio_block_irq(irq_line);
499
500 log::info!("VirtIO-blk: Device initialized on IRQ {}", irq_line);
501 }
502 Err(e) => {
503 log::error!("VirtIO-blk: Failed to initialize device: {}", e);
504 }
505 }
506}
507
/// IRQ handler entry point for the virtio-blk device.
///
/// Reads the ISR status and, if this device raised the interrupt (non-zero),
/// acknowledges it. Request completion itself is detected by polling in
/// `do_request`, so clearing the interrupt is all this needs to do.
///
/// NOTE(review): this takes the `VIRTIO_BLOCK` spin lock from interrupt
/// context; if that lock can be held elsewhere with interrupts enabled
/// (e.g. in `get_device`), this can deadlock — confirm the SpinLock is
/// IRQ-safe or always acquired with IRQs disabled.
pub fn handle_interrupt() {
    let lock = VIRTIO_BLOCK.lock();
    if let Some(device) = lock.as_ref() {
        let isr_status = device.device.read_isr_status();
        if isr_status != 0 {
            device.device.ack_interrupt();

            log::trace!("VirtIO-blk: Interrupt handled (ISR={})", isr_status);
        }
    }
}
528
529pub fn get_device() -> Option<&'static VirtioBlockDevice> {
531 unsafe {
532 let lock = VIRTIO_BLOCK.lock();
533 if lock.is_some() {
534 let ptr = &**lock.as_ref().unwrap() as *const VirtioBlockDevice;
538 Some(&*ptr)
539 } else {
540 None
541 }
542 }
543}
544
/// Returns the PCI interrupt line recorded for the block device
/// (0 if `init` has not found one).
pub fn get_irq() -> u8 {
    // SAFETY: `VIRTIO_BLOCK_IRQ` is written only once, in `init`, before the
    // IRQ handler is registered; afterwards it is only read — TODO confirm no
    // concurrent writer can exist.
    unsafe { VIRTIO_BLOCK_IRQ }
}