strat9_bus_drivers/mmio.rs

use core::sync::atomic::{AtomicUsize, Ordering};

/// A memory-mapped I/O register window, described by a base address and a
/// size in bytes. The base is stored atomically so that readers observe a
/// fully published mapping after `init`.
pub struct MmioRegion {
    base: AtomicUsize,
    size: usize,
}

impl MmioRegion {
    /// Creates an empty, not-yet-mapped region (base 0, size 0).
    pub const fn new() -> Self {
        Self {
            base: AtomicUsize::new(0),
            size: 0,
        }
    }

    /// Records the mapped base address and size. Takes `&mut self` because
    /// `size` is a plain field; the base is published with `Release` so that
    /// later `Acquire` loads see it.
    pub fn init(&mut self, base: usize, size: usize) {
        self.base.store(base, Ordering::Release);
        self.size = size;
    }

    /// Returns the current base address (0 if the region is uninitialised).
    pub fn base(&self) -> usize {
        self.base.load(Ordering::Acquire)
    }

    /// True once `init` has published a non-zero base address.
    pub fn is_valid(&self) -> bool {
        self.base() != 0
    }

    /// Bounds-checks an access of `width` bytes at `offset` and returns the
    /// absolute address. Panics on an uninitialised region, an out-of-range
    /// or overflowing offset, or a misaligned access.
    fn checked_addr(&self, offset: usize, width: usize) -> usize {
        let base = self.base();
        assert!(base != 0, "mmio region not initialised");
        let end = offset.checked_add(width).expect("mmio offset overflow");
        assert!(end <= self.size, "mmio access out of bounds");
        let addr = base.checked_add(offset).expect("mmio address overflow");
        assert!(addr % width == 0, "unaligned mmio access");
        addr
    }

    /// Volatile 8-bit read at `offset`.
    pub fn read8(&self, offset: usize) -> u8 {
        let addr = self.checked_addr(offset, core::mem::size_of::<u8>());
        unsafe { core::ptr::read_volatile(addr as *const u8) }
    }

    /// Volatile 16-bit read at `offset`.
    pub fn read16(&self, offset: usize) -> u16 {
        let addr = self.checked_addr(offset, core::mem::size_of::<u16>());
        unsafe { core::ptr::read_volatile(addr as *const u16) }
    }

    /// Volatile 32-bit read at `offset`.
    pub fn read32(&self, offset: usize) -> u32 {
        let addr = self.checked_addr(offset, core::mem::size_of::<u32>());
        unsafe { core::ptr::read_volatile(addr as *const u32) }
    }

    /// Volatile 64-bit read at `offset`.
    pub fn read64(&self, offset: usize) -> u64 {
        let addr = self.checked_addr(offset, core::mem::size_of::<u64>());
        unsafe { core::ptr::read_volatile(addr as *const u64) }
    }

    /// Volatile 8-bit write of `val` at `offset`.
    pub fn write8(&self, offset: usize, val: u8) {
        let addr = self.checked_addr(offset, core::mem::size_of::<u8>());
        unsafe { core::ptr::write_volatile(addr as *mut u8, val) }
    }

    /// Volatile 16-bit write of `val` at `offset`.
    pub fn write16(&self, offset: usize, val: u16) {
        let addr = self.checked_addr(offset, core::mem::size_of::<u16>());
        unsafe { core::ptr::write_volatile(addr as *mut u16, val) }
    }

    /// Volatile 32-bit write of `val` at `offset`.
    pub fn write32(&self, offset: usize, val: u32) {
        let addr = self.checked_addr(offset, core::mem::size_of::<u32>());
        unsafe { core::ptr::write_volatile(addr as *mut u32, val) }
    }

    /// Volatile 64-bit write of `val` at `offset`.
    pub fn write64(&self, offset: usize, val: u64) {
        let addr = self.checked_addr(offset, core::mem::size_of::<u64>());
        unsafe { core::ptr::write_volatile(addr as *mut u64, val) }
    }

    /// Read-modify-write: sets the bits in `bits` of a 32-bit register.
    pub fn set_bits32(&self, offset: usize, bits: u32) {
        let val = self.read32(offset);
        self.write32(offset, val | bits);
    }

    /// Read-modify-write: clears the bits in `bits` of a 32-bit register.
    pub fn clear_bits32(&self, offset: usize, bits: u32) {
        let val = self.read32(offset);
        self.write32(offset, val & !bits);
    }

    /// Read-modify-write: clears the bits in `clear`, then sets the bits in `set`.
    pub fn modify32(&self, offset: usize, clear: u32, set: u32) {
        let val = self.read32(offset);
        self.write32(offset, (val & !clear) | set);
    }

    /// Extracts a field: masks the register with `mask`, then shifts right by `shift`.
    pub fn read_field32(&self, offset: usize, mask: u32, shift: u32) -> u32 {
        (self.read32(offset) & mask) >> shift
    }

    /// Writes a field: clears `mask`, then inserts `value` shifted left by `shift`.
    pub fn write_field32(&self, offset: usize, mask: u32, shift: u32, value: u32) {
        self.modify32(offset, mask, (value << shift) & mask);
    }
}

// `MmioRegion` contains only an `AtomicUsize` and a `usize`, so it is already
// `Send + Sync`; the explicit impls spell out that sharing a region across
// threads for concurrent register access is intended.
unsafe impl Send for MmioRegion {}
unsafe impl Sync for MmioRegion {}

/// Issues a sequentially consistent memory fence, used to order MMIO accesses
/// against surrounding memory operations.
pub fn memory_barrier() {
    core::sync::atomic::fence(Ordering::SeqCst);
}
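
// A minimal usage sketch (not part of the original module): how a driver might
// drive a register block through `MmioRegion`. The base address, register
// offsets, and the TXFF bit below are hypothetical placeholders rather than
// values for any real device, and the caller must ensure the address range is
// actually mapped to the device before calling this.
fn example_uart_putc(c: u8) {
    let mut uart = MmioRegion::new();
    uart.init(0x0900_0000, 0x1000); // hypothetical mapped UART window

    const DR: usize = 0x00;   // hypothetical data register offset
    const FR: usize = 0x18;   // hypothetical flag register offset
    const TXFF: u32 = 1 << 5; // hypothetical "transmit FIFO full" bit

    // Spin until the transmit FIFO has room, then write the byte.
    while uart.read32(FR) & TXFF != 0 {}
    uart.write8(DR, c);
    memory_barrier(); // order the write before any later device interaction
}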