//! Shared-memory IPC ring buffers.
//!
//! Path: `strat9_kernel/ipc/shared_ring.rs`
1use crate::{
2    capability::CapId,
3    memory::{
4        allocate_mapping_cap_id, ownership_table, release_owned_block, resolve_handle,
5        revoke_mapping_cap_id, unregister_mapping_identity, OwnerError, PhysFrame,
6    },
7    sync::SpinLock,
8};
9use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
10use core::sync::atomic::{AtomicU64, Ordering};
11
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RingId(pub u64);

impl RingId {
    /// Returns the raw numeric identifier.
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Wraps a raw numeric identifier in a `RingId`.
    pub const fn from_u64(raw: u64) -> Self {
        Self(raw)
    }
}
25
/// Errors returned by the shared-ring API.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
pub enum RingError {
    /// The requested ring size was zero.
    #[error("invalid size")]
    InvalidSize,
    /// Frame allocation or ownership registration failed.
    #[error("allocation failed")]
    Alloc,
    /// No ring with the given id exists in the registry.
    #[error("ring not found")]
    NotFound,
}
35
/// A physically-backed shared-memory ring buffer.
///
/// Dropping a `SharedRing` unregisters each page's ownership identity and
/// releases the backing block when `unregister_mapping_identity` hands it back.
pub struct SharedRing {
    /// Requested size in bytes (not rounded up to page granularity).
    size: usize,
    /// Backing 4 KiB physical frames, one per page (see `create_ring`).
    frames: Vec<PhysFrame>,
    /// Per-page ownership identities registered in the ownership table.
    owner_cap_ids: Vec<CapId>,
    /// Per-page mapping identities exposed to clients; revoked by `destroy_ring`.
    mapping_cap_ids: Vec<CapId>,
}
42
43impl SharedRing {
44    /// Performs the size operation.
45    pub fn size(&self) -> usize {
46        self.size
47    }
48
49    /// Performs the page count operation.
50    pub fn page_count(&self) -> usize {
51        self.frames.len()
52    }
53
54    /// Performs the frame phys addrs operation.
55    pub fn frame_phys_addrs(&self) -> Vec<u64> {
56        self.frames
57            .iter()
58            .map(|f| f.start_address.as_u64())
59            .collect()
60    }
61
62    /// Returns stable mapping identities for every page in the ring.
63    pub fn mapping_cap_ids(&self) -> &[CapId] {
64        &self.mapping_cap_ids
65    }
66}
67
68impl Drop for SharedRing {
69    /// Performs the drop operation.
70    fn drop(&mut self) {
71        for (frame, owner_cap_id) in self.frames.drain(..).zip(self.owner_cap_ids.drain(..)) {
72            let handle = resolve_handle(frame.start_address);
73            if let Some(block) = unregister_mapping_identity(handle, owner_cap_id) {
74                release_owned_block(block);
75            }
76        }
77    }
78}
79
80fn rollback_ring_frames(frames: Vec<PhysFrame>, owner_cap_ids: &[CapId], registered: usize) {
81    for (frame, owner_cap_id) in frames
82        .iter()
83        .copied()
84        .zip(owner_cap_ids.iter().copied())
85        .take(registered)
86    {
87        let handle = resolve_handle(frame.start_address);
88        if let Some(block) = unregister_mapping_identity(handle, owner_cap_id) {
89            release_owned_block(block);
90        }
91    }
92
93    for frame in frames.into_iter().skip(registered) {
94        crate::sync::with_irqs_disabled(|token| crate::memory::free_frame(token, frame));
95    }
96}
97
/// Monotonically increasing source of ring ids; starts at 1 so 0 is never handed out.
static NEXT_RING_ID: AtomicU64 = AtomicU64::new(1);
/// Global ring registry; stays `None` until first use (see `ensure_registry`).
static RINGS: SpinLock<Option<BTreeMap<RingId, Arc<SharedRing>>>> = SpinLock::new(None);
100
101/// Performs the ensure registry operation.
102fn ensure_registry(guard: &mut Option<BTreeMap<RingId, Arc<SharedRing>>>) {
103    if guard.is_none() {
104        *guard = Some(BTreeMap::new());
105    }
106}
107
/// Allocates `size` bytes (rounded up to whole 4 KiB pages) of zeroed physical
/// memory, registers each page in the ownership table, and inserts the
/// resulting ring into the global registry.
///
/// On any failure, all work done so far is rolled back before returning.
///
/// # Errors
/// - [`RingError::InvalidSize`] if `size` is zero.
/// - [`RingError::Alloc`] if frame allocation or ownership registration fails.
pub fn create_ring(size: usize) -> Result<RingId, RingError> {
    if size == 0 {
        return Err(RingError::InvalidSize);
    }
    // Round up to whole pages; saturating_add guards the usize::MAX edge.
    let page_count = (size.saturating_add(4095)) / 4096;
    if page_count == 0 {
        // NOTE(review): this branch appears unreachable — size > 0 forces
        // page_count >= 1 even in the saturated case. Kept as a belt-and-braces
        // guard; confirm and consider removing.
        return Err(RingError::InvalidSize);
    }

    let mut frames = Vec::with_capacity(page_count);
    let mut alloc_failed = false;
    for _ in 0..page_count {
        let frame =
            match crate::sync::with_irqs_disabled(|token| crate::memory::allocate_frame(token)) {
                Ok(f) => f,
                Err(_) => {
                    alloc_failed = true;
                    break;
                }
            };
        // Zero each page before it can ever be exposed so no stale kernel
        // data leaks into the ring.
        let v = crate::memory::phys_to_virt(frame.start_address.as_u64());
        // SAFETY: `v` is the kernel virtual alias of a frame we just
        // allocated; writing the full 4096-byte page is in bounds (assumes
        // phys_to_virt covers all allocatable RAM — TODO confirm).
        unsafe { core::ptr::write_bytes(v as *mut u8, 0, 4096) };
        frames.push(frame);
    }
    if alloc_failed {
        // Roll back: none of these frames are in the ownership table yet,
        // so a plain free suffices.
        for rollback in frames.drain(..) {
            crate::sync::with_irqs_disabled(|token| crate::memory::free_frame(token, rollback));
        }
        return Err(RingError::Alloc);
    }

    let id = RingId(NEXT_RING_ID.fetch_add(1, Ordering::Relaxed));
    // One owner identity per page (for the ownership table) and one mapping
    // identity per page (exposed via `SharedRing::mapping_cap_ids`).
    let owner_cap_ids = (0..page_count)
        .map(|_| allocate_mapping_cap_id())
        .collect::<Vec<_>>();
    let mapping_cap_ids = (0..page_count).map(|_| allocate_mapping_cap_id()).collect();

    let mut registered = 0usize;
    for (frame, owner_cap_id) in frames.iter().zip(owner_cap_ids.iter().copied()) {
        let handle = resolve_handle(frame.start_address);
        match ownership_table().ensure_ref(handle, owner_cap_id) {
            // CapAlreadyPresent is tolerated: the reference already exists,
            // so the page counts as registered either way.
            Ok(_) | Err(OwnerError::CapAlreadyPresent) => {
                registered += 1;
            }
            Err(error) => {
                log::warn!(
                    "ipc: failed to register shared ring owner cap={} block={:#x}/{}: {:?}",
                    owner_cap_id.as_u64(),
                    handle.base.as_u64(),
                    handle.order,
                    error
                );
                // Undo everything: the first `registered` frames are
                // unregistered and released, the rest freed directly.
                // NOTE(review): the already-allocated owner/mapping cap ids
                // are not revoked on this path — confirm whether cap-id
                // allocation needs an explicit release here.
                rollback_ring_frames(frames, &owner_cap_ids, registered);
                return Err(RingError::Alloc);
            }
        }
    }

    let ring = Arc::new(SharedRing {
        size,
        frames,
        owner_cap_ids,
        mapping_cap_ids,
    });

    let mut reg = RINGS.lock();
    ensure_registry(&mut *reg);
    // Registry is guaranteed Some immediately after ensure_registry.
    reg.as_mut().unwrap().insert(id, ring);
    Ok(id)
}
179
180/// Returns ring.
181pub fn get_ring(id: RingId) -> Option<Arc<SharedRing>> {
182    let reg = RINGS.lock();
183    reg.as_ref().and_then(|map| map.get(&id).cloned())
184}
185
/// Removes the ring `id` from the registry and revokes every per-page mapping
/// capability. The `SharedRing` itself (and its frames, via its `Drop` impl)
/// is released once the last outstanding `Arc` from `get_ring` is dropped.
///
/// # Errors
/// Returns [`RingError::NotFound`] if the registry is uninitialized, the id
/// is absent, or the corruption guard below trips.
pub fn destroy_ring(id: RingId) -> Result<(), RingError> {
    let mut reg = RINGS.lock();
    let map = reg.as_mut().ok_or(RingError::NotFound)?;

    // ==========================================================================
    // CRITICAL: BTreeMap corruption guard
    //
    // A corrupted heap can produce a BTreeMap whose internal node pointers are
    // invalid (e.g. NULL + offset 16 = 0x10), causing remove() to page-fault.
    // Sanity-check `len()` before mutating: the ring registry never holds more
    // than a few hundred entries under normal operation.  An absurd value
    // indicates that the BTreeMap header itself has been overwritten, and
    // calling remove() would immediately dereference a bad node pointer.
    //
    // In that case we bail early rather than crash.  The heap poison detector
    // in heap.rs will identify the corrupting allocation on the next alloc/free.
    //
    // NOTE(review): this guard (and the unconditional serial_println below)
    // papers over a heap-corruption bug rather than fixing it; consider
    // removing both once the allocator root cause is resolved.
    // ==========================================================================
    let len = map.len();
    if len > 10_000 {
        crate::serial_println!(
            "\x1b[1;31m[ipc] RINGS BTreeMap corrupted: len={} for id={} \u{2014} aborting remove\x1b[0m",
            len, id.as_u64()
        );
        return Err(RingError::NotFound);
    }
    crate::serial_println!("[ipc] destroy_ring(id={}) map.len={}", id.as_u64(), len);

    let ring = map.remove(&id).ok_or(RingError::NotFound)?;
    // Release the registry lock before touching the capability layer so the
    // RINGS spinlock is never held across revoke_mapping_cap_id.
    drop(reg);

    for &cap_id in ring.mapping_cap_ids() {
        // Best-effort: revocation failures are deliberately ignored.
        let _ = revoke_mapping_cap_id(cap_id);
    }
    Ok(())
}