1use alloc::{collections::BTreeMap, vec::Vec};
4use core::sync::atomic::{AtomicU64, Ordering};
5
6use smallvec::{smallvec, SmallVec};
7
8use crate::{capability::CapId, sync::SpinLock};
9
10use super::{
11 address_space::{AddressSpace, VmaFlags, VmaPageSize, VmaType},
12 cow, ownership_table, release_owned_block, revoke_mapping_cap_id,
13 try_register_mapping_identity, unregister_mapping_identity, BlockHandle, OwnerError,
14};
15
/// Caller-visible summary of an exported memory region.
///
/// Returned by `MemoryRegionRegistry::info`; deliberately omits the backing
/// block handles and capability bookkeeping kept in the private entry type.
#[derive(Debug, Clone, Copy)]
pub struct PublicMemoryRegionInfo {
    /// Total region size in bytes (page count times page size at export time).
    pub size: u64,
    /// Page granularity the region was exported with.
    pub page_size: VmaPageSize,
    /// Access flags recorded from the source VMA at export time.
    pub flags: VmaFlags,
}
26
/// Internal registry entry describing one exported region.
///
/// Cloned out of the registry in `map_region` so the lock is not held while
/// the mapping is installed.
#[derive(Debug, Clone)]
struct ExportedMemoryRegion {
    // Total size in bytes, computed at export time.
    size: u64,
    // Page granularity of the source VMA.
    page_size: VmaPageSize,
    // Maximum access flags; map requests are intersected with these.
    flags: VmaFlags,
    // VMA type propagated to new mappings of this region.
    vma_type: VmaType,
    // One block handle per page, in region order.
    handles: Vec<BlockHandle>,
    // Mapping capability ids parallel to `handles`; revoked on destruction.
    mapping_cap_ids: Vec<CapId>,
    // Capabilities currently holding a reference to this region
    // (inline storage for the common 1-2 holder case).
    handle_caps: SmallVec<[CapId; 2]>,
}
37
/// Errors returned by `MemoryRegionRegistry` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RegionCapError {
    /// No registry entry (or no matching capability) for the given id.
    NotFound,
    /// The address does not name a valid VMA, or its size overflows.
    InvalidRegion,
    /// A page inside the region has no effective mapping.
    IncompleteRegion,
    /// The caller-supplied address hint is not page-aligned.
    InvalidAddress,
    /// Reserved for permission failures.
    PermissionDenied,
    /// Pinning, range search, or mapping ran out of resources.
    OutOfMemory,
    /// Identity registration failed partway; the attempt was rolled back.
    InconsistentState,
}
56
/// Outcome of `MemoryRegionRegistry::release_handle`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReleaseRegionResult {
    /// Other capabilities still reference the region; it stays registered.
    Retained,
    /// The last capability was released and the region was torn down.
    Destroyed {
        /// Number of mappings revoked while destroying the region.
        revoked_mappings: usize,
    },
}
68
/// Registry of memory regions exported for cross-address-space sharing.
pub struct MemoryRegionRegistry {
    // Monotonic resource-id allocator; starts at 1 (see `new`).
    next_id: AtomicU64,
    // Resource id -> exported region, guarded by a spin lock.
    entries: SpinLock<BTreeMap<u64, ExportedMemoryRegion>>,
}
74
75impl MemoryRegionRegistry {
76 pub fn new() -> Self {
78 Self {
79 next_id: AtomicU64::new(1),
80 entries: SpinLock::new(BTreeMap::new()),
81 }
82 }
83
84 fn rollback_registered_handles(handles: &[BlockHandle], handle_cap: CapId) {
85 for handle in handles.iter().copied() {
86 if let Some(block) = unregister_mapping_identity(handle, handle_cap) {
87 release_owned_block(block);
88 }
89 }
90 }
91
92 pub fn export_region(
94 &self,
95 address_space: &AddressSpace,
96 start: u64,
97 handle_cap: CapId,
98 ) -> Result<u64, RegionCapError> {
99 let region = address_space
100 .region_by_start(start)
101 .ok_or(RegionCapError::InvalidRegion)?;
102 let page_bytes = region.page_size.bytes();
103 let size = (region.page_count as u64)
104 .checked_mul(page_bytes)
105 .ok_or(RegionCapError::InvalidRegion)?;
106
107 let mut handles = Vec::with_capacity(region.page_count);
108 let mut mapping_cap_ids = Vec::with_capacity(region.page_count);
109
110 for index in 0..region.page_count {
111 let vaddr = start
112 .checked_add((index as u64).saturating_mul(page_bytes))
113 .ok_or(RegionCapError::InvalidRegion)?;
114 let mapping = address_space
115 .effective_mapping_by_start(vaddr)
116 .ok_or(RegionCapError::IncompleteRegion)?;
117 if let Err(error) = try_register_mapping_identity(mapping.handle, handle_cap) {
118 if error != OwnerError::CapAlreadyPresent {
119 log::warn!(
120 "memory: failed to export region start={:#x} handle cap={} block={:#x}/{}: {:?}",
121 start,
122 handle_cap.as_u64(),
123 mapping.handle.base.as_u64(),
124 mapping.handle.order,
125 error
126 );
127 Self::rollback_registered_handles(&handles, handle_cap);
128 return Err(RegionCapError::InconsistentState);
129 }
130 }
131 handles.push(mapping.handle);
132 mapping_cap_ids.push(mapping.cap_id);
133 }
134
135 let resource_id = self.next_id.fetch_add(1, Ordering::Relaxed);
136 self.entries.lock().insert(
137 resource_id,
138 ExportedMemoryRegion {
139 size,
140 page_size: region.page_size,
141 flags: region.flags,
142 vma_type: region.vma_type,
143 handles,
144 mapping_cap_ids,
145 handle_caps: smallvec![handle_cap],
146 },
147 );
148 Ok(resource_id)
149 }
150
151 pub fn retain_handle(&self, resource_id: u64, handle_cap: CapId) -> Result<(), RegionCapError> {
153 let mut entries = self.entries.lock();
154 let entry = entries
155 .get_mut(&resource_id)
156 .ok_or(RegionCapError::NotFound)?;
157 if !entry
158 .handle_caps
159 .iter()
160 .any(|existing| *existing == handle_cap)
161 {
162 let mut newly_registered = Vec::with_capacity(entry.handles.len());
163 for handle in &entry.handles {
164 match try_register_mapping_identity(*handle, handle_cap) {
165 Ok(()) => {
166 newly_registered.push(*handle);
167 }
168 Err(OwnerError::CapAlreadyPresent) => {}
169 Err(error) => {
170 log::warn!(
171 "memory: failed to retain region={} cap={} block={:#x}/{}: {:?}",
172 resource_id,
173 handle_cap.as_u64(),
174 handle.base.as_u64(),
175 handle.order,
176 error
177 );
178 Self::rollback_registered_handles(&newly_registered, handle_cap);
179 return Err(RegionCapError::InconsistentState);
180 }
181 }
182 }
183 entry.handle_caps.push(handle_cap);
184 }
185 Ok(())
186 }
187
188 pub fn info(&self, resource_id: u64) -> Option<PublicMemoryRegionInfo> {
190 self.entries
191 .lock()
192 .get(&resource_id)
193 .map(|entry| PublicMemoryRegionInfo {
194 size: entry.size,
195 page_size: entry.page_size,
196 flags: entry.flags,
197 })
198 }
199
200 pub fn map_region(
202 &self,
203 resource_id: u64,
204 address_space: &AddressSpace,
205 addr_hint: u64,
206 requested_flags: VmaFlags,
207 ) -> Result<(u64, u64), RegionCapError> {
208 let entry = {
209 let entries = self.entries.lock();
210 let entry = entries.get(&resource_id).ok_or(RegionCapError::NotFound)?;
211 let mut pinned = 0usize;
212 for handle in &entry.handles {
213 match ownership_table().pin(*handle) {
214 Ok(_) => {
215 pinned += 1;
216 }
217 Err(error) => {
218 log::warn!(
219 "memory: failed to pin exported handle resource={} block={:#x}/{}: {:?}",
220 resource_id,
221 handle.base.as_u64(),
222 handle.order,
223 error
224 );
225 for pinned_handle in entry.handles.iter().take(pinned) {
226 cow::handle_dec_ref(*pinned_handle);
227 }
228 return Err(RegionCapError::OutOfMemory);
229 }
230 }
231 }
232 entry.clone()
233 };
234
235 let map_result = (|| {
236 let effective_flags = VmaFlags {
237 readable: entry.flags.readable && requested_flags.readable,
238 writable: entry.flags.writable && requested_flags.writable,
239 executable: entry.flags.executable && requested_flags.executable,
240 user_accessible: true,
241 };
242 let page_count = entry.handles.len();
243 let page_bytes = entry.page_size.bytes();
244
245 let base = if addr_hint != 0 {
246 if addr_hint % page_bytes != 0 {
247 return Err(RegionCapError::InvalidAddress);
248 }
249 address_space
250 .find_free_vma_range(addr_hint, page_count, entry.page_size)
251 .or_else(|| {
252 address_space.find_free_vma_range(
253 crate::syscall::mmap::MMAP_BASE,
254 page_count,
255 entry.page_size,
256 )
257 })
258 .ok_or(RegionCapError::OutOfMemory)?
259 } else {
260 address_space
261 .find_free_vma_range(
262 crate::syscall::mmap::MMAP_BASE,
263 page_count,
264 entry.page_size,
265 )
266 .ok_or(RegionCapError::OutOfMemory)?
267 };
268
269 address_space
270 .map_shared_handles_with_cap_ids(
271 base,
272 &entry.handles,
273 Some(&entry.mapping_cap_ids),
274 effective_flags,
275 entry.vma_type,
276 entry.page_size,
277 )
278 .map(|_| (base, entry.size))
279 .map_err(|_| RegionCapError::OutOfMemory)
280 })();
281
282 for handle in &entry.handles {
283 cow::handle_dec_ref(*handle);
284 }
285
286 map_result
287 }
288
289 pub fn release_handle(
291 &self,
292 resource_id: u64,
293 handle_cap: CapId,
294 ) -> Result<ReleaseRegionResult, RegionCapError> {
295 let entry = {
296 let mut entries = self.entries.lock();
297 let current = entries
298 .get_mut(&resource_id)
299 .ok_or(RegionCapError::NotFound)?;
300 let position = current
301 .handle_caps
302 .iter()
303 .position(|existing| *existing == handle_cap)
304 .ok_or(RegionCapError::NotFound)?;
305 current.handle_caps.remove(position);
306 for handle in ¤t.handles {
307 if let Some(block) = unregister_mapping_identity(*handle, handle_cap) {
308 release_owned_block(block);
309 }
310 }
311 if !current.handle_caps.is_empty() {
312 return Ok(ReleaseRegionResult::Retained);
313 }
314 entries
315 .remove(&resource_id)
316 .ok_or(RegionCapError::NotFound)?
317 };
318
319 let mut revoked_mappings = 0usize;
320 for cap_id in entry.mapping_cap_ids {
321 revoked_mappings = revoked_mappings.saturating_add(revoke_mapping_cap_id(cap_id));
322 }
323
324 Ok(ReleaseRegionResult::Destroyed { revoked_mappings })
325 }
326}
327
328impl Default for MemoryRegionRegistry {
329 fn default() -> Self {
331 Self::new()
332 }
333}