// strat9_kernel/memory/userslice.rs
//! Userspace pointer validation for Strat9-OS.
//!
//! The `UserSlice` pattern (inspired by RedoxOS `usercopy.rs`) ensures the
//! kernel never dereferences a raw userspace pointer without first checking:
//!
//! 1. **Range**: The entire region lies in the user half (< `USER_SPACE_END`)
//! 2. **Overflow**: `base + len` doesn't wrap around
//! 3. **Mapping**: Every page in the region is present in the *active* page
//!    tables with the requested permissions (read or write)
//!
//! After validation, `UserSlice` provides safe copy operations that transfer
//! data between userspace and kernel buffers.
//!
//! # Example
//!
//! ```ignore
//! // In a syscall handler:
//! let user_buf = UserSliceRead::new(buf_ptr, buf_len)?;
//! let mut kernel_buf = [0u8; 256];
//! let n = user_buf.copy_to(&mut kernel_buf)?;
//! ```

23use crate::syscall::error::SyscallError;
24use alloc::vec::Vec;
25use x86_64::{
26    structures::paging::{PageTableFlags, Translate},
27    VirtAddr,
28};
29
/// End of user-accessible virtual address space (exclusive).
///
/// On x86_64 with 4-level paging, canonical user addresses are
/// `0x0000_0000_0000_0000 ..= 0x0000_7FFF_FFFF_FFFF`.
/// Anything at or above this boundary is kernel space.
const USER_SPACE_END: u64 = 0x0000_8000_0000_0000;

/// Maximum length allowed for a single UserSlice (16 MiB).
///
/// Prevents a malicious userspace from causing the kernel to walk
/// millions of page table entries or allocate huge kernel buffers
/// (e.g. via `read_to_vec`).
const MAX_USER_SLICE_LEN: usize = 16 * 1024 * 1024;
42
/// Errors that can occur when constructing or using a `UserSlice`.
///
/// Converted into `SyscallError` at the syscall boundary (see the
/// `From` impl below).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UserSliceError {
    /// The pointer is null.
    NullPointer,
    /// The region extends into or past kernel address space.
    KernelAddress,
    /// `base + len` overflows (wraps around the address space).
    Overflow,
    /// The region exceeds the maximum allowed length (`MAX_USER_SLICE_LEN`).
    TooLong,
    /// One or more pages in the region are not mapped.
    NotMapped,
    /// The mapping lacks the required permission (e.g. not writable).
    PermissionDenied,
    /// The slice is too small for the requested operation.
    InvalidSize,
}
61
62impl From<UserSliceError> for SyscallError {
63    /// Performs the from operation.
64    fn from(e: UserSliceError) -> Self {
65        match e {
66            UserSliceError::NullPointer => SyscallError::Fault,
67            UserSliceError::KernelAddress => SyscallError::Fault,
68            UserSliceError::Overflow => SyscallError::Fault,
69            UserSliceError::TooLong => SyscallError::InvalidArgument,
70            UserSliceError::NotMapped => SyscallError::Fault,
71            UserSliceError::PermissionDenied => SyscallError::Fault,
72            UserSliceError::InvalidSize => SyscallError::InvalidArgument,
73        }
74    }
75}
76
/// Permission requirements for a user memory region.
///
/// Internal to this module; selects which page-table flags
/// `validate_user_region` demands.
#[derive(Debug, Clone, Copy)]
enum Access {
    /// Read-only access (the kernel reads from userspace).
    Read,
    /// Write access (the kernel writes to userspace).
    Write,
}
85
86/// Validate that a user memory region `[base, base+len)` is:
87/// - entirely within the user address space
88/// - mapped with the required permissions in the active page tables
89///
90/// Returns `Ok(())` on success, or a `UserSliceError` describing the problem.
91fn validate_user_region(base: u64, len: usize, access: Access) -> Result<(), UserSliceError> {
92    if len == 0 {
93        return Ok(());
94    }
95
96    if base == 0 {
97        return Err(UserSliceError::NullPointer);
98    }
99
100    if len > MAX_USER_SLICE_LEN {
101        return Err(UserSliceError::TooLong);
102    }
103
104    let end = base
105        .checked_add(len as u64)
106        .ok_or(UserSliceError::Overflow)?;
107
108    if base >= USER_SPACE_END || end > USER_SPACE_END {
109        return Err(UserSliceError::KernelAddress);
110    }
111
112    // Walk every page in the region and check the page tables.
113    let required_flags = match access {
114        Access::Read => PageTableFlags::PRESENT | PageTableFlags::USER_ACCESSIBLE,
115        Access::Write => {
116            PageTableFlags::PRESENT | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::WRITABLE
117        }
118    };
119
120    check_pages_mapped(base, len, required_flags)
121}
122
/// Walk the active page tables to verify that every 4 KiB page covering
/// `[base, base+len)` is mapped with at least `required_flags`.
///
/// Reads the live CR3 — i.e. the *current* process's address space — through
/// the higher-half direct map (HHDM), so validation reflects the exact page
/// tables that a subsequent copy will go through.
///
/// Caller must ensure `base + len` does not overflow (enforced by
/// `validate_user_region`).
fn check_pages_mapped(
    base: u64,
    len: usize,
    required_flags: PageTableFlags,
) -> Result<(), UserSliceError> {
    use x86_64::{
        registers::control::Cr3,
        structures::paging::{OffsetPageTable, PageTable},
    };

    // Physical->virtual offset of the direct map; lets us dereference
    // page-table frames with plain pointer arithmetic.
    let hhdm = crate::memory::hhdm_offset();
    let phys_offset = VirtAddr::new(hhdm);

    // Read the active CR3 to get the current process's page table.
    let (l4_frame, _) = Cr3::read();
    let l4_phys = l4_frame.start_address().as_u64();
    let l4_virt = VirtAddr::new(l4_phys + hhdm);

    // SAFETY: The HHDM mapping is always valid for physical RAM.
    // We only read the page tables; no mutation.
    let mapper =
        unsafe { OffsetPageTable::new(&mut *l4_virt.as_mut_ptr::<PageTable>(), phys_offset) };

    let page_size: u64 = 4096;
    let start_page = base & !0xFFF; // Round down to a 4 KiB page boundary
    let end_addr = base + len as u64; // Exclusive end; caller guarantees no overflow

    // Visit each page that the byte range [base, end_addr) touches.
    let mut addr = start_page;
    while addr < end_addr {
        let vaddr = VirtAddr::new(addr);

        // Use the x86_64 crate's full translate to get the mapped frame + flags.
        use x86_64::structures::paging::mapper::TranslateResult;
        match mapper.translate(vaddr) {
            TranslateResult::Mapped { flags, .. } => {
                // Check that the mapping has all required flags.
                // NOTE(review): `translate` appears to report the leaf entry's
                // flags; hardware requires USER_ACCESSIBLE at every level, so
                // confirm upper-level entries also carry U/S in this kernel's
                // mappings.
                if !flags.contains(required_flags) {
                    log::trace!(
                        "UserSlice: page {:#x} missing flags: have {:?}, need {:?}",
                        addr,
                        flags,
                        required_flags
                    );
                    return Err(UserSliceError::PermissionDenied);
                }
            }
            TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
                log::trace!("UserSlice: page {:#x} not mapped", addr);
                return Err(UserSliceError::NotMapped);
            }
        }

        addr += page_size;
    }

    Ok(())
}
182
// ============================================================================
// UserSliceRead — validated read-only access to user memory
// ============================================================================
186
/// A validated read-only reference to a user-space memory region.
///
/// Construction validates that `[ptr, ptr+len)` is mapped and readable
/// by the current process. After construction, the kernel can safely
/// read from this region.
///
/// **Note**: The mapping could theoretically be changed by another thread
/// between validation and use. On our single-core kernel this can't happen
/// because we don't preempt during a syscall handler (interrupts are
/// re-enabled but the scheduler won't remove our mappings). For SMP this
/// would need additional protection (e.g. pinning pages).
pub struct UserSliceRead {
    /// Userspace base address (validated at construction).
    ptr: u64,
    /// Region length in bytes (validated at construction).
    len: usize,
}
202
203impl UserSliceRead {
204    /// Create a new validated read-only user slice.
205    ///
206    /// Fails if:
207    /// - `ptr` is null
208    /// - `ptr + len` overflows or crosses into kernel space
209    /// - Any page in the range is not mapped or not user-accessible
210    pub fn new(ptr: u64, len: usize) -> Result<Self, UserSliceError> {
211        validate_user_region(ptr, len, Access::Read)?;
212        Ok(UserSliceRead { ptr, len })
213    }
214
215    /// The length of the validated region in bytes.
216    pub fn len(&self) -> usize {
217        self.len
218    }
219
220    /// Whether the region is empty (zero length).
221    pub fn is_empty(&self) -> bool {
222        self.len == 0
223    }
224
225    /// Copy validated user data into a kernel-owned `Vec<u8>`.
226    ///
227    /// Returns a vector containing a copy of the user memory.
228    pub fn read_to_vec(&self) -> Vec<u8> {
229        if self.len == 0 {
230            return Vec::new();
231        }
232
233        let mut buf = alloc::vec![0u8; self.len];
234        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-readable.
235        unsafe {
236            core::ptr::copy_nonoverlapping(self.ptr as *const u8, buf.as_mut_ptr(), self.len);
237        }
238        buf
239    }
240
241    /// Copy validated user data into a kernel buffer.
242    ///
243    /// Copies `min(self.len, dest.len())` bytes and returns how many were copied.
244    pub fn copy_to(&self, dest: &mut [u8]) -> usize {
245        let n = core::cmp::min(self.len, dest.len());
246        if n == 0 {
247            return 0;
248        }
249
250        // SAFETY: We validated that [ptr, ptr+n) is mapped and user-readable.
251        // n <= self.len, so we stay within the validated region.
252        unsafe {
253            core::ptr::copy_nonoverlapping(self.ptr as *const u8, dest.as_mut_ptr(), n);
254        }
255        n
256    }
257
258    /// Read a single byte from the user slice at the given offset.
259    pub fn read_u8(&self, offset: usize) -> Result<u8, UserSliceError> {
260        if offset >= self.len {
261            return Err(UserSliceError::InvalidSize);
262        }
263        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-readable.
264        unsafe {
265            Ok(core::ptr::read_unaligned(
266                (self.ptr + offset as u64) as *const u8,
267            ))
268        }
269    }
270
271    /// Read a u64 from the user slice at the given offset.
272    pub fn read_u64(&self, offset: usize) -> Result<u64, UserSliceError> {
273        if offset + 8 > self.len {
274            return Err(UserSliceError::InvalidSize);
275        }
276        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-readable.
277        unsafe {
278            Ok(core::ptr::read_unaligned(
279                (self.ptr + offset as u64) as *const u64,
280            ))
281        }
282    }
283
284    /// Read a value of type T from the user slice.
285    ///
286    /// # Safety
287    /// The caller must ensure that T is Pod (plain old data) and that the
288    /// slice is at least size_of::<T>() bytes.
289    pub fn read_val<T: Copy>(&self) -> Result<T, UserSliceError> {
290        if self.len < core::mem::size_of::<T>() {
291            return Err(UserSliceError::InvalidSize);
292        }
293        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-readable.
294        // T is Copy, so we can safely read it.
295        unsafe { Ok(core::ptr::read_unaligned(self.ptr as *const T)) }
296    }
297
298    /// Get the raw pointer (for logging/debugging only).
299    pub fn as_ptr(&self) -> u64 {
300        self.ptr
301    }
302}
303
// ============================================================================
// UserSliceWrite — validated write access to user memory
// ============================================================================
307
/// A validated writable reference to a user-space memory region.
///
/// Construction validates that `[ptr, ptr+len)` is mapped, user-accessible,
/// and writable. After construction, the kernel can safely write to this region.
pub struct UserSliceWrite {
    /// Userspace base address (validated at construction).
    ptr: u64,
    /// Region length in bytes (validated at construction).
    len: usize,
}
316
317impl UserSliceWrite {
318    /// Create a new validated writable user slice.
319    ///
320    /// Fails if:
321    /// - `ptr` is null
322    /// - `ptr + len` overflows or crosses into kernel space
323    /// - Any page in the range is not mapped, not user-accessible, or not writable
324    pub fn new(ptr: u64, len: usize) -> Result<Self, UserSliceError> {
325        validate_user_region(ptr, len, Access::Write)?;
326        Ok(UserSliceWrite { ptr, len })
327    }
328
329    /// The length of the validated region in bytes.
330    pub fn len(&self) -> usize {
331        self.len
332    }
333
334    /// Whether the region is empty (zero length).
335    pub fn is_empty(&self) -> bool {
336        self.len == 0
337    }
338
339    /// Copy kernel data into validated user memory.
340    ///
341    /// Copies `min(src.len(), self.len)` bytes and returns how many were copied.
342    pub fn copy_from(&self, src: &[u8]) -> usize {
343        let n = core::cmp::min(src.len(), self.len);
344        if n == 0 {
345            return 0;
346        }
347
348        // SAFETY: We validated that [ptr, ptr+n) is mapped and user-writable.
349        // n <= self.len, so we stay within the validated region.
350        unsafe {
351            core::ptr::copy_nonoverlapping(src.as_ptr(), self.ptr as *mut u8, n);
352        }
353        n
354    }
355
356    /// Zero-fill the validated user memory region.
357    pub fn zero(&self) {
358        if self.len == 0 {
359            return;
360        }
361
362        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-writable.
363        unsafe {
364            core::ptr::write_bytes(self.ptr as *mut u8, 0, self.len);
365        }
366    }
367
368    /// Get the raw pointer (for logging/debugging only).
369    pub fn as_ptr(&self) -> u64 {
370        self.ptr
371    }
372}
373
// ============================================================================
// UserSliceReadWrite — validated read+write access to user memory
// ============================================================================
377
/// A validated read-write reference to a user-space memory region.
///
/// Construction validates that `[ptr, ptr+len)` is mapped, user-accessible,
/// and writable (writable implies readable on x86_64).
pub struct UserSliceReadWrite {
    /// Userspace base address (validated at construction).
    ptr: u64,
    /// Region length in bytes (validated at construction).
    len: usize,
}
386
387impl UserSliceReadWrite {
388    /// Create a new validated read-write user slice.
389    pub fn new(ptr: u64, len: usize) -> Result<Self, UserSliceError> {
390        validate_user_region(ptr, len, Access::Write)?;
391        Ok(UserSliceReadWrite { ptr, len })
392    }
393
394    /// The length of the validated region in bytes.
395    pub fn len(&self) -> usize {
396        self.len
397    }
398
399    /// Whether the region is empty (zero length).
400    pub fn is_empty(&self) -> bool {
401        self.len == 0
402    }
403
404    /// Copy validated user data into a kernel buffer.
405    pub fn copy_to(&self, dest: &mut [u8]) -> usize {
406        let n = core::cmp::min(self.len, dest.len());
407        if n == 0 {
408            return 0;
409        }
410        // SAFETY: Validated as writable (which implies readable on x86_64).
411        unsafe {
412            core::ptr::copy_nonoverlapping(self.ptr as *const u8, dest.as_mut_ptr(), n);
413        }
414        n
415    }
416
417    /// Copy kernel data into validated user memory.
418    pub fn copy_from(&self, src: &[u8]) -> usize {
419        let n = core::cmp::min(src.len(), self.len);
420        if n == 0 {
421            return 0;
422        }
423        // SAFETY: Validated as writable.
424        unsafe {
425            core::ptr::copy_nonoverlapping(src.as_ptr(), self.ptr as *mut u8, n);
426        }
427        n
428    }
429
430    /// Write a value of type T to the user slice.
431    ///
432    /// # Safety
433    /// The caller must ensure that T is Pod (plain old data) and that the
434    /// slice is at least size_of::<T>() bytes.
435    pub fn write_val<T: Copy>(&self, val: &T) -> Result<(), UserSliceError> {
436        if self.len < core::mem::size_of::<T>() {
437            return Err(UserSliceError::InvalidSize);
438        }
439        // SAFETY: We validated that [ptr, ptr+len) is mapped and user-writable.
440        // T is Copy, so we can safely write it.
441        unsafe {
442            core::ptr::write_unaligned(self.ptr as *mut T, *val);
443        }
444        Ok(())
445    }
446
447    /// Get the raw pointer (for logging/debugging only).
448    pub fn as_ptr(&self) -> u64 {
449        self.ptr
450    }
451}