1#![allow(unsafe_code)]
11#![allow(unsafe_op_in_unsafe_fn)]
12
13extern crate alloc;
14
15use core::{marker::PhantomData, ops::Range};
16
/// A physical memory address.
///
/// Newtype over a raw `u64` so physical and virtual addresses cannot be
/// accidentally interchanged at compile time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct PhysAddr(u64);

impl PhysAddr {
    /// Wraps a raw physical address value.
    pub const fn new(addr: u64) -> Self {
        Self(addr)
    }

    /// The null (zero) physical address.
    pub const fn null() -> Self {
        Self(0)
    }

    /// Returns the raw address as `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }

    /// Returns the raw address as `usize`.
    ///
    /// Truncates on targets where `usize` is narrower than 64 bits.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Returns `true` if the address is zero.
    pub const fn is_null(&self) -> bool {
        self.0 == 0
    }

    /// Rounds the address up to the next multiple of `align`.
    ///
    /// `align` must be a power of two — the mask arithmetic below is only
    /// correct in that case; enforced with a debug assertion. May panic in
    /// debug builds on overflow for addresses near `u64::MAX`.
    pub const fn align_up(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self((self.0 + align - 1) & !(align - 1))
    }

    /// Rounds the address down to the previous multiple of `align`.
    ///
    /// `align` must be a power of two; enforced with a debug assertion.
    pub const fn align_down(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self(self.0 & !(align - 1))
    }

    /// Returns `true` if the address is a multiple of `align`.
    ///
    /// `align` must be a power of two; enforced with a debug assertion.
    pub const fn is_aligned(&self, align: u64) -> bool {
        debug_assert!(align.is_power_of_two());
        self.0 & (align - 1) == 0
    }

    /// Returns the address advanced by `offset` bytes.
    /// May panic on overflow in debug builds.
    pub const fn add(&self, offset: u64) -> Self {
        Self(self.0 + offset)
    }

    /// Returns the address moved back by `offset` bytes.
    /// May panic on underflow in debug builds.
    pub const fn sub(&self, offset: u64) -> Self {
        Self(self.0 - offset)
    }
}

impl From<u64> for PhysAddr {
    fn from(addr: u64) -> Self {
        Self::new(addr)
    }
}

impl From<PhysAddr> for u64 {
    fn from(addr: PhysAddr) -> u64 {
        addr.as_u64()
    }
}
87
/// A virtual memory address.
///
/// Newtype over a raw `u64`, kept distinct from [`PhysAddr`] so the two
/// address kinds cannot be mixed up at compile time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtAddr(u64);

impl VirtAddr {
    /// Wraps a raw virtual address value.
    pub const fn new(addr: u64) -> Self {
        Self(addr)
    }

    /// The null (zero) virtual address.
    pub const fn null() -> Self {
        Self(0)
    }

    /// Returns the raw address as `u64`.
    pub const fn as_u64(&self) -> u64 {
        self.0
    }

    /// Returns the raw address as `usize`.
    ///
    /// Truncates on targets where `usize` is narrower than 64 bits.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Returns `true` if the address is zero.
    pub const fn is_null(&self) -> bool {
        self.0 == 0
    }

    /// Rounds the address up to the next multiple of `align`.
    ///
    /// `align` must be a power of two — the mask arithmetic below is only
    /// correct in that case; enforced with a debug assertion. May panic in
    /// debug builds on overflow for addresses near `u64::MAX`.
    pub const fn align_up(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self((self.0 + align - 1) & !(align - 1))
    }

    /// Rounds the address down to the previous multiple of `align`.
    ///
    /// `align` must be a power of two; enforced with a debug assertion.
    pub const fn align_down(&self, align: u64) -> Self {
        debug_assert!(align.is_power_of_two());
        Self(self.0 & !(align - 1))
    }

    /// Returns `true` if the address is a multiple of `align`.
    ///
    /// `align` must be a power of two; enforced with a debug assertion.
    pub const fn is_aligned(&self, align: u64) -> bool {
        debug_assert!(align.is_power_of_two());
        self.0 & (align - 1) == 0
    }

    /// Returns the address advanced by `offset` bytes.
    /// May panic on overflow in debug builds.
    pub const fn add(&self, offset: u64) -> Self {
        Self(self.0 + offset)
    }

    /// Returns the address moved back by `offset` bytes.
    /// May panic on underflow in debug builds.
    pub const fn sub(&self, offset: u64) -> Self {
        Self(self.0 - offset)
    }
}

impl From<u64> for VirtAddr {
    fn from(addr: u64) -> Self {
        Self::new(addr)
    }
}

impl From<VirtAddr> for u64 {
    fn from(addr: VirtAddr) -> u64 {
        addr.as_u64()
    }
}
158
159pub const PAGE_SIZE: usize = 4096;
161
162#[inline]
164pub fn phys_to_virt(phys: PhysAddr) -> VirtAddr {
165 VirtAddr::new(crate::memory::phys_to_virt(phys.as_u64()))
166}
167
168#[inline]
170pub fn virt_to_phys(virt: VirtAddr) -> PhysAddr {
171 PhysAddr::new(crate::memory::virt_to_phys(virt.as_u64()))
172}
173
/// A view over a contiguous range of mapped virtual pages.
pub struct MappedPages {
    /// First virtual address of the mapping.
    start_vaddr: VirtAddr,
    /// Length of the mapping in bytes.
    size: usize,
    /// Whether this value is responsible for unmapping the region on drop.
    owned: bool,
    /// `*mut ()` suppresses the auto traits (`Send`/`Sync`) by default;
    /// `Send` is reinstated by an explicit `unsafe impl` below.
    _marker: PhantomData<*mut ()>,
}
190
191unsafe impl Send for MappedPages {}
193
194impl MappedPages {
195 pub unsafe fn new(start_vaddr: VirtAddr, size: usize, owned: bool) -> Self {
203 Self {
204 start_vaddr,
205 size,
206 owned,
207 _marker: PhantomData,
208 }
209 }
210
211 pub fn start_address(&self) -> VirtAddr {
213 self.start_vaddr
214 }
215
216 pub fn size(&self) -> usize {
218 self.size
219 }
220
221 pub fn end_address(&self) -> VirtAddr {
223 self.start_vaddr.add(self.size as u64)
224 }
225
226 pub fn range(&self) -> Range<VirtAddr> {
228 self.start_vaddr..self.end_address()
229 }
230
231 pub fn as_ptr(&self) -> *const u8 {
233 self.start_vaddr.as_usize() as *const u8
234 }
235
236 pub fn as_mut_ptr(&mut self) -> *mut u8 {
238 self.start_vaddr.as_usize() as *mut u8
239 }
240
241 pub unsafe fn read<T>(&self, offset: usize) -> Result<T, MapError> {
249 if offset + core::mem::size_of::<T>() > self.size {
250 return Err(MapError::OutOfBounds);
251 }
252 let ptr = self.start_vaddr.as_usize().wrapping_add(offset) as *const T;
253 Ok(ptr.read_volatile())
255 }
256
257 pub unsafe fn write<T>(&mut self, offset: usize, value: T) -> Result<(), MapError> {
265 if offset + core::mem::size_of::<T>() > self.size {
266 return Err(MapError::OutOfBounds);
267 }
268 let ptr = self.start_vaddr.as_usize().wrapping_add(offset) as *mut T;
269 ptr.write_volatile(value);
271 Ok(())
272 }
273
274 pub unsafe fn as_slice(&self, len: usize) -> Result<&[u8], MapError> {
281 if len > self.size {
282 return Err(MapError::OutOfBounds);
283 }
284 Ok(core::slice::from_raw_parts(self.as_ptr(), len))
285 }
286
287 pub unsafe fn as_mut_slice(&mut self, len: usize) -> Result<&mut [u8], MapError> {
294 if len > self.size {
295 return Err(MapError::OutOfBounds);
296 }
297 Ok(core::slice::from_raw_parts_mut(self.as_mut_ptr(), len))
298 }
299
300 pub fn into_allocated_pages(self) -> Result<AllocatedPages, MapError> {
304 if !self.owned {
305 return Err(MapError::NotOwner);
306 }
307 let pages = AllocatedPages {
308 start_vaddr: self.start_vaddr,
309 size: self.size,
310 };
311 core::mem::forget(self);
313 Ok(pages)
314 }
315}
316
317impl Drop for MappedPages {
318 fn drop(&mut self) {
320 if self.owned {
321 let page_count = (self.size + PAGE_SIZE - 1) / PAGE_SIZE;
323 crate::memory::address_space::kernel_address_space()
325 .unmap_region(
326 self.start_vaddr.as_u64(),
327 page_count,
328 crate::memory::address_space::VmaPageSize::Small,
329 )
330 .ok();
331 }
332 }
333}
334
335impl core::fmt::Debug for MappedPages {
336 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
338 f.debug_struct("MappedPages")
339 .field("start", &self.start_vaddr)
340 .field("size", &self.size)
341 .field("owned", &self.owned)
342 .finish()
343 }
344}
345
/// A range of virtual pages whose backing memory is considered allocated;
/// produced by `MappedPages::into_allocated_pages`.
pub struct AllocatedPages {
    /// First virtual address of the region.
    start_vaddr: VirtAddr,
    /// Region length in bytes.
    size: usize,
}
353
354impl AllocatedPages {
355 pub fn start_address(&self) -> VirtAddr {
357 self.start_vaddr
358 }
359
360 pub fn size(&self) -> usize {
362 self.size
363 }
364
365 pub fn page_count(&self) -> usize {
367 (self.size + PAGE_SIZE - 1) / PAGE_SIZE
368 }
369}
370
impl Drop for AllocatedPages {
    // NOTE(review): this drop computes the backing physical address and then
    // discards it without returning anything to a frame allocator — as
    // written, dropping `AllocatedPages` leaks the memory. Presumably a
    // deallocation call is still to be wired in here; confirm against the
    // frame allocator API before relying on reclamation.
    fn drop(&mut self) {
        let phys_addr = virt_to_phys(self.start_vaddr).as_u64();
        let _ = phys_addr;
        let _ = self.size;
    }
}
384
/// Attribute flags for a page mapping.
///
/// `PartialEq`/`Eq` are derived for parity with [`MapError`] so flag sets
/// can be compared directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MapFlags {
    /// Mapping is present (translation valid).
    pub present: bool,
    /// Writes are permitted.
    pub writable: bool,
    /// Accessible from user mode.
    pub user: bool,
    /// Write-through caching for this mapping.
    pub write_through: bool,
    /// Caching disabled for this mapping.
    pub cache_disabled: bool,
    /// Instruction fetch forbidden (no-execute).
    pub no_execute: bool,
}

impl MapFlags {
    /// Kernel read-only mapping with default caching.
    pub const fn read_only() -> Self {
        Self {
            present: true,
            writable: false,
            user: false,
            write_through: false,
            cache_disabled: false,
            no_execute: false,
        }
    }

    /// Kernel read-write mapping with default caching.
    pub const fn read_write() -> Self {
        Self {
            writable: true,
            ..Self::read_only()
        }
    }

    /// User-accessible read-write mapping with default caching.
    pub const fn user_read_write() -> Self {
        Self {
            user: true,
            ..Self::read_write()
        }
    }

    /// Kernel read-write mapping for memory-mapped I/O: caching disabled
    /// and non-executable.
    pub const fn mmio() -> Self {
        Self {
            cache_disabled: true,
            no_execute: true,
            ..Self::read_write()
        }
    }
}
451
/// Errors returned by mapping and address-space operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
pub enum MapError {
    /// An offset or length extends past the region it addresses.
    #[error("address out of bounds")]
    OutOfBounds,
    /// The operation requires ownership of the mapping, which the caller
    /// does not hold.
    #[error("not owner of mapping")]
    NotOwner,
    /// The target address is already mapped.
    #[error("address already mapped")]
    AlreadyMapped,
    /// The address does not refer to a known mapping or is otherwise unusable.
    #[error("invalid address")]
    InvalidAddress,
    /// No memory available to satisfy the request.
    #[error("out of memory")]
    OutOfMemory,
    /// Architecture-specific failure, described by the embedded message.
    #[error("architecture error: {0}")]
    ArchError(&'static str),
}
467
/// A virtual memory address region: a contiguous span of virtual address
/// space from which child sub-regions are handed out.
pub struct Vmar {
    /// Lowest virtual address of the region.
    base: VirtAddr,
    /// Region length in bytes.
    size: usize,
    /// Child sub-allocations, spinlock-guarded for shared (`&self`) use.
    children: spin::Mutex<alloc::vec::Vec<VmarChild>>,
}
480
/// Bookkeeping entry for one sub-region handed out by a [`Vmar`].
struct VmarChild {
    /// Byte offset of the child within the parent region.
    offset: usize,
    /// Child length in bytes.
    #[allow(dead_code)]
    size: usize,
    /// Backing mapping, if one has been attached (always `None` as of this
    /// code — `Vmar::alloc` does not create mappings yet).
    #[allow(dead_code)]
    mapping: Option<MappedPages>,
}
491
492impl Vmar {
493 pub fn new(base: VirtAddr, size: usize) -> Self {
495 Self {
496 base,
497 size,
498 children: spin::Mutex::new(alloc::vec![]),
499 }
500 }
501
502 pub fn base(&self) -> VirtAddr {
504 self.base
505 }
506
507 pub fn size(&self) -> usize {
509 self.size
510 }
511
512 pub fn alloc(&self, offset: usize, size: usize, flags: MapFlags) -> Result<VirtAddr, MapError> {
514 let vaddr = self.base.add(offset as u64);
516
517 let _ = flags; let mut children = self.children.lock();
521 children.push(VmarChild {
522 offset,
523 size,
524 mapping: None,
525 });
526
527 Ok(vaddr)
528 }
529
530 pub fn dealloc(&self, offset: usize) -> Result<(), MapError> {
532 let mut children = self.children.lock();
533 if let Some(pos) = children.iter().position(|c| c.offset == offset) {
534 children.remove(pos);
535 Ok(())
536 } else {
537 Err(MapError::InvalidAddress)
538 }
539 }
540}
541
542impl core::fmt::Debug for Vmar {
543 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
545 f.debug_struct("Vmar")
546 .field("base", &self.base)
547 .field("size", &self.size)
548 .finish()
549 }
550}
551
/// Invalidates the TLB entry for the page containing `vaddr` on the current
/// CPU, using the x86 `invlpg` instruction.
pub fn tlb_flush_virt_addr(vaddr: VirtAddr) {
    // SAFETY: `invlpg` only drops a cached translation; it accesses no
    // memory and clobbers nothing (nostack, preserves_flags). It is a
    // privileged instruction, so this must execute in ring 0 — which holds
    // for kernel code.
    unsafe {
        core::arch::asm!(
            "invlpg [{}]",
            in(reg) vaddr.as_u64(),
            options(nostack, preserves_flags)
        );
    }
}
566
/// Flushes the TLB on the current CPU by rewriting CR3 with its current
/// value.
///
/// NOTE(review): a CR3 reload does not flush translations for pages marked
/// global (when PGE is enabled) — confirm callers do not rely on flushing
/// global mappings.
pub fn tlb_flush_all() {
    // SAFETY: reading CR3 and writing back the same value does not switch
    // address spaces; the write's side effect is invalidating non-global
    // TLB entries. Privileged instructions; must execute in ring 0.
    unsafe {
        let cr3: u64;
        core::arch::asm!(
            "mov {}, cr3",
            out(reg) cr3,
            options(nostack, preserves_flags)
        );
        core::arch::asm!(
            "mov cr3, {}",
            in(reg) cr3,
            options(nostack)
        );
    }
}