//! Zone bookkeeping for the physical page allocator: zone and
//! migratetype definitions, buddy bitmaps, and per-zone segment state.

use core::{ptr, slice};

use x86_64::PhysAddr;

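/// Physical memory zone classes, in the classic DMA / Normal / HighMem
/// split used by buddy allocators.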
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum ZoneType {
    /// Low physical memory reachable by legacy DMA devices.
    DMA = 0,
    /// General-purpose physical memory.
    Normal = 1,
    /// Physical memory above the normal range.
    HighMem = 2,
}

impl ZoneType {
    /// Number of `ZoneType` variants.
    pub const COUNT: usize = 3;
}

/// Maximum buddy order: free lists cover orders `0..=MAX_ORDER`, so the
/// largest block is `1 << MAX_ORDER` pages.
pub const MAX_ORDER: usize = 11;

/// Order of a pageblock, the granularity at which migratetype tags are
/// kept (2^9 = 512 pages, i.e. 2 MiB with 4 KiB pages).
pub const PAGEBLOCK_ORDER: usize = 9;

/// Number of pages in one pageblock.
pub const PAGEBLOCK_PAGES: usize = 1 << PAGEBLOCK_ORDER;

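/// Mobility class of a pageblock. Free lists are segregated by
/// migratetype so that unmovable allocations do not fragment pageblocks
/// that could otherwise stay movable.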
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Migratetype {
    /// Allocations that cannot be relocated once placed.
    Unmovable = 0,
    /// Allocations that can be migrated or reclaimed.
    Movable = 1,
}

impl Migratetype {
    /// Number of `Migratetype` variants.
    pub const COUNT: usize = 2;

    /// All migratetypes, in index order.
    pub const ALL: [Self; Self::COUNT] = [Self::Unmovable, Self::Movable];

    /// Index of this migratetype into per-migratetype arrays.
    #[inline]
    pub const fn index(self) -> usize {
        self as usize
    }

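    /// Search order for free lists when allocating with this
    /// migratetype: the native list first, then the other migratetype
    /// as a fallback. A minimal sketch of the intended use, with a
    /// hypothetical `pop_free` helper:
    ///
    /// ```ignore
    /// for migratetype in requested.fallback_order() {
    ///     if let Some(block) = segment.pop_free(order, migratetype) {
    ///         return Some(block);
    ///     }
    /// }
    /// ```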
    #[inline]
    pub const fn fallback_order(self) -> [Self; Self::COUNT] {
        match self {
            Self::Unmovable => [Self::Unmovable, Self::Movable],
            Self::Movable => [Self::Movable, Self::Unmovable],
        }
    }
}

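/// A raw bitmap over caller-provided storage. The bitmap performs no
/// allocation and no synchronization; the owning allocator must point
/// `data` at a buffer large enough for `num_bits` bits.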
#[derive(Debug, Clone, Copy)]
pub struct BuddyBitmap {
    /// Backing byte buffer; null when the bitmap is unpopulated.
    pub data: *mut u8,
    /// Number of addressable bits.
    pub num_bits: usize,
}

impl BuddyBitmap {
    /// An unpopulated bitmap with no backing storage.
    pub const fn empty() -> Self {
        Self {
            data: core::ptr::null_mut(),
            num_bits: 0,
        }
    }

    /// Returns `true` if the bitmap has no backing storage or no bits.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_null() || self.num_bits == 0
    }

    /// Flips bit `idx` and returns its new value. The bitmap must be
    /// populated and `idx` must be in bounds.
    #[inline]
    pub fn toggle(&self, idx: usize) -> bool {
        debug_assert!(idx < self.num_bits);
        let byte_idx = idx >> 3;
        let mask = 1u8 << (idx & 7);
        unsafe {
            let byte = self.data.add(byte_idx);
            let new_val = *byte ^ mask;
            *byte = new_val;
            (new_val & mask) != 0
        }
    }

    /// Returns bit `idx`, or `false` if `idx` is out of range or the
    /// bitmap is unpopulated.
    #[inline]
    pub fn test(&self, idx: usize) -> bool {
        if idx >= self.num_bits || self.is_empty() {
            return false;
        }
        let byte_idx = idx >> 3;
        let mask = 1u8 << (idx & 7);
        unsafe { (*self.data.add(byte_idx) & mask) != 0 }
    }

    /// Sets bit `idx`. The bitmap must be populated and `idx` must be
    /// in bounds.
    #[inline]
    pub fn set(&self, idx: usize) {
        debug_assert!(idx < self.num_bits);
        let byte_idx = idx >> 3;
        let mask = 1u8 << (idx & 7);
        unsafe {
            *self.data.add(byte_idx) |= mask;
        }
    }

    /// Clears bit `idx`. The bitmap must be populated and `idx` must be
    /// in bounds.
    #[inline]
    pub fn clear(&self, idx: usize) {
        debug_assert!(idx < self.num_bits);
        let byte_idx = idx >> 3;
        let mask = 1u8 << (idx & 7);
        unsafe {
            *self.data.add(byte_idx) &= !mask;
        }
    }
}

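/// A contiguous range of physical pages within a zone, carrying its
/// own buddy free lists, per-order buddy bitmaps, and per-pageblock
/// migratetype tags.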
#[derive(Clone, Copy)]
pub struct ZoneSegment {
    /// Physical address of the first page in the segment.
    pub base: PhysAddr,

    /// Number of 4 KiB pages covered by the segment.
    pub page_count: usize,

    /// Free-list heads indexed by migratetype, then order. Each entry
    /// holds the physical address of the first free block, or 0 when
    /// the list is empty.
    pub free_lists: [[u64; MAX_ORDER + 1]; Migratetype::COUNT],

    /// One buddy bitmap per order.
    pub buddy_bitmaps: [BuddyBitmap; MAX_ORDER + 1],

    /// One migratetype tag per pageblock.
    pub pageblock_tags: *mut u8,

    /// Number of pageblocks covered by `pageblock_tags`.
    pub pageblock_count: usize,

    /// Debug-only bitmap tracking allocated pages.
    #[cfg(debug_assertions)]
    pub alloc_bitmap: BuddyBitmap,
}

impl ZoneSegment {
    /// An empty, unpopulated segment.
    pub const fn empty() -> Self {
        Self {
            base: PhysAddr::new(0),
            page_count: 0,
            free_lists: [[0; MAX_ORDER + 1]; Migratetype::COUNT],
            buddy_bitmaps: [BuddyBitmap::empty(); MAX_ORDER + 1],
            pageblock_tags: core::ptr::null_mut(),
            pageblock_count: 0,
            #[cfg(debug_assertions)]
            alloc_bitmap: BuddyBitmap::empty(),
        }
    }

    /// Returns `true` if the segment covers at least one page.
    #[inline]
    pub fn is_populated(&self) -> bool {
        self.page_count != 0
    }

    /// Returns `true` if `addr` falls inside this segment.
    #[inline]
    pub fn contains_address(&self, addr: PhysAddr) -> bool {
        if !self.is_populated() {
            return false;
        }
        let value = addr.as_u64();
        value >= self.base.as_u64() && value < self.end_address()
    }

    /// Exclusive physical end address of the segment.
    #[inline]
    pub fn end_address(&self) -> u64 {
        self.base.as_u64() + (self.page_count as u64 * 4096)
    }

    /// Number of free blocks of `order` across all migratetypes.
    pub fn free_list_count(&self, order: u8) -> usize {
        Migratetype::ALL
            .into_iter()
            .map(|migratetype| self.free_list_count_for(order, migratetype))
            .sum()
    }

    /// Number of free blocks of `order` on the `migratetype` free list,
    /// counted by walking the intrusive list linked through the frame
    /// metadata.
    pub fn free_list_count_for(&self, order: u8, migratetype: Migratetype) -> usize {
        let mut count = 0usize;
        let mut phys = self.free_lists[migratetype.index()][order as usize];
        while phys != 0 {
            count += 1;
            let meta = crate::memory::frame::get_meta(PhysAddr::new(phys));
            phys = if meta.next() == crate::memory::frame::FRAME_META_LINK_NONE {
                0
            } else {
                meta.next()
            };
        }
        count
    }
}

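/// Per-zone allocator state: page accounting, watermarks, and a
/// growable array of `ZoneSegment`s describing the physical ranges
/// that make up the zone.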
pub struct Zone {
    /// Which zone class this is.
    pub zone_type: ZoneType,

    /// Lowest physical address managed by the zone.
    pub base: PhysAddr,

    /// Total pages managed across the zone's segments.
    pub page_count: usize,

    /// Pages physically present in the zone.
    pub present_pages: usize,

    /// Pages spanned by the zone, including any holes between
    /// segments.
    pub span_pages: usize,

    /// Pages currently allocated.
    pub allocated: usize,

    /// Pages permanently reserved and never handed out.
    pub reserved_pages: usize,

    /// Pages held back so allocations that could fall back to this
    /// zone do not exhaust it.
    pub lowmem_reserve_pages: usize,

    /// Minimum free-page watermark.
    pub watermark_min: usize,

    /// Low free-page watermark.
    pub watermark_low: usize,

    /// High free-page watermark.
    pub watermark_high: usize,

    /// Number of populated entries in `segments`.
    pub segment_count: usize,

    /// Capacity of the `segments` array.
    pub segment_capacity: usize,

    /// Pointer to the zone's segment array.
    pub segments: *mut ZoneSegment,
}

impl Zone {
    /// A zone with no segments and zeroed accounting.
    pub const fn new(zone_type: ZoneType) -> Self {
        Zone {
            zone_type,
            base: PhysAddr::new(0),
            page_count: 0,
            present_pages: 0,
            span_pages: 0,
            allocated: 0,
            reserved_pages: 0,
            lowmem_reserve_pages: 0,
            watermark_min: 0,
            watermark_low: 0,
            watermark_high: 0,
            segment_count: 0,
            segment_capacity: 0,
            segments: ptr::null_mut(),
        }
    }

    /// The segment array at full capacity; only the first
    /// `segment_count` entries are populated.
    #[inline]
    pub fn segments(&self) -> &[ZoneSegment] {
        if self.segment_capacity == 0 || self.segments.is_null() {
            &[]
        } else {
            unsafe { slice::from_raw_parts(self.segments, self.segment_capacity) }
        }
    }

    /// Mutable view of the segment array at full capacity.
    #[inline]
    pub fn segments_mut(&mut self) -> &mut [ZoneSegment] {
        if self.segment_capacity == 0 || self.segments.is_null() {
            &mut []
        } else {
            unsafe { slice::from_raw_parts_mut(self.segments, self.segment_capacity) }
        }
    }

    /// Detaches the segment array; the backing storage is not freed
    /// here.
    #[inline]
    pub fn clear_segments(&mut self) {
        self.segment_count = 0;
        self.segment_capacity = 0;
        self.segments = ptr::null_mut();
    }

    /// Returns `true` if any populated segment contains `addr`.
    pub fn contains_address(&self, addr: PhysAddr) -> bool {
        self.segments()
            .iter()
            .take(self.segment_count)
            .any(|segment| segment.contains_address(addr))
    }

    /// Pages not currently allocated.
    pub fn available_pages(&self) -> usize {
        self.page_count.saturating_sub(self.allocated)
    }

    /// Number of free blocks of `order` across all segments and
    /// migratetypes.
    pub fn free_list_count(&self, order: u8) -> usize {
        self.segments()
            .iter()
            .take(self.segment_count)
            .map(|segment| segment.free_list_count(order))
            .sum()
    }

    /// Number of free blocks of `order` for `migratetype` across all
    /// segments.
    pub fn free_list_count_for(&self, order: u8, migratetype: Migratetype) -> usize {
        self.segments()
            .iter()
            .take(self.segment_count)
            .map(|segment| segment.free_list_count_for(order, migratetype))
            .sum()
    }

    /// Total free pages per migratetype, summing `blocks << order` over
    /// every order.
    pub fn free_pages_by_migratetype(&self) -> [usize; Migratetype::COUNT] {
        let mut totals = [0usize; Migratetype::COUNT];
        for migratetype in Migratetype::ALL {
            let idx = migratetype.index();
            for order in 0..=MAX_ORDER {
                let blocks = self.free_list_count_for(order as u8, migratetype);
                totals[idx] = totals[idx].saturating_add(blocks << order);
            }
        }
        totals
    }

    /// Free pages contained in blocks of at least `order`.
    pub fn free_pages_at_or_above_order(&self, order: u8) -> usize {
        let mut pages = 0usize;
        for current_order in order as usize..=MAX_ORDER {
            pages =
                pages.saturating_add(self.free_list_count(current_order as u8) << current_order);
        }
        pages
    }

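    /// Fragmentation score for allocations of `order`, as a percentage
    /// in `0..=100`: the share of free pages that sit in blocks smaller
    /// than `order` and are therefore unusable for such an allocation.
    /// `cached_order0_pages` is assumed to be order-0 pages held
    /// outside the zone free lists (for example in a per-CPU cache);
    /// they always count as fragmented. For example, with 1000 total
    /// free pages of which 400 sit in blocks of `order` or larger, the
    /// score is (1000 - 400) * 100 / 1000 = 60.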
    pub fn fragmentation_score(&self, order: u8, cached_order0_pages: usize) -> usize {
        if order == 0 {
            return 0;
        }

        let total_free = self.available_pages().saturating_add(cached_order0_pages);
        if total_free == 0 {
            return 0;
        }

        let usable = self.free_pages_at_or_above_order(order);
        let fragmented = total_free.saturating_sub(usable);
        fragmented.saturating_mul(100) / total_free
    }

    /// Highest order with at least one free block, or `None` if every
    /// free list is empty.
    pub fn largest_free_order(&self) -> Option<u8> {
        for order in (0..=MAX_ORDER).rev() {
            if self.free_list_count(order as u8) > 0 {
                return Some(order as u8);
            }
        }
        None
    }
}

// SAFETY: `BuddyBitmap` and `Zone` carry raw pointers into
// allocator-owned memory; callers are responsible for synchronizing
// access (e.g. behind the allocator's lock), so sending these values
// across threads is sound under that discipline.
unsafe impl Send for BuddyBitmap {}

unsafe impl Send for Zone {}