alloc_freelist/lib.rs

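//! Free-list allocator macros for `no_std` environments: one allocator is backed
//! by a fixed-size static array, the other by a `brk`-style growth function.
//! Both macros expand to a type implementing `core::alloc::GlobalAlloc`.
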
#![no_std]

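/// Defines a unit struct `$name` implementing `core::alloc::GlobalAlloc` with a
/// first-fit free list over a static heap of `heap_size` bytes.
///
/// A minimal usage sketch (the struct name and the 64 KiB size are illustrative,
/// not part of this crate):
///
/// ```ignore
/// alloc_freelist::define_freelist_allocator! {
///     pub struct StaticHeapAlloc;
///     heap_size = 64 * 1024;
/// }
///
/// #[global_allocator]
/// static ALLOCATOR: StaticHeapAlloc = StaticHeapAlloc;
/// ```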
#[macro_export]
macro_rules! define_freelist_allocator {
    ($vis:vis struct $name:ident; heap_size = $heap_size:expr;) => {
        $vis struct $name;

        /// A free block: its total size in bytes and a pointer to the next free block.
        #[repr(C)]
        struct __FreeNode {
            size: usize,
            next: *mut __FreeNode,
        }

        // Spinlock word guarding the free list and bump offset (0 = unlocked, 1 = locked).
        static __ALLOC_LOCK: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Head of the free list, stored as a raw address (0 = empty list).
        static __FREE_LIST_HEAD: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Bump-allocation offset into `__HEAP`.
        static __HEAP_OFFSET: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Backing storage for the allocator.
        static mut __HEAP: [u8; $heap_size] = [0u8; $heap_size];

        const __MIN_ALIGN: usize = core::mem::align_of::<usize>();
        const __MIN_BLOCK_SIZE: usize = core::mem::size_of::<__FreeNode>();

        /// Rounds `v` up to the next multiple of `align` (`align` must be a power of two).
        #[inline]
        fn __align_up(v: usize, align: usize) -> usize {
            (v + align - 1) & !(align - 1)
        }

        /// Acquires the allocator spinlock, spinning until the lock word flips from 0 to 1.
        #[inline]
        fn __lock_alloc() {
            while __ALLOC_LOCK
                .compare_exchange(
                    0,
                    1,
                    core::sync::atomic::Ordering::Acquire,
                    core::sync::atomic::Ordering::Relaxed,
                )
                .is_err()
            {
                core::hint::spin_loop();
            }
        }

        /// Releases the allocator spinlock.
        #[inline]
        fn __unlock_alloc() {
            __ALLOC_LOCK.store(0, core::sync::atomic::Ordering::Release);
        }

        /// Rounds the requested size up to a multiple of `__MIN_BLOCK_SIZE`, so every
        /// allocated block is large enough to hold a `__FreeNode` once it is freed.
        #[inline]
        fn __alloc_size(layout: core::alloc::Layout) -> usize {
            __align_up(layout.size().max(__MIN_BLOCK_SIZE), __MIN_BLOCK_SIZE)
        }

        unsafe impl core::alloc::GlobalAlloc for $name {
            /// Allocates `layout` by first-fit search of the free list, falling
            /// back to bump allocation from the static heap.
            #[allow(unsafe_op_in_unsafe_fn)]
            unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
                let align = layout.align().max(__MIN_ALIGN);
                let size = __alloc_size(layout);

                __lock_alloc();

                // First-fit scan of the free list.
                let mut prev: *mut __FreeNode = core::ptr::null_mut();
                let mut cur = __FREE_LIST_HEAD.load(core::sync::atomic::Ordering::Relaxed)
                    as *mut __FreeNode;
                while !cur.is_null() {
                    let cur_addr = cur as usize;
                    if cur_addr % align == 0 && (*cur).size >= size {
                        let cur_size = (*cur).size;
                        let next = (*cur).next;
                        let rem = cur_size - size;

                        // Split off any remainder large enough to hold a free node.
                        if rem >= __MIN_BLOCK_SIZE {
                            let rem_ptr = (cur_addr + size) as *mut __FreeNode;
                            (*rem_ptr).size = rem;
                            (*rem_ptr).next = next;
                            if prev.is_null() {
                                __FREE_LIST_HEAD
                                    .store(rem_ptr as usize, core::sync::atomic::Ordering::Relaxed);
                            } else {
                                (*prev).next = rem_ptr;
                            }
                        } else if prev.is_null() {
                            __FREE_LIST_HEAD.store(next as usize, core::sync::atomic::Ordering::Relaxed);
                        } else {
                            (*prev).next = next;
                        }

                        __unlock_alloc();
                        return cur as *mut u8;
                    }
                    prev = cur;
                    cur = (*cur).next;
                }

                // No suitable free block: bump-allocate from the static heap.
                let raw_base = core::ptr::addr_of_mut!(__HEAP) as usize;
                let base = __align_up(raw_base, __MIN_ALIGN);
                let usable = ($heap_size as usize).saturating_sub(base - raw_base);

                let mut offset = __HEAP_OFFSET.load(core::sync::atomic::Ordering::Relaxed);
                loop {
                    let aligned = __align_up(base + offset, align) - base;
                    let next = match aligned.checked_add(size) {
                        Some(v) => v,
                        None => {
                            __unlock_alloc();
                            return core::ptr::null_mut();
                        }
                    };
                    if next > usable {
                        __unlock_alloc();
                        return core::ptr::null_mut();
                    }
                    match __HEAP_OFFSET.compare_exchange(
                        offset,
                        next,
                        core::sync::atomic::Ordering::SeqCst,
                        core::sync::atomic::Ordering::Relaxed,
                    ) {
                        Ok(_) => {
                            __unlock_alloc();
                            return (base + aligned) as *mut u8;
                        }
                        Err(prev_off) => offset = prev_off,
                    }
                }
            }

            /// Returns the block to the address-ordered free list and coalesces
            /// it with adjacent free blocks.
            #[allow(unsafe_op_in_unsafe_fn)]
            unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
                if ptr.is_null() {
                    return;
                }
                let size = __alloc_size(layout);

                __lock_alloc();

                let addr = ptr as usize;
                let node = ptr as *mut __FreeNode;
                (*node).size = size;
                (*node).next = core::ptr::null_mut();

                // Find the insertion point that keeps the list sorted by address.
                let mut prev: *mut __FreeNode = core::ptr::null_mut();
                let mut cur = __FREE_LIST_HEAD.load(core::sync::atomic::Ordering::Relaxed)
                    as *mut __FreeNode;
                while !cur.is_null() && (cur as usize) < addr {
                    prev = cur;
                    cur = (*cur).next;
                }

                (*node).next = cur;
                if prev.is_null() {
                    __FREE_LIST_HEAD.store(node as usize, core::sync::atomic::Ordering::Relaxed);
                } else {
                    (*prev).next = node;
                }

                // Coalesce with the following block if it is contiguous.
                if !cur.is_null() {
                    let node_end = addr + (*node).size;
                    if node_end == cur as usize {
                        (*node).size += (*cur).size;
                        (*node).next = (*cur).next;
                    }
                }

                // Coalesce with the preceding block if it is contiguous.
                if !prev.is_null() {
                    let prev_end = (prev as usize) + (*prev).size;
                    if prev_end == addr {
                        (*prev).size += (*node).size;
                        (*prev).next = (*node).next;
                    }
                }

                __unlock_alloc();
            }
        }
    };
}

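/// Defines a unit struct `$name` implementing `core::alloc::GlobalAlloc` with a
/// first-fit free list over a heap reserved lazily through the `brk`-style
/// function `$brk`, limited to `heap_max` bytes.
///
/// `$brk` is expected to return `Result<usize, E>`: called with `0` it reports
/// the current program break, and called with an address it attempts to move
/// the break there.
///
/// A minimal usage sketch (`sys_brk`, the struct name, and the 1 MiB limit are
/// placeholders, not part of this crate):
///
/// ```ignore
/// fn sys_brk(new_break: usize) -> Result<usize, ()> {
///     // platform-specific brk syscall goes here
///     Err(())
/// }
///
/// alloc_freelist::define_freelist_brk_allocator! {
///     pub struct BrkAlloc;
///     brk = sys_brk;
///     heap_max = 1024 * 1024;
/// }
///
/// #[global_allocator]
/// static ALLOCATOR: BrkAlloc = BrkAlloc;
/// ```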
#[macro_export]
macro_rules! define_freelist_brk_allocator {
    ($vis:vis struct $name:ident; brk = $brk:path; heap_max = $heap_max:expr;) => {
        $vis struct $name;

        /// A free block: its total size in bytes and a pointer to the next free block.
        #[repr(C)]
        struct __FreeNode {
            size: usize,
            next: *mut __FreeNode,
        }

        // Spinlock word guarding the free list and bump offset (0 = unlocked, 1 = locked).
        static __ALLOC_LOCK: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Head of the free list, stored as a raw address (0 = empty list).
        static __FREE_LIST_HEAD: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Bump-allocation offset into the reserved heap region.
        static __HEAP_OFFSET: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);
        // Base address of the heap obtained from `$brk` (0 = not yet reserved).
        static __HEAP_START: core::sync::atomic::AtomicUsize =
            core::sync::atomic::AtomicUsize::new(0);

        const __MIN_ALIGN: usize = core::mem::align_of::<usize>();
        const __MIN_BLOCK_SIZE: usize = core::mem::size_of::<__FreeNode>();

        /// Rounds `v` up to the next multiple of `align` (`align` must be a power of two).
        #[inline]
        fn __align_up(v: usize, align: usize) -> usize {
            (v + align - 1) & !(align - 1)
        }

        /// Acquires the allocator spinlock, spinning until the lock word flips from 0 to 1.
        #[inline]
        fn __lock_alloc() {
            while __ALLOC_LOCK
                .compare_exchange(
                    0,
                    1,
                    core::sync::atomic::Ordering::Acquire,
                    core::sync::atomic::Ordering::Relaxed,
                )
                .is_err()
            {
                core::hint::spin_loop();
            }
        }

        /// Releases the allocator spinlock.
        #[inline]
        fn __unlock_alloc() {
            __ALLOC_LOCK.store(0, core::sync::atomic::Ordering::Release);
        }

        /// Rounds the requested size up to a multiple of `__MIN_BLOCK_SIZE`, so every
        /// allocated block is large enough to hold a `__FreeNode` once it is freed.
        #[inline]
        fn __alloc_size(layout: core::alloc::Layout) -> usize {
            __align_up(layout.size().max(__MIN_BLOCK_SIZE), __MIN_BLOCK_SIZE)
        }

        unsafe impl core::alloc::GlobalAlloc for $name {
            /// Allocates `layout` by first-fit search of the free list, falling
            /// back to bump allocation from the heap region reserved via `$brk`.
            #[allow(unsafe_op_in_unsafe_fn)]
            unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
                let align = layout.align().max(__MIN_ALIGN);
                let size = __alloc_size(layout);

                __lock_alloc();

                // First-fit scan of the free list.
                let mut prev: *mut __FreeNode = core::ptr::null_mut();
                let mut cur = __FREE_LIST_HEAD.load(core::sync::atomic::Ordering::Relaxed)
                    as *mut __FreeNode;
                while !cur.is_null() {
                    let cur_addr = cur as usize;
                    if cur_addr % align == 0 && (*cur).size >= size {
                        let cur_size = (*cur).size;
                        let next = (*cur).next;
                        let rem = cur_size - size;

                        // Split off any remainder large enough to hold a free node.
                        if rem >= __MIN_BLOCK_SIZE {
                            let rem_ptr = (cur_addr + size) as *mut __FreeNode;
                            (*rem_ptr).size = rem;
                            (*rem_ptr).next = next;
                            if prev.is_null() {
                                __FREE_LIST_HEAD
                                    .store(rem_ptr as usize, core::sync::atomic::Ordering::Relaxed);
                            } else {
                                (*prev).next = rem_ptr;
                            }
                        } else if prev.is_null() {
                            __FREE_LIST_HEAD.store(next as usize, core::sync::atomic::Ordering::Relaxed);
                        } else {
                            (*prev).next = next;
                        }

                        __unlock_alloc();
                        return cur as *mut u8;
                    }
                    prev = cur;
                    cur = (*cur).next;
                }

                // No suitable free block: lazily reserve the heap by querying the
                // current break and moving it forward by `heap_max` bytes.
                let mut start = __HEAP_START.load(core::sync::atomic::Ordering::Relaxed);
                if start == 0 {
                    if let Ok(cur) = $brk(0) {
                        if $brk(cur + ($heap_max as usize)).is_ok() {
                            __HEAP_START.store(cur, core::sync::atomic::Ordering::SeqCst);
                            start = cur;
                        } else {
                            __unlock_alloc();
                            return core::ptr::null_mut();
                        }
                    } else {
                        __unlock_alloc();
                        return core::ptr::null_mut();
                    }
                }

                // Bump-allocate from the reserved region.
                let base = __align_up(start, __MIN_ALIGN);
                let usable = ($heap_max as usize).saturating_sub(base - start);
                let mut offset = __HEAP_OFFSET.load(core::sync::atomic::Ordering::Relaxed);
                loop {
                    let aligned = __align_up(base + offset, align) - base;
                    let next = match aligned.checked_add(size) {
                        Some(v) => v,
                        None => {
                            __unlock_alloc();
                            return core::ptr::null_mut();
                        }
                    };
                    if next > usable {
                        __unlock_alloc();
                        return core::ptr::null_mut();
                    }
                    match __HEAP_OFFSET.compare_exchange(
                        offset,
                        next,
                        core::sync::atomic::Ordering::SeqCst,
                        core::sync::atomic::Ordering::Relaxed,
                    ) {
                        Ok(_) => {
                            __unlock_alloc();
                            return (base + aligned) as *mut u8;
                        }
                        Err(prev_off) => offset = prev_off,
                    }
                }
            }

            /// Returns the block to the address-ordered free list and coalesces
            /// it with adjacent free blocks.
            #[allow(unsafe_op_in_unsafe_fn)]
            unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
                if ptr.is_null() {
                    return;
                }
                let size = __alloc_size(layout);

                __lock_alloc();

                let addr = ptr as usize;
                let node = ptr as *mut __FreeNode;
                (*node).size = size;
                (*node).next = core::ptr::null_mut();

                // Find the insertion point that keeps the list sorted by address.
                let mut prev: *mut __FreeNode = core::ptr::null_mut();
                let mut cur = __FREE_LIST_HEAD.load(core::sync::atomic::Ordering::Relaxed)
                    as *mut __FreeNode;
                while !cur.is_null() && (cur as usize) < addr {
                    prev = cur;
                    cur = (*cur).next;
                }

                (*node).next = cur;
                if prev.is_null() {
                    __FREE_LIST_HEAD.store(node as usize, core::sync::atomic::Ordering::Relaxed);
                } else {
                    (*prev).next = node;
                }

                // Coalesce with the following block if it is contiguous.
                if !cur.is_null() {
                    let node_end = addr + (*node).size;
                    if node_end == cur as usize {
                        (*node).size += (*cur).size;
                        (*node).next = (*cur).next;
                    }
                }

                // Coalesce with the preceding block if it is contiguous.
                if !prev.is_null() {
                    let prev_end = (prev as usize) + (*prev).size;
                    if prev_end == addr {
                        (*prev).size += (*node).size;
                        (*prev).next = (*node).next;
                    }
                }

                __unlock_alloc();
            }
        }
    };
}