1use super::{
29 guardian::{Guardian, GuardianState, IrqDisabled},
30 IrqDisabledToken,
31};
32use core::{
33 cell::UnsafeCell,
34 marker::PhantomData,
35 mem::ManuallyDrop,
36 ops::{Deref, DerefMut},
37 sync::atomic::{AtomicBool, AtomicUsize, Ordering},
38};
39
/// Address of a single lock to verbosely trace in `SpinLockGuard::drop`
/// (serial messages around unlock). `usize::MAX` means "disabled".
static DEBUG_WATCH_LOCK_ADDR: AtomicUsize = AtomicUsize::new(usize::MAX);
41
/// A test-and-set spin lock whose acquisition is wrapped by a `Guardian`
/// (by default `IrqDisabled`, which enters with interrupts masked).
pub struct SpinLock<T: ?Sized, G: Guardian = IrqDisabled> {
    // True while some guard holds the lock.
    locked: AtomicBool,
    // Index of the CPU currently holding the lock; `usize::MAX` when free.
    // Maintained with Relaxed ordering — diagnostic only, not a correctness input.
    owner_cpu: AtomicUsize,
    // Ties the guardian policy to the lock type without storing a value.
    _guardian: PhantomData<G>,
    // The protected data; must be last so `T: ?Sized` coercion works.
    data: UnsafeCell<T>,
}
61
// SAFETY: the `locked` flag guarantees at most one thread accesses `data` at a
// time, so sharing `&SpinLock` across threads is sound whenever `T: Send`
// (the data may end up being used from whichever CPU takes the lock).
unsafe impl<T: ?Sized + Send, G: Guardian> Sync for SpinLock<T, G> {}
// SAFETY: moving the lock moves the protected `T` with it; `T: Send` suffices.
unsafe impl<T: ?Sized + Send, G: Guardian> Send for SpinLock<T, G> {}
65
66impl<T, G: Guardian> SpinLock<T, G> {
67 pub const fn new(data: T) -> Self {
71 SpinLock {
72 locked: AtomicBool::new(false),
73 owner_cpu: AtomicUsize::new(usize::MAX),
74 _guardian: PhantomData,
75 data: UnsafeCell::new(data),
76 }
77 }
78}
79
80impl<T: ?Sized, G: Guardian> SpinLock<T, G> {
81 pub fn lock(&self) -> SpinLockGuard<'_, T, G> {
88 let state = G::enter();
89 let mut spins: usize = 0;
90 let this_cpu = crate::arch::x86_64::percpu::current_cpu_index();
91
92 while self
93 .locked
94 .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
95 .is_err()
96 {
97 spins = spins.saturating_add(1);
98 if spins == 5_000_000 {
99 let owner = self.owner_cpu.load(Ordering::Relaxed);
100 crate::serial_println!(
101 "[trace][spin] long-wait lock={:#x} cpu={} owner_cpu={}",
102 self as *const _ as *const () as usize,
103 this_cpu,
104 owner,
105 );
106 spins = 0;
107 }
108 core::hint::spin_loop();
109 }
110 self.owner_cpu.store(this_cpu, Ordering::Relaxed);
111 let self_addr = self as *const _ as *const () as usize;
113 let trace_addr = DEBUG_TRACE_LOCK_ADDR.load(Ordering::Relaxed);
114 if trace_addr != usize::MAX && trace_addr == self_addr {
115 unsafe { core::arch::asm!("mov al, 'A'; out 0xe9, al", out("al") _) };
116 }
117 let buddy_addr = DEBUG_TRACE_BUDDY_ADDR.load(Ordering::Relaxed);
118 if buddy_addr != usize::MAX && buddy_addr == self_addr {
119 unsafe { core::arch::asm!("mov al, 'B'; out 0xe9, al", out("al") _) };
120 }
121 let slab_addr = DEBUG_TRACE_SLAB_ADDR.load(Ordering::Relaxed);
122 if slab_addr != usize::MAX && slab_addr == self_addr {
123 unsafe { core::arch::asm!("mov al, 'S'; out 0xe9, al", out("al") _) };
124 }
125
126 SpinLockGuard {
127 lock: self,
128 state: ManuallyDrop::new(state),
129 }
130 }
131
132 pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T, G>> {
134 let state = G::enter();
135 if self
136 .locked
137 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
138 .is_ok()
139 {
140 let this_cpu = crate::arch::x86_64::percpu::current_cpu_index();
141 self.owner_cpu.store(this_cpu, Ordering::Relaxed);
142 let trace_addr = DEBUG_TRACE_LOCK_ADDR.load(Ordering::Relaxed);
143 let self_addr = self as *const _ as *const () as usize;
144 if trace_addr != usize::MAX && trace_addr == self_addr {
145 unsafe { core::arch::asm!("mov al, 'A'; out 0xe9, al", out("al") _) };
146 }
147 let buddy_addr = DEBUG_TRACE_BUDDY_ADDR.load(Ordering::Relaxed);
148 if buddy_addr != usize::MAX && buddy_addr == self_addr {
149 unsafe { core::arch::asm!("mov al, 'B'; out 0xe9, al", out("al") _) };
150 }
151 let slab_addr = DEBUG_TRACE_SLAB_ADDR.load(Ordering::Relaxed);
152 if slab_addr != usize::MAX && slab_addr == self_addr {
153 unsafe { core::arch::asm!("mov al, 'S'; out 0xe9, al", out("al") _) };
154 }
155 Some(SpinLockGuard {
156 lock: self,
157 state: ManuallyDrop::new(state),
158 })
159 } else {
160 G::exit(state);
161 None
162 }
163 }
164
165 pub fn owner_cpu(&self) -> usize {
167 self.owner_cpu.load(Ordering::Relaxed)
168 }
169
170 pub fn get_mut(&mut self) -> &mut T {
176 self.data.get_mut()
177 }
178}
179
180impl<T: ?Sized> SpinLock<T, IrqDisabled> {
183 pub fn try_lock_no_irqsave(&self) -> Option<SpinLockGuard<'_, T, IrqDisabled>> {
189 let token = match IrqDisabledToken::verify() {
190 Some(token) => token,
191 None => {
192 unsafe { core::arch::asm!("mov al, 'V'; out 0xe9, al", out("al") _) };
196 return None;
197 }
198 };
199 if self
200 .locked
201 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
202 .is_ok()
203 {
204 let this_cpu = crate::arch::x86_64::percpu::current_cpu_index();
205 self.owner_cpu.store(this_cpu, Ordering::Relaxed);
206 let self_addr = self as *const _ as *const () as usize;
207 let trace_addr = DEBUG_TRACE_LOCK_ADDR.load(Ordering::Relaxed);
208 if trace_addr != usize::MAX && trace_addr == self_addr {
209 unsafe { core::arch::asm!("mov al, 'A'; out 0xe9, al", out("al") _) };
210 }
211 let buddy_addr = DEBUG_TRACE_BUDDY_ADDR.load(Ordering::Relaxed);
212 if buddy_addr != usize::MAX && buddy_addr == self_addr {
213 unsafe { core::arch::asm!("mov al, 'B'; out 0xe9, al", out("al") _) };
214 }
215 let slab_addr = DEBUG_TRACE_SLAB_ADDR.load(Ordering::Relaxed);
216 if slab_addr != usize::MAX && slab_addr == self_addr {
217 unsafe { core::arch::asm!("mov al, 'S'; out 0xe9, al", out("al") _) };
218 }
219 Some(SpinLockGuard {
220 lock: self,
221 state: ManuallyDrop::new(GuardianState {
222 token,
223 saved_flags: 0,
224 restore_flags: false,
225 }),
226 })
227 } else {
228 None
229 }
230 }
231}
232
/// Starts verbose drop-tracing for the lock at `addr` (see `SpinLockGuard::drop`).
pub fn debug_set_watch_lock_addr(addr: usize) {
    DEBUG_WATCH_LOCK_ADDR.store(addr, Ordering::Relaxed);
}
239
// Acquire/release trace addresses (`usize::MAX` = disabled). When a lock at
// the stored address is acquired (or released), one marker byte is written to
// debug port 0xe9: 'A'/'a' generic, 'B'/'b' buddy allocator, 'S' slab.
static DEBUG_TRACE_LOCK_ADDR: AtomicUsize = AtomicUsize::new(usize::MAX);
static DEBUG_TRACE_BUDDY_ADDR: AtomicUsize = AtomicUsize::new(usize::MAX);
static DEBUG_TRACE_SLAB_ADDR: AtomicUsize = AtomicUsize::new(usize::MAX);
247
/// Enables 'A'/'a' debug-port markers for the lock at `addr`.
pub fn debug_set_trace_lock_addr(addr: usize) {
    DEBUG_TRACE_LOCK_ADDR.store(addr, Ordering::Relaxed);
}
252
/// Enables 'B'/'b' debug-port markers for the (buddy allocator) lock at `addr`.
pub fn debug_set_trace_buddy_addr(addr: usize) {
    DEBUG_TRACE_BUDDY_ADDR.store(addr, Ordering::Relaxed);
}
257
/// Enables 'S' debug-port markers for the (slab) lock at `addr`.
pub fn debug_set_trace_slab_addr(addr: usize) {
    DEBUG_TRACE_SLAB_ADDR.store(addr, Ordering::Relaxed);
}
262
/// Disables verbose drop-tracing set by `debug_set_watch_lock_addr`.
pub fn debug_clear_watch_lock_addr() {
    DEBUG_WATCH_LOCK_ADDR.store(usize::MAX, Ordering::Relaxed);
}
267
268pub struct SpinLockGuard<'a, T: ?Sized, G: Guardian = IrqDisabled> {
272 lock: &'a SpinLock<T, G>,
273 state: ManuallyDrop<GuardianState<G::Token>>,
277}
278
impl<'a, T: ?Sized> SpinLockGuard<'a, T, IrqDisabled> {
    /// Returns the IRQ-disabled proof token carried by this guard.
    #[inline]
    pub fn token(&self) -> &IrqDisabledToken {
        &self.state.token
    }

    /// Runs `f` with mutable access to the protected data together with the
    /// guard's IRQ-disabled token, without releasing the lock.
    #[inline]
    pub(crate) fn with_mut_and_token<R>(
        &mut self,
        f: impl FnOnce(&mut T, &IrqDisabledToken) -> R,
    ) -> R {
        let token = &self.state.token;
        // SAFETY: the guard holds the lock for its entire lifetime, so we have
        // exclusive access to the protected data here.
        let data = unsafe { &mut *self.lock.data.get() };
        f(data, token)
    }
}
297
impl<'a, T: ?Sized, G: Guardian> Deref for SpinLockGuard<'a, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: the guard holds the lock, so no other owner can access the
        // data while this shared borrow is alive.
        unsafe { &*self.lock.data.get() }
    }
}
306
impl<'a, T: ?Sized, G: Guardian> DerefMut for SpinLockGuard<'a, T, G> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the guard holds the lock and `&mut self` guarantees this is
        // the only borrow of the guard, so exclusive access to the data is sound.
        unsafe { &mut *self.lock.data.get() }
    }
}
313
314impl<'a, T: ?Sized, G: Guardian> Drop for SpinLockGuard<'a, T, G> {
315 fn drop(&mut self) {
316 let lock_addr = self.lock as *const _ as *const () as usize;
317 let watched = DEBUG_WATCH_LOCK_ADDR.load(Ordering::Relaxed);
318 let trace = watched == lock_addr;
319
320 if trace {
321 crate::serial_force_println!(
322 "[trace][spin] drop begin lock={:#x} owner_cpu={} saved_flags={:#x}",
323 lock_addr,
324 self.lock.owner_cpu.load(Ordering::Relaxed),
325 self.state.saved_flags,
326 );
327 }
328
329 self.lock.owner_cpu.store(usize::MAX, Ordering::Relaxed);
330 self.lock.locked.store(false, Ordering::Release);
331
332 if trace {
333 crate::serial_force_println!("[trace][spin] drop unlocked lock={:#x}", lock_addr);
334 }
335 let trace_addr = DEBUG_TRACE_LOCK_ADDR.load(Ordering::Relaxed);
337 if trace_addr != usize::MAX && trace_addr == lock_addr {
338 let cpu = crate::arch::x86_64::percpu::current_cpu_index();
339 unsafe { core::arch::asm!("mov al, 'a'; out 0xe9, al", out("al") _) };
340 }
341 let buddy_addr = DEBUG_TRACE_BUDDY_ADDR.load(Ordering::Relaxed);
342 if buddy_addr != usize::MAX && buddy_addr == lock_addr {
343 let cpu = crate::arch::x86_64::percpu::current_cpu_index();
344 unsafe { core::arch::asm!("mov al, 'b'; out 0xe9, al", out("al") _) };
345 }
346
347 let state = unsafe { ManuallyDrop::take(&mut self.state) };
352 G::exit(state);
353
354 if trace {
355 crate::serial_force_println!(
356 "[trace][spin] drop guardian-exit done lock={:#x}",
357 lock_addr
358 );
359 }
360 }
361}
362
// The guard must not migrate: the guardian state it carries (presumably the
// saved flags / IRQ token — confirm against `Guardian::exit`) is only valid on
// the CPU that acquired the lock. NOTE: negative impls need the nightly
// `negative_impls` feature.
impl<T: ?Sized, G: Guardian> !Send for SpinLockGuard<'_, T, G> {}