// strat9_kernel/sync/spinlock.rs
// Spinlock implementation for kernel synchronization
3use super::IrqDisabledToken;
4use core::{
5    cell::UnsafeCell,
6    ops::{Deref, DerefMut},
7    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
8};
9
/// Address of a lock to trace during `SpinLockGuard::drop`; `usize::MAX` means no lock is watched.
static DEBUG_WATCH_LOCK_ADDR: AtomicUsize = AtomicUsize::new(usize::MAX);
11
/// A simple spinlock
///
/// Protects `data` with an atomic test-and-set flag. `lock()` also disables
/// interrupts on the current CPU for the lifetime of the returned guard, and
/// `owner_cpu` records which CPU holds the lock for deadlock tracing.
pub struct SpinLock<T> {
    /// `true` while some CPU holds the lock.
    locked: AtomicBool,
    /// Index of the CPU currently holding the lock; `usize::MAX` when unlocked.
    owner_cpu: AtomicUsize,
    /// The protected value; only accessed through a held `SpinLockGuard`.
    data: UnsafeCell<T>,
}
18
// SAFETY: SpinLock can be safely shared between threads
// because it uses atomic operations for synchronization:
// all access to the inner `UnsafeCell` is gated by the `locked` flag.
// `T: Send` is required because the lock hands out `&mut T` to whichever
// thread acquires it.
unsafe impl<T: Send> Sync for SpinLock<T> {}
unsafe impl<T: Send> Send for SpinLock<T> {}
23
24impl<T> SpinLock<T> {
25    /// Create a new unlocked spinlock
26    pub const fn new(data: T) -> Self {
27        SpinLock {
28            locked: AtomicBool::new(false),
29            owner_cpu: AtomicUsize::new(usize::MAX),
30            data: UnsafeCell::new(data),
31        }
32    }
33
34    /// Acquire the lock, spinning until it's available
35    pub fn lock(&self) -> SpinLockGuard<'_, T> {
36        let saved_flags = crate::arch::x86_64::save_flags_and_cli();
37        // SAFETY: `save_flags_and_cli()` has just disabled interrupts on this CPU.
38        let irq_token = unsafe { IrqDisabledToken::new_unchecked() };
39        let mut spins: usize = 0;
40        let this_cpu = crate::arch::x86_64::percpu::current_cpu_index();
41        // Spin until we can set locked from false to true
42        while self
43            .locked
44            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
45            .is_err()
46        {
47            spins = spins.saturating_add(1);
48            if spins == 5_000_000 {
49                let owner = self.owner_cpu.load(Ordering::Relaxed);
50                crate::serial_println!(
51                    "[trace][spin] long-wait lock={:#x} cpu={} owner_cpu={}",
52                    self as *const _ as usize,
53                    this_cpu,
54                    owner
55                );
56                spins = 0;
57            }
58            // Hint to CPU that we're spinning
59            core::hint::spin_loop();
60        }
61        self.owner_cpu.store(this_cpu, Ordering::Relaxed);
62
63        SpinLockGuard {
64            lock: self,
65            saved_flags,
66            restore_flags_on_drop: true,
67            irq_token,
68        }
69    }
70
71    /// Try to acquire the lock without spinning.
72    pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T>> {
73        let saved_flags = crate::arch::x86_64::save_flags_and_cli();
74        // SAFETY: `save_flags_and_cli()` has just disabled interrupts on this CPU.
75        let irq_token = unsafe { IrqDisabledToken::new_unchecked() };
76        if self
77            .locked
78            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
79            .is_ok()
80        {
81            self.owner_cpu
82                .store(crate::arch::x86_64::percpu::current_cpu_index(), Ordering::Relaxed);
83            Some(SpinLockGuard {
84                lock: self,
85                saved_flags,
86                restore_flags_on_drop: true,
87                irq_token,
88            })
89        } else {
90            crate::arch::x86_64::restore_flags(saved_flags);
91            None
92        }
93    }
94
95    /// Try to acquire the lock without touching interrupt flags.
96    ///
97    /// Returns `None` if IRQs are currently enabled (i.e. no `IrqDisabledToken`
98    /// can be constructed) or if the lock is already held.
99    /// Caller must ensure IRQs remain disabled for the lifetime of the guard.
100    pub fn try_lock_no_irqsave(&self) -> Option<SpinLockGuard<'_, T>> {
101        let irq_token = IrqDisabledToken::verify()?;
102        if self
103            .locked
104            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
105            .is_ok()
106        {
107            self.owner_cpu
108                .store(crate::arch::x86_64::percpu::current_cpu_index(), Ordering::Relaxed);
109            Some(SpinLockGuard {
110                lock: self,
111                saved_flags: 0,
112                restore_flags_on_drop: false,
113                irq_token,
114            })
115        } else {
116            None
117        }
118    }
119
120    /// Returns the owner CPU index for deadlock tracing (`usize::MAX` if unlocked).
121    pub fn owner_cpu(&self) -> usize {
122        self.owner_cpu.load(Ordering::Relaxed)
123    }
124}
125
/// Set a lock address to trace during `SpinLockGuard::drop`.
///
/// When a guard for the lock at `addr` is dropped, extra trace lines are
/// emitted on the serial console. Only one address is watched at a time.
pub fn debug_set_watch_lock_addr(addr: usize) {
    DEBUG_WATCH_LOCK_ADDR.store(addr, Ordering::Relaxed);
}
130
/// Clear the watched lock address.
///
/// Resets the watch to the `usize::MAX` sentinel, which matches no lock.
pub fn debug_clear_watch_lock_addr() {
    DEBUG_WATCH_LOCK_ADDR.store(usize::MAX, Ordering::Relaxed);
}
135
/// RAII guard for SpinLock
///
/// Releases the lock when dropped; depending on how it was acquired it may
/// also restore the interrupt flags saved at acquisition time.
pub struct SpinLockGuard<'a, T> {
    /// The lock released on drop.
    lock: &'a SpinLock<T>,
    /// Flags value saved before interrupts were disabled (0 when unused).
    saved_flags: u64,
    /// Whether `drop` restores `saved_flags`; `false` for guards from
    /// `try_lock_no_irqsave`, which never touched the flags.
    restore_flags_on_drop: bool,
    /// Type-level proof that interrupts are disabled while the guard lives.
    irq_token: IrqDisabledToken,
}
143
144impl<'a, T> SpinLockGuard<'a, T> {
145    /// Retourne la preuve typée que les interruptions sont désactivées.
146    #[inline]
147    pub fn token(&self) -> &IrqDisabledToken {
148        &self.irq_token
149    }
150
151    #[inline]
152    pub(crate) fn with_mut_and_token<R>(
153        &mut self,
154        f: impl FnOnce(&mut T, &IrqDisabledToken) -> R,
155    ) -> R {
156        let token = &self.irq_token;
157        // SAFETY: ce guard possède le verrou et protège l'accès exclusif à `data`.
158        let data = unsafe { &mut *self.lock.data.get() };
159        f(data, token)
160    }
161}
162
163impl<'a, T> Deref for SpinLockGuard<'a, T> {
164    type Target = T;
165
166    /// Performs the deref operation.
167    fn deref(&self) -> &T {
168        // SAFETY: We hold the lock, so exclusive access is guaranteed
169        unsafe { &*self.lock.data.get() }
170    }
171}
172
173impl<'a, T> DerefMut for SpinLockGuard<'a, T> {
174    /// Performs the deref mut operation.
175    fn deref_mut(&mut self) -> &mut T {
176        // SAFETY: We hold the lock, so exclusive access is guaranteed
177        unsafe { &mut *self.lock.data.get() }
178    }
179}
180
181impl<'a, T> Drop for SpinLockGuard<'a, T> {
182    /// Performs the drop operation.
183    fn drop(&mut self) {
184        let lock_addr = self.lock as *const _ as usize;
185        let watched = DEBUG_WATCH_LOCK_ADDR.load(Ordering::Relaxed);
186        let trace = watched == lock_addr;
187        if trace {
188            crate::serial_force_println!(
189                "[trace][spin] drop begin lock={:#x} owner_cpu={} saved_flags={:#x}",
190                lock_addr,
191                self.lock.owner_cpu.load(Ordering::Relaxed),
192                self.saved_flags
193            );
194        }
195        // Release the lock
196        self.lock.owner_cpu.store(usize::MAX, Ordering::Relaxed);
197        self.lock.locked.store(false, Ordering::Release);
198        if trace {
199            crate::serial_force_println!("[trace][spin] drop unlocked lock={:#x}", lock_addr);
200            crate::serial_force_println!(
201                "[trace][spin] drop restore_flags begin lock={:#x}",
202                lock_addr
203            );
204        }
205        if self.restore_flags_on_drop {
206            crate::arch::x86_64::restore_flags(self.saved_flags);
207            if trace {
208                crate::serial_force_println!(
209                    "[trace][spin] drop restore_flags done lock={:#x}",
210                    lock_addr
211                );
212            }
213        } else if trace {
214            crate::serial_force_println!(
215                "[trace][spin] drop restore_flags skipped lock={:#x}",
216                lock_addr
217            );
218        }
219    }
220}