// strat9_kernel/trace/mod.rs
1//! Lightweight kernel trace buffers for low-level debugging.
2//!
3//! Design goals:
4//! - no_std friendly
5//! - fixed-size per-CPU ring buffers
6//! - category filtering
7//! - minimal lock contention (`try_lock` drops on contention)
8
9use alloc::vec::Vec;
10use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
11
12use crate::{arch::x86_64::percpu, process::scheduler, sync::SpinLock};
13
/// Bitmask categories used to filter which trace events are recorded.
///
/// Each constant is a distinct bit so categories can be OR-ed together
/// into the global trace mask (see `set_mask` / `enable` / `disable`).
pub mod category {
    /// Page-fault events.
    pub const MEM_PF: u64 = 1 << 0;
    /// Page-mapping events.
    pub const MEM_MAP: u64 = 1 << 1;
    /// Page-unmapping events.
    pub const MEM_UNMAP: u64 = 1 << 2;
    /// Copy-on-write events.
    pub const MEM_COW: u64 = 1 << 3;
    /// Memory-copy events.
    pub const MEM_COPY: u64 = 1 << 4;
    /// Union of all memory-related category bits.
    pub const MEM_ALL: u64 = MEM_PF | MEM_MAP | MEM_UNMAP | MEM_COW | MEM_COPY;
}
22
/// Discriminates what a [`TraceEvent`] describes.
///
/// Stored in [`TraceEvent::kind`] as a raw `u16`; [`kind_name`] maps the
/// raw value back to a short human-readable label.
#[repr(u16)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TraceKind {
    /// Placeholder / unrecognized event kind.
    Unknown = 0,
    /// Page fault ("mem_pf").
    MemPageFault = 1,
    /// Page mapping ("mem_map").
    MemMap = 2,
    /// Page unmapping ("mem_unmap").
    MemUnmap = 3,
    /// Copy-on-write resolution ("mem_cow").
    MemCow = 4,
    /// Memory copy ("mem_copy").
    MemCopy = 5,
}
33
/// A single trace record. Plain-old-data (`Copy`) so it can live in a
/// const-initialized, fixed-size per-CPU ring buffer.
#[derive(Clone, Copy, Debug)]
pub struct TraceEvent {
    /// Globally monotonic sequence number (shared across all CPUs).
    pub seq: u64,
    /// Scheduler tick count at the time the event was recorded.
    pub ticks: u64,
    /// Index of the CPU that recorded the event.
    pub cpu: u16,
    /// Raw [`TraceKind`] discriminant (see [`kind_name`]).
    pub kind: u16,
    /// Category bit(s) the event was recorded under (see [`category`]).
    pub category: u64,
    /// Caller-defined flag bits, stored verbatim.
    pub flags: u64,
    /// Identifier of the task active at the trace site.
    pub task_id: u64,
    /// Process id of that task.
    pub pid: u32,
    /// Thread id of that task.
    pub tid: u32,
    /// Page-table base (CR3) captured at the trace site.
    pub cr3: u64,
    /// Instruction pointer at the trace site.
    pub rip: u64,
    /// Virtual address the event refers to (e.g. faulting address).
    pub vaddr: u64,
    /// First caller-defined payload value.
    pub arg0: u64,
    /// Second caller-defined payload value.
    pub arg1: u64,
}
51
impl TraceEvent {
    /// All-zero placeholder event (`kind == TraceKind::Unknown`), used to
    /// pre-fill ring-buffer slots at const-initialization time.
    pub const EMPTY: Self = Self {
        seq: 0,
        ticks: 0,
        cpu: 0,
        kind: TraceKind::Unknown as u16,
        category: 0,
        flags: 0,
        task_id: 0,
        pid: 0,
        tid: 0,
        cr3: 0,
        rip: 0,
        vaddr: 0,
        arg0: 0,
        arg1: 0,
    };
}
70
/// Task identity captured at a trace site and copied into each event.
#[derive(Clone, Copy, Debug)]
pub struct TraceTaskCtx {
    /// Kernel task identifier.
    pub task_id: u64,
    /// Process id.
    pub pid: u32,
    /// Thread id.
    pub tid: u32,
    /// Page-table base (CR3) of the task.
    pub cr3: u64,
}
78
impl TraceTaskCtx {
    /// Returns an all-zero context, for trace sites where no task
    /// information is available.
    pub const fn empty() -> Self {
        Self {
            task_id: 0,
            pid: 0,
            tid: 0,
            cr3: 0,
        }
    }
}
90
/// Aggregate counters reported by [`stats`].
#[derive(Clone, Copy, Debug, Default)]
pub struct TraceStats {
    /// Events dropped globally because a ring's lock was contended.
    pub dropped: u64,
    /// Sum of events ever stored, over the rings whose lock could be taken.
    pub stored: u64,
}
96
/// Number of events each per-CPU ring holds before overwriting the oldest.
const TRACE_CAPACITY: usize = 512;

/// Fixed-capacity ring buffer of trace events for a single CPU.
struct CpuTraceRing {
    /// Index of the next slot to write (wraps at `TRACE_CAPACITY`).
    head: usize,
    /// Number of valid events currently buffered (saturates at capacity).
    len: usize,
    /// Total events ever pushed into this ring (monotonic, saturating).
    stored: u64,
    /// Backing storage; slots beyond `len` hold `TraceEvent::EMPTY`.
    events: [TraceEvent; TRACE_CAPACITY],
}
105
106impl CpuTraceRing {
107    /// Creates a new instance.
108    const fn new() -> Self {
109        Self {
110            head: 0,
111            len: 0,
112            stored: 0,
113            events: [TraceEvent::EMPTY; TRACE_CAPACITY],
114        }
115    }
116
117    /// Performs the push operation.
118    fn push(&mut self, event: TraceEvent) {
119        self.events[self.head] = event;
120        self.head = (self.head + 1) % TRACE_CAPACITY;
121        if self.len < TRACE_CAPACITY {
122            self.len += 1;
123        }
124        self.stored = self.stored.saturating_add(1);
125    }
126
127    /// Performs the clear operation.
128    fn clear(&mut self) {
129        self.head = 0;
130        self.len = 0;
131        self.stored = 0;
132    }
133
134    /// Performs the snapshot operation.
135    fn snapshot(&self, limit: usize) -> Vec<TraceEvent> {
136        let n = self.len.min(limit);
137        let mut out = Vec::with_capacity(n);
138        if n == 0 {
139            return out;
140        }
141
142        let start = (self.head + TRACE_CAPACITY - n) % TRACE_CAPACITY;
143        for i in 0..n {
144            let idx = (start + i) % TRACE_CAPACITY;
145            out.push(self.events[idx]);
146        }
147        out
148    }
149}
150
// Globally monotonic sequence counter shared by all CPUs (starts at 1).
static TRACE_SEQ: AtomicU64 = AtomicU64::new(1);
// Active category filter mask.
// Default: keep early boot noise low but always surface PF/COW instantly.
static TRACE_MASK: AtomicU64 = AtomicU64::new(category::MEM_PF | category::MEM_COW);
// When true, each recorded event is also echoed to the serial console.
static TRACE_SERIAL_ECHO: AtomicBool = AtomicBool::new(true);
// Events dropped because a ring's lock was contended (see `record`).
static TRACE_DROPPED_TOTAL: AtomicU64 = AtomicU64::new(0);
// One ring per CPU, indexed by the per-CPU index read from GS.
static TRACE_RINGS: [SpinLock<CpuTraceRing>; percpu::MAX_CPUS] =
    [const { SpinLock::new(CpuTraceRing::new()) }; percpu::MAX_CPUS];
158
/// Returns the current category filter mask.
#[inline]
pub fn mask() -> u64 {
    TRACE_MASK.load(Ordering::Relaxed)
}
164
/// Replaces the category filter mask wholesale.
#[inline]
pub fn set_mask(new_mask: u64) {
    TRACE_MASK.store(new_mask, Ordering::Relaxed);
}
170
/// Turns on the given category bits (ORs them into the mask).
#[inline]
pub fn enable(bits: u64) {
    TRACE_MASK.fetch_or(bits, Ordering::Relaxed);
}
176
/// Turns off the given category bits (clears them from the mask).
#[inline]
pub fn disable(bits: u64) {
    TRACE_MASK.fetch_and(!bits, Ordering::Relaxed);
}
182
/// Returns `true` if any of the given category bits are enabled in the mask.
#[inline]
pub fn enabled(category: u64) -> bool {
    (mask() & category) != 0
}
188
/// Enables or disables echoing recorded events to the serial console.
#[inline]
pub fn set_serial_echo(on: bool) {
    TRACE_SERIAL_ECHO.store(on, Ordering::Relaxed);
}
194
/// Returns whether recorded events are echoed to the serial console.
#[inline]
pub fn serial_echo() -> bool {
    TRACE_SERIAL_ECHO.load(Ordering::Relaxed)
}
200
201/// Performs the clear all operation.
202pub fn clear_all() {
203    for ring in TRACE_RINGS.iter() {
204        if let Some(mut guard) = ring.try_lock() {
205            guard.clear();
206        }
207    }
208}
209
210/// Performs the stats operation.
211pub fn stats() -> TraceStats {
212    let mut out = TraceStats::default();
213    out.dropped = TRACE_DROPPED_TOTAL.load(Ordering::Relaxed);
214    for ring in TRACE_RINGS.iter() {
215        if let Some(guard) = ring.try_lock() {
216            out.stored = out.stored.saturating_add(guard.stored);
217        }
218    }
219    out
220}
221
222/// Performs the snapshot all operation.
223pub fn snapshot_all(limit_per_cpu: usize) -> Vec<TraceEvent> {
224    let mut out = Vec::new();
225    let limit = limit_per_cpu.max(1);
226
227    for ring in TRACE_RINGS.iter() {
228        if let Some(guard) = ring.try_lock() {
229            let mut events = guard.snapshot(limit);
230            out.append(&mut events);
231        }
232    }
233
234    out.sort_unstable_by_key(|e| e.seq);
235    out
236}
237
/// Returns this CPU's per-CPU index, falling back to 0 when no index can
/// be read from the GS-based per-CPU area.
#[inline]
fn current_cpu() -> usize {
    percpu::cpu_index_from_gs().unwrap_or(0)
}
243
/// Records a trace event into the current CPU's ring buffer.
///
/// Returns immediately when `category` is masked off. If the CPU's ring
/// lock is contended, the event is counted in `TRACE_DROPPED_TOTAL` and
/// *not* echoed to serial. Otherwise the event is stored and, when serial
/// echo is enabled, also printed to the serial console.
///
/// # Arguments
/// * `category` - category bit(s) this event belongs to (see [`category`]).
/// * `kind` - event discriminator, stored as a raw `u16`.
/// * `flags` - caller-defined flag bits, stored verbatim.
/// * `ctx` - task identity captured at the trace site.
/// * `rip` - instruction pointer at the trace site.
/// * `vaddr` - virtual address the event refers to.
/// * `arg0` / `arg1` - caller-defined payload values.
pub fn record(
    category: u64,
    kind: TraceKind,
    flags: u64,
    ctx: TraceTaskCtx,
    rip: u64,
    vaddr: u64,
    arg0: u64,
    arg1: u64,
) {
    // Cheap early-out: a single relaxed atomic load when the category is off.
    if !enabled(category) {
        return;
    }

    let cpu = current_cpu();
    // Global (cross-CPU) sequence number so merged snapshots can be ordered.
    let seq = TRACE_SEQ.fetch_add(1, Ordering::Relaxed);
    let event = TraceEvent {
        seq,
        ticks: scheduler::ticks(),
        cpu: cpu as u16,
        kind: kind as u16,
        category,
        flags,
        task_id: ctx.task_id,
        pid: ctx.pid,
        tid: ctx.tid,
        cr3: ctx.cr3,
        rip,
        vaddr,
        arg0,
        arg1,
    };

    // Never spin here: tracing must not add lock contention to hot paths.
    // On contention the event is dropped (counted) and echo is skipped too.
    if let Some(mut ring) = TRACE_RINGS[cpu].try_lock() {
        ring.push(event);
    } else {
        TRACE_DROPPED_TOTAL.fetch_add(1, Ordering::Relaxed);
        return;
    }

    if serial_echo() {
        crate::serial_println!(
            "\x1b[90m[trace] seq={}\x1b[0m cpu={} kind={} \x1b[36mpid={}\x1b[0m tid={} \x1b[35mrip={:#x}\x1b[0m \x1b[35mvaddr={:#x}\x1b[0m a0={:#x} a1={:#x} fl={:#x}",
            event.seq,
            event.cpu,
            kind_name(event.kind),
            event.pid,
            event.tid,
            event.rip,
            event.vaddr,
            event.arg0,
            event.arg1,
            event.flags
        );
    }
}
301
/// Maps a raw `TraceKind` discriminant back to the short label used by
/// the serial echo in `record`. Unrecognized values (including
/// `TraceKind::Unknown`) map to `"unknown"`.
#[inline]
pub fn kind_name(kind: u16) -> &'static str {
    // Lookup table indexed by the explicit `#[repr(u16)]` discriminants
    // (0..=5 as declared on `TraceKind`); this replaces a repetitive
    // `x if x == Variant as u16` guard chain. Out-of-range values fall
    // back to "unknown".
    const NAMES: [&str; 6] = [
        "unknown",   // TraceKind::Unknown = 0
        "mem_pf",    // TraceKind::MemPageFault = 1
        "mem_map",   // TraceKind::MemMap = 2
        "mem_unmap", // TraceKind::MemUnmap = 3
        "mem_cow",   // TraceKind::MemCow = 4
        "mem_copy",  // TraceKind::MemCopy = 5
    ];
    NAMES.get(kind as usize).copied().unwrap_or("unknown")
}
314
315/// Performs the mask human operation.
316pub fn mask_human(mask: u64) -> &'static str {
317    if mask == 0 {
318        "none"
319    } else if (mask & category::MEM_ALL) == category::MEM_ALL {
320        "mem:*"
321    } else {
322        "custom"
323    }
324}
325
/// Convenience wrapper for memory-subsystem trace points; forwards all
/// arguments unchanged to [`trace::record`].
#[macro_export]
macro_rules! trace_mem {
    ($cat:expr, $kind:expr, $flags:expr, $ctx:expr, $rip:expr, $vaddr:expr, $arg0:expr, $arg1:expr) => {
        $crate::trace::record($cat, $kind, $flags, $ctx, $rip, $vaddr, $arg0, $arg1)
    };
}