// strat9_kernel/trace/mod.rs
1//! Lightweight kernel trace buffers for low-level debugging.
2//!
3//! Design goals:
4//! - no_std friendly
5//! - fixed-size per-CPU ring buffers
6//! - category filtering
7//! - minimal lock contention (`try_lock` drops on contention)
8
9use alloc::vec::Vec;
10use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
11
12use crate::{arch::x86_64::percpu, process::scheduler, sync::SpinLock};
13
pub mod category {
    //! Bitmask categories used to filter which trace events are recorded.

    /// Page-fault handling events.
    pub const MEM_PF: u64 = 1 << 0;
    /// Address-space mapping events.
    pub const MEM_MAP: u64 = 1 << 1;
    /// Address-space unmapping events.
    pub const MEM_UNMAP: u64 = 1 << 2;
    /// Copy-on-write resolution events.
    pub const MEM_COW: u64 = 1 << 3;
    /// Page-content copy events.
    pub const MEM_COPY: u64 = 1 << 4;
    /// Union of every memory category above.
    pub const MEM_ALL: u64 = MEM_PF | MEM_MAP | MEM_UNMAP | MEM_COW | MEM_COPY;
}
22
/// Kind of a trace event; stored in [`TraceEvent::kind`] as a raw `u16`.
///
/// Discriminants are explicit and stable so stored events remain
/// interpretable (see `kind_name`).
#[repr(u16)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TraceKind {
    /// Catch-all for unrecognized or unset kinds.
    Unknown = 0,
    /// Page-fault handled.
    MemPageFault = 1,
    /// Mapping created.
    MemMap = 2,
    /// Mapping removed.
    MemUnmap = 3,
    /// Copy-on-write resolved.
    MemCow = 4,
    /// Page contents copied.
    MemCopy = 5,
}
33
/// A single fixed-size trace record stored in the per-CPU ring buffers.
#[derive(Clone, Copy, Debug)]
pub struct TraceEvent {
    /// Globally unique, monotonically increasing sequence number
    /// (used to merge per-CPU snapshots in order).
    pub seq: u64,
    /// Scheduler tick count at record time.
    pub ticks: u64,
    /// Index of the CPU that recorded the event.
    pub cpu: u16,
    /// Raw [`TraceKind`] discriminant.
    pub kind: u16,
    /// Category bitmask the event was recorded under (see `category`).
    pub category: u64,
    /// Event-specific flag bits.
    pub flags: u64,
    /// Identifier of the task active at record time.
    pub task_id: u64,
    /// Process id of the active task.
    pub pid: u32,
    /// Thread id of the active task.
    pub tid: u32,
    /// Page-table root (CR3) of the active task.
    pub cr3: u64,
    /// Instruction pointer associated with the event.
    pub rip: u64,
    /// Virtual address associated with the event.
    pub vaddr: u64,
    /// First event-specific argument.
    pub arg0: u64,
    /// Second event-specific argument.
    pub arg1: u64,
}
51
52impl TraceEvent {
53    pub const EMPTY: Self = Self {
54        seq: 0,
55        ticks: 0,
56        cpu: 0,
57        kind: TraceKind::Unknown as u16,
58        category: 0,
59        flags: 0,
60        task_id: 0,
61        pid: 0,
62        tid: 0,
63        cr3: 0,
64        rip: 0,
65        vaddr: 0,
66        arg0: 0,
67        arg1: 0,
68    };
69}
70
/// Task-identity fields captured at the trace site and copied into the event.
#[derive(Clone, Copy, Debug)]
pub struct TraceTaskCtx {
    /// Identifier of the current task.
    pub task_id: u64,
    /// Process id.
    pub pid: u32,
    /// Thread id.
    pub tid: u32,
    /// Page-table root (CR3).
    pub cr3: u64,
}
78
79impl TraceTaskCtx {
80    /// Performs the empty operation.
81    pub const fn empty() -> Self {
82        Self {
83            task_id: 0,
84            pid: 0,
85            tid: 0,
86            cr3: 0,
87        }
88    }
89}
90
/// Aggregate counters across all per-CPU trace rings.
#[derive(Clone, Copy, Debug, Default)]
pub struct TraceStats {
    /// Events lost (e.g. to ring-lock contention), summed over all CPUs.
    pub dropped: u64,
    /// Events successfully stored, summed over all CPUs (saturating).
    pub stored: u64,
}
96
/// Number of event slots in each per-CPU ring.
const TRACE_CAPACITY: usize = 512;

/// Fixed-capacity ring buffer of trace events for one CPU,
/// protected by a `SpinLock` in `TRACE_RINGS`.
struct CpuTraceRing {
    /// Index of the next slot to write; wraps modulo `TRACE_CAPACITY`.
    head: usize,
    /// Number of valid events currently held (caps at `TRACE_CAPACITY`).
    len: usize,
    /// Total events ever pushed into this ring (saturating counter).
    stored: u64,
    /// Slot storage, pre-filled with `TraceEvent::EMPTY`.
    events: [TraceEvent; TRACE_CAPACITY],
}
105
106impl CpuTraceRing {
107    /// Creates a new instance.
108    const fn new() -> Self {
109        Self {
110            head: 0,
111            len: 0,
112            stored: 0,
113            events: [TraceEvent::EMPTY; TRACE_CAPACITY],
114        }
115    }
116
117    /// Performs the push operation.
118    fn push(&mut self, event: TraceEvent) {
119        self.events[self.head] = event;
120        self.head = (self.head + 1) % TRACE_CAPACITY;
121        if self.len < TRACE_CAPACITY {
122            self.len += 1;
123        }
124        self.stored = self.stored.saturating_add(1);
125    }
126
127    /// Performs the clear operation.
128    fn clear(&mut self) {
129        self.head = 0;
130        self.len = 0;
131        self.stored = 0;
132    }
133
134    /// Performs the snapshot operation.
135    fn snapshot(&self, limit: usize) -> Vec<TraceEvent> {
136        let n = self.len.min(limit);
137        let mut out = Vec::with_capacity(n);
138        if n == 0 {
139            return out;
140        }
141
142        let start = (self.head + TRACE_CAPACITY - n) % TRACE_CAPACITY;
143        for i in 0..n {
144            let idx = (start + i) % TRACE_CAPACITY;
145            out.push(self.events[idx]);
146        }
147        out
148    }
149}
150
// Global event sequence; starts at 1 so the `TraceEvent::EMPTY` sentinel
// (seq 0) never collides with a real event.
static TRACE_SEQ: AtomicU64 = AtomicU64::new(1);
// Default: keep trace buffers available for explicit debugging, but do not
// echo demand-paging traffic to serial during boot. Lazy user mappings can
// generate a large amount of legitimate page faults before userspace settles.
static TRACE_MASK: AtomicU64 = AtomicU64::new(category::MEM_COW);
// When set, `record` additionally prints each stored event to serial.
static TRACE_SERIAL_ECHO: AtomicBool = AtomicBool::new(false);
// Events dropped because a ring's lock was contended, across all CPUs.
static TRACE_DROPPED_TOTAL: AtomicU64 = AtomicU64::new(0);
// One lock-protected ring per possible CPU, indexed by the per-CPU id.
static TRACE_RINGS: [SpinLock<CpuTraceRing>; percpu::MAX_CPUS] =
    [const { SpinLock::new(CpuTraceRing::new()) }; percpu::MAX_CPUS];
160
/// Returns the current trace category mask.
#[inline]
pub fn mask() -> u64 {
    TRACE_MASK.load(Ordering::Relaxed)
}
166
/// Replaces the trace category mask wholesale.
#[inline]
pub fn set_mask(new_mask: u64) {
    TRACE_MASK.store(new_mask, Ordering::Relaxed);
}
172
/// Turns on the given category bits in the trace mask.
#[inline]
pub fn enable(bits: u64) {
    TRACE_MASK.fetch_or(bits, Ordering::Relaxed);
}
178
/// Clears the given category bits from the trace mask.
#[inline]
pub fn disable(bits: u64) {
    TRACE_MASK.fetch_and(!bits, Ordering::Relaxed);
}
184
185/// Performs the enabled operation.
186#[inline]
187pub fn enabled(category: u64) -> bool {
188    (mask() & category) != 0
189}
190
/// Enables or disables echoing of recorded events to the serial console.
#[inline]
pub fn set_serial_echo(on: bool) {
    TRACE_SERIAL_ECHO.store(on, Ordering::Relaxed);
}
196
/// Returns whether recorded events are echoed to the serial console.
#[inline]
pub fn serial_echo() -> bool {
    TRACE_SERIAL_ECHO.load(Ordering::Relaxed)
}
202
203/// Performs the clear all operation.
204pub fn clear_all() {
205    for ring in TRACE_RINGS.iter() {
206        if let Some(mut guard) = ring.try_lock() {
207            guard.clear();
208        }
209    }
210}
211
212/// Performs the stats operation.
213pub fn stats() -> TraceStats {
214    let mut out = TraceStats::default();
215    out.dropped = TRACE_DROPPED_TOTAL.load(Ordering::Relaxed);
216    for ring in TRACE_RINGS.iter() {
217        if let Some(guard) = ring.try_lock() {
218            out.stored = out.stored.saturating_add(guard.stored);
219        }
220    }
221    out
222}
223
224/// Performs the snapshot all operation.
225pub fn snapshot_all(limit_per_cpu: usize) -> Vec<TraceEvent> {
226    let mut out = Vec::new();
227    let limit = limit_per_cpu.max(1);
228
229    for ring in TRACE_RINGS.iter() {
230        if let Some(guard) = ring.try_lock() {
231            let mut events = guard.snapshot(limit);
232            out.append(&mut events);
233        }
234    }
235
236    out.sort_unstable_by_key(|e| e.seq);
237    out
238}
239
/// Best-effort index of the executing CPU, read from the GS-based per-CPU
/// area; falls back to 0 when per-CPU data is not yet available.
#[inline]
fn current_cpu() -> usize {
    percpu::cpu_index_from_gs().unwrap_or(0)
}
245
246/// Performs the record operation.
247pub fn record(
248    category: u64,
249    kind: TraceKind,
250    flags: u64,
251    ctx: TraceTaskCtx,
252    rip: u64,
253    vaddr: u64,
254    arg0: u64,
255    arg1: u64,
256) {
257    if !enabled(category) {
258        return;
259    }
260
261    let cpu = current_cpu();
262    let seq = TRACE_SEQ.fetch_add(1, Ordering::Relaxed);
263    let event = TraceEvent {
264        seq,
265        ticks: scheduler::ticks(),
266        cpu: cpu as u16,
267        kind: kind as u16,
268        category,
269        flags,
270        task_id: ctx.task_id,
271        pid: ctx.pid,
272        tid: ctx.tid,
273        cr3: ctx.cr3,
274        rip,
275        vaddr,
276        arg0,
277        arg1,
278    };
279
280    if let Some(mut ring) = TRACE_RINGS[cpu].try_lock() {
281        ring.push(event);
282    } else {
283        TRACE_DROPPED_TOTAL.fetch_add(1, Ordering::Relaxed);
284        return;
285    }
286
287    if serial_echo() {
288        crate::serial_println!(
289            "\x1b[90m[trace] seq={}\x1b[0m cpu={} kind={} \x1b[36mpid={}\x1b[0m tid={} \x1b[35mrip={:#x}\x1b[0m \x1b[35mvaddr={:#x}\x1b[0m a0={:#x} a1={:#x} fl={:#x}",
290            event.seq,
291            event.cpu,
292            kind_name(event.kind),
293            event.pid,
294            event.tid,
295            event.rip,
296            event.vaddr,
297            event.arg0,
298            event.arg1,
299            event.flags
300        );
301    }
302}
303
304/// Performs the kind name operation.
305#[inline]
306pub fn kind_name(kind: u16) -> &'static str {
307    match kind {
308        x if x == TraceKind::MemPageFault as u16 => "mem_pf",
309        x if x == TraceKind::MemMap as u16 => "mem_map",
310        x if x == TraceKind::MemUnmap as u16 => "mem_unmap",
311        x if x == TraceKind::MemCow as u16 => "mem_cow",
312        x if x == TraceKind::MemCopy as u16 => "mem_copy",
313        _ => "unknown",
314    }
315}
316
317/// Performs the mask human operation.
318pub fn mask_human(mask: u64) -> &'static str {
319    if mask == 0 {
320        "none"
321    } else if (mask & category::MEM_ALL) == category::MEM_ALL {
322        "mem:*"
323    } else {
324        "custom"
325    }
326}
327
/// Convenience wrapper that forwards its arguments to `trace::record`,
/// usable from anywhere in the crate without importing the module path.
/// Each argument expression is evaluated exactly once.
#[macro_export]
macro_rules! trace_mem {
    ($cat:expr, $kind:expr, $flags:expr, $ctx:expr, $rip:expr, $vaddr:expr, $arg0:expr, $arg1:expr) => {
        $crate::trace::record($cat, $kind, $flags, $ctx, $rip, $vaddr, $arg0, $arg1)
    };
}