// strat9_kernel/process/scheduler/timer_ops.rs
use super::*;
3pub fn timer_tick() {
15 let cpu_idx = crate::arch::x86_64::percpu::current_cpu_index();
16
17 if cpu_is_valid(cpu_idx) {
18 CPU_TOTAL_TICKS[cpu_idx].fetch_add(1, Ordering::Relaxed);
19 }
20
21 if cpu_idx == 0 {
23 TICK_COUNT.fetch_add(1, Ordering::Relaxed);
24 }
25
26 if cpu_idx == 0 {
31 let tick = TICK_COUNT.load(Ordering::Relaxed);
32 let current_time_ns = tick * NS_PER_TICK;
33 crate::process::timer::tick_all_timers(current_time_ns);
34 check_wake_deadlines(current_time_ns);
35 }
36
37 if let Some(mut guard) = SCHEDULER.try_lock() {
39 if let Some(ref mut sched) = *guard {
40 if let Some(cpu) = sched.cpus.get_mut(cpu_idx) {
41 let should_resched = if let Some(ref current_task) = cpu.current_task {
42 if cpu_is_valid(cpu_idx) {
43 match sched.class_table.class_for_task(current_task) {
44 crate::process::sched::SchedClassId::RealTime => {
45 CPU_RT_RUNTIME_TICKS[cpu_idx].fetch_add(1, Ordering::Relaxed);
46 }
47 crate::process::sched::SchedClassId::Fair => {
48 CPU_FAIR_RUNTIME_TICKS[cpu_idx].fetch_add(1, Ordering::Relaxed);
49 }
50 crate::process::sched::SchedClassId::Idle => {
51 CPU_IDLE_TICKS[cpu_idx].fetch_add(1, Ordering::Relaxed);
52 }
53 }
54 }
55 current_task.ticks.fetch_add(1, Ordering::Relaxed);
56 cpu.current_runtime.update();
57 cpu.class_rqs.update_current(
58 &cpu.current_runtime,
59 current_task,
60 false,
61 &sched.class_table,
62 )
63 } else {
64 false
65 };
66 if should_resched {
67 cpu.need_resched = true;
68 }
69 }
70 }
71 } else {
72 note_try_lock_fail_on_cpu(cpu_idx);
73 }
74}
75
/// Wake blocked tasks whose sleep deadline has passed.
///
/// Called from the CPU-0 timer tick with `current_time_ns` derived from the
/// global tick count. Scans `blocked_tasks` for expired deadlines, moves up
/// to `BATCH` of them back onto their last-known CPU's runqueue, and sends a
/// reschedule IPI to each remote CPU that received a wakeup. Uses `try_lock`
/// and simply returns on contention — missed wakeups are retried next tick.
fn check_wake_deadlines(current_time_ns: u64) {
    // Record which CPUs need an IPI; the IPIs are sent only after the
    // scheduler lock is dropped, so we never signal other CPUs while
    // holding the lock they will immediately need.
    let mut ipi_targets = [false; crate::arch::x86_64::percpu::MAX_CPUS];
    let my_cpu = current_cpu_index();
    let mut scheduler = match SCHEDULER.try_lock() {
        Some(guard) => guard,
        None => return,
    };

    if let Some(ref mut sched) = *scheduler {
        // Fixed-size batch: bounds the work done per tick and avoids heap
        // allocation in interrupt context. Anything past BATCH expired
        // deadlines waits for the next tick.
        const BATCH: usize = 128;
        let mut to_wake = [TaskId::from_u64(0); BATCH];
        let mut count = 0usize;
        // Phase 1: collect expired task ids. A deadline of 0 means "no
        // deadline armed". Collection is separate from removal so we do not
        // mutate `blocked_tasks` while iterating it.
        for (id, task) in sched.blocked_tasks.iter() {
            let deadline = task.wake_deadline_ns.load(Ordering::Relaxed);
            if deadline != 0 && current_time_ns >= deadline {
                if count < BATCH {
                    to_wake[count] = *id;
                    count += 1;
                } else {
                    break;
                }
            }
        }

        // Phase 2: move each collected task back to a runqueue.
        for id in to_wake.iter().copied().take(count) {
            if let Some(blocked_task) = sched.blocked_tasks.remove(&id) {
                // Disarm the deadline so a stale value cannot re-trigger.
                blocked_task.wake_deadline_ns.store(0, Ordering::Relaxed);
                // SAFETY: we hold the scheduler lock and the task was just
                // removed from `blocked_tasks`, so nothing else should be
                // accessing its state cell concurrently.
                // NOTE(review): assumes the scheduler lock is the crate-wide
                // guard for `Task::state` — confirm against Task's docs.
                unsafe { *blocked_task.state.get() = TaskState::Ready };
                // Re-enqueue on the task's last-known CPU; fall back to CPU 0
                // if no affinity mapping exists.
                let cpu = sched.task_cpu.get(&id).copied().unwrap_or(0);
                let class = sched.class_table.class_for_task(&blocked_task);
                if let Some(cpu_sched) = sched.cpus.get_mut(cpu) {
                    cpu_sched.class_rqs.enqueue(class, blocked_task);
                    cpu_sched.need_resched = true;
                    // Remote CPUs need an IPI to notice need_resched; the
                    // local CPU is already in the timer interrupt path.
                    if cpu != my_cpu && cpu_is_valid(cpu) {
                        ipi_targets[cpu] = true;
                    }
                }
            }
        }
    }

    // Release the lock before signalling other CPUs.
    drop(scheduler);
    for (cpu, send) in ipi_targets.iter().copied().enumerate() {
        if send {
            send_resched_ipi_to_cpu(cpu);
        }
    }
}
128
/// Global timer ticks since boot (advanced only by CPU 0's timer tick).
pub fn ticks() -> u64 {
    TICK_COUNT.load(Ordering::Relaxed)
}
133
134pub fn get_all_tasks() -> Option<alloc::vec::Vec<Arc<Task>>> {
137 use alloc::vec::Vec;
138 let scheduler = match SCHEDULER.try_lock() {
139 Some(guard) => guard,
140 None => {
141 note_try_lock_fail();
142 return None;
143 }
144 };
145 if let Some(ref sched) = *scheduler {
146 let mut tasks = Vec::with_capacity(sched.all_tasks.len());
147 for (_, task) in sched.all_tasks.iter() {
148 tasks.push(task.clone());
149 }
150 Some(tasks)
151 } else {
152 None
153 }
154}