// nic_queues/lib.rs — hardware-agnostic RX/TX descriptor-ring abstractions.
1#![no_std]
2
/// A single receive descriptor slot in a NIC RX ring.
///
/// Implementors map these operations onto the hardware-specific descriptor
/// layout. `Copy` is required so descriptors can be moved around as plain
/// old data.
pub trait RxDescriptor: Copy {
    /// Writes the physical address of the receive buffer into the descriptor.
    fn set_buffer_addr(&mut self, phys: u64);
    /// Returns `true` once the hardware has marked this descriptor complete
    /// (a packet has been written into its buffer).
    fn is_done(&self) -> bool;
    /// Length in bytes of the received packet, as reported by the hardware.
    /// Only meaningful once `is_done` returns `true`.
    fn packet_length(&self) -> u16;
    /// Clears the hardware status field so the descriptor can be handed back
    /// to the NIC for reuse.
    fn clear_status(&mut self);
}
13
/// A single transmit descriptor slot in a NIC TX ring.
///
/// Implementors map these operations onto the hardware-specific descriptor
/// layout.
pub trait TxDescriptor: Copy {
    /// Points the descriptor at a packet buffer: `len` bytes at physical
    /// address `phys`.
    fn set_buffer(&mut self, phys: u64, len: u16);
    /// Sets the EOP / IFCS / RS command bits (end-of-packet, insert-FCS,
    /// report-status — Intel-style naming; confirm against the target NIC).
    fn set_eop_ifcs_rs(&mut self);
    /// Returns `true` once the hardware reports this descriptor transmitted.
    fn is_done(&self) -> bool;
    /// Resets the descriptor to a clean state before reuse.
    fn clear(&mut self);
}
24
/// Software handle for a NIC receive descriptor ring.
///
/// Holds a raw pointer to the descriptor array plus the ring geometry; all
/// access goes through the methods on the `impl`, which wrap indices modulo
/// `count`. The trait bound lives on the `impl` blocks rather than here,
/// per the Rust API guidelines (C-STRUCT-BOUNDS) — bounds on struct
/// definitions add noise without adding safety.
pub struct RxRing<D> {
    /// Base of a contiguous array of `count` descriptors.
    descs: *mut D,
    /// Total number of descriptors in the ring.
    count: usize,
    /// Software tail index; `poll` inspects the slot after this one.
    tail: usize,
}
30
// SAFETY: `RxRing` owns no `D` values directly — only a raw pointer into the
// descriptor ring plus plain `usize` indices, all trivially movable between
// threads. Moving the handle to another thread is therefore safe; soundness
// of actually *using* it from that thread rests on external code upholding
// exclusive mutation / aliasing rules for the underlying ring memory, as
// required by `RxRing::new`.
unsafe impl<D: RxDescriptor> Send for RxRing<D> {}
35
36impl<D: RxDescriptor> RxRing<D> {
37 /// # Safety
38 /// `descs` must point to `count` valid, zero-initialised descriptors.
39 pub unsafe fn new(descs: *mut D, count: usize) -> Self {
40 Self {
41 descs,
42 count,
43 tail: count - 1,
44 }
45 }
46
47 /// Performs the count operation.
48 pub fn count(&self) -> usize {
49 self.count
50 }
51
52 /// Performs the tail operation.
53 pub fn tail(&self) -> usize {
54 self.tail
55 }
56
57 /// Performs the desc mut operation.
58 pub fn desc_mut(&mut self, idx: usize) -> &mut D {
59 // SAFETY: `new` guarantees `descs` points to `count` valid descriptors.
60 // Indexing is wrapped modulo `count`, so pointer arithmetic stays in ring bounds.
61 unsafe { &mut *self.descs.add(idx % self.count) }
62 }
63
64 /// Check the next descriptor; returns `(index, packet_length)` if ready.
65 pub fn poll(&self) -> Option<(usize, u16)> {
66 let next = (self.tail + 1) % self.count;
67 // SAFETY: `next < count` and `descs` points to `count` valid descriptors.
68 // Immutable access does not violate aliasing.
69 let desc = unsafe { &*self.descs.add(next) };
70 if desc.is_done() {
71 Some((next, desc.packet_length()))
72 } else {
73 None
74 }
75 }
76
77 /// Advance tail after consuming a packet. Returns new tail.
78 pub fn advance(&mut self) -> usize {
79 self.tail = (self.tail + 1) % self.count;
80 self.tail
81 }
82
83 /// Set up one RX descriptor with a pre-allocated buffer.
84 pub fn setup_desc(&mut self, idx: usize, buf_phys: u64) {
85 let d = self.desc_mut(idx);
86 d.clear_status();
87 d.set_buffer_addr(buf_phys);
88 }
89}
90
/// Software handle for a NIC transmit descriptor ring.
///
/// Holds a raw pointer to the descriptor array plus the ring geometry; all
/// access goes through the methods on the `impl`, which wrap indices modulo
/// `count`. The trait bound lives on the `impl` blocks rather than here,
/// per the Rust API guidelines (C-STRUCT-BOUNDS).
pub struct TxRing<D> {
    /// Base of a contiguous array of `count` descriptors.
    descs: *mut D,
    /// Total number of descriptors in the ring.
    count: usize,
    /// Software tail index: the next slot `submit` will use.
    tail: usize,
}
96
// SAFETY: `TxRing` stores only a raw descriptor pointer and plain `usize`
// indices, all trivially movable between threads. Transferring the handle
// across threads is sound provided callers synchronize ownership of the
// descriptor memory itself, as required by `TxRing::new`.
unsafe impl<D: TxDescriptor> Send for TxRing<D> {}
100
101impl<D: TxDescriptor> TxRing<D> {
102 /// # Safety
103 /// `descs` must point to `count` valid, zero-initialised descriptors.
104 pub unsafe fn new(descs: *mut D, count: usize) -> Self {
105 Self {
106 descs,
107 count,
108 tail: 0,
109 }
110 }
111
112 /// Performs the count operation.
113 pub fn count(&self) -> usize {
114 self.count
115 }
116
117 /// Performs the tail operation.
118 pub fn tail(&self) -> usize {
119 self.tail
120 }
121
122 /// Performs the desc operation.
123 pub fn desc(&self, idx: usize) -> &D {
124 // SAFETY: `idx % count` is in bounds and `descs` points to `count` valid descriptors.
125 unsafe { &*self.descs.add(idx % self.count) }
126 }
127
128 /// Performs the desc mut operation.
129 pub fn desc_mut(&mut self, idx: usize) -> &mut D {
130 // SAFETY: `idx % count` is in bounds and mutable access is gated by `&mut self`.
131 unsafe { &mut *self.descs.add(idx % self.count) }
132 }
133
134 /// Prepare and submit a packet at the current tail slot.
135 /// Returns the descriptor index used.
136 pub fn submit(&mut self, phys: u64, len: u16) -> usize {
137 let idx = self.tail;
138 let d = self.desc_mut(idx);
139 d.clear();
140 d.set_buffer(phys, len);
141 d.set_eop_ifcs_rs();
142 self.tail = (idx + 1) % self.count;
143 idx
144 }
145
146 /// Returns whether done.
147 pub fn is_done(&self, idx: usize) -> bool {
148 self.desc(idx).is_done()
149 }
150}