1#![no_std]
2
3use core::ptr;
4use intel_ethernet::{ctrl, eerd, int_bits, rctl, regs, tctl, LegacyRxDesc, LegacyTxDesc};
5use net_core::NetError;
6use nic_buffers::{DmaAllocator, DmaRegion};
7use nic_queues::{RxDescriptor, RxRing, TxRing};
8
/// Number of descriptors in the receive ring.
pub const NUM_RX: usize = 128;
/// Number of descriptors in the transmit ring.
pub const NUM_TX: usize = 128;
/// Size in bytes of each receive DMA buffer.
pub const RX_BUF_SIZE: usize = 4096;
/// PCI device IDs of the e1000-family controllers this driver claims.
pub const E1000_DEVICE_IDS: &[u16] = &[0x100E, 0x100F, 0x10D3, 0x153A, 0x1539];
/// PCI vendor ID for Intel.
pub const INTEL_VENDOR: u16 = 0x8086;
15
/// Driver state for one Intel e1000-family NIC using legacy descriptors.
pub struct E1000Nic {
    // Base address of the memory-mapped BAR0 register window.
    mmio: u64,
    // Receive descriptor ring (legacy format).
    rx: RxRing<LegacyRxDesc>,
    // One fixed DMA buffer per RX descriptor, reused for every packet.
    rx_bufs: [DmaRegion; NUM_RX],
    // Transmit descriptor ring (legacy format).
    tx: TxRing<LegacyTxDesc>,
    // Per-slot TX buffer; freed lazily when the slot is reused by `transmit`.
    tx_bufs: [Option<DmaRegion>; NUM_TX],
    // Station MAC address read during `init`.
    mac: [u8; 6],
    // Cached link state; refreshed by `check_link`.
    link_up: bool,
}
26
// SAFETY: NOTE(review): the raw MMIO base and DMA pointers held here are only
// touched through `&self`/`&mut self` methods, so moving the NIC to another
// thread is sound provided the MMIO mapping and DMA regions stay valid on that
// thread and no aliasing handle to the same registers exists — confirm.
unsafe impl Send for E1000Nic {}
30
#[inline]
/// Volatile 32-bit MMIO register read at byte offset `reg` from `base`.
///
/// # Safety
/// `base + reg` must be a valid, mapped, 4-byte-aligned register address.
unsafe fn rd(base: u64, reg: usize) -> u32 {
    let addr = (base + reg as u64) as *const u32;
    addr.read_volatile()
}
#[inline]
/// Volatile 32-bit MMIO register write of `val` at byte offset `reg` from `base`.
///
/// # Safety
/// `base + reg` must be a valid, mapped, 4-byte-aligned register address.
unsafe fn wr(base: u64, reg: usize, val: u32) {
    let addr = (base + reg as u64) as *mut u32;
    addr.write_volatile(val);
}
46
impl E1000Nic {
    /// Bring up an e1000 NIC whose BAR0 registers are mapped at `mmio_base`.
    ///
    /// Resets the device, masks and clears interrupts, reads the MAC address,
    /// allocates and programs the legacy RX/TX descriptor rings, enables the
    /// transmitter and receiver, forces the link up (SLU) and unmasks the
    /// interrupt causes this driver services.
    ///
    /// Returns `NetError::NotReady` if any DMA allocation fails.
    ///
    /// NOTE(review): RCTL is programmed with `BSIZE_2048` while each RX buffer
    /// is `RX_BUF_SIZE` (4096) bytes — hardware will never write past 2 KiB of
    /// each buffer, so half of every buffer is wasted; confirm whether a
    /// 4096-byte BSIZE (BSEX) setting was intended.
    pub fn init(mmio_base: u64, alloc: &dyn DmaAllocator) -> Result<Self, NetError> {
        unsafe {
            // Software reset: set CTRL.RST, then wait a fixed spin count.
            // NOTE(review): this does not poll for RST to self-clear — confirm
            // the delay is sufficient on slower platforms.
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::RST);
            for _ in 0..10_000_000u64 {
                core::hint::spin_loop();
            }

            // Mask every interrupt cause, then read ICR to clear any that
            // were already pending (reading ICR acknowledges them).
            wr(mmio_base, regs::IMC, 0xFFFF_FFFF);
            let _ = rd(mmio_base, regs::ICR);

            let mac = Self::read_mac(mmio_base);

            // --- Receive ring: one contiguous region of NUM_RX descriptors.
            let rx_ring_region = alloc
                .alloc_dma(NUM_RX * core::mem::size_of::<LegacyRxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(rx_ring_region.virt, 0, rx_ring_region.size);
            let rx_descs = rx_ring_region.virt as *mut LegacyRxDesc;

            // One zeroed DMA buffer per RX descriptor.
            let mut rx_bufs = [DmaRegion::ZERO; NUM_RX];
            for rx_buf in rx_bufs.iter_mut().take(NUM_RX) {
                let buf = alloc
                    .alloc_dma(RX_BUF_SIZE)
                    .map_err(|_| NetError::NotReady)?;
                ptr::write_bytes(buf.virt, 0, RX_BUF_SIZE);
                *rx_buf = buf;
            }

            // Program the RX ring registers. Tail = NUM_RX - 1 hands all but
            // one descriptor to hardware (head == tail means "ring empty").
            wr(mmio_base, regs::RDBAL, rx_ring_region.phys as u32);
            wr(mmio_base, regs::RDBAH, (rx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::RDLEN, rx_ring_region.size as u32);
            wr(mmio_base, regs::RDH, 0);
            wr(mmio_base, regs::RDT, (NUM_RX - 1) as u32);

            // Point each descriptor at its buffer. The receiver is still
            // disabled here; RCTL.EN is only set further down.
            for (i, buf) in rx_bufs.iter().enumerate().take(NUM_RX) {
                (*rx_descs.add(i)).addr = buf.phys;
            }

            // --- Transmit ring: zeroed descriptors, empty ring (TDH == TDT).
            let tx_ring_region = alloc
                .alloc_dma(NUM_TX * core::mem::size_of::<LegacyTxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(tx_ring_region.virt, 0, tx_ring_region.size);
            let tx_descs = tx_ring_region.virt as *mut LegacyTxDesc;

            wr(mmio_base, regs::TDBAL, tx_ring_region.phys as u32);
            wr(mmio_base, regs::TDBAH, (tx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::TDLEN, tx_ring_region.size as u32);
            wr(mmio_base, regs::TDH, 0);
            wr(mmio_base, regs::TDT, 0);

            // Enable transmit: pad short packets (PSP), collision threshold
            // 0x10, collision distance 0x40.
            wr(
                mmio_base,
                regs::TCTL,
                tctl::EN | tctl::PSP | (0x10 << tctl::CT_SHIFT) | (0x40 << tctl::COLD_SHIFT),
            );

            // Enable receive: accept broadcast (BAM), strip Ethernet CRC
            // (SECRC), 2048-byte buffer size (see NOTE above).
            wr(
                mmio_base,
                regs::RCTL,
                rctl::EN | rctl::BAM | rctl::BSIZE_2048 | rctl::SECRC,
            );

            // Set-link-up, then unmask the interrupt causes we handle.
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::SLU);
            wr(
                mmio_base,
                regs::IMS,
                int_bits::RXT0 | int_bits::LSC | int_bits::RXDMT0 | int_bits::RXO | int_bits::TXDW,
            );
            // STATUS bit 1 = link up.
            let status = rd(mmio_base, regs::STATUS);
            let link_up = (status & 0x02) != 0;

            Ok(Self {
                mmio: mmio_base,
                rx: RxRing::new(rx_descs, NUM_RX),
                rx_bufs,
                tx: TxRing::new(tx_descs, NUM_TX),
                tx_bufs: [None; NUM_TX],
                mac,
                link_up,
            })
        }
    }

    /// The MAC address captured during `init` (from RAL0/RAH0 or the EEPROM).
    pub fn mac_address(&self) -> [u8; 6] {
        self.mac
    }

    /// Cached link state from the last `init`/`check_link`; no hardware access.
    pub fn link_up(&self) -> bool {
        self.link_up
    }

    /// Re-read STATUS, refresh the cached link state, and return it.
    pub fn check_link(&mut self) -> bool {
        unsafe {
            // STATUS bit 1 = link up.
            let status = rd(self.mmio, regs::STATUS);
            self.link_up = (status & 0x02) != 0;
        }
        self.link_up
    }

    /// Poll the RX ring for one received frame and copy it into `buf`.
    ///
    /// Returns the frame length, or `LinkDown` / `NoPacket` /
    /// `BufferTooSmall` (when `buf` cannot hold the frame — in that case the
    /// descriptor is NOT recycled and the frame will be returned again).
    pub fn receive(&mut self, buf: &mut [u8]) -> Result<usize, NetError> {
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        let (idx, pkt_len) = self.rx.poll().ok_or(NetError::NoPacket)?;
        let len = pkt_len as usize;
        if buf.len() < len {
            return Err(NetError::BufferTooSmall);
        }

        // Copy the payload out of the fixed per-descriptor DMA buffer.
        unsafe {
            ptr::copy_nonoverlapping(self.rx_bufs[idx].virt, buf.as_mut_ptr(), len);
        }

        // Recycle the descriptor: clear status, restore the buffer address,
        // then hand it back to hardware by advancing RDT.
        self.rx.desc_mut(idx).clear_status();
        self.rx
            .desc_mut(idx)
            .set_buffer_addr(self.rx_bufs[idx].phys);
        let new_tail = self.rx.advance();
        unsafe {
            wr(self.mmio, regs::RDT, new_tail as u32);
        }

        Ok(len)
    }

    /// Queue one frame for transmission in the next free TX slot.
    ///
    /// Allocates a fresh DMA buffer per frame (freeing the slot's previous
    /// buffer lazily) and bumps TDT to start transmission.
    ///
    /// NOTE(review): an oversize frame (> `net_core::MTU`) is reported as
    /// `BufferTooSmall`, which reads oddly for "frame too big" — confirm
    /// whether a dedicated error variant exists.
    pub fn transmit(&mut self, buf: &[u8], alloc: &dyn DmaAllocator) -> Result<(), NetError> {
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        if buf.len() > net_core::MTU {
            return Err(NetError::BufferTooSmall);
        }

        let idx = self.tx.tail();

        // Slot still owned by hardware (cmd set but DD not reported): full.
        if self.tx.desc(idx).cmd != 0 && !self.tx.is_done(idx) {
            return Err(NetError::TxQueueFull);
        }

        // Lazily free the buffer from the previous use of this slot.
        if let Some(old) = self.tx_bufs[idx].take() {
            unsafe {
                alloc.free_dma(old);
            }
        }

        let region = alloc.alloc_dma(buf.len()).map_err(|_| NetError::NotReady)?;
        unsafe {
            ptr::copy_nonoverlapping(buf.as_ptr(), region.virt, buf.len());
        }

        // Record the buffer, fill the descriptor, then ring the doorbell
        // (writing TDT tells hardware a new descriptor is ready).
        self.tx_bufs[idx] = Some(region);
        let _submitted = self.tx.submit(region.phys, buf.len() as u16);
        unsafe {
            wr(self.mmio, regs::TDT, self.tx.tail() as u32);
        }

        Ok(())
    }

    /// Whether the descriptor at the current tail reports done.
    ///
    /// NOTE(review): after `transmit`, `tx.tail()` points at the *next* free
    /// slot, not the descriptor just submitted — confirm `TxRing::is_done`
    /// on the tail reflects the last submission rather than a stale slot.
    pub fn is_transmit_complete(&self) -> bool {
        let idx = self.tx.tail();
        self.tx.is_done(idx)
    }

    /// Busy-wait until `is_transmit_complete` reports true (no timeout).
    pub fn wait_for_transmit(&self) {
        while !self.is_transmit_complete() {
            core::hint::spin_loop();
        }
    }

    /// Service an interrupt. Reading ICR acknowledges and clears every
    /// pending cause bit, so all causes must be inspected from this one read.
    pub fn handle_interrupt(&self) {
        let icr = unsafe { rd(self.mmio, regs::ICR) };

        if (icr & int_bits::LSC) != 0 {
            // Link status change: STATUS is read but the value is discarded;
            // the cached link state is refreshed lazily by `check_link`.
            let _status = unsafe { rd(self.mmio, regs::STATUS) };
        }

        if (icr & (int_bits::RXT0 | int_bits::RXDMT0 | int_bits::RXO)) != 0 {
            // RX causes: intentionally empty — frames are drained by polling
            // `receive`; the ICR read above already acked the interrupt.
        }

        if (icr & int_bits::TXDW) != 0 {
            // TX descriptor write-back: nothing to do here; completed buffers
            // are reclaimed lazily inside `transmit`.
        }
    }

    /// Read the station MAC: prefer the receive-address registers RAL0/RAH0
    /// (typically pre-programmed by firmware); fall back to EEPROM words 0-2.
    unsafe fn read_mac(base: u64) -> [u8; 6] {
        let ral = rd(base, regs::RAL0);
        let rah = rd(base, regs::RAH0);

        if ral != 0 || rah != 0 {
            // RAL holds MAC bytes 0-3; RAH bits 15:0 hold bytes 4-5.
            return [
                (ral) as u8,
                (ral >> 8) as u8,
                (ral >> 16) as u8,
                (ral >> 24) as u8,
                (rah) as u8,
                (rah >> 8) as u8,
            ];
        }

        // Registers blank: each EEPROM word supplies two MAC bytes,
        // low byte first.
        let mut mac = [0u8; 6];
        for i in 0u32..3 {
            let w = Self::eeprom_read(base, i as u8);
            mac[(i * 2) as usize] = w as u8;
            mac[(i * 2 + 1) as usize] = (w >> 8) as u8;
        }
        mac
    }

    /// Read one 16-bit word at `addr` from the EEPROM via the EERD register,
    /// busy-waiting for the DONE bit.
    /// NOTE(review): no timeout — an unresponsive EEPROM hangs init forever.
    unsafe fn eeprom_read(base: u64, addr: u8) -> u16 {
        wr(
            base,
            regs::EERD,
            eerd::START | ((addr as u32) << eerd::ADDR_SHIFT),
        );
        loop {
            let v = rd(base, regs::EERD);
            if v & eerd::DONE != 0 {
                return ((v >> eerd::DATA_SHIFT) & 0xFFFF) as u16;
            }
            core::hint::spin_loop();
        }
    }
}