#![no_std]
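//! Minimal polled driver for Intel e1000-family (8254x) NICs using the
//! legacy descriptor format.
//!
//! A sketch of the expected call sequence, assuming the caller has already
//! mapped the device's MMIO BAR and has a platform `DmaAllocator`
//! (`mmio_base`, `alloc`, and `frame` below are placeholders for those
//! platform pieces, not part of this crate):
//!
//! ```ignore
//! let mut nic = E1000Nic::init(mmio_base, &alloc)?;
//! nic.transmit(&frame, &alloc)?;
//! nic.wait_for_transmit();
//!
//! let mut buf = [0u8; RX_BUF_SIZE];
//! match nic.receive(&mut buf) {
//!     Ok(len) => { /* frame bytes are in buf[..len] */ }
//!     Err(NetError::NoPacket) => { /* nothing pending; poll again later */ }
//!     Err(e) => { /* link down, undersized buffer, ... */ }
//! }
//! ```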

use core::ptr;
use intel_ethernet::{ctrl, eerd, int_bits, rctl, regs, tctl, LegacyRxDesc, LegacyTxDesc};
use net_core::NetError;
use nic_buffers::{DmaAllocator, DmaRegion};
use nic_queues::{RxDescriptor, RxRing, TxRing};

pub const NUM_RX: usize = 128;
pub const NUM_TX: usize = 128;
pub const RX_BUF_SIZE: usize = 4096;
/// Upper bound on CTRL polls while waiting for the self-clearing reset bit.
const RESET_MAX_POLLS: u32 = 200_000;
/// Upper bound on EERD polls while waiting for an EEPROM read to complete.
const EEPROM_MAX_POLLS: u32 = 50_000;

pub const E1000_DEVICE_IDS: &[u16] = &[0x100E, 0x100F, 0x10D3, 0x153A, 0x1539];
pub const INTEL_VENDOR: u16 = 0x8086;

pub struct E1000Nic {
    mmio: u64,
    rx: RxRing<LegacyRxDesc>,
    rx_bufs: [DmaRegion; NUM_RX],
    tx: TxRing<LegacyTxDesc>,
    tx_bufs: [Option<DmaRegion>; NUM_TX],
    mac: [u8; 6],
    link_up: bool,
}

// SAFETY: E1000Nic owns its MMIO base and DMA regions exclusively, so moving
// it to another thread does not create aliased device access.
unsafe impl Send for E1000Nic {}

/// Volatile 32-bit read of the device register at `base + reg`.
#[inline]
unsafe fn rd(base: u64, reg: usize) -> u32 {
    ptr::read_volatile((base + reg as u64) as *const u32)
}

/// Volatile 32-bit write to the device register at `base + reg`.
#[inline]
unsafe fn wr(base: u64, reg: usize, val: u32) {
    ptr::write_volatile((base + reg as u64) as *mut u32, val)
}

impl E1000Nic {
    /// Resets the device, reads the MAC address, sets up the RX/TX
    /// descriptor rings, and enables the receiver and transmitter.
    pub fn init(mmio_base: u64, alloc: &dyn DmaAllocator) -> Result<Self, NetError> {
        unsafe {
            // Issue a device reset; CTRL.RST self-clears once the hardware
            // has finished resetting.
            let c = rd(mmio_base, regs::CTRL);
            log::trace!("e1000: assert CTRL.RST (ctrl={:#x})", c);
            wr(mmio_base, regs::CTRL, c | ctrl::RST);
            let mut reset_done = false;
            for poll in 0..RESET_MAX_POLLS {
                let ctrl = rd(mmio_base, regs::CTRL);
                if ctrl & ctrl::RST == 0 {
                    log::trace!(
                        "e1000: reset complete after {} polls (ctrl={:#x})",
                        poll + 1,
                        ctrl
                    );
                    reset_done = true;
                    break;
                }
                core::hint::spin_loop();
            }
            if !reset_done {
                log::warn!(
                    "e1000: reset timeout after {} CTRL polls (RST never cleared)",
                    RESET_MAX_POLLS
                );
                return Err(NetError::NotReady);
            }

            // Mask all interrupts, then read ICR to clear anything pending.
            wr(mmio_base, regs::IMC, 0xFFFF_FFFF);
            let _ = rd(mmio_base, regs::ICR);

            log::trace!("e1000: read MAC");
            let mac = Self::read_mac(mmio_base)?;
            log::trace!(
                "e1000: MAC {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
                mac[0],
                mac[1],
                mac[2],
                mac[3],
                mac[4],
                mac[5]
            );

            // Allocate and zero the receive descriptor ring, plus one DMA
            // buffer per descriptor.
            let rx_ring_region = alloc
                .alloc_dma(NUM_RX * core::mem::size_of::<LegacyRxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(rx_ring_region.virt, 0, rx_ring_region.size);
            let rx_descs = rx_ring_region.virt as *mut LegacyRxDesc;

            let mut rx_bufs = [DmaRegion::ZERO; NUM_RX];
            for rx_buf in rx_bufs.iter_mut() {
                let buf = alloc
                    .alloc_dma(RX_BUF_SIZE)
                    .map_err(|_| NetError::NotReady)?;
                ptr::write_bytes(buf.virt, 0, RX_BUF_SIZE);
                *rx_buf = buf;
            }

            // Point every descriptor at its buffer before the ring is handed
            // to the hardware (the receiver is still disabled at this point).
            for (i, buf) in rx_bufs.iter().enumerate() {
                (*rx_descs.add(i)).addr = buf.phys;
            }

            // Program the RX ring; RDT = NUM_RX - 1 hands the hardware all
            // but one descriptor.
            wr(mmio_base, regs::RDBAL, rx_ring_region.phys as u32);
            wr(mmio_base, regs::RDBAH, (rx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::RDLEN, rx_ring_region.size as u32);
            wr(mmio_base, regs::RDH, 0);
            wr(mmio_base, regs::RDT, (NUM_RX - 1) as u32);

            // Allocate and zero the transmit descriptor ring, then program
            // the TX ring registers (empty ring: head == tail == 0).
            let tx_ring_region = alloc
                .alloc_dma(NUM_TX * core::mem::size_of::<LegacyTxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(tx_ring_region.virt, 0, tx_ring_region.size);
            let tx_descs = tx_ring_region.virt as *mut LegacyTxDesc;

            wr(mmio_base, regs::TDBAL, tx_ring_region.phys as u32);
            wr(mmio_base, regs::TDBAH, (tx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::TDLEN, tx_ring_region.size as u32);
            wr(mmio_base, regs::TDH, 0);
            wr(mmio_base, regs::TDT, 0);

            // Enable the transmitter: pad short packets, collision
            // threshold 0x10, collision distance 0x40.
            wr(
                mmio_base,
                regs::TCTL,
                tctl::EN | tctl::PSP | (0x10 << tctl::CT_SHIFT) | (0x40 << tctl::COLD_SHIFT),
            );

            // Enable the receiver: accept broadcast, 2 KiB hardware buffer
            // size (RX_BUF_SIZE over-allocates each buffer), strip the CRC.
            wr(
                mmio_base,
                regs::RCTL,
                rctl::EN | rctl::BAM | rctl::BSIZE_2048 | rctl::SECRC,
            );

            // Set-link-up, then unmask the interrupt causes we handle.
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::SLU);
            wr(
                mmio_base,
                regs::IMS,
                int_bits::RXT0 | int_bits::LSC | int_bits::RXDMT0 | int_bits::RXO | int_bits::TXDW,
            );
            // STATUS bit 1 (LU) reports link-up.
            let status = rd(mmio_base, regs::STATUS);
            let link_up = (status & 0x02) != 0;

            Ok(Self {
                mmio: mmio_base,
                rx: RxRing::new(rx_descs, NUM_RX),
                rx_bufs,
                tx: TxRing::new(tx_descs, NUM_TX),
                tx_bufs: [None; NUM_TX],
                mac,
                link_up,
            })
        }
    }

    pub fn mac_address(&self) -> [u8; 6] {
        self.mac
    }

    /// Returns the link state cached by the most recent `check_link`.
    pub fn link_up(&self) -> bool {
        self.link_up
    }

    /// Re-reads STATUS.LU and refreshes the cached link state.
    pub fn check_link(&mut self) -> bool {
        unsafe {
            let status = rd(self.mmio, regs::STATUS);
            self.link_up = (status & 0x02) != 0;
        }
        self.link_up
    }

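    /// Polls the RX ring for one completed frame, copies it into `buf`, and
    /// returns its length; the descriptor is then recycled and RDT advanced
    /// so the hardware can reuse the slot.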
    pub fn receive(&mut self, buf: &mut [u8]) -> Result<usize, NetError> {
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        let (idx, pkt_len) = self.rx.poll().ok_or(NetError::NoPacket)?;
        let len = pkt_len as usize;
        if buf.len() < len {
            return Err(NetError::BufferTooSmall);
        }

        unsafe {
            ptr::copy_nonoverlapping(self.rx_bufs[idx].virt, buf.as_mut_ptr(), len);
        }

        // Recycle the descriptor and hand it back to the hardware.
        self.rx.desc_mut(idx).clear_status();
        self.rx
            .desc_mut(idx)
            .set_buffer_addr(self.rx_bufs[idx].phys);
        let new_tail = self.rx.advance();
        unsafe {
            wr(self.mmio, regs::RDT, new_tail as u32);
        }

        Ok(len)
    }

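    /// Copies `buf` into a freshly allocated DMA region, queues it on the TX
    /// ring, and bumps TDT to start transmission. The region stays parked in
    /// `tx_bufs` until its ring slot is reused, so the device never reads
    /// freed memory.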
    pub fn transmit(&mut self, buf: &[u8], alloc: &dyn DmaAllocator) -> Result<(), NetError> {
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        if buf.len() > net_core::MTU {
            return Err(NetError::BufferTooSmall);
        }

        let idx = self.tx.tail();

        // A non-zero cmd means this slot has been used before; if the
        // hardware has not written it back yet, the ring is full.
        if self.tx.desc(idx).cmd != 0 && !self.tx.is_done(idx) {
            return Err(NetError::TxQueueFull);
        }

        // Any buffer still parked in this slot is finished; release it.
        if let Some(old) = self.tx_bufs[idx].take() {
            unsafe {
                alloc.free_dma(old);
            }
        }

        let region = alloc.alloc_dma(buf.len()).map_err(|_| NetError::NotReady)?;
        unsafe {
            ptr::copy_nonoverlapping(buf.as_ptr(), region.virt, buf.len());
        }

        self.tx_bufs[idx] = Some(region);
        let _submitted = self.tx.submit(region.phys, buf.len() as u16);
        unsafe {
            wr(self.mmio, regs::TDT, self.tx.tail() as u32);
        }

        Ok(())
    }

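    /// Reports whether the most recently submitted TX descriptor has been
    /// written back by the hardware.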
    pub fn is_transmit_complete(&self) -> bool {
        // `submit` advances the tail, so the descriptor queued last sits one
        // slot behind it; checking the tail itself would test a slot that
        // has not been submitted yet.
        let idx = (self.tx.tail() + NUM_TX - 1) % NUM_TX;
        self.tx.is_done(idx)
    }

    pub fn wait_for_transmit(&self) {
        while !self.is_transmit_complete() {
            core::hint::spin_loop();
        }
    }

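    /// Interrupt service routine: reading ICR acknowledges and clears every
    /// pending cause in one shot; the actual RX/TX work happens on the
    /// polling paths.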
    pub fn handle_interrupt(&self) {
        let icr = unsafe { rd(self.mmio, regs::ICR) };

        if (icr & int_bits::LSC) != 0 {
            // Link status change; the cached state is refreshed by the next
            // check_link(), reading STATUS here just observes the new state.
            let _status = unsafe { rd(self.mmio, regs::STATUS) };
        }

        if (icr & (int_bits::RXT0 | int_bits::RXDMT0 | int_bits::RXO)) != 0 {
            // Receive events are drained by receive() on the polling path.
        }

        if (icr & int_bits::TXDW) != 0 {
            // TX write-back is observed via is_transmit_complete().
        }
    }

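    /// Reads the station MAC from RAL0/RAH0 if firmware already programmed
    /// it, otherwise falls back to EEPROM words 0..=2 (little-endian).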
    unsafe fn read_mac(base: u64) -> Result<[u8; 6], NetError> {
        let ral = rd(base, regs::RAL0);
        let rah = rd(base, regs::RAH0);
        log::trace!("e1000: RAL0={:#x} RAH0={:#x}", ral, rah);

        if ral != 0 || rah != 0 {
            return Ok([
                ral as u8,
                (ral >> 8) as u8,
                (ral >> 16) as u8,
                (ral >> 24) as u8,
                rah as u8,
                (rah >> 8) as u8,
            ]);
        }

        log::trace!("e1000: RAL/RAH empty; reading MAC words from EEPROM");
        let mut mac = [0u8; 6];
        for i in 0u32..3 {
            let w = Self::eeprom_read(base, i as u8)?;
            mac[(i * 2) as usize] = w as u8;
            mac[(i * 2 + 1) as usize] = (w >> 8) as u8;
        }
        // All-zero or all-ones means the EEPROM content is not a valid MAC.
        if mac == [0; 6] || mac == [0xFF; 6] {
            return Err(NetError::NotReady);
        }
        Ok(mac)
    }

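    /// Reads one 16-bit word from the EEPROM through the EERD register:
    /// write START plus the word address, then poll until DONE is set.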
    unsafe fn eeprom_read(base: u64, addr: u8) -> Result<u16, NetError> {
        log::trace!("e1000: EEPROM read addr={}", addr);
        wr(
            base,
            regs::EERD,
            eerd::START | ((addr as u32) << eerd::ADDR_SHIFT),
        );
        for poll in 0..EEPROM_MAX_POLLS {
            let v = rd(base, regs::EERD);
            if v & eerd::DONE != 0 {
                let data = ((v >> eerd::DATA_SHIFT) & 0xFFFF) as u16;
                log::trace!(
                    "e1000: EEPROM addr={} ok after {} polls data={:#x}",
                    addr,
                    poll + 1,
                    data
                );
                return Ok(data);
            }
            core::hint::spin_loop();
        }
        log::warn!(
            "e1000: EEPROM addr={} timeout after {} EERD polls",
            addr,
            EEPROM_MAX_POLLS
        );
        Err(NetError::NotReady)
    }
}