e1000/lib.rs

#![no_std]

use core::ptr;
use intel_ethernet::{ctrl, eerd, int_bits, rctl, regs, tctl, LegacyRxDesc, LegacyTxDesc};
use net_core::NetError;
use nic_buffers::{DmaAllocator, DmaRegion};
use nic_queues::{RxDescriptor, RxRing, TxRing};

pub const NUM_RX: usize = 128; // Optimized for higher throughput
pub const NUM_TX: usize = 128; // Optimized for higher throughput
pub const RX_BUF_SIZE: usize = 4096; // Optimized buffer size (MTU + overhead)

/// Poll `CTRL.RST` until hardware clears it (reset complete). Bounded to avoid hangs.
const RESET_MAX_POLLS: u32 = 200_000;
/// Max polls per EEPROM read (`EERD.DONE`). Much smaller than a blind 1M spin.
const EEPROM_MAX_POLLS: u32 = 50_000;

pub const E1000_DEVICE_IDS: &[u16] = &[0x100E, 0x100F, 0x10D3, 0x153A, 0x1539];
pub const INTEL_VENDOR: u16 = 0x8086;
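// A hedged probe sketch showing how these constants might be matched against a
// discovered PCI function (the `PciDeviceInfo` type, with `vendor_id` and
// `device_id` fields, is hypothetical; adapt it to your PCI enumeration code):
//
//     fn is_e1000(dev: &PciDeviceInfo) -> bool {
//         dev.vendor_id == INTEL_VENDOR && E1000_DEVICE_IDS.contains(&dev.device_id)
//     }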

/// E1000 NIC structure with DMA-safe rings
pub struct E1000Nic {
    mmio: u64,                            // virtual base of the mapped BAR0 registers
    rx: RxRing<LegacyRxDesc>,             // receive descriptor ring
    rx_bufs: [DmaRegion; NUM_RX],         // one DMA buffer per RX descriptor
    tx: TxRing<LegacyTxDesc>,             // transmit descriptor ring
    tx_bufs: [Option<DmaRegion>; NUM_TX], // in-flight TX buffers, freed on slot reuse
    mac: [u8; 6],                         // MAC address read at init
    link_up: bool,                        // cached link status (refreshed by check_link)
}

// SAFETY: E1000Nic owns its MMIO region and DMA buffers. It is safe to send
// across threads as long as only one thread accesses the hardware at a time.
unsafe impl Send for E1000Nic {}

// MMIO helpers
/// # Safety
///
/// The caller must ensure that `base + reg` is a valid mapped MMIO region.
#[inline]
unsafe fn rd(base: u64, reg: usize) -> u32 {
    ptr::read_volatile((base + reg as u64) as *const u32)
}
/// # Safety
///
/// The caller must ensure that `base + reg` is a valid mapped MMIO region.
#[inline]
unsafe fn wr(base: u64, reg: usize, val: u32) {
    ptr::write_volatile((base + reg as u64) as *mut u32, val)
}

impl E1000Nic {
    /// Initialise the E1000 hardware.
    ///
    /// `mmio_base` is the virtual address of the mapped BAR0 region.
    /// The caller must ensure the full MMIO region (>=128 KiB) is mapped and stays valid.
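    ///
    /// A minimal usage sketch (the `BootDmaAllocator` type is hypothetical; any
    /// `DmaAllocator` implementation works):
    ///
    /// ```ignore
    /// let alloc = BootDmaAllocator::new();
    /// let mut nic = E1000Nic::init(bar0_virt, &alloc)?;
    /// log::info!("e1000 MAC {:02x?}, link {}", nic.mac_address(), nic.link_up());
    /// ```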
    pub fn init(mmio_base: u64, alloc: &dyn DmaAllocator) -> Result<Self, NetError> {
        // SAFETY: We have exclusive access to the MMIO region during initialization.
        // All DMA allocations are fresh and properly zeroed.
        unsafe {
            // Reset: set CTRL.RST; hardware clears RST when reset completes (SDM).
            let c = rd(mmio_base, regs::CTRL);
            log::trace!("e1000: assert CTRL.RST (ctrl={:#x})", c);
            wr(mmio_base, regs::CTRL, c | ctrl::RST);
            let mut reset_done = false;
            for poll in 0..RESET_MAX_POLLS {
                let ctrl = rd(mmio_base, regs::CTRL);
                if ctrl & ctrl::RST == 0 {
                    log::trace!(
                        "e1000: reset complete after {} polls (ctrl={:#x})",
                        poll + 1,
                        ctrl
                    );
                    reset_done = true;
                    break;
                }
                core::hint::spin_loop();
            }
            if !reset_done {
                log::warn!(
                    "e1000: reset timeout after {} CTRL polls (RST never cleared)",
                    RESET_MAX_POLLS
                );
                return Err(NetError::NotReady);
            }

            // Disable interrupts during setup (mask all, then clear pending causes)
            wr(mmio_base, regs::IMC, 0xFFFF_FFFF);
            let _ = rd(mmio_base, regs::ICR);

            log::trace!("e1000: read MAC");
            let mac = Self::read_mac(mmio_base)?;
            log::trace!(
                "e1000: MAC {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
                mac[0],
                mac[1],
                mac[2],
                mac[3],
                mac[4],
                mac[5]
            );

            // RX ring
            let rx_ring_region = alloc
                .alloc_dma(NUM_RX * core::mem::size_of::<LegacyRxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(rx_ring_region.virt, 0, rx_ring_region.size);
            let rx_descs = rx_ring_region.virt as *mut LegacyRxDesc;

            let mut rx_bufs = [DmaRegion::ZERO; NUM_RX];
            for rx_buf in rx_bufs.iter_mut().take(NUM_RX) {
                let buf = alloc
                    .alloc_dma(RX_BUF_SIZE)
                    .map_err(|_| NetError::NotReady)?;
                ptr::write_bytes(buf.virt, 0, RX_BUF_SIZE);
                *rx_buf = buf;
            }

            wr(mmio_base, regs::RDBAL, rx_ring_region.phys as u32);
            wr(mmio_base, regs::RDBAH, (rx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::RDLEN, rx_ring_region.size as u32);
            wr(mmio_base, regs::RDH, 0);
            wr(mmio_base, regs::RDT, (NUM_RX - 1) as u32);
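            // Tail is left one slot behind head (mod NUM_RX): hardware owns the
            // descriptors between RDH and RDT, and keeping one slot unused avoids
            // the RDH == RDT full-vs-empty ambiguity. Receive DMA does not start
            // until RCTL.EN is set below, so programming the buffer addresses
            // afterwards is still safe.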

            // Set buffer addresses in descriptors
            for (i, buf) in rx_bufs.iter().enumerate().take(NUM_RX) {
                (*rx_descs.add(i)).addr = buf.phys;
            }

            // TX ring
            let tx_ring_region = alloc
                .alloc_dma(NUM_TX * core::mem::size_of::<LegacyTxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(tx_ring_region.virt, 0, tx_ring_region.size);
            let tx_descs = tx_ring_region.virt as *mut LegacyTxDesc;

            wr(mmio_base, regs::TDBAL, tx_ring_region.phys as u32);
            wr(mmio_base, regs::TDBAH, (tx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::TDLEN, tx_ring_region.size as u32);
            wr(mmio_base, regs::TDH, 0);
            wr(mmio_base, regs::TDT, 0);

            // Enable TX: pad short packets, with the typical full-duplex collision
            // threshold (CT = 0x10) and collision distance (COLD = 0x40)
            wr(
                mmio_base,
                regs::TCTL,
                tctl::EN | tctl::PSP | (0x10 << tctl::CT_SHIFT) | (0x40 << tctl::COLD_SHIFT),
            );

            // Enable RX: accept broadcast, strip the Ethernet CRC, and use
            // 2048-byte buffers (the 4096-byte RX_BUF_SIZE allocations leave headroom)
            wr(
                mmio_base,
                regs::RCTL,
                rctl::EN | rctl::BAM | rctl::BSIZE_2048 | rctl::SECRC,
            );

            // Force link up (SLU) and unmask the interrupts we service
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::SLU);
            wr(
                mmio_base,
                regs::IMS,
                int_bits::RXT0 | int_bits::LSC | int_bits::RXDMT0 | int_bits::RXO | int_bits::TXDW,
            );
            let status = rd(mmio_base, regs::STATUS);
            let link_up = (status & 0x02) != 0; // STATUS.LU (bit 1)

            Ok(Self {
                mmio: mmio_base,
                rx: RxRing::new(rx_descs, NUM_RX),
                rx_bufs,
                tx: TxRing::new(tx_descs, NUM_TX),
                tx_bufs: [None; NUM_TX],
                mac,
                link_up,
            })
        }
    }

    /// Returns the MAC address read during initialisation.
    pub fn mac_address(&self) -> [u8; 6] {
        self.mac
    }

    /// Returns the cached link status (see `check_link` to refresh it).
    pub fn link_up(&self) -> bool {
        self.link_up
    }

    /// Check and update link status
    pub fn check_link(&mut self) -> bool {
        // SAFETY: MMIO read is safe as long as the device is mapped.
        unsafe {
            let status = rd(self.mmio, regs::STATUS);
            self.link_up = (status & 0x02) != 0; // STATUS.LU (bit 1)
        }
        self.link_up
    }

    /// Receive one packet into `buf`, returning its length in bytes.
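    ///
    /// A minimal polling sketch (`process_frame` is a hypothetical caller-side
    /// handler):
    ///
    /// ```ignore
    /// let mut frame = [0u8; 2048];
    /// match nic.receive(&mut frame) {
    ///     Ok(len) => process_frame(&frame[..len]),
    ///     Err(NetError::NoPacket) => {} // nothing pending; poll again later
    ///     Err(e) => log::warn!("e1000 rx error: {:?}", e),
    /// }
    /// ```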
    pub fn receive(&mut self, buf: &mut [u8]) -> Result<usize, NetError> {
        // Check link status first
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        let (idx, pkt_len) = self.rx.poll().ok_or(NetError::NoPacket)?;
        let len = pkt_len as usize;
        if buf.len() < len {
            return Err(NetError::BufferTooSmall);
        }

        // SAFETY: The DMA buffer is valid and we have exclusive access during receive.
        unsafe {
            ptr::copy_nonoverlapping(self.rx_bufs[idx].virt, buf.as_mut_ptr(), len);
        }

        // Recycle the RX buffer and hand the descriptor back to hardware
        self.rx.desc_mut(idx).clear_status();
        self.rx
            .desc_mut(idx)
            .set_buffer_addr(self.rx_bufs[idx].phys);
        let new_tail = self.rx.advance();
        // SAFETY: Writing to RDT is safe as the device is initialized.
        unsafe {
            wr(self.mmio, regs::RDT, new_tail as u32);
        }

        Ok(len)
    }

    /// Transmit one packet, copying `buf` into a freshly allocated DMA buffer.
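    ///
    /// A minimal sketch (same hypothetical allocator as in `init`):
    ///
    /// ```ignore
    /// nic.transmit(&frame[..len], &alloc)?;
    /// nic.wait_for_transmit(); // optional: block until the descriptor is written back
    /// ```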
    pub fn transmit(&mut self, buf: &[u8], alloc: &dyn DmaAllocator) -> Result<(), NetError> {
        // Check link status first
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        if buf.len() > net_core::MTU {
            // Frame exceeds the MTU; report it as a buffer-size error.
            return Err(NetError::BufferTooSmall);
        }

        let idx = self.tx.tail();

        // Check if the previous TX at this slot is complete (non-blocking)
        if self.tx.desc(idx).cmd != 0 && !self.tx.is_done(idx) {
            return Err(NetError::TxQueueFull);
        }

        // Free the previous buffer if present
        if let Some(old) = self.tx_bufs[idx].take() {
            // SAFETY: We own this DMA region and are freeing it after use.
            unsafe {
                alloc.free_dma(old);
            }
        }

        // Allocate a new DMA buffer and copy the frame into it
        let region = alloc.alloc_dma(buf.len()).map_err(|_| NetError::NotReady)?;
        // SAFETY: The DMA region is valid and we have exclusive access.
        unsafe {
            ptr::copy_nonoverlapping(buf.as_ptr(), region.virt, buf.len());
        }

        self.tx_bufs[idx] = Some(region);
        let _submitted = self.tx.submit(region.phys, buf.len() as u16);
        // SAFETY: Writing to TDT is safe as the device is initialized.
        unsafe {
            wr(self.mmio, regs::TDT, self.tx.tail() as u32);
        }

        // Non-blocking: return immediately; the caller can poll is_transmit_complete().
        // For small packets we could optionally wait (kept disabled for performance):
        /*
        while !self.tx.is_done(_submitted) {
            core::hint::spin_loop();
        }
        */

        Ok(())
    }

    /// Check if the last transmission is complete (non-blocking)
    pub fn is_transmit_complete(&self) -> bool {
        let idx = self.tx.tail();
        self.tx.is_done(idx)
    }

    /// Wait for the transmission to complete (blocking)
    pub fn wait_for_transmit(&self) {
        while !self.is_transmit_complete() {
            core::hint::spin_loop();
        }
    }

    /// Handles a NIC interrupt by reading and dispatching the pending causes.
    pub fn handle_interrupt(&self) {
        // Reading ICR returns the pending causes and clears them (read-to-clear).
        // SAFETY: MMIO access is safe during interrupt handling.
        let icr = unsafe { rd(self.mmio, regs::ICR) };

        // Handle specific interrupt causes
        if (icr & int_bits::LSC) != 0 {
            // Link Status Change: read STATUS to acknowledge; the cached
            // link state is refreshed on the next check_link() call.
            // SAFETY: MMIO read is safe.
            let _status = unsafe { rd(self.mmio, regs::STATUS) };
        }

        if (icr & (int_bits::RXT0 | int_bits::RXDMT0 | int_bits::RXO)) != 0 {
            // RX interrupts: a packet arrived and will be picked up by receive()
        }

        if (icr & int_bits::TXDW) != 0 {
            // TX descriptor written back: the slot's buffer is freed on reuse
        }
    }
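
    // A hedged IRQ wiring sketch (the spinlock-protected `NIC` global and the
    // IRQ registration mechanism are hypothetical; adapt to your kernel):
    //
    //     fn e1000_irq_handler() {
    //         if let Some(nic) = NIC.lock().as_ref() {
    //             nic.handle_interrupt(); // reading ICR also de-asserts INTx
    //         }
    //     }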

    /// # Safety
    ///
    /// The caller must ensure that `base` is a valid mapped MMIO region.
    unsafe fn read_mac(base: u64) -> Result<[u8; 6], NetError> {
        // Try the RAL/RAH receive-address registers first
        let ral = rd(base, regs::RAL0);
        let rah = rd(base, regs::RAH0);
        log::trace!("e1000: RAL0={:#x} RAH0={:#x}", ral, rah);

        // Check if the MAC address is valid (not all zeros). RAL holds MAC
        // bytes 0-3 and the low 16 bits of RAH hold bytes 4-5, little-endian.
        if ral != 0 || rah != 0 {
            return Ok([
                (ral) as u8,
                (ral >> 8) as u8,
                (ral >> 16) as u8,
                (ral >> 24) as u8,
                (rah) as u8,
                (rah >> 8) as u8,
            ]);
        }

        // Fall back to reading the EEPROM
        log::trace!("e1000: RAL/RAH empty; reading MAC words from EEPROM");
        let mut mac = [0u8; 6];
        for i in 0u32..3 {
            let w = Self::eeprom_read(base, i as u8)?;
            mac[(i * 2) as usize] = w as u8;
            mac[(i * 2 + 1) as usize] = (w >> 8) as u8;
        }
        if mac == [0; 6] || mac == [0xFF; 6] {
            return Err(NetError::NotReady);
        }
        Ok(mac)
    }

    /// # Safety
    ///
    /// The caller must ensure that `base` is a valid mapped MMIO region.
    unsafe fn eeprom_read(base: u64, addr: u8) -> Result<u16, NetError> {
        log::trace!("e1000: EEPROM read addr={}", addr);
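        // EERD handshake: write START together with the word address, then poll
        // until hardware sets DONE and latches the 16-bit word in the data field.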
        wr(
            base,
            regs::EERD,
            eerd::START | ((addr as u32) << eerd::ADDR_SHIFT),
        );
        for poll in 0..EEPROM_MAX_POLLS {
            let v = rd(base, regs::EERD);
            if v & eerd::DONE != 0 {
                let data = ((v >> eerd::DATA_SHIFT) & 0xFFFF) as u16;
                log::trace!(
                    "e1000: EEPROM addr={} ok after {} polls data={:#x}",
                    addr,
                    poll + 1,
                    data
                );
                return Ok(data);
            }
            core::hint::spin_loop();
        }
        log::warn!(
            "e1000: EEPROM addr={} timeout after {} EERD polls",
            addr,
            EEPROM_MAX_POLLS
        );
        Err(NetError::NotReady)
    }
}