//! e1000/lib.rs — Intel e1000-family network interface controller driver.
1#![no_std]
2
3use core::ptr;
4use intel_ethernet::{ctrl, eerd, int_bits, rctl, regs, tctl, LegacyRxDesc, LegacyTxDesc};
5use net_core::NetError;
6use nic_buffers::{DmaAllocator, DmaRegion};
7use nic_queues::{RxDescriptor, RxRing, TxRing};
8
/// Number of descriptors in the receive ring.
pub const NUM_RX: usize = 128; // sized up for higher throughput
/// Number of descriptors in the transmit ring.
pub const NUM_TX: usize = 128; // sized up for higher throughput
/// Size in bytes of each RX DMA buffer (MTU + overhead).
/// NOTE(review): `init` programs RCTL with `BSIZE_2048`, so the hardware only
/// ever writes the first 2048 bytes of each buffer — confirm whether the
/// extra headroom is intentional.
pub const RX_BUF_SIZE: usize = 4096;

/// PCI device IDs of the e1000-family controllers this driver recognises.
pub const E1000_DEVICE_IDS: &[u16] = &[0x100E, 0x100F, 0x10D3, 0x153A, 0x1539];
/// Intel's PCI vendor ID.
pub const INTEL_VENDOR: u16 = 0x8086;
15
/// E1000 NIC structure with DMA-safe rings.
///
/// Owns the descriptor rings, their DMA backing buffers, and the device
/// state (MAC address, link status) cached at initialisation.
pub struct E1000Nic {
    /// Virtual base address of the mapped BAR0 MMIO region.
    mmio: u64,
    /// Receive descriptor ring (legacy-format descriptors).
    rx: RxRing<LegacyRxDesc>,
    /// One pre-allocated `RX_BUF_SIZE` DMA buffer per RX descriptor.
    rx_bufs: [DmaRegion; NUM_RX],
    /// Transmit descriptor ring (legacy-format descriptors).
    tx: TxRing<LegacyTxDesc>,
    /// Per-slot TX DMA buffers; allocated in `transmit` and freed lazily
    /// when the slot is reused.
    tx_bufs: [Option<DmaRegion>; NUM_TX],
    /// MAC address read from RAL0/RAH0 (or the EEPROM) during `init`.
    mac: [u8; 6],
    /// Cached link state; refreshed by `check_link`.
    link_up: bool,
}
26
// SAFETY: E1000Nic exclusively owns its MMIO region and DMA buffers, so it
// may be moved to another thread. It is safe to send across threads as long
// as only one thread accesses the hardware at a time (no `Sync` impl here).
unsafe impl Send for E1000Nic {}
30
// MMIO helpers
/// Volatile 32-bit MMIO read at byte offset `reg` from `base`.
///
/// # Safety
///
/// The caller must ensure that `base + reg` is a valid mapped MMIO region.
#[inline]
unsafe fn rd(base: u64, reg: usize) -> u32 {
    let addr = (base + reg as u64) as *const u32;
    addr.read_volatile()
}
/// Volatile 32-bit MMIO write of `val` at byte offset `reg` from `base`.
///
/// # Safety
///
/// The caller must ensure that `base + reg` is a valid mapped MMIO region.
#[inline]
unsafe fn wr(base: u64, reg: usize, val: u32) {
    let addr = (base + reg as u64) as *mut u32;
    addr.write_volatile(val)
}
46
impl E1000Nic {
    /// Initialise the E1000 hardware.
    ///
    /// `mmio_base` is the virtual address of the mapped BAR0 region.
    /// The caller must ensure the MMIO region (>=128 KiB) is identity-mapped.
    ///
    /// Sequence: reset the device, mask all interrupts, read the MAC address,
    /// build the RX/TX descriptor rings from fresh DMA allocations, enable
    /// both engines, then unmask the interrupt causes this driver services.
    ///
    /// # Errors
    ///
    /// Returns `NetError::NotReady` if any DMA allocation fails.
    pub fn init(mmio_base: u64, alloc: &dyn DmaAllocator) -> Result<Self, NetError> {
        // SAFETY: We have exclusive access to the MMIO region during initialization.
        // All DMA allocations are fresh and properly zeroed.
        unsafe {
            // Reset the controller via CTRL.RST.
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::RST);
            // Fixed spin count as a crude post-reset delay.
            // NOTE(review): polling until CTRL.RST self-clears would be more
            // robust than a fixed spin count — confirm against the datasheet.
            for _ in 0..10_000_000u64 {
                core::hint::spin_loop();
            }

            // Disable interrupts during setup; reading ICR acknowledges any
            // causes that were already pending.
            wr(mmio_base, regs::IMC, 0xFFFF_FFFF);
            let _ = rd(mmio_base, regs::ICR);

            let mac = Self::read_mac(mmio_base);

            // RX ring: one contiguous DMA region holding NUM_RX legacy
            // descriptors, zeroed before the device sees it.
            let rx_ring_region = alloc
                .alloc_dma(NUM_RX * core::mem::size_of::<LegacyRxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(rx_ring_region.virt, 0, rx_ring_region.size);
            let rx_descs = rx_ring_region.virt as *mut LegacyRxDesc;

            // One RX_BUF_SIZE DMA buffer per descriptor, zero-filled.
            let mut rx_bufs = [DmaRegion::ZERO; NUM_RX];
            for rx_buf in rx_bufs.iter_mut().take(NUM_RX) {
                let buf = alloc
                    .alloc_dma(RX_BUF_SIZE)
                    .map_err(|_| NetError::NotReady)?;
                ptr::write_bytes(buf.virt, 0, RX_BUF_SIZE);
                *rx_buf = buf;
            }

            // Program RX ring base/length; head at 0 and tail at NUM_RX-1
            // hands all descriptors but one to the hardware.
            wr(mmio_base, regs::RDBAL, rx_ring_region.phys as u32);
            wr(mmio_base, regs::RDBAH, (rx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::RDLEN, rx_ring_region.size as u32);
            wr(mmio_base, regs::RDH, 0);
            wr(mmio_base, regs::RDT, (NUM_RX - 1) as u32);

            // Set buffer addresses in descriptors
            for (i, buf) in rx_bufs.iter().enumerate().take(NUM_RX) {
                (*rx_descs.add(i)).addr = buf.phys;
            }

            // TX ring: same layout as the RX ring, but packet buffers are
            // allocated lazily per-send in `transmit`.
            let tx_ring_region = alloc
                .alloc_dma(NUM_TX * core::mem::size_of::<LegacyTxDesc>())
                .map_err(|_| NetError::NotReady)?;
            ptr::write_bytes(tx_ring_region.virt, 0, tx_ring_region.size);
            let tx_descs = tx_ring_region.virt as *mut LegacyTxDesc;

            wr(mmio_base, regs::TDBAL, tx_ring_region.phys as u32);
            wr(mmio_base, regs::TDBAH, (tx_ring_region.phys >> 32) as u32);
            wr(mmio_base, regs::TDLEN, tx_ring_region.size as u32);
            wr(mmio_base, regs::TDH, 0);
            wr(mmio_base, regs::TDT, 0);

            // Enable TX: pad short packets (PSP), collision threshold 0x10,
            // collision distance 0x40.
            wr(
                mmio_base,
                regs::TCTL,
                tctl::EN | tctl::PSP | (0x10 << tctl::CT_SHIFT) | (0x40 << tctl::COLD_SHIFT),
            );

            // Enable RX with 2048 buffer size (default, works with all MTUs),
            // accept broadcast (BAM), strip CRC in hardware (SECRC).
            // NOTE(review): buffers are RX_BUF_SIZE (4096) bytes, so only the
            // first half of each is ever used at this BSIZE setting.
            wr(
                mmio_base,
                regs::RCTL,
                rctl::EN | rctl::BAM | rctl::BSIZE_2048 | rctl::SECRC,
            );

            // Force link up (SLU) and unmask the interrupt causes we handle.
            let c = rd(mmio_base, regs::CTRL);
            wr(mmio_base, regs::CTRL, c | ctrl::SLU);
            wr(
                mmio_base,
                regs::IMS,
                int_bits::RXT0 | int_bits::LSC | int_bits::RXDMT0 | int_bits::RXO | int_bits::TXDW,
            );
            // STATUS bit 1 = link up.
            let status = rd(mmio_base, regs::STATUS);
            let link_up = (status & 0x02) != 0;

            Ok(Self {
                mmio: mmio_base,
                rx: RxRing::new(rx_descs, NUM_RX),
                rx_bufs,
                tx: TxRing::new(tx_descs, NUM_TX),
                tx_bufs: [None; NUM_TX],
                mac,
                link_up,
            })
        }
    }

    /// Returns the MAC address read from the device during `init`.
    pub fn mac_address(&self) -> [u8; 6] {
        self.mac
    }

    /// Returns the cached link state (as of `init` or the last `check_link`).
    pub fn link_up(&self) -> bool {
        self.link_up
    }

    /// Check and update link status by re-reading the STATUS register.
    pub fn check_link(&mut self) -> bool {
        // SAFETY: MMIO read is safe as long as the device is mapped.
        unsafe {
            let status = rd(self.mmio, regs::STATUS);
            self.link_up = (status & 0x02) != 0;
        }
        self.link_up
    }

    /// Receives one packet into `buf`, returning its length in bytes.
    ///
    /// # Errors
    ///
    /// * `NetError::LinkDown` — link is not up.
    /// * `NetError::NoPacket` — no received descriptor is ready.
    /// * `NetError::BufferTooSmall` — `buf` cannot hold the packet.
    ///   NOTE(review): on this path the descriptor is neither recycled nor
    ///   handed back via RDT, so the packet and ring slot appear to be lost —
    ///   verify against the `RxRing::poll` contract.
    pub fn receive(&mut self, buf: &mut [u8]) -> Result<usize, NetError> {
        // Check link status first
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        let (idx, pkt_len) = self.rx.poll().ok_or(NetError::NoPacket)?;
        let len = pkt_len as usize;
        if buf.len() < len {
            return Err(NetError::BufferTooSmall);
        }

        // SAFETY: The DMA buffer is valid and we have exclusive access during receive.
        unsafe {
            ptr::copy_nonoverlapping(self.rx_bufs[idx].virt, buf.as_mut_ptr(), len);
        }

        // Recycle RX buffer: clear status, restore the buffer address, then
        // hand the descriptor back to the hardware by bumping RDT.
        self.rx.desc_mut(idx).clear_status();
        self.rx
            .desc_mut(idx)
            .set_buffer_addr(self.rx_bufs[idx].phys);
        let new_tail = self.rx.advance();
        // SAFETY: Writing to RDT is safe as the device is initialized.
        unsafe {
            wr(self.mmio, regs::RDT, new_tail as u32);
        }

        Ok(len)
    }

    /// Queues one packet for transmission (non-blocking).
    ///
    /// Copies `buf` into a freshly allocated DMA region, submits it at the
    /// current tail slot and bumps TDT. Completion can be observed later via
    /// `is_transmit_complete` / `wait_for_transmit`.
    ///
    /// # Errors
    ///
    /// * `NetError::LinkDown` — link is not up.
    /// * `NetError::BufferTooSmall` — `buf` exceeds `net_core::MTU`.
    ///   NOTE(review): the frame is too *large* here; the error variant name
    ///   is misleading but kept for caller compatibility.
    /// * `NetError::TxQueueFull` — the tail slot's previous send is pending.
    /// * `NetError::NotReady` — DMA allocation failed.
    pub fn transmit(&mut self, buf: &[u8], alloc: &dyn DmaAllocator) -> Result<(), NetError> {
        // Check link status first
        if !self.check_link() {
            return Err(NetError::LinkDown);
        }

        if buf.len() > net_core::MTU {
            return Err(NetError::BufferTooSmall);
        }

        let idx = self.tx.tail();

        // Check if previous TX at this slot is complete (non-blocking)
        if self.tx.desc(idx).cmd != 0 && !self.tx.is_done(idx) {
            return Err(NetError::TxQueueFull);
        }

        // Free previous buffer if present
        if let Some(old) = self.tx_bufs[idx].take() {
            // SAFETY: We own this DMA region and are freeing it after use.
            unsafe {
                alloc.free_dma(old);
            }
        }

        // Allocate a new DMA buffer and copy the frame into it.
        let region = alloc.alloc_dma(buf.len()).map_err(|_| NetError::NotReady)?;
        // SAFETY: The DMA region is valid and we have exclusive access.
        unsafe {
            ptr::copy_nonoverlapping(buf.as_ptr(), region.virt, buf.len());
        }

        self.tx_bufs[idx] = Some(region);
        let _submitted = self.tx.submit(region.phys, buf.len() as u16);
        // SAFETY: Writing to TDT is safe as the device is initialized.
        unsafe {
            wr(self.mmio, regs::TDT, self.tx.tail() as u32);
        }

        // Non-blocking: return immediately, caller can poll is_transmit_complete()
        // For small packets, we can optionally wait (commented out for performance)
        /*
        while !self.tx.is_done(submitted) {
            core::hint::spin_loop();
        }
        */

        Ok(())
    }

    /// Check if last transmission is complete (non-blocking).
    ///
    /// NOTE(review): this inspects the descriptor at the *current* tail,
    /// which after `transmit` is the slot following the one just submitted —
    /// verify against `TxRing::tail`/`submit` semantics that this really
    /// reflects the last submitted descriptor.
    pub fn is_transmit_complete(&self) -> bool {
        let idx = self.tx.tail();
        self.tx.is_done(idx)
    }

    /// Wait for transmission to complete (blocking busy-wait, no timeout).
    pub fn wait_for_transmit(&self) {
        while !self.is_transmit_complete() {
            core::hint::spin_loop();
        }
    }

    /// Handles an interrupt: reads ICR (the read acknowledges the pending
    /// causes) and branches on the bits that were set.
    ///
    /// NOTE(review): takes `&self`, so the LSC branch cannot refresh the
    /// cached `link_up` field; callers must use `check_link` for that.
    pub fn handle_interrupt(&self) {
        // Read and clear interrupt causes
        // SAFETY: MMIO access is safe during interrupt handling.
        let icr = unsafe { rd(self.mmio, regs::ICR) };

        // Handle specific interrupt causes
        if (icr & int_bits::LSC) != 0 {
            // Link Status Change - update cached state
            // SAFETY: MMIO read is safe.
            let _status = unsafe { rd(self.mmio, regs::STATUS) };
        }

        if (icr & (int_bits::RXT0 | int_bits::RXDMT0 | int_bits::RXO)) != 0 {
            // RX interrupts - packet received, will be handled by poll
        }

        if (icr & int_bits::TXDW) != 0 {
            // TX descriptor written back - buffers can be freed
        }
    }

    /// Reads the MAC address, preferring the RAL0/RAH0 receive-address
    /// registers and falling back to the first three EEPROM words.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `base` is a valid mapped MMIO region.
    unsafe fn read_mac(base: u64) -> [u8; 6] {
        // Try RAL/RAH registers first
        let ral = rd(base, regs::RAL0);
        let rah = rd(base, regs::RAH0);

        // Check if MAC address is valid (not all zeros)
        if ral != 0 || rah != 0 {
            return [
                (ral) as u8,
                (ral >> 8) as u8,
                (ral >> 16) as u8,
                (ral >> 24) as u8,
                (rah) as u8,
                (rah >> 8) as u8,
            ];
        }

        // Fallback to EEPROM read: words 0..2 hold the MAC, low byte first.
        let mut mac = [0u8; 6];
        for i in 0u32..3 {
            let w = Self::eeprom_read(base, i as u8);
            mac[(i * 2) as usize] = w as u8;
            mac[(i * 2 + 1) as usize] = (w >> 8) as u8;
        }
        mac
    }

    /// Reads one 16-bit word from the EEPROM through the EERD register.
    ///
    /// NOTE(review): spins forever if the device never sets DONE — consider
    /// a bounded retry.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `base` is a valid mapped MMIO region.
    unsafe fn eeprom_read(base: u64, addr: u8) -> u16 {
        wr(
            base,
            regs::EERD,
            eerd::START | ((addr as u32) << eerd::ADDR_SHIFT),
        );
        loop {
            let v = rd(base, regs::EERD);
            if v & eerd::DONE != 0 {
                return ((v >> eerd::DATA_SHIFT) & 0xFFFF) as u16;
            }
            core::hint::spin_loop();
        }
    }
}
331}