//! strat9_kernel/hardware/nic/igc_drv.rs — Intel IGC (I225/I226) NIC driver.
1use super::register_device;
2use crate::{
3    hardware::pci_client::{self as pci, Bar},
4    memory::{self},
5    sync::SpinLock,
6};
7use alloc::sync::Arc;
8use e1000::E1000Nic;
9use net_core::{NetError, NetworkDevice};
10use nic_buffers::{DmaAllocator, DmaRegion};
11use x86_64::VirtAddr;
12
/// PCI device IDs of the supported Intel IGC-family controllers:
/// I225-LM, I225-V, I226-LM and I226-V. Devices found during the PCI
/// scan are matched against this list in `init`.
const IGC_IDS: &[u16] = &[
    pci::intel_eth::I225_LM,
    pci::intel_eth::I225_V,
    pci::intel_eth::I226_LM,
    pci::intel_eth::I226_V,
];
19
/// Zero-sized adapter that implements `DmaAllocator` on top of the kernel's
/// physical frame allocator (see the `DmaAllocator` impl below).
struct KernelDma;
21
22impl DmaAllocator for KernelDma {
23    /// Allocates dma.
24    fn alloc_dma(&self, size: usize) -> Result<DmaRegion, nic_buffers::DmaAllocError> {
25        let pages = (size + 4095) / 4096;
26        let order = pages.next_power_of_two().trailing_zeros() as u8;
27        let frame = crate::sync::with_irqs_disabled(|token| {
28            crate::memory::allocate_frames(token, order)
29        })
30        .map_err(|_| nic_buffers::DmaAllocError)?;
31        let phys = frame.start_address.as_u64();
32        let virt = memory::phys_to_virt(phys) as *mut u8;
33        Ok(DmaRegion {
34            phys,
35            virt,
36            size: pages * 4096,
37        })
38    }
39
40    /// Releases dma.
41    unsafe fn free_dma(&self, region: DmaRegion) {
42        let pages = (region.size + 4095) / 4096;
43        let order = pages.next_power_of_two().trailing_zeros() as u8;
44        let frame =
45            crate::memory::PhysFrame::containing_address(x86_64::PhysAddr::new(region.phys));
46        crate::sync::with_irqs_disabled(|token| {
47            crate::memory::free_frames(token, frame, order);
48        });
49    }
50}
51
/// `NetworkDevice` implementation for IGC hardware, delegating all datapath
/// operations to the shared e1000-family core driver.
pub struct KernelIgc {
    // Core driver state; the spinlock serializes receive/transmit/interrupt
    // paths, which all lock it before touching the NIC.
    inner: SpinLock<E1000Nic>,
    // MAC address read once from the core driver at init, so `mac_address()`
    // can answer without taking the lock.
    mac: [u8; 6],
}
56
57impl NetworkDevice for KernelIgc {
58    /// Performs the name operation.
59    fn name(&self) -> &str {
60        "igc"
61    }
62    /// Performs the mac address operation.
63    fn mac_address(&self) -> [u8; 6] {
64        self.mac
65    }
66    /// Performs the link up operation.
67    fn link_up(&self) -> bool {
68        self.inner.lock().link_up()
69    }
70    /// Performs the receive operation.
71    fn receive(&self, buf: &mut [u8]) -> Result<usize, NetError> {
72        self.inner.lock().receive(buf)
73    }
74    /// Performs the transmit operation.
75    fn transmit(&self, buf: &[u8]) -> Result<(), NetError> {
76        self.inner.lock().transmit(buf, &KernelDma)
77    }
78    /// Handles interrupt.
79    fn handle_interrupt(&self) {
80        self.inner.lock().handle_interrupt();
81    }
82}
83
84/// Performs the init operation.
85pub fn init() {
86    if !memory::paging::is_initialized() {
87        log::warn!("IGC: paging not initialized, deferring probe");
88        return;
89    }
90
91    let candidates = pci::probe_all(pci::ProbeCriteria {
92        vendor_id: Some(pci::vendor::INTEL),
93        device_id: None,
94        class_code: Some(pci::class::NETWORK),
95        subclass: None,
96        prog_if: None,
97    });
98
99    for pci_dev in candidates.into_iter() {
100        if !IGC_IDS.contains(&pci_dev.device_id) {
101            continue;
102        }
103
104        log::info!(
105            "IGC: PCI {:04x}:{:04x} at {:?}",
106            pci_dev.vendor_id,
107            pci_dev.device_id,
108            pci_dev.address
109        );
110
111        pci_dev.enable_bus_master();
112        pci_dev.enable_memory_space();
113        let mut cmd = pci_dev.read_config_u16(pci::config::COMMAND);
114        cmd &= !pci::command::INTERRUPT_DISABLE;
115        pci_dev.write_config_u16(pci::config::COMMAND, cmd);
116
117        let mmio_phys = match pci_dev.read_bar(0).or_else(|| pci_dev.read_bar(1)) {
118            Some(Bar::Memory32 { addr, .. }) => addr as u64,
119            Some(Bar::Memory64 { addr, .. }) => addr,
120            _ => {
121                log::error!("IGC: no MMIO BAR (BAR0/BAR1)");
122                continue;
123            }
124        };
125
126        memory::paging::ensure_identity_map_range(mmio_phys, 0x2_0000);
127        let mmio_virt = memory::phys_to_virt(mmio_phys);
128        let mmio_page_phys = mmio_phys & !0xFFF;
129        let mmio_page_virt = mmio_virt & !0xFFF;
130        let mapped = memory::paging::translate(VirtAddr::new(mmio_page_virt))
131            .map(|p| p.as_u64())
132            .unwrap_or(0);
133        if mapped != mmio_page_phys {
134            log::error!(
135                "IGC: MMIO map mismatch phys={:#x} virt={:#x} mapped={:#x}",
136                mmio_phys,
137                mmio_virt,
138                mapped
139            );
140            continue;
141        }
142
143        // Linux-like step: probe attempted with retries; if core init fails, keep going.
144        let mut init_ok = None;
145        for _ in 0..3 {
146            if let Ok(nic) = E1000Nic::init(mmio_virt, &KernelDma) {
147                init_ok = Some(nic);
148                break;
149            }
150            let mut cmd_retry = pci_dev.read_config_u16(pci::config::COMMAND);
151            cmd_retry |= pci::command::BUS_MASTER | pci::command::MEMORY_SPACE;
152            cmd_retry &= !pci::command::INTERRUPT_DISABLE;
153            pci_dev.write_config_u16(pci::config::COMMAND, cmd_retry);
154            core::hint::spin_loop();
155        }
156
157        match init_ok {
158            Some(nic) => {
159                let mac = nic.mac_address();
160                let dev = Arc::new(KernelIgc {
161                    mac,
162                    inner: SpinLock::new(nic),
163                });
164                register_device(dev);
165                return;
166            }
167            None => {
168                log::warn!("IGC: core init failed (likely requires dedicated igc register path)");
169            }
170        }
171    }
172}