103#include <sys/cdefs.h>
106#include <sys/param.h>
107#include <sys/systm.h>
109#include <sys/endian.h>
111#include <sys/malloc.h>
112#include <sys/kernel.h>
113#include <sys/module.h>
114#include <sys/socket.h>
115#include <sys/sockio.h>
116#include <sys/queue.h>
117#include <sys/sysctl.h>
120#include <net/ethernet.h>
122#include <net/if_var.h>
123#include <net/if_arp.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126#include <net/if_types.h>
127#include <net/if_vlan_var.h>
129#include <netinet/in.h>
130#include <netinet/in_systm.h>
131#include <netinet/ip.h>
132#include <netinet/tcp.h>
133#include <netinet/udp.h>
135#include <machine/bus.h>
136#include <machine/in_cksum.h>
137#include <machine/resource.h>
140#include <dev/mii/mii.h>
141#include <dev/mii/miivar.h>
143#include <dev/pci/pcireg.h>
144#include <dev/pci/pcivar.h>
153#include "miibus_if.h"
/* Tx checksum-offload flags the controller can handle in hardware. */
#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
174 "SK-9Sxx Gigabit Ethernet" },
176 "SK-9Exx Gigabit Ethernet"},
178 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
180 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
182 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
184 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
186 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
188 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
190 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
192 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
194 "Marvell Yukon 88E8035 Fast Ethernet" },
196 "Marvell Yukon 88E8036 Fast Ethernet" },
198 "Marvell Yukon 88E8038 Fast Ethernet" },
200 "Marvell Yukon 88E8039 Fast Ethernet" },
202 "Marvell Yukon 88E8040 Fast Ethernet" },
204 "Marvell Yukon 88E8040T Fast Ethernet" },
206 "Marvell Yukon 88E8042 Fast Ethernet" },
208 "Marvell Yukon 88E8048 Fast Ethernet" },
210 "Marvell Yukon 88E8050 Gigabit Ethernet" },
212 "Marvell Yukon 88E8052 Gigabit Ethernet" },
214 "Marvell Yukon 88E8053 Gigabit Ethernet" },
216 "Marvell Yukon 88E8055 Gigabit Ethernet" },
218 "Marvell Yukon 88E8056 Gigabit Ethernet" },
220 "Marvell Yukon 88E8070 Gigabit Ethernet" },
222 "Marvell Yukon 88E8058 Gigabit Ethernet" },
224 "Marvell Yukon 88E8071 Gigabit Ethernet" },
226 "Marvell Yukon 88E8072 Gigabit Ethernet" },
228 "Marvell Yukon 88E8055 Gigabit Ethernet" },
230 "Marvell Yukon 88E8075 Gigabit Ethernet" },
232 "Marvell Yukon 88E8057 Gigabit Ethernet" },
234 "Marvell Yukon 88E8059 Gigabit Ethernet" },
236 "D-Link 550SX Gigabit Ethernet" },
238 "D-Link 560SX Gigabit Ethernet" },
240 "D-Link 560T Gigabit Ethernet" }
278#ifndef __NO_STRICT_ALIGNMENT
288static int msk_ioctl(
struct ifnet *, u_long, caddr_t);
299static void msk_dmamap_cb(
void *, bus_dma_segment_t *,
int,
int);
359 DEVMETHOD(device_shutdown, bus_generic_shutdown),
382 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
387 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
392 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
397 { SYS_RES_IRQ, 1, RF_ACTIVE },
406 sc_if = device_get_softc(dev);
432 if_printf(sc_if->
msk_ifp,
"phy failed to come ready\n");
444 sc_if = device_get_softc(dev);
467 if_printf(sc_if->
msk_ifp,
"phy write timeout\n");
477 struct mii_data *mii;
481 sc_if = device_get_softc(dev);
488 if (mii == NULL || ifp == NULL ||
489 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
493 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
494 (IFM_AVALID | IFM_ACTIVE)) {
495 switch (IFM_SUBTYPE(mii->mii_media_active)) {
522 switch (IFM_SUBTYPE(mii->mii_media_active)) {
534 if ((IFM_OPTIONS(mii->mii_media_active) &
535 IFM_ETH_RXPAUSE) == 0)
537 if ((IFM_OPTIONS(mii->mii_media_active) &
538 IFM_ETH_TXPAUSE) == 0)
540 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
549 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
550 if ((IFM_OPTIONS(mii->mii_media_active) &
551 IFM_ETH_RXPAUSE) != 0)
579 uint32_t *mchash = arg;
582 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
586 mchash[crc >> 5] |= 1 << (crc & 0x1f);
605 bzero(mchash,
sizeof(mchash));
607 if ((ifp->if_flags & IFF_PROMISC) != 0)
609 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
616 if (mchash[0] != 0 || mchash[1] != 0)
623 (mchash[0] >> 16) & 0xffff);
627 (mchash[1] >> 16) & 0xffff);
637 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
657 (sc_if->
msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
659 for (i = 100; i > 0; i--) {
669 "prefetch unit stuck?\n");
682 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
688 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
723 (sc_if->
msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
744 for (i = 0; i < nbuf; i++) {
752 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
791 (sc_if->
msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
812 for (i = 0; i < nbuf; i++) {
820 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
857 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
904 bus_dma_segment_t segs[1];
908 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
912 m->m_len = m->m_pkthdr.len = MCLBYTES;
914 m_adj(m, ETHER_ALIGN);
915#ifndef __NO_STRICT_ALIGNMENT
922 BUS_DMA_NOWAIT) != 0) {
926 KASSERT(nsegs == 1, (
"%s: %d segments returned!", __func__, nsegs));
936 if (rxd->
rx_m != NULL) {
938 BUS_DMASYNC_POSTREAD);
946 BUS_DMASYNC_PREREAD);
962 bus_dma_segment_t segs[1];
966 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
969 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
971 m_adj(m, ETHER_ALIGN);
972#ifndef __NO_STRICT_ALIGNMENT
979 BUS_DMA_NOWAIT) != 0) {
983 KASSERT(nsegs == 1, (
"%s: %d segments returned!", __func__, nsegs));
993 if (rxd->
rx_m != NULL) {
1004 BUS_DMASYNC_PREREAD);
1021 struct mii_data *mii;
1024 sc_if = ifp->if_softc;
1028 error = mii_mediachg(mii);
1041 struct mii_data *mii;
1043 sc_if = ifp->if_softc;
1045 if ((ifp->if_flags & IFF_UP) == 0) {
1052 ifmr->ifm_active = mii->mii_media_active;
1053 ifmr->ifm_status = mii->mii_media_status;
1062 struct mii_data *mii;
1063 int error, mask, reinit;
1065 sc_if = ifp->if_softc;
1066 ifr = (
struct ifreq *)data;
1072 if (ifr->ifr_mtu >
MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
1074 else if (ifp->if_mtu != ifr->ifr_mtu) {
1075 if (ifr->ifr_mtu > ETHERMTU) {
1085 ifp->if_capenable &=
1086 ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1087 VLAN_CAPABILITIES(ifp);
1090 ifp->if_mtu = ifr->ifr_mtu;
1091 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1092 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1100 if ((ifp->if_flags & IFF_UP) != 0) {
1101 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1103 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1107 }
else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1115 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1122 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1127 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1128 if ((mask & IFCAP_TXCSUM) != 0 &&
1129 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1130 ifp->if_capenable ^= IFCAP_TXCSUM;
1131 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1134 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
1136 if ((mask & IFCAP_RXCSUM) != 0 &&
1137 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1138 ifp->if_capenable ^= IFCAP_RXCSUM;
1142 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1143 (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
1144 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1145 if ((mask & IFCAP_TSO4) != 0 &&
1146 (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1147 ifp->if_capenable ^= IFCAP_TSO4;
1148 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1149 ifp->if_hwassist |= CSUM_TSO;
1151 ifp->if_hwassist &= ~CSUM_TSO;
1153 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1154 (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
1155 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1156 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1157 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1158 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1159 if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
1160 ifp->if_capenable &=
1161 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
1164 if (ifp->if_mtu > ETHERMTU &&
1167 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1169 VLAN_CAPABILITIES(ifp);
1170 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1171 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1177 error = ether_ioctl(ifp, command, data);
1188 uint16_t vendor, devid;
1191 vendor = pci_get_vendor(dev);
1192 devid = pci_get_device(dev);
1196 device_set_desc(dev, mp->
msk_name);
1197 return (BUS_PROBE_DEFAULT);
1235 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1239 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1323 val &= ~PCI_Y2_PHY1_COMA;
1325 val &= ~PCI_Y2_PHY2_COMA;
1369 status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1370 status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
1385 status = pci_read_config(sc->
msk_dev, PCIR_STATUS, 2);
1388 pci_write_config(sc->
msk_dev, PCIR_STATUS, status |
1389 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1390 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
1406 val = pci_read_config(sc->
msk_dev, PCIR_CACHELNSZ, 1);
1408 pci_write_config(sc->
msk_dev, PCIR_CACHELNSZ, 2, 1);
1469 for (i = 0; initram > 0 && i < sc->
msk_num_port; i++) {
1511 pcix_cmd = pci_read_config(sc->
msk_dev,
1514 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
1522 if (pci_get_max_read_req(sc->
msk_dev) == 512)
1523 pci_set_max_read_req(sc->
msk_dev, 2048);
1531 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1576 sc = device_get_softc(device_get_parent(dev));
1583 snprintf(desc,
sizeof(desc),
1584 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1587 device_set_desc_copy(dev, desc);
1589 return (BUS_PROBE_DEFAULT);
1606 sc_if = device_get_softc(dev);
1607 sc = device_get_softc(device_get_parent(dev));
1608 mmd = device_get_ivars(dev);
1634 ifp = sc_if->
msk_ifp = if_alloc(IFT_ETHER);
1636 device_printf(sc_if->
msk_if_dev,
"can not if_alloc()\n");
1640 ifp->if_softc = sc_if;
1641 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1643 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
1650 ifp->if_capabilities |= IFCAP_RXCSUM;
1653 ifp->if_capabilities |= IFCAP_RXCSUM;
1655 ifp->if_capenable = ifp->if_capabilities;
1661 IFQ_SET_READY(&ifp->if_snd);
1672 for (i = 0; i < ETHER_ADDR_LEN; i++)
1679 ether_ifattach(ifp, eaddr);
1683 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1691 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
1698 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1700 ifp->if_capenable = ifp->if_capabilities;
1706 ifp->if_capenable &= ~IFCAP_RXCSUM;
1713 ifp->if_hdrlen =
sizeof(
struct ether_vlan_header);
1723 device_printf(sc_if->
msk_if_dev,
"attaching PHYs failed\n");
1724 ether_ifdetach(ifp);
1748 int error, msic, msir, reg;
1750 sc = device_get_softc(dev);
1752 mtx_init(&sc->
msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1758 pci_enable_busmaster(dev);
1761#ifdef MSK_USEIOSPACE
1775 device_printf(dev,
"couldn't allocate %s resources\n",
1793 device_printf(dev,
"unknown device: id=0x%02x, rev=0x%02x\n",
1799 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1800 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1801 OID_AUTO,
"process_limit",
1802 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1804 "max number of Rx events to process");
1807 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1812 device_printf(dev,
"process_limit value out of range; "
1819 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
1820 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
1822 "Maximum number of time to delay interrupts");
1823 resource_int_value(device_get_name(dev), device_get_unit(dev),
1836 if (pci_find_cap(sc->
msk_dev, PCIY_EXPRESS, ®) == 0) {
1839 }
else if (pci_find_cap(sc->
msk_dev, PCIY_PCIX, ®) == 0) {
1918 msic = pci_msi_count(dev);
1920 device_printf(dev,
"MSI count : %d\n", msic);
1925 if (pci_alloc_msi(dev, &msir) == 0) {
1930 pci_release_msi(dev);
1936 device_printf(dev,
"couldn't allocate IRQ resources\n");
1956 device_printf(dev,
"failed to add child for PORT_A\n");
1960 mmd = malloc(
sizeof(
struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
1967 mmd->
mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1973 device_printf(dev,
"failed to add child for PORT_B\n");
1977 mmd = malloc(
sizeof(
struct msk_mii_data), M_DEVBUF, M_WAITOK |
1984 mmd->
mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
1988 error = bus_generic_attach(dev);
1990 device_printf(dev,
"failed to attach port(s)\n");
1995 error = bus_setup_intr(dev, sc->
msk_irq[0], INTR_TYPE_NET |
1998 device_printf(dev,
"couldn't set up interrupt handler\n");
2022 sc_if = device_get_softc(dev);
2024 (
"msk mutex not initialized in msk_detach"));
2028 if (device_is_attached(dev)) {
2036 ether_ifdetach(ifp);
2053 bus_generic_detach(dev);
2069 sc = device_get_softc(dev);
2070 KASSERT(mtx_initialized(&sc->
msk_mtx), (
"msk mutex not initialized"));
2072 if (device_is_alive(dev)) {
2083 bus_generic_detach(dev);
2106 pci_release_msi(dev);
2117 return (bus_get_dma_tag(bus));
2151 count = imin(4096, roundup2(count, 1024));
2154 error = bus_dma_tag_create(
2168 "failed to create status DMA tag\n");
2174 (
void **)&sc->
msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2178 "failed to allocate DMA'able memory for status ring\n");
2187 "failed to load DMA'able memory for status ring\n");
2225 error = bus_dma_tag_create(
2231 BUS_SPACE_MAXSIZE_32BIT,
2233 BUS_SPACE_MAXSIZE_32BIT,
2239 "failed to create parent DMA tag\n");
2256 "failed to create Tx ring DMA tag\n");
2274 "failed to create Rx ring DMA tag\n");
2292 "failed to create Tx DMA tag\n");
2317 "failed to create Rx DMA tag\n");
2327 "failed to allocate DMA'able memory for Tx ring\n");
2337 "failed to load DMA'able memory for Tx ring\n");
2348 "failed to allocate DMA'able memory for Rx ring\n");
2358 "failed to load DMA'able memory for Rx ring\n");
2372 "failed to create Tx dmamap\n");
2380 "failed to create spare Rx dmamap\n");
2391 "failed to create Rx dmamap\n");
2411 "disabling jumbo frame support\n");
2428 "failed to create jumbo Rx ring DMA tag\n");
2453 "failed to create jumbo Rx DMA tag\n");
2460 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2464 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2475 "failed to load DMA'able memory for jumbo Rx ring\n");
2484 "failed to create spare jumbo Rx dmamap\n");
2495 "failed to create jumbo Rx dmamap\n");
2504 device_printf(sc_if->
msk_if_dev,
"disabling jumbo frame support "
2505 "due to resource shortage\n");
2631 uint32_t control, csum, prod, si;
2632 uint16_t offset, tcp_offset, tso_mtu;
2633 int error, i, nseg, tso;
2637 tcp_offset = offset = 0;
2642 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2652 struct ether_header *eh;
2656 if (M_WRITABLE(m) == 0) {
2658 m = m_dup(*m_head, M_NOWAIT);
2667 offset =
sizeof(
struct ether_header);
2668 m = m_pullup(m, offset);
2673 eh = mtod(m,
struct ether_header *);
2675 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2676 offset =
sizeof(
struct ether_vlan_header);
2677 m = m_pullup(m, offset);
2683 m = m_pullup(m, offset +
sizeof(
struct ip));
2688 ip = (
struct ip *)(mtod(m,
char *) + offset);
2689 offset += (ip->ip_hl << 2);
2690 tcp_offset = offset;
2691 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2692 m = m_pullup(m, offset +
sizeof(
struct tcphdr));
2697 tcp = (
struct tcphdr *)(mtod(m,
char *) + offset);
2698 offset += (tcp->th_off << 2);
2701 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2717 m = m_pullup(m, offset +
sizeof(
struct tcphdr));
2722 *(uint16_t *)(m->m_data + offset +
2723 m->m_pkthdr.csum_data) = in_cksum_skip(m,
2724 m->m_pkthdr.len, offset);
2725 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2735 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2736 if (error == EFBIG) {
2745 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
2751 }
else if (error != 0)
2771 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2773 tso_mtu = m->m_pkthdr.tso_segsz;
2775 tso_mtu = offset + m->m_pkthdr.tso_segsz;
2778 tx_le->
msk_addr = htole32(tso_mtu);
2791 if ((m->m_flags & M_VLANTAG) != 0) {
2792 if (tx_le == NULL) {
2796 htons(m->m_pkthdr.ether_vtag));
2801 htons(m->m_pkthdr.ether_vtag));
2811 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2814 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
2816 csum |= (uint32_t)tcp_offset << 16;
2845 tx_le->
msk_control = htole32(txsegs[0].ds_len | control |
2848 tx_le->
msk_control = htole32(txsegs[0].ds_len | control |
2853 for (i = 1; i < nseg; i++) {
2870 tx_le->
msk_control = htole32(txsegs[i].ds_len | control |
2897 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2907 sc_if = ifp->if_softc;
2917 struct mbuf *m_head;
2920 sc_if = ifp->if_softc;
2923 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2927 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2930 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2941 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2942 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2951 ETHER_BPF_MTAP(ifp, m_head);
2977 if_printf(sc_if->
msk_ifp,
"watchdog timeout "
2979 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2980 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2985 if_printf(ifp,
"watchdog timeout\n");
2986 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2987 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2989 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2999 sc = device_get_softc(dev);
3004 IFF_DRV_RUNNING) != 0))
3020 sc = device_get_softc(dev);
3027 IFF_DRV_RUNNING) != 0))
3054 sc = device_get_softc(dev);
3075#ifndef __NO_STRICT_ALIGNMENT
3080 uint16_t *src, *dst;
3082 src = mtod(m, uint16_t *);
3085 for (i = 0; i < (m->m_len /
sizeof(uint16_t) + 1); i++)
3095 struct ether_header *eh;
3098 int32_t hlen, len, pktlen, temp32;
3099 uint16_t csum, *opts;
3103 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3105 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3108 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3110 m->m_pkthdr.csum_data = 0xffff;
3128 "Rx checksum value mismatch!\n");
3131 pktlen = m->m_pkthdr.len;
3132 if (pktlen <
sizeof(
struct ether_header) +
sizeof(
struct ip))
3134 eh = mtod(m,
struct ether_header *);
3135 if (eh->ether_type != htons(ETHERTYPE_IP))
3137 ip = (
struct ip *)(eh + 1);
3138 if (ip->ip_v != IPVERSION)
3141 hlen = ip->ip_hl << 2;
3142 pktlen -=
sizeof(
struct ether_header);
3143 if (hlen <
sizeof(
struct ip))
3145 if (ntohs(ip->ip_len) < hlen)
3147 if (ntohs(ip->ip_len) != pktlen)
3149 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
3154 if (pktlen < (hlen +
sizeof(
struct tcphdr)))
3158 if (pktlen < (hlen +
sizeof(
struct udphdr)))
3160 uh = (
struct udphdr *)((caddr_t)ip + hlen);
3161 if (uh->uh_sum == 0)
3167 csum = bswap16(sc_if->
msk_csum & 0xFFFF);
3169 len = hlen -
sizeof(
struct ip);
3171 opts = (uint16_t *)(ip + 1);
3172 for (; len > 0; len -=
sizeof(uint16_t), opts++) {
3173 temp32 = csum - *opts;
3174 temp32 = (temp32 >> 16) + (temp32 & 65535);
3175 csum = temp32 & 65535;
3178 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
3179 m->m_pkthdr.csum_data = csum;
3197 rxlen = status >> 16;
3199 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3200 rxlen -= ETHER_VLAN_ENCAP_LEN;
3208 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3217 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3229 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3234 m->m_pkthdr.rcvif = ifp;
3235 m->m_pkthdr.len = m->m_len = len;
3236#ifndef __NO_STRICT_ALIGNMENT
3240 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3241 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3245 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3246 m->m_pkthdr.ether_vtag = sc_if->
msk_vtag;
3247 m->m_flags |= M_VLANTAG;
3250 (*ifp->if_input)(ifp, m);
3273 rxlen = status >> 16;
3275 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3276 rxlen -= ETHER_VLAN_ENCAP_LEN;
3282 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3294 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3299 m->m_pkthdr.rcvif = ifp;
3300 m->m_pkthdr.len = m->m_len = len;
3301#ifndef __NO_STRICT_ALIGNMENT
3305 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3306 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3310 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
3311 m->m_pkthdr.ether_vtag = sc_if->
msk_vtag;
3312 m->m_flags |= M_VLANTAG;
3315 (*ifp->if_input)(ifp, m);
3338 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3352 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3353 if ((control &
EOP) == 0)
3357 BUS_DMASYNC_POSTWRITE);
3360 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3361 KASSERT(txd->
tx_m != NULL, (
"%s: freeing NULL mbuf!",
3378 struct epoch_tracker et;
3380 struct mii_data *mii;
3391 NET_EPOCH_ENTER(et);
3408 "PHY FIFO underrun/overflow.\n");
3428 device_printf(sc_if->
msk_if_dev,
"Tx FIFO underrun!\n");
3450 "RAM buffer read parity error\n");
3457 "RAM buffer write parity error\n");
3463 device_printf(sc_if->
msk_if_dev,
"Tx MAC parity error\n");
3469 device_printf(sc_if->
msk_if_dev,
"Rx parity error\n");
3474 device_printf(sc_if->
msk_if_dev,
"TCP segmentation error\n");
3484 uint32_t tlphead[4];
3499 "PCI Express protocol violation error\n");
3507 "unexpected IRQ Status error\n");
3510 "unexpected IRQ Master error\n");
3512 v16 = pci_read_config(sc->
msk_dev, PCIR_STATUS, 2);
3514 pci_write_config(sc->
msk_dev, PCIR_STATUS, v16 |
3515 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3516 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
3536 "Uncorrectable PCI Express error\n");
3542 for (i = 0; i < 4; i++)
3546 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3575 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3580 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3591 uint32_t control, status;
3592 int cons, len, port, rxprog;
3599 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3609 control &= ~HW_OWNER;
3613 port = (control >> 16) & 0x01;
3614 sc_if = sc->
msk_if[port];
3615 if (sc_if == NULL) {
3616 device_printf(sc->
msk_dev,
"invalid port opcode "
3632 if (!(sc_if->
msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
3664 device_printf(sc->
msk_dev,
"unhandled opcode 0x%08x\n",
3675 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3690 struct ifnet *ifp0, *ifp1;
3699 if (status == 0 || status == 0xffffffff ||
3724 device_printf(sc->
msk_dev,
"Rx descriptor error\n");
3730 device_printf(sc->
msk_dev,
"Tx descriptor error\n");
3745 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3746 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3748 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
3749 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3769 if (ifp->if_mtu > ETHERMTU) {
3799 struct mii_data *mii;
3811 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3818 if (ifp->if_mtu < ETHERMTU)
3822 sc_if->
msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3823 if (ifp->if_mtu > ETHERMTU &&
3826 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
3868 if (ifp->if_mtu > ETHERMTU)
3873 eaddr = IF_LLADDR(ifp);
3875 eaddr[0] | (eaddr[1] << 8));
3877 eaddr[2] | (eaddr[3] << 8));
3879 eaddr[4] | (eaddr[5] << 8));
3881 eaddr[0] | (eaddr[1] << 8));
3883 eaddr[2] | (eaddr[3] << 8));
3885 eaddr[4] | (eaddr[5] << 8));
4007 (ifp->if_capenable & IFCAP_RXCSUM) != 0)
4025 "initialization failed: no memory for Rx buffers\n");
4060 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4061 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4197 device_printf(sc_if->
msk_if_dev,
"Tx BMU stop failed\n");
4245 device_printf(sc_if->
msk_if_dev,
"Rx BMU stop failed\n");
4259 if (rxd->
rx_m != NULL) {
4270 if (jrxd->
rx_m != NULL) {
4275 m_freem(jrxd->
rx_m);
4281 if (txd->
tx_m != NULL) {
4294 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/*
 * Read a 32-bit GMAC MIB counter: the hardware exposes each counter as
 * two 16-bit registers (low word at offset y, high word at y + 4).
 * The whole expansion is parenthesized so the macro composes safely
 * inside larger expressions (e.g. sums or shifts at the use site).
 */
#define MSK_READ_MIB32(x, y)					\
	((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	 (uint32_t)GMAC_READ_2(sc, x, y))
/* Read a 64-bit MIB counter built from two 32-bit halves (high at y + 8). */
#define MSK_READ_MIB64(x, y)					\
	((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	 (uint64_t)MSK_READ_MIB32(x, y))
4328 gmac &= ~GM_PAR_MIB_CLR;
4344 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
4434 gmac &= ~GM_PAR_MIB_CLR;
4443 uint32_t result, *stat;
4449 stat = (uint32_t *)((uint8_t *)&sc_if->
msk_stats + off);
4456 return (sysctl_handle_int(oidp, &result, 0, req));
4464 uint64_t result, *stat;
4470 stat = (uint64_t *)((uint8_t *)&sc_if->
msk_stats + off);
4477 return (sysctl_handle_64(oidp, &result, 0, req));
4480#undef MSK_READ_MIB32
4481#undef MSK_READ_MIB64
4483#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \
4484 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, \
4485 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, \
4486 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \
4488#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \
4489 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, \
4490 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, \
4491 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \
4497 struct sysctl_ctx_list *ctx;
4498 struct sysctl_oid_list *child, *schild;
4499 struct sysctl_oid *tree;
4501 ctx = device_get_sysctl_ctx(sc_if->
msk_if_dev);
4502 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->
msk_if_dev));
4504 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
"stats",
4505 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"MSK Statistics");
4506 schild = SYSCTL_CHILDREN(tree);
4507 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO,
"rx",
4508 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"MSK RX Statistics");
4509 child = SYSCTL_CHILDREN(tree);
4511 child, rx_ucast_frames,
"Good unicast frames");
4513 child, rx_bcast_frames,
"Good broadcast frames");
4515 child, rx_pause_frames,
"Pause frames");
4517 child, rx_mcast_frames,
"Multicast frames");
4519 child, rx_crc_errs,
"CRC errors");
4521 child, rx_good_octets,
"Good octets");
4523 child, rx_bad_octets,
"Bad octets");
4525 child, rx_pkts_64,
"64 bytes frames");
4527 child, rx_pkts_65_127,
"65 to 127 bytes frames");
4529 child, rx_pkts_128_255,
"128 to 255 bytes frames");
4531 child, rx_pkts_256_511,
"256 to 511 bytes frames");
4533 child, rx_pkts_512_1023,
"512 to 1023 bytes frames");
4535 child, rx_pkts_1024_1518,
"1024 to 1518 bytes frames");
4537 child, rx_pkts_1519_max,
"1519 to max frames");
4539 child, rx_pkts_too_long,
"frames too long");
4541 child, rx_pkts_jabbers,
"Jabber errors");
4543 child, rx_fifo_oflows,
"FIFO overflows");
4545 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO,
"tx",
4546 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"MSK TX Statistics");
4547 child = SYSCTL_CHILDREN(tree);
4549 child, tx_ucast_frames,
"Unicast frames");
4551 child, tx_bcast_frames,
"Broadcast frames");
4553 child, tx_pause_frames,
"Pause frames");
4555 child, tx_mcast_frames,
"Multicast frames");
4557 child, tx_octets,
"Octets");
4559 child, tx_pkts_64,
"64 bytes frames");
4561 child, tx_pkts_65_127,
"65 to 127 bytes frames");
4563 child, tx_pkts_128_255,
"128 to 255 bytes frames");
4565 child, tx_pkts_256_511,
"256 to 511 bytes frames");
4567 child, tx_pkts_512_1023,
"512 to 1023 bytes frames");
4569 child, tx_pkts_1024_1518,
"1024 to 1518 bytes frames");
4571 child, tx_pkts_1519_max,
"1519 to max frames");
4573 child, tx_colls,
"Collisions");
4575 child, tx_late_colls,
"Late collisions");
4577 child, tx_excess_colls,
"Excessive collisions");
4579 child, tx_multi_colls,
"Multiple collisions");
4581 child, tx_single_colls,
"Single collisions");
4583 child, tx_underflows,
"FIFO underflows");
4586#undef MSK_SYSCTL_STAT32
4587#undef MSK_SYSCTL_STAT64
4596 value = *(
int *)arg1;
4597 error = sysctl_handle_int(oidp, &value, 0, req);
4598 if (error || !req->newptr)
4600 if (value < low || value > high)
4602 *(
int *)arg1 = value;
static u_int msk_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)
static driver_t msk_driver
static driver_t mskc_driver
static int msk_jumbo_newbuf(struct msk_if_softc *, int)
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int)
static devclass_t mskc_devclass
static void msk_set_tx_stfwd(struct msk_if_softc *)
static struct resource_spec msk_irq_spec_msi[]
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t)
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int)
static int msk_miibus_readreg(device_t, int, int)
static void msk_watchdog(struct msk_if_softc *)
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
static void msk_txeof(struct msk_if_softc *, int)
static void msk_miibus_statchg(device_t)
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int)
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int)
static int msk_phy_writereg(struct msk_if_softc *, int, int, int)
static void msk_intr_gmac(struct msk_if_softc *)
static void msk_init(void *)
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *)
static int msk_ioctl(struct ifnet *, u_long, caddr_t)
#define MSK_READ_MIB32(x, y)
static void msk_phy_power(struct msk_softc *, int)
static int mskc_suspend(device_t)
static int msk_miibus_writereg(device_t, int, int, int)
static int mskc_detach(device_t)
static __inline void msk_rxput(struct msk_if_softc *)
static void mskc_reset(struct msk_softc *)
static int msk_newbuf(struct msk_if_softc *, int)
static void msk_txrx_dma_free(struct msk_if_softc *)
static int msk_probe(device_t)
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
static void msk_tick(void *)
static void msk_rx_dma_jfree(struct msk_if_softc *)
static int msk_phy_readreg(struct msk_if_softc *, int, int)
static void msk_intr_hwerr(struct msk_softc *)
static int msk_mediachange(struct ifnet *)
static void msk_stats_update(struct msk_if_softc *)
static int msk_txrx_dma_alloc(struct msk_if_softc *)
static void msk_mediastatus(struct ifnet *, struct ifmediareq *)
static __inline void msk_fixup_rx(struct mbuf *)
static int mskc_probe(device_t)
static int mskc_shutdown(device_t)
static int mskc_attach(device_t)
static void msk_stats_clear(struct msk_if_softc *)
static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t)
static void msk_sysctl_node(struct msk_if_softc *)
#define MSK_READ_MIB64(x, y)
static void msk_init_tx_ring(struct msk_if_softc *)
static int msk_handle_events(struct msk_softc *)
static device_method_t mskc_methods[]
static void msk_rxfilter(struct msk_if_softc *)
static int msk_init_jumbo_rx_ring(struct msk_if_softc *)
static int mskc_resume(device_t)
static void msk_stop(struct msk_if_softc *)
static void msk_init_locked(struct msk_if_softc *)
static int msk_attach(device_t)
static device_method_t msk_methods[]
static void msk_start_locked(struct ifnet *)
static struct resource_spec msk_res_spec_io[]
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t)
DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL)
static void msk_set_rambuffer(struct msk_if_softc *)
static devclass_t msk_devclass
static void msk_status_dma_free(struct msk_softc *)
static void msk_start(struct ifnet *)
static void msk_intr(void *)
#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)
static const char * model_name[]
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int)
static int msk_detach(device_t)
static int mskc_setup_rambuffer(struct msk_softc *)
static int msk_init_rx_ring(struct msk_if_softc *)
static void msk_intr_phy(struct msk_if_softc *)
static int msk_rx_dma_jalloc(struct msk_if_softc *)
static int msk_status_dma_alloc(struct msk_softc *)
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
static struct resource_spec msk_irq_spec_legacy[]
static void msk_setvlan(struct msk_if_softc *, struct ifnet *)
#define MSK_CSUM_FEATURES
TUNABLE_INT("hw.msk.msi_disable", &msi_disable)
static int msk_rx_fill(struct msk_if_softc *, int)
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int)
static const struct msk_product msk_products[]
MODULE_DEPEND(msk, pci, 1, 1, 1)
static struct resource_spec msk_res_spec_mem[]
static int msk_encap(struct msk_if_softc *, struct mbuf **)
#define MSK_TSO_MAXSGSIZE
#define F_TX_CHK_AUTO_OFF
#define DEVICEID_DLINK_DGE560T
#define CSR_WRITE_2(sc, reg, val)
#define MSK_FLAG_NOHWVLAN
#define GM_GPCR_SPEED_100
#define GM_SMI_CT_PHY_AD(x)
#define DEVICEID_MRVL_8040T
#define GM_GPCR_FC_TX_DIS
#define Y2_CLK_GAT_LNK1_DIS
#define PCI_ASPM_CLKRUN_REQUEST
#define DEVICEID_MRVL_8040
#define MSK_IF_UNLOCK(_sc)
#define STAT_LIST_ADDR_HI
#define GM_GPCR_SPEED_1000
#define RB_ADDR(Queue, Offs)
#define MSK_IF_LOCK_ASSERT(_sc)
#define TX_IPG_JAM_DATA(x)
#define TX_JAM_IPG_VAL(x)
#define PREF_UNIT_GET_IDX_REG
#define PREF_UNIT_PUT_IDX_REG
#define DEVICEID_MRVL_4370
#define CSR_READ_1(sc, reg)
#define MSK_FLAG_JUMBO_NOCSUM
#define CHIP_REV_YU_XL_A1
#define PREF_UNIT_ADDR_HI_REG
#define MSK_RESERVED_TX_DESC_CNT
#define MSK_USECS(sc, us)
#define CHIP_REV_YU_SU_B0
#define DEVICEID_MRVL_8062X
#define PCI_CTL_TIM_VMAIN_AV_MSK
#define Y2_PCI_CLK_LNK2_DIS
#define MSK_JUMBO_RX_RING_SZ
#define DEVICEID_MRVL_4363
#define CSR_PCI_READ_4(sc, reg)
#define DEVICEID_SK_YUKON2
#define PREF_UNIT_ADDR_LOW_REG
#define CHIP_ID_YUKON_FE_P
#define MSK_FLAG_FASTETHER
#define MSK_FLAG_AUTOTX_CSUM
#define DEVICEID_MRVL_436C
#define CSR_WRITE_1(sc, reg, val)
#define MSK_JUMBO_RX_RING_CNT
#define Y2_STATUS_LNK2_INAC
#define CHIP_REV_YU_EC_U_A1
#define RX_GMF_FL_THR_DEF
#define TX_JAM_LEN_VAL(x)
#define DEVICEID_MRVL_4380
#define CHIP_REV_YU_EC_U_A0
#define PHY_M_IS_FIFO_ERROR
#define Y2_PCI_CLK_LNK1_DIS
#define PCI_ASPM_GPHY_LINK_DOWN
#define STAT_LIST_ADDR_LO
#define MSK_INT_HOLDOFF_DEFAULT
#define DEVICEID_MRVL_436B
#define DEVICEID_MRVL_8021X
#define DEVICEID_MRVL_4360
#define CHIP_REV_YU_FE_P_A0
#define DEVICEID_MRVL_8039
#define PCI_FORCE_ASPM_REQUEST
#define Y2_CLK_GAT_LNK2_DIS
#define DEVICEID_MRVL_8035
#define CHIP_REV_YU_XL_A0
#define GMC_BYP_MACSECRX_ON
#define DEVICEID_DLINK_DGE560SX
#define DEVICEID_MRVL_4365
#define CSR_WRITE_4(sc, reg, val)
#define GM_GPCR_AU_ALL_DIS
#define CSS_TCPUDP_CSUM_OK
#define PCI_CLK_MACSEC_DIS
#define DEVICEID_MRVL_8022X
#define DEVICEID_MRVL_8021CU
#define PCI_ASPM_INT_FIFO_EMPTY
#define GM_SMOD_JUMBO_ENA
#define DEVICEID_MRVL_436A
#define MR_ADDR(Mac, Offs)
#define DEVICEID_MRVL_4361
#define CSR_READ_4(sc, reg)
#define DEVICEID_MRVL_8062CU
#define GM_SMI_CT_REG_AD(x)
#define STAT_ISR_TIMER_INI
#define DEVICEID_MRVL_436D
#define Y2_ASF_HCU_CCSR_CPU_RST_MODE
#define CSR_PCI_WRITE_4(sc, reg, val)
#define Y2_PREF_Q_ADDR(Queue, Offs)
#define STAT_ISR_TIMER_CTRL
#define DEVICEID_DLINK_DGE550SX
#define DEVICEID_MRVL_8048
#define MSK_PHY_POWERDOWN
#define DEVICEID_MRVL_8061X
#define BMU_DIS_RX_CHKSUM
#define DEVICEID_MRVL_8061CU
#define Y2_COR_CLK_LNK1_DIS
#define PHY_MARV_INT_MASK
#define CHIP_ID_YUKON_EC_U
#define CHIP_ID_YUKON_UL_2
#define CHIP_REV_YU_EX_B0
#define PREF_UNIT_RST_CLR
#define Q_ADDR(Queue, Offs)
#define Y2_COR_CLK_LNK2_DIS
#define CHIP_REV_YU_EC_A1
#define CSR_READ_2(sc, reg)
#define GMAC_READ_2(sc, port, reg)
#define TST_CFG_WRITE_OFF
#define DEVICEID_MRVL_8042
#define Y2_ASF_HCU_CCSR_AHB_RST
#define BMU_ENA_RX_CHKSUM
#define CHIP_REV_YU_EX_A0
#define GLB_GPIO_STAT_RACE_DIS
#define B28_Y2_ASF_STAT_CMD
#define DEVICEID_SK_YUKON2_EXPR
#define DEVICEID_MRVL_4362
#define DEVICEID_MRVL_4381
#define TX_BACK_OFF_LIM(x)
#define PHY_MARV_INT_STAT
#define BMU_DIS_RX_RSS_HASH
#define DEVICEID_MRVL_4364
#define GMC_BYP_MACSECTX_ON
#define CHIP_ID_YUKON_UNKNOWN
#define DEVICEID_MRVL_8022CU
#define STAT_LEV_TIMER_CTRL
#define DEVICEID_MRVL_8038
#define B28_Y2_ASF_HCU_CCSR
#define GMF_RX_MACSEC_FLUSH_OFF
#define DATA_BLIND_VAL(x)
#define STAT_TX_TIMER_INI
#define PREF_UNIT_LAST_IDX_REG
#define MSK_FLAG_NORX_CSUM
#define DEVICEID_MRVL_8036
#define RX_VLAN_STRIP_OFF
#define CHIP_ID_YUKON_OPT
#define GMAC_WRITE_2(sc, port, reg, val)
#define PREF_UNIT_CTRL_REG
#define CHIP_ID_YUKON_SUPR
#define STAT_TX_TIMER_CTRL
#define GM_GPCR_FC_RX_DIS
#define PREF_UNIT_RST_SET
#define SELECT_RAM_BUFFER(rb, addr)
struct msk_txdesc msk_txdesc[MSK_TX_RING_CNT]
bus_dma_tag_t msk_jumbo_rx_tag
struct msk_rxdesc msk_jumbo_rxdesc[MSK_JUMBO_RX_RING_CNT]
bus_dmamap_t msk_tx_ring_map
bus_dmamap_t msk_jumbo_rx_sparemap
bus_dma_tag_t msk_parent_tag
bus_dma_tag_t msk_jumbo_rx_ring_tag
bus_dmamap_t msk_rx_sparemap
bus_dma_tag_t msk_rx_ring_tag
uint32_t msk_tx_high_addr
bus_dma_tag_t msk_tx_ring_tag
bus_dmamap_t msk_rx_ring_map
struct msk_rxdesc msk_rxdesc[MSK_RX_RING_CNT]
bus_dmamap_t msk_jumbo_rx_ring_map
uint32_t rx_pkts_1024_1518
uint32_t tx_pkts_512_1023
uint32_t tx_pkts_1024_1518
uint32_t tx_pkts_1519_max
uint32_t rx_pkts_too_long
uint32_t rx_pkts_512_1023
uint32_t rx_pkts_1519_max
struct msk_ring_data msk_rdata
struct msk_hw_stats msk_stats
struct callout msk_tick_ch
struct msk_chain_data msk_cdata
struct msk_softc * msk_softc
bus_addr_t msk_jumbo_rx_ring_paddr
bus_addr_t msk_tx_ring_paddr
bus_addr_t msk_rx_ring_paddr
struct msk_rx_desc * msk_jumbo_rx_ring
struct msk_tx_desc * msk_tx_ring
struct msk_rx_desc * msk_rx_ring
struct msk_rx_desc * rx_le
struct resource_spec * msk_res_spec
struct msk_if_softc * msk_if[2]
struct resource * msk_res[1]
struct resource * msk_irq[1]
bus_addr_t msk_stat_ring_paddr
bus_dmamap_t msk_stat_map
bus_dma_tag_t msk_stat_tag
struct msk_stat_desc * msk_stat_ring
struct resource_spec * msk_irq_spec
struct msk_tx_desc * tx_le