FreeBSD kernel IXGBE device code
if_ixv.c
1/******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35
36#include "opt_inet.h"
37#include "opt_inet6.h"
38#include "opt_rss.h"
39
40#include "ixgbe.h"
41#include "ifdi_if.h"
42
43#include <net/netmap.h>
44#include <dev/netmap/netmap_kern.h>
45
46/************************************************************************
47 * Driver version
48 ************************************************************************/
49char ixv_driver_version[] = "2.0.1-k";
50
51/************************************************************************
52 * PCI Device ID Table
53 *
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixv_strings
56 * Last entry must be all 0s
57 *
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 ************************************************************************/
60static pci_vendor_info_t ixv_vendor_info_array[] =
61{
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
67 /* required last entry */
68PVID_END
69};
70
71/************************************************************************
72 * Function prototypes
73 ************************************************************************/
74static void *ixv_register(device_t);
75static int ixv_if_attach_pre(if_ctx_t);
76static int ixv_if_attach_post(if_ctx_t);
77static int ixv_if_detach(if_ctx_t);
78
79static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
80static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
81static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
82static void ixv_if_queues_free(if_ctx_t);
83static void ixv_identify_hardware(if_ctx_t);
84static void ixv_init_device_features(struct ixgbe_softc *);
85static int ixv_allocate_pci_resources(if_ctx_t);
86static void ixv_free_pci_resources(if_ctx_t);
87static int ixv_setup_interface(if_ctx_t);
88static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
89static int ixv_if_media_change(if_ctx_t);
90static void ixv_if_update_admin_status(if_ctx_t);
91static int ixv_if_msix_intr_assign(if_ctx_t, int);
92
93static int ixv_if_mtu_set(if_ctx_t, uint32_t);
94static void ixv_if_init(if_ctx_t);
95static void ixv_if_local_timer(if_ctx_t, uint16_t);
96static void ixv_if_stop(if_ctx_t);
97static int ixv_negotiate_api(struct ixgbe_softc *);
98
99static void ixv_initialize_transmit_units(if_ctx_t);
100static void ixv_initialize_receive_units(if_ctx_t);
101static void ixv_initialize_rss_mapping(struct ixgbe_softc *);
102
103static void ixv_setup_vlan_support(if_ctx_t);
104static void ixv_configure_ivars(struct ixgbe_softc *);
105static void ixv_if_enable_intr(if_ctx_t);
106static void ixv_if_disable_intr(if_ctx_t);
107static void ixv_if_multi_set(if_ctx_t);
108
109static void ixv_if_register_vlan(if_ctx_t, u16);
110static void ixv_if_unregister_vlan(if_ctx_t, u16);
111
112static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
113static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);
114
115static void ixv_save_stats(struct ixgbe_softc *);
116static void ixv_init_stats(struct ixgbe_softc *);
117static void ixv_update_stats(struct ixgbe_softc *);
118static void ixv_add_stats_sysctls(struct ixgbe_softc *);
119
120static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
121static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);
122
123static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
124
125/* The MSI-X Interrupt handlers */
126static int ixv_msix_que(void *);
127static int ixv_msix_mbx(void *);
128
129/************************************************************************
130 * FreeBSD Device Interface Entry Points
131 ************************************************************************/
132static device_method_t ixv_methods[] = {
133 /* Device interface */
134 DEVMETHOD(device_register, ixv_register),
135 DEVMETHOD(device_probe, iflib_device_probe),
136 DEVMETHOD(device_attach, iflib_device_attach),
137 DEVMETHOD(device_detach, iflib_device_detach),
138 DEVMETHOD(device_shutdown, iflib_device_shutdown),
 139 DEVMETHOD_END
140};
141
142static driver_t ixv_driver = {
143 "ixv", ixv_methods, sizeof(struct ixgbe_softc),
144};
145
146devclass_t ixv_devclass;
147DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
148IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
149MODULE_DEPEND(ixv, iflib, 1, 1, 1);
150MODULE_DEPEND(ixv, pci, 1, 1, 1);
151MODULE_DEPEND(ixv, ether, 1, 1, 1);
152
153static device_method_t ixv_if_methods[] = {
154 DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
155 DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
156 DEVMETHOD(ifdi_detach, ixv_if_detach),
157 DEVMETHOD(ifdi_init, ixv_if_init),
158 DEVMETHOD(ifdi_stop, ixv_if_stop),
159 DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
160 DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
161 DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
162 DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
163 DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
164 DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
165 DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
166 DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
167 DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
168 DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
169 DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
170 DEVMETHOD(ifdi_media_status, ixv_if_media_status),
171 DEVMETHOD(ifdi_media_change, ixv_if_media_change),
172 DEVMETHOD(ifdi_timer, ixv_if_local_timer),
173 DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
174 DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
175 DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
176 DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
 177 DEVMETHOD_END
178};
179
180static driver_t ixv_if_driver = {
181 "ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
182};
183
184/*
185 * TUNEABLE PARAMETERS:
186 */
187
188/* Flow control setting, default to full */
189static int ixv_flow_control = ixgbe_fc_full;
190TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
191
192/*
193 * Header split: this causes the hardware to DMA
194 * the header into a separate mbuf from the payload,
195 * it can be a performance win in some workloads, but
 196 * in others it actually hurts; it is off by default.
197 */
198static int ixv_header_split = false;
199TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
200
201/*
202 * Shadow VFTA table, this is needed because
203 * the real filter table gets cleared during
204 * a soft reset and we need to repopulate it.
205 */
206static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
207extern struct if_txrx ixgbe_txrx;
208
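/*
 * Shared context returned to iflib by ixv_register(): it advertises the
 * DMA alignment, maximum frame/TSO sizes, descriptor ring bounds and
 * per-set ring counts that iflib uses when it creates the queues for
 * this VF driver (note IFLIB_IS_VF in isc_flags).
 */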
209static struct if_shared_ctx ixv_sctx_init = {
210 .isc_magic = IFLIB_MAGIC,
211 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
212 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
213 .isc_tx_maxsegsize = PAGE_SIZE,
214 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
215 .isc_tso_maxsegsize = PAGE_SIZE,
216 .isc_rx_maxsize = MJUM16BYTES,
217 .isc_rx_nsegments = 1,
218 .isc_rx_maxsegsize = MJUM16BYTES,
219 .isc_nfl = 1,
220 .isc_ntxqs = 1,
221 .isc_nrxqs = 1,
222 .isc_admin_intrcnt = 1,
223 .isc_vendor_info = ixv_vendor_info_array,
224 .isc_driver_version = ixv_driver_version,
225 .isc_driver = &ixv_if_driver,
226 .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
227
228 .isc_nrxd_min = {MIN_RXD},
229 .isc_ntxd_min = {MIN_TXD},
230 .isc_nrxd_max = {MAX_RXD},
231 .isc_ntxd_max = {MAX_TXD},
232 .isc_nrxd_default = {DEFAULT_RXD},
233 .isc_ntxd_default = {DEFAULT_TXD},
234};
235
236static void *
237ixv_register(device_t dev)
238{
239 return (&ixv_sctx_init);
240}
241
242/************************************************************************
243 * ixv_if_tx_queues_alloc
244 ************************************************************************/
245static int
246ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
247 int ntxqs, int ntxqsets)
248{
249 struct ixgbe_softc *sc = iflib_get_softc(ctx);
250 if_softc_ctx_t scctx = sc->shared;
251 struct ix_tx_queue *que;
252 int i, j, error;
253
254 MPASS(sc->num_tx_queues == ntxqsets);
255 MPASS(ntxqs == 1);
256
257 /* Allocate queue structure memory */
258 sc->tx_queues =
259 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
260 M_DEVBUF, M_NOWAIT | M_ZERO);
261 if (!sc->tx_queues) {
262 device_printf(iflib_get_dev(ctx),
263 "Unable to allocate TX ring memory\n");
264 return (ENOMEM);
265 }
266
267 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
268 struct tx_ring *txr = &que->txr;
269
270 txr->me = i;
271 txr->sc = que->sc = sc;
272
273 /* Allocate report status array */
274 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
275 error = ENOMEM;
276 goto fail;
277 }
278 for (j = 0; j < scctx->isc_ntxd[0]; j++)
279 txr->tx_rsq[j] = QIDX_INVALID;
280 /* get the virtual and physical address of the hardware queues */
281 txr->tail = IXGBE_VFTDT(txr->me);
282 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
283 txr->tx_paddr = paddrs[i*ntxqs];
284
285 txr->bytes = 0;
286 txr->total_packets = 0;
287
288 }
289
290 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
291 sc->num_tx_queues);
292
293 return (0);
294
295 fail:
 296 ixv_if_queues_free(ctx);
 297
298 return (error);
299} /* ixv_if_tx_queues_alloc */
300
301/************************************************************************
302 * ixv_if_rx_queues_alloc
303 ************************************************************************/
304static int
305ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
306 int nrxqs, int nrxqsets)
307{
308 struct ixgbe_softc *sc = iflib_get_softc(ctx);
309 struct ix_rx_queue *que;
310 int i, error;
311
312 MPASS(sc->num_rx_queues == nrxqsets);
313 MPASS(nrxqs == 1);
314
315 /* Allocate queue structure memory */
316 sc->rx_queues =
317 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
318 M_DEVBUF, M_NOWAIT | M_ZERO);
319 if (!sc->rx_queues) {
320 device_printf(iflib_get_dev(ctx),
 321 "Unable to allocate RX ring memory\n");
322 error = ENOMEM;
323 goto fail;
324 }
325
326 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
327 struct rx_ring *rxr = &que->rxr;
328 rxr->me = i;
329 rxr->sc = que->sc = sc;
330
331
332 /* get the virtual and physical address of the hw queues */
333 rxr->tail = IXGBE_VFRDT(rxr->me);
334 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
335 rxr->rx_paddr = paddrs[i*nrxqs];
336 rxr->bytes = 0;
337 rxr->que = que;
338 }
339
340 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
341 sc->num_rx_queues);
342
343 return (0);
344
345fail:
 346 ixv_if_queues_free(ctx);
 347
348 return (error);
349} /* ixv_if_rx_queues_alloc */
350
351/************************************************************************
352 * ixv_if_queues_free
353 ************************************************************************/
354static void
355ixv_if_queues_free(if_ctx_t ctx)
356{
357 struct ixgbe_softc *sc = iflib_get_softc(ctx);
358 struct ix_tx_queue *que = sc->tx_queues;
359 int i;
360
361 if (que == NULL)
362 goto free;
363
364 for (i = 0; i < sc->num_tx_queues; i++, que++) {
365 struct tx_ring *txr = &que->txr;
366 if (txr->tx_rsq == NULL)
367 break;
368
369 free(txr->tx_rsq, M_DEVBUF);
370 txr->tx_rsq = NULL;
371 }
372 if (sc->tx_queues != NULL)
373 free(sc->tx_queues, M_DEVBUF);
374free:
375 if (sc->rx_queues != NULL)
376 free(sc->rx_queues, M_DEVBUF);
377 sc->tx_queues = NULL;
378 sc->rx_queues = NULL;
379} /* ixv_if_queues_free */
380
381/************************************************************************
382 * ixv_if_attach_pre - Device initialization routine
383 *
384 * Called when the driver is being loaded.
385 * Identifies the type of hardware, allocates all resources
386 * and initializes the hardware.
387 *
388 * return 0 on success, positive on failure
389 ************************************************************************/
390static int
391ixv_if_attach_pre(if_ctx_t ctx)
392{
393 struct ixgbe_softc *sc;
394 device_t dev;
395 if_softc_ctx_t scctx;
396 struct ixgbe_hw *hw;
397 int error = 0;
398
399 INIT_DEBUGOUT("ixv_attach: begin");
400
401 /* Allocate, clear, and link in our sc structure */
402 dev = iflib_get_dev(ctx);
403 sc = iflib_get_softc(ctx);
404 sc->dev = dev;
405 sc->ctx = ctx;
406 sc->hw.back = sc;
407 scctx = sc->shared = iflib_get_softc_ctx(ctx);
408 sc->media = iflib_get_media(ctx);
409 hw = &sc->hw;
410
411 /* Do base PCI setup - map BAR0 */
 412 if (ixv_allocate_pci_resources(ctx)) {
 413 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
414 error = ENXIO;
415 goto err_out;
416 }
417
418 /* SYSCTL APIs */
419 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
420 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
421 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
422 sc, 0, ixv_sysctl_debug, "I", "Debug Info");
423
424 /* Determine hardware revision */
 425 ixv_identify_hardware(ctx);
 426 ixv_init_device_features(sc);
 427
428 /* Initialize the shared code */
429 error = ixgbe_init_ops_vf(hw);
430 if (error) {
431 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
432 error = EIO;
433 goto err_out;
434 }
435
436 /* Setup the mailbox */
 437 ixgbe_init_mbx_params_vf(hw);
 438
439 error = hw->mac.ops.reset_hw(hw);
440 if (error == IXGBE_ERR_RESET_FAILED)
441 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
442 else if (error)
443 device_printf(dev, "...reset_hw() failed with error %d\n",
444 error);
445 if (error) {
446 error = EIO;
447 goto err_out;
448 }
449
450 error = hw->mac.ops.init_hw(hw);
451 if (error) {
452 device_printf(dev, "...init_hw() failed with error %d\n",
453 error);
454 error = EIO;
455 goto err_out;
456 }
457
458 /* Negotiate mailbox API version */
459 error = ixv_negotiate_api(sc);
460 if (error) {
461 device_printf(dev,
462 "Mailbox API negotiation failed during attach!\n");
463 goto err_out;
464 }
465
466 /* Check if VF was disabled by PF */
467 error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
468 if (error) {
469 /* PF is not capable of controlling VF state. Enable the link. */
470 sc->link_enabled = true;
471 }
472
473 /* If no mac address was assigned, make a random one */
474 if (!ixv_check_ether_addr(hw->mac.addr)) {
475 ether_gen_addr(iflib_get_ifp(ctx),
476 (struct ether_addr *)hw->mac.addr);
477 bcopy(hw->mac.addr, hw->mac.perm_addr,
478 sizeof(hw->mac.perm_addr));
479 }
480
481 /* Most of the iflib initialization... */
482
483 iflib_set_mac(ctx, hw->mac.addr);
 484 switch (sc->hw.mac.type) {
 485 case ixgbe_mac_X550_vf:
 486 case ixgbe_mac_X550EM_x_vf:
 487 case ixgbe_mac_X550EM_a_vf:
 488 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
489 break;
490 default:
491 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
492 }
493 scctx->isc_txqsizes[0] =
494 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
495 sizeof(u32), DBA_ALIGN);
496 scctx->isc_rxqsizes[0] =
497 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
498 DBA_ALIGN);
499 /* XXX */
500 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
501 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
502 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
503 scctx->isc_msix_bar = pci_msix_table_bar(dev);
504 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
505 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
506 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
507
508 scctx->isc_txrx = &ixgbe_txrx;
509
510 /*
511 * Tell the upper layer(s) we support everything the PF
512 * driver does except...
513 * Wake-on-LAN
514 */
515 scctx->isc_capabilities = IXGBE_CAPS;
516 scctx->isc_capabilities ^= IFCAP_WOL;
517 scctx->isc_capenable = scctx->isc_capabilities;
518
519 INIT_DEBUGOUT("ixv_if_attach_pre: end");
520
521 return (0);
522
523err_out:
 524 ixv_free_pci_resources(ctx);
 525
526 return (error);
527} /* ixv_if_attach_pre */
528
529static int
530ixv_if_attach_post(if_ctx_t ctx)
531{
532 struct ixgbe_softc *sc = iflib_get_softc(ctx);
533 device_t dev = iflib_get_dev(ctx);
534 int error = 0;
535
536 /* Setup OS specific network interface */
537 error = ixv_setup_interface(ctx);
538 if (error) {
539 device_printf(dev, "Interface setup failed: %d\n", error);
540 goto end;
541 }
542
543 /* Do the stats setup */
544 ixv_save_stats(sc);
545 ixv_init_stats(sc);
 546 ixv_add_stats_sysctls(sc);
 547
548end:
549 return error;
550} /* ixv_if_attach_post */
551
552/************************************************************************
553 * ixv_detach - Device removal routine
554 *
555 * Called when the driver is being removed.
556 * Stops the adapter and deallocates all the resources
557 * that were allocated for driver operation.
558 *
559 * return 0 on success, positive on failure
560 ************************************************************************/
561static int
562ixv_if_detach(if_ctx_t ctx)
563{
564 INIT_DEBUGOUT("ixv_detach: begin");
565
 566 ixv_free_pci_resources(ctx);
 567
568 return (0);
569} /* ixv_if_detach */
570
571/************************************************************************
572 * ixv_if_mtu_set
573 ************************************************************************/
574static int
575ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
576{
577 struct ixgbe_softc *sc = iflib_get_softc(ctx);
578 struct ifnet *ifp = iflib_get_ifp(ctx);
579 int error = 0;
580
581 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
 582 if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
 583 error = EINVAL;
584 } else {
585 ifp->if_mtu = mtu;
586 sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
587 }
588
589 return error;
590} /* ixv_if_mtu_set */
591
592/************************************************************************
593 * ixv_if_init - Init entry point
594 *
595 * Used in two ways: It is used by the stack as an init entry
596 * point in network interface structure. It is also used
597 * by the driver as a hw/sw initialization routine to get
598 * to a consistent state.
599 *
600 * return 0 on success, positive on failure
601 ************************************************************************/
602static void
603ixv_if_init(if_ctx_t ctx)
604{
605 struct ixgbe_softc *sc = iflib_get_softc(ctx);
606 struct ifnet *ifp = iflib_get_ifp(ctx);
607 device_t dev = iflib_get_dev(ctx);
608 struct ixgbe_hw *hw = &sc->hw;
609 int error = 0;
610
611 INIT_DEBUGOUT("ixv_if_init: begin");
612 hw->adapter_stopped = false;
613 hw->mac.ops.stop_adapter(hw);
614
615 /* reprogram the RAR[0] in case user changed it. */
616 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
617
618 /* Get the latest mac address, User can use a LAA */
619 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
620 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
621
622 /* Reset VF and renegotiate mailbox API version */
623 hw->mac.ops.reset_hw(hw);
624 hw->mac.ops.start_hw(hw);
625 error = ixv_negotiate_api(sc);
626 if (error) {
627 device_printf(dev,
628 "Mailbox API negotiation failed in if_init!\n");
629 return;
630 }
631
 632 ixv_initialize_transmit_units(ctx);
 633
634 /* Setup Multicast table */
635 ixv_if_multi_set(ctx);
636
637 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
638
639 /* Configure RX settings */
 640 ixv_initialize_receive_units(ctx);
 641
642 /* Set up VLAN offload and filter */
 643 ixv_setup_vlan_support(ctx);
 644
645 /* Set up MSI-X routing */
 646 ixv_configure_ivars(sc);
 647
648 /* Set up auto-mask */
 649 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
 650
651 /* Set moderation on the Link interrupt */
 652 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);
 653
654 /* Stats init */
655 ixv_init_stats(sc);
656
657 /* Config/Enable Link */
658 error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
659 if (error) {
660 /* PF is not capable of controlling VF state. Enable the link. */
661 sc->link_enabled = true;
662 } else if (sc->link_enabled == false)
663 device_printf(dev, "VF is disabled by PF\n");
664
665 hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
666 false);
667
668 /* And now turn on interrupts */
 669 ixv_if_enable_intr(ctx);
 670
671 return;
672} /* ixv_if_init */
673
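/*
 * VTEIMS/VTEIMC carry one bit per MSI-X vector; the two helpers below
 * touch only the bit belonging to the given queue vector, so a queue
 * interrupt can be masked in ixv_msix_que() while the actual work is
 * deferred to the iflib task (FILTER_SCHEDULE_THREAD).
 */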
674/************************************************************************
675 * ixv_enable_queue
676 ************************************************************************/
677static inline void
678ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
679{
680 struct ixgbe_hw *hw = &sc->hw;
681 u32 queue = 1 << vector;
682 u32 mask;
683
684 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
685 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
686} /* ixv_enable_queue */
687
688/************************************************************************
689 * ixv_disable_queue
690 ************************************************************************/
691static inline void
692ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
693{
694 struct ixgbe_hw *hw = &sc->hw;
695 u64 queue = (u64)(1 << vector);
696 u32 mask;
697
698 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
699 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
700} /* ixv_disable_queue */
701
702
703/************************************************************************
704 * ixv_msix_que - MSI-X Queue Interrupt Service routine
705 ************************************************************************/
706static int
707ixv_msix_que(void *arg)
708{
709 struct ix_rx_queue *que = arg;
710 struct ixgbe_softc *sc = que->sc;
711
712 ixv_disable_queue(sc, que->msix);
713 ++que->irqs;
714
715 return (FILTER_SCHEDULE_THREAD);
716} /* ixv_msix_que */
717
718/************************************************************************
719 * ixv_msix_mbx
720 ************************************************************************/
721static int
722ixv_msix_mbx(void *arg)
723{
724 struct ixgbe_softc *sc = arg;
725 struct ixgbe_hw *hw = &sc->hw;
726 u32 reg;
727
728 ++sc->link_irq;
729
730 /* First get the cause */
731 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
732 /* Clear interrupt with write */
 733 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
 734
735 /* Link status change */
736 if (reg & IXGBE_EICR_LSC)
737 iflib_admin_intr_deferred(sc->ctx);
738
 739 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
 740
741 return (FILTER_HANDLED);
742} /* ixv_msix_mbx */
743
744/************************************************************************
745 * ixv_media_status - Media Ioctl callback
746 *
747 * Called whenever the user queries the status of
748 * the interface using ifconfig.
749 ************************************************************************/
750static void
751ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
752{
753 struct ixgbe_softc *sc = iflib_get_softc(ctx);
754
755 INIT_DEBUGOUT("ixv_media_status: begin");
756
757 iflib_admin_intr_deferred(ctx);
758
759 ifmr->ifm_status = IFM_AVALID;
760 ifmr->ifm_active = IFM_ETHER;
761
762 if (!sc->link_active)
763 return;
764
765 ifmr->ifm_status |= IFM_ACTIVE;
766
 767 switch (sc->link_speed) {
 768 case IXGBE_LINK_SPEED_1GB_FULL:
 769 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
 770 break;
 771 case IXGBE_LINK_SPEED_10GB_FULL:
 772 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
 773 break;
 774 case IXGBE_LINK_SPEED_100_FULL:
 775 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
 776 break;
 777 case IXGBE_LINK_SPEED_10_FULL:
 778 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
 779 break;
780 }
781} /* ixv_if_media_status */
782
783/************************************************************************
784 * ixv_if_media_change - Media Ioctl callback
785 *
786 * Called when the user changes speed/duplex using
 787 * media/mediaopt option with ifconfig.
788 ************************************************************************/
789static int
790ixv_if_media_change(if_ctx_t ctx)
791{
792 struct ixgbe_softc *sc = iflib_get_softc(ctx);
793 struct ifmedia *ifm = iflib_get_media(ctx);
794
795 INIT_DEBUGOUT("ixv_media_change: begin");
796
797 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
798 return (EINVAL);
799
800 switch (IFM_SUBTYPE(ifm->ifm_media)) {
801 case IFM_AUTO:
802 break;
803 default:
804 device_printf(sc->dev, "Only auto media type\n");
805 return (EINVAL);
806 }
807
808 return (0);
809} /* ixv_if_media_change */
810
811
812/************************************************************************
813 * ixv_negotiate_api
814 *
815 * Negotiate the Mailbox API with the PF;
816 * start with the most featured API first.
817 ************************************************************************/
818static int
819ixv_negotiate_api(struct ixgbe_softc *sc)
820{
821 struct ixgbe_hw *hw = &sc->hw;
822 int mbx_api[] = { ixgbe_mbox_api_12,
 823 ixgbe_mbox_api_11,
 824 ixgbe_mbox_api_10,
 825 ixgbe_mbox_api_unknown };
 826 int i = 0;
827
828 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
829 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
830 return (0);
831 i++;
832 }
833
834 return (EINVAL);
835} /* ixv_negotiate_api */
836
837
838/************************************************************************
839 * ixv_if_multi_set - Multicast Update
840 *
841 * Called whenever multicast address list is updated.
842 ************************************************************************/
843static void
844ixv_if_multi_set(if_ctx_t ctx)
845{
847 struct ixgbe_softc *sc = iflib_get_softc(ctx);
848 u8 *update_ptr;
849 struct ifmultiaddr *ifma;
850 if_t ifp = iflib_get_ifp(ctx);
851 int mcnt = 0;
852
853 IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
854
855 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
856 if (ifma->ifma_addr->sa_family != AF_LINK)
857 continue;
858 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
859 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
 860 IXGBE_ETH_LENGTH_OF_ADDRESS);
 861 mcnt++;
862 }
863
864 update_ptr = mta;
865
866 sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
867 ixv_mc_array_itr, true);
868} /* ixv_if_multi_set */
869
870/************************************************************************
871 * ixv_mc_array_itr
872 *
873 * An iterator function needed by the multicast shared code.
874 * It feeds the shared code routine the addresses in the
875 * array of ixv_set_multi() one by one.
876 ************************************************************************/
877static u8 *
878ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
879{
880 u8 *addr = *update_ptr;
881 u8 *newptr;
882
883 *vmdq = 0;
884
885 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
886 *update_ptr = newptr;
887
888 return addr;
889} /* ixv_mc_array_itr */
890
891/************************************************************************
892 * ixv_if_local_timer - Timer routine
893 *
894 * Checks for link status, updates statistics,
895 * and runs the watchdog check.
896 ************************************************************************/
897static void
898ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
899{
900 if (qid != 0)
901 return;
902
903 /* Fire off the adminq task */
904 iflib_admin_intr_deferred(ctx);
905} /* ixv_if_local_timer */
906
907/************************************************************************
908 * ixv_if_update_admin_status - Update OS on link state
909 *
910 * Note: Only updates the OS on the cached link state.
911 * The real check of the hardware only happens with
912 * a link interrupt.
913 ************************************************************************/
914static void
915ixv_if_update_admin_status(if_ctx_t ctx)
916{
917 struct ixgbe_softc *sc = iflib_get_softc(ctx);
918 device_t dev = iflib_get_dev(ctx);
919 s32 status;
920
921 sc->hw.mac.get_link_status = true;
922
923 status = ixgbe_check_link(&sc->hw, &sc->link_speed,
924 &sc->link_up, false);
925
926 if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
927 /* Mailbox's Clear To Send status is lost or timeout occurred.
928 * We need reinitialization. */
929 iflib_get_ifp(ctx)->if_init(ctx);
930 }
931
932 if (sc->link_up && sc->link_enabled) {
933 if (sc->link_active == false) {
934 if (bootverbose)
935 device_printf(dev, "Link is up %d Gbps %s \n",
936 ((sc->link_speed == 128) ? 10 : 1),
937 "Full Duplex");
938 sc->link_active = true;
939 iflib_link_state_change(ctx, LINK_STATE_UP,
940 IF_Gbps(10));
941 }
942 } else { /* Link down */
943 if (sc->link_active == true) {
944 if (bootverbose)
945 device_printf(dev, "Link is Down\n");
946 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
947 sc->link_active = false;
948 }
949 }
950
951 /* Stats Update */
 952 ixv_update_stats(sc);
 953} /* ixv_if_update_admin_status */
954
955
956/************************************************************************
957 * ixv_if_stop - Stop the hardware
958 *
959 * Disables all traffic on the adapter by issuing a
960 * global reset on the MAC and deallocates TX/RX buffers.
961 ************************************************************************/
962static void
963ixv_if_stop(if_ctx_t ctx)
964{
965 struct ixgbe_softc *sc = iflib_get_softc(ctx);
966 struct ixgbe_hw *hw = &sc->hw;
967
968 INIT_DEBUGOUT("ixv_stop: begin\n");
969
 970 ixv_if_disable_intr(ctx);
 971
972 hw->mac.ops.reset_hw(hw);
973 sc->hw.adapter_stopped = false;
974 hw->mac.ops.stop_adapter(hw);
975
976 /* Update the stack */
977 sc->link_up = false;
 978 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
 979
980 /* reprogram the RAR[0] in case user changed it. */
981 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
982} /* ixv_if_stop */
983
984
985/************************************************************************
986 * ixv_identify_hardware - Determine hardware revision.
987 ************************************************************************/
988static void
989ixv_identify_hardware(if_ctx_t ctx)
990{
991 struct ixgbe_softc *sc = iflib_get_softc(ctx);
992 device_t dev = iflib_get_dev(ctx);
993 struct ixgbe_hw *hw = &sc->hw;
994
995 /* Save off the information about this board */
996 hw->vendor_id = pci_get_vendor(dev);
997 hw->device_id = pci_get_device(dev);
998 hw->revision_id = pci_get_revid(dev);
999 hw->subsystem_vendor_id = pci_get_subvendor(dev);
1000 hw->subsystem_device_id = pci_get_subdevice(dev);
1001
1002 /* A subset of set_mac_type */
 1002 /* A subset of set_mac_type */
 1003 switch (hw->device_id) {
 1004 case IXGBE_DEV_ID_82599_VF:
 1005 hw->mac.type = ixgbe_mac_82599_vf;
 1006 break;
 1007 case IXGBE_DEV_ID_X540_VF:
 1008 hw->mac.type = ixgbe_mac_X540_vf;
 1009 break;
 1010 case IXGBE_DEV_ID_X550_VF:
 1011 hw->mac.type = ixgbe_mac_X550_vf;
 1012 break;
 1013 case IXGBE_DEV_ID_X550EM_X_VF:
 1014 hw->mac.type = ixgbe_mac_X550EM_x_vf;
 1015 break;
 1016 case IXGBE_DEV_ID_X550EM_A_VF:
 1017 hw->mac.type = ixgbe_mac_X550EM_a_vf;
 1018 break;
 1019 default:
 1020 device_printf(dev, "unknown mac type\n");
 1021 hw->mac.type = ixgbe_mac_unknown;
 1022 break;
 1023 }
1024} /* ixv_identify_hardware */
1025
1026/************************************************************************
1027 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1028 ************************************************************************/
1029static int
1030ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1031{
1032 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1033 device_t dev = iflib_get_dev(ctx);
1034 struct ix_rx_queue *rx_que = sc->rx_queues;
1035 struct ix_tx_queue *tx_que;
1036 int error, rid, vector = 0;
1037 char buf[16];
1038
1039 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
1040 rid = vector + 1;
1041
1042 snprintf(buf, sizeof(buf), "rxq%d", i);
1043 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1044 IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1045
1046 if (error) {
1047 device_printf(iflib_get_dev(ctx),
1048 "Failed to allocate que int %d err: %d", i, error);
1049 sc->num_rx_queues = i + 1;
1050 goto fail;
1051 }
1052
1053 rx_que->msix = vector;
1054 }
1055
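 /*
 * TX queues do not get dedicated MSI-X vectors; each TX queue is
 * paired with an RX vector (i % num_rx_queues) and its completion
 * work runs as an iflib softirq on that shared interrupt.
 */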
1056 for (int i = 0; i < sc->num_tx_queues; i++) {
1057 snprintf(buf, sizeof(buf), "txq%d", i);
1058 tx_que = &sc->tx_queues[i];
1059 tx_que->msix = i % sc->num_rx_queues;
1060 iflib_softirq_alloc_generic(ctx,
1061 &sc->rx_queues[tx_que->msix].que_irq,
1062 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1063 }
1064 rid = vector + 1;
1065 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
1066 IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
1067 if (error) {
1068 device_printf(iflib_get_dev(ctx),
1069 "Failed to register admin handler");
1070 return (error);
1071 }
1072
1073 sc->vector = vector;
1074 /*
1075 * Due to a broken design QEMU will fail to properly
1076 * enable the guest for MSIX unless the vectors in
1077 * the table are all set up, so we must rewrite the
1078 * ENABLE in the MSIX control register again at this
1079 * point to cause it to successfully initialize us.
1080 */
1081 if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
1082 int msix_ctrl;
1083 pci_find_cap(dev, PCIY_MSIX, &rid);
1084 rid += PCIR_MSIX_CTRL;
1085 msix_ctrl = pci_read_config(dev, rid, 2);
1086 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1087 pci_write_config(dev, rid, msix_ctrl, 2);
1088 }
1089
1090 return (0);
1091
1092fail:
1093 iflib_irq_free(ctx, &sc->irq);
1094 rx_que = sc->rx_queues;
1095 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
1096 iflib_irq_free(ctx, &rx_que->que_irq);
1097
1098 return (error);
1099} /* ixv_if_msix_intr_assign */
1100
1101/************************************************************************
1102 * ixv_allocate_pci_resources
1103 ************************************************************************/
1104static int
1105ixv_allocate_pci_resources(if_ctx_t ctx)
1106{
1107 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1108 device_t dev = iflib_get_dev(ctx);
1109 int rid;
1110
1111 rid = PCIR_BAR(0);
1112 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1113 RF_ACTIVE);
1114
1115 if (!(sc->pci_mem)) {
1116 device_printf(dev, "Unable to allocate bus resource: memory\n");
1117 return (ENXIO);
1118 }
1119
1120 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
 1121 sc->osdep.mem_bus_space_handle =
 1122 rman_get_bushandle(sc->pci_mem);
1123 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
1124
1125 return (0);
1126} /* ixv_allocate_pci_resources */
1127
1128/************************************************************************
1129 * ixv_free_pci_resources
1130 ************************************************************************/
1131static void
1132ixv_free_pci_resources(if_ctx_t ctx)
1133{
1134 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1135 struct ix_rx_queue *que = sc->rx_queues;
1136 device_t dev = iflib_get_dev(ctx);
1137
1138 /* Release all MSI-X queue resources */
1139 if (sc->intr_type == IFLIB_INTR_MSIX)
1140 iflib_irq_free(ctx, &sc->irq);
1141
1142 if (que != NULL) {
1143 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
1144 iflib_irq_free(ctx, &que->que_irq);
1145 }
1146 }
1147
1148 if (sc->pci_mem != NULL)
1149 bus_release_resource(dev, SYS_RES_MEMORY,
1150 rman_get_rid(sc->pci_mem), sc->pci_mem);
1151} /* ixv_free_pci_resources */
1152
1153/************************************************************************
1154 * ixv_setup_interface
1155 *
1156 * Setup networking device structure and register an interface.
1157 ************************************************************************/
1158static int
1159ixv_setup_interface(if_ctx_t ctx)
1160{
1161 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1162 if_softc_ctx_t scctx = sc->shared;
1163 struct ifnet *ifp = iflib_get_ifp(ctx);
1164
1165 INIT_DEBUGOUT("ixv_setup_interface: begin");
1166
1167 if_setbaudrate(ifp, IF_Gbps(10));
1168 ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1169
1170
1171 sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1172 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1173 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1174
1175 return 0;
1176} /* ixv_setup_interface */
1177
1178/************************************************************************
1179 * ixv_if_get_counter
1180 ************************************************************************/
1181static uint64_t
1182ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1183{
1184 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1185 if_t ifp = iflib_get_ifp(ctx);
1186
1187 switch (cnt) {
1188 case IFCOUNTER_IPACKETS:
1189 return (sc->ipackets);
1190 case IFCOUNTER_OPACKETS:
1191 return (sc->opackets);
1192 case IFCOUNTER_IBYTES:
1193 return (sc->ibytes);
1194 case IFCOUNTER_OBYTES:
1195 return (sc->obytes);
1196 case IFCOUNTER_IMCASTS:
1197 return (sc->imcasts);
1198 default:
1199 return (if_get_counter_default(ifp, cnt));
1200 }
1201} /* ixv_if_get_counter */
1202
1203/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1204 * @ctx: iflib context
1205 * @event: event code to check
1206 *
1207 * Defaults to returning true for every event.
1208 *
1209 * @returns true if iflib needs to reinit the interface
1210 */
1211static bool
1212ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1213{
1214 switch (event) {
1215 case IFLIB_RESTART_VLAN_CONFIG:
1216 /* XXX: This may not need to return true */
1217 default:
1218 return (true);
1219 }
1220}
1221
1222/************************************************************************
1223 * ixv_initialize_transmit_units - Enable transmit unit.
1224 ************************************************************************/
1225static void
1226ixv_initialize_transmit_units(if_ctx_t ctx)
1227{
1228 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1229 struct ixgbe_hw *hw = &sc->hw;
1230 if_softc_ctx_t scctx = sc->shared;
1231 struct ix_tx_queue *que = sc->tx_queues;
1232 int i;
1233
1234 for (i = 0; i < sc->num_tx_queues; i++, que++) {
1235 struct tx_ring *txr = &que->txr;
1236 u64 tdba = txr->tx_paddr;
1237 u32 txctrl, txdctl;
1238 int j = txr->me;
1239
1240 /* Set WTHRESH to 8, burst writeback */
1241 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1242 txdctl |= (8 << 16);
1243 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1244
1245 /* Set the HW Tx Head and Tail indices */
1246 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
1247 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);
1248
1249 /* Set Tx Tail register */
1250 txr->tail = IXGBE_VFTDT(j);
1251
1252 txr->tx_rs_cidx = txr->tx_rs_pidx;
1253 /* Initialize the last processed descriptor to be the end of
1254 * the ring, rather than the start, so that we avoid an
1255 * off-by-one error when calculating how many descriptors are
1256 * done in the credits_update function.
1257 */
1258 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1259 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1260 txr->tx_rsq[k] = QIDX_INVALID;
1261
1262 /* Set Ring parameters */
 1263 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
 1264 (tdba & 0x00000000ffffffffULL));
 1265 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
 1266 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
 1267 scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1268 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1269 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1270 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1271
1272 /* Now enable */
1273 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1274 txdctl |= IXGBE_TXDCTL_ENABLE;
1275 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1276 }
1277
1278 return;
1279} /* ixv_initialize_transmit_units */
1280
1281/************************************************************************
1282 * ixv_initialize_rss_mapping
1283 ************************************************************************/
1284static void
1285ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
1286{
1287 struct ixgbe_hw *hw = &sc->hw;
1288 u32 reta = 0, mrqc, rss_key[10];
1289 int queue_id;
1290 int i, j;
1291 u32 rss_hash_config;
1292
1293 if (sc->feat_en & IXGBE_FEATURE_RSS) {
1294 /* Fetch the configured RSS key */
1295 rss_getkey((uint8_t *)&rss_key);
1296 } else {
1297 /* set up random bits */
1298 arc4rand(&rss_key, sizeof(rss_key), 0);
1299 }
1300
1301 /* Now fill out hash function seeds */
1302 for (i = 0; i < 10; i++)
1303 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1304
1305 /* Set up the redirection table */
1306 for (i = 0, j = 0; i < 64; i++, j++) {
1307 if (j == sc->num_rx_queues)
1308 j = 0;
1309
1310 if (sc->feat_en & IXGBE_FEATURE_RSS) {
1311 /*
1312 * Fetch the RSS bucket id for the given indirection
1313 * entry. Cap it at the number of configured buckets
1314 * (which is num_rx_queues.)
1315 */
1316 queue_id = rss_get_indirection_to_bucket(i);
1317 queue_id = queue_id % sc->num_rx_queues;
1318 } else
1319 queue_id = j;
1320
1321 /*
1322 * The low 8 bits are for hash value (n+0);
1323 * The next 8 bits are for hash value (n+1), etc.
1324 */
1325 reta >>= 8;
1326 reta |= ((uint32_t)queue_id) << 24;
1327 if ((i & 3) == 3) {
1328 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1329 reta = 0;
1330 }
1331 }
1332
1333 /* Perform hash on these packet types */
1334 if (sc->feat_en & IXGBE_FEATURE_RSS)
1335 rss_hash_config = rss_gethashconfig();
1336 else {
1337 /*
1338 * Disable UDP - IP fragments aren't currently being handled
1339 * and so we end up with a mix of 2-tuple and 4-tuple
1340 * traffic.
1341 */
1342 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
 1343 | RSS_HASHTYPE_RSS_TCP_IPV4
 1344 | RSS_HASHTYPE_RSS_IPV6
 1345 | RSS_HASHTYPE_RSS_TCP_IPV6;
 1346 }
1347
1348 mrqc = IXGBE_MRQC_RSSEN;
 1349 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
 1350 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
 1351 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
 1352 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
 1353 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
 1354 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
 1355 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
 1356 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 1357 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1358 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1359 __func__);
1360 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1361 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1362 __func__);
 1363 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
 1364 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
 1365 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
 1366 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 1367 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1368 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1369 __func__);
1370 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1371} /* ixv_initialize_rss_mapping */
1372
1373
1374/************************************************************************
1375 * ixv_initialize_receive_units - Setup receive registers and features.
1376 ************************************************************************/
1377static void
1378ixv_initialize_receive_units(if_ctx_t ctx)
1379{
1380 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1381 if_softc_ctx_t scctx;
1382 struct ixgbe_hw *hw = &sc->hw;
1383 struct ifnet *ifp = iflib_get_ifp(ctx);
1384 struct ix_rx_queue *que = sc->rx_queues;
1385 u32 bufsz, psrtype;
1386
1387 if (ifp->if_mtu > ETHERMTU)
1388 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1389 else
1390 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1391
1392 psrtype = IXGBE_PSRTYPE_TCPHDR
 1393 | IXGBE_PSRTYPE_UDPHDR
 1394 | IXGBE_PSRTYPE_IPV4HDR
 1395 | IXGBE_PSRTYPE_IPV6HDR
 1396 | IXGBE_PSRTYPE_L2HDR;
 1397
1398 if (sc->num_rx_queues > 1)
1399 psrtype |= 1 << 29;
1400
1401 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1402
1403 /* Tell PF our max_frame size */
1404 if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
1405 device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1406 }
1407 scctx = sc->shared;
1408
1409 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
1410 struct rx_ring *rxr = &que->rxr;
1411 u64 rdba = rxr->rx_paddr;
1412 u32 reg, rxdctl;
1413 int j = rxr->me;
1414
1415 /* Disable the queue */
1416 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1417 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1418 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1419 for (int k = 0; k < 10; k++) {
1420 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
 1421 IXGBE_RXDCTL_ENABLE)
 1422 msec_delay(1);
1423 else
1424 break;
1425 }
1426 wmb();
1427 /* Setup the Base and Length of the Rx Descriptor Ring */
 1428 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
 1429 (rdba & 0x00000000ffffffffULL));
 1430 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
 1431 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
 1432 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1433
1434 /* Reset the ring indices */
1435 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1436 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1437
1438 /* Set up the SRRCTL register */
1439 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1440 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1441 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1442 reg |= bufsz;
 1443 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 1444 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1445
1446 /* Capture Rx Tail index */
1447 rxr->tail = IXGBE_VFRDT(rxr->me);
1448
1449 /* Do the queue enabling last */
 1450 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
 1451 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1452 for (int l = 0; l < 10; l++) {
1453 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
 1454 IXGBE_RXDCTL_ENABLE)
 1455 break;
1456 msec_delay(1);
1457 }
1458 wmb();
1459
1460 /* Set the Tail Pointer */
1461#ifdef DEV_NETMAP
1462 /*
1463 * In netmap mode, we must preserve the buffers made
1464 * available to userspace before the if_init()
1465 * (this is true by default on the TX side, because
1466 * init makes all buffers available to userspace).
1467 *
1468 * netmap_reset() and the device specific routines
1469 * (e.g. ixgbe_setup_receive_rings()) map these
1470 * buffers at the end of the NIC ring, so here we
1471 * must set the RDT (tail) register to make sure
1472 * they are not overwritten.
1473 *
1474 * In this driver the NIC ring starts at RDH = 0,
1475 * RDT points to the last slot available for reception (?),
1476 * so RDT = num_rx_desc - 1 means the whole ring is available.
1477 */
1478 if (ifp->if_capenable & IFCAP_NETMAP) {
1479 struct netmap_adapter *na = NA(ifp);
1480 struct netmap_kring *kring = na->rx_rings[j];
1481 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1482
1483 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1484 } else
1485#endif /* DEV_NETMAP */
1486 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1487 scctx->isc_nrxd[0] - 1);
1488 }
1489
1490 /*
1491 * Do not touch RSS and RETA settings for older hardware
1492 * as those are shared among PF and all VF.
1493 */
1494 if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
 1495 ixv_initialize_rss_mapping(sc);
 1496} /* ixv_initialize_receive_units */
1497
1498/************************************************************************
1499 * ixv_setup_vlan_support
1500 ************************************************************************/
1501static void
1502ixv_setup_vlan_support(if_ctx_t ctx)
1503{
1504 struct ifnet *ifp = iflib_get_ifp(ctx);
1505 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1506 struct ixgbe_hw *hw = &sc->hw;
1507 u32 ctrl, vid, vfta, retry;
1508
1509 /*
 1510 * We get here through if_init, meaning
 1511 * a soft reset; this has already cleared
 1512 * the VFTA and other state, so if no
 1513 * VLANs have been registered, do nothing.
1514 */
1515 if (sc->num_vlans == 0)
1516 return;
1517
1518 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1519 /* Enable the queues */
1520 for (int i = 0; i < sc->num_rx_queues; i++) {
1521 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1522 ctrl |= IXGBE_RXDCTL_VME;
1523 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1524 /*
1525 * Let Rx path know that it needs to store VLAN tag
1526 * as part of extra mbuf info.
1527 */
1528 sc->rx_queues[i].rxr.vtag_strip = true;
1529 }
1530 }
1531
1532 /*
1533 * If filtering VLAN tags is disabled,
1534 * there is no need to fill VLAN Filter Table Array (VFTA).
1535 */
1536 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1537 return;
1538
1539 /*
 1540 * A soft reset zeroes out the VFTA, so
1541 * we need to repopulate it now.
1542 */
1543 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1544 if (ixv_shadow_vfta[i] == 0)
1545 continue;
1546 vfta = ixv_shadow_vfta[i];
1547 /*
1548 * Reconstruct the vlan id's
1549 * based on the bits set in each
1550 * of the array ints.
1551 */
1552 for (int j = 0; j < 32; j++) {
1553 retry = 0;
1554 if ((vfta & (1 << j)) == 0)
1555 continue;
1556 vid = (i * 32) + j;
1557 /* Call the shared code mailbox routine */
1558 while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
1559 if (++retry > 5)
1560 break;
1561 }
1562 }
1563 }
1564} /* ixv_setup_vlan_support */
1565
1566/************************************************************************
1567 * ixv_if_register_vlan
1568 *
1569 * Run via a vlan config EVENT, it enables us to use the
1570 * HW Filter table since we can get the vlan id. This just
1571 * creates the entry in the soft version of the VFTA, init
1572 * will repopulate the real table.
1573 ************************************************************************/
1574static void
1575ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1576{
1577 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1578 u16 index, bit;
1579
1580 index = (vtag >> 5) & 0x7F;
1581 bit = vtag & 0x1F;
1582 ixv_shadow_vfta[index] |= (1 << bit);
1583 ++sc->num_vlans;
1584} /* ixv_if_register_vlan */
1585
1586/************************************************************************
1587 * ixv_if_unregister_vlan
1588 *
1589 * Run via a vlan unconfig EVENT, remove our entry
1590 * in the soft vfta.
1591 ************************************************************************/
1592static void
1593ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1594{
1595 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1596 u16 index, bit;
1597
1598 index = (vtag >> 5) & 0x7F;
1599 bit = vtag & 0x1F;
1600 ixv_shadow_vfta[index] &= ~(1 << bit);
1601 --sc->num_vlans;
1602} /* ixv_if_unregister_vlan */
1603
1604/************************************************************************
1605 * ixv_if_enable_intr
1606 ************************************************************************/
1607static void
1608ixv_if_enable_intr(if_ctx_t ctx)
1609{
1610 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1611 struct ixgbe_hw *hw = &sc->hw;
1612 struct ix_rx_queue *que = sc->rx_queues;
1613 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1614
1615 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1616
 1617 mask = IXGBE_EIMS_ENABLE_MASK;
 1618 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1619 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1620
1621 for (int i = 0; i < sc->num_rx_queues; i++, que++)
1622 ixv_enable_queue(sc, que->msix);
1623
 1624 IXGBE_WRITE_FLUSH(&sc->hw);
 1625} /* ixv_if_enable_intr */
1626
1627/************************************************************************
1628 * ixv_if_disable_intr
1629 ************************************************************************/
1630static void
1631ixv_if_disable_intr(if_ctx_t ctx)
1632{
1633 struct ixgbe_softc *sc = iflib_get_softc(ctx);
 1634 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
 1635 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
1636 IXGBE_WRITE_FLUSH(&sc->hw);
1637} /* ixv_if_disable_intr */
1638
1639/************************************************************************
1640 * ixv_if_rx_queue_intr_enable
1641 ************************************************************************/
1642static int
1643ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1644{
1645 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1646 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
1647
1648 ixv_enable_queue(sc, que->rxr.me);
1649
1650 return (0);
1651} /* ixv_if_rx_queue_intr_enable */
1652
1653/************************************************************************
1654 * ixv_set_ivar
1655 *
1656 * Setup the correct IVAR register for a particular MSI-X interrupt
1657 * - entry is the register array entry
1658 * - vector is the MSI-X vector for this queue
1659 * - type is RX/TX/MISC
1660 ************************************************************************/
1661static void
1662ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
1663{
1664 struct ixgbe_hw *hw = &sc->hw;
1665 u32 ivar, index;
1666
1667 vector |= IXGBE_IVAR_ALLOC_VAL;
1668
1669 if (type == -1) { /* MISC IVAR */
 1670 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
 1671 ivar &= ~0xFF;
1672 ivar |= vector;
 1673 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
 1674 } else { /* RX/TX IVARS */
1675 index = (16 * (entry & 1)) + (8 * type);
1676 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1677 ivar &= ~(0xFF << index);
1678 ivar |= (vector << index);
1679 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1680 }
1681} /* ixv_set_ivar */
1682
1683/************************************************************************
1684 * ixv_configure_ivars
1685 ************************************************************************/
1686static void
1687ixv_configure_ivars(struct ixgbe_softc *sc)
1688{
1689 struct ix_rx_queue *que = sc->rx_queues;
1690
1691 MPASS(sc->num_rx_queues == sc->num_tx_queues);
1692
1693 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
1694 /* First the RX queue entry */
1695 ixv_set_ivar(sc, i, que->msix, 0);
1696 /* ... and the TX */
1697 ixv_set_ivar(sc, i, que->msix, 1);
1698 /* Set an initial value in EITR */
 1699 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
 1700 IXGBE_EITR_DEFAULT);
 1701 }
1702
1703 /* For the mailbox interrupt */
1704 ixv_set_ivar(sc, 1, sc->vector, -1);
1705} /* ixv_configure_ivars */
1706
1707/************************************************************************
1708 * ixv_save_stats
1709 *
1710 * The VF stats registers never have a truly virgin
1711 * starting point, so this routine tries to make an
1712 * artificial one, marking ground zero on attach as
1713 * it were.
1714 ************************************************************************/
1715static void
1716ixv_save_stats(struct ixgbe_softc *sc)
1717{
1718 if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
1729 }
1730} /* ixv_save_stats */
1731
1732/************************************************************************
1733 * ixv_init_stats
1734 ************************************************************************/
1735static void
1736ixv_init_stats(struct ixgbe_softc *sc)
1737{
1738 struct ixgbe_hw *hw = &sc->hw;
1739
 1740 sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
 1741 sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
 1742 sc->stats.vf.last_vfgorc |=
1743 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1744
 1745 sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
 1746 sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
 1747 sc->stats.vf.last_vfgotc |=
1748 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1749
 1750 sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
 1751
 1752 sc->stats.vf.vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
 1753 sc->stats.vf.vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
 1754 sc->stats.vf.vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
 1755 sc->stats.vf.vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
 1756 sc->stats.vf.vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
 1757} /* ixv_init_stats */
1758
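/*
 * The VF statistics registers are 32 bits wide (36 bits for the octet
 * counters, split across _LSB/_MSB pairs) and wrap silently.  The two
 * macros below detect a wrap when the current reading is smaller than
 * the previous one and carry the overflow into the upper bits of the
 * running 64-bit counter.
 */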
1759#define UPDATE_STAT_32(reg, last, count) \
1760{ \
1761 u32 current = IXGBE_READ_REG(hw, reg); \
1762 if (current < last) \
1763 count += 0x100000000LL; \
1764 last = current; \
1765 count &= 0xFFFFFFFF00000000LL; \
1766 count |= current; \
1767}
1768
1769#define UPDATE_STAT_36(lsb, msb, last, count) \
1770{ \
1771 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1772 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1773 u64 current = ((cur_msb << 32) | cur_lsb); \
1774 if (current < last) \
1775 count += 0x1000000000LL; \
1776 last = current; \
1777 count &= 0xFFFFFFF000000000LL; \
1778 count |= current; \
1779}
1780
1781/************************************************************************
1782 * ixv_update_stats - Update the board statistics counters.
1783 ************************************************************************/
1784void
1785ixv_update_stats(struct ixgbe_softc *sc)
1786{
1787 struct ixgbe_hw *hw = &sc->hw;
1788 struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1789
 1790 UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
 1791 sc->stats.vf.vfgprc);
 1792 UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
 1793 sc->stats.vf.vfgptc);
 1794 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
 1795 sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
 1796 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
 1797 sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
 1798 UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
 1799 sc->stats.vf.vfmprc);
1800
1801 /* Fill out the OS statistics structure */
1802 IXGBE_SET_IPACKETS(sc, stats->vfgprc);
1803 IXGBE_SET_OPACKETS(sc, stats->vfgptc);
1804 IXGBE_SET_IBYTES(sc, stats->vfgorc);
1805 IXGBE_SET_OBYTES(sc, stats->vfgotc);
1806 IXGBE_SET_IMCASTS(sc, stats->vfmprc);
1807} /* ixv_update_stats */
1808
1809/************************************************************************
1810 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1811 ************************************************************************/
1812static void
1813ixv_add_stats_sysctls(struct ixgbe_softc *sc)
1814{
1815 device_t dev = sc->dev;
1816 struct ix_tx_queue *tx_que = sc->tx_queues;
1817 struct ix_rx_queue *rx_que = sc->rx_queues;
1818 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1819 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1820 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1821 struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1822 struct sysctl_oid *stat_node, *queue_node;
1823 struct sysctl_oid_list *stat_list, *queue_list;
1824
1825#define QUEUE_NAME_LEN 32
1826 char namebuf[QUEUE_NAME_LEN];
1827
1828 /* Driver Statistics */
1829 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1830 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1831 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1832 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1833
1834 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
1835 struct tx_ring *txr = &tx_que->txr;
1836 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1837 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1838 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1839 queue_list = SYSCTL_CHILDREN(queue_node);
1840
1841 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1842 CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1843 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1844 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1845 }
1846
1847 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
1848 struct rx_ring *rxr = &rx_que->rxr;
1849 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1850 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1851 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1852 queue_list = SYSCTL_CHILDREN(queue_node);
1853
1854 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1855 CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1856 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1857 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1858 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1859 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1860 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1861 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1862 }
1863
1864 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1865 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1866 "VF Statistics (read from HW registers)");
1867 stat_list = SYSCTL_CHILDREN(stat_node);
1868
1869 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1870 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1872 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1873 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1874 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1875 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1876 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1877 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1878 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1879} /* ixv_add_stats_sysctls */
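The counters registered here appear under the device's sysctl tree. A hedged user-space sketch of reading one of them with sysctlbyname(3) follows; the node and leaf names ("mac", "good_pkts_rcvd") come from the registrations above, while the "dev.ixv.0" prefix is an assumption (first ixv instance) and not taken from this file:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t pkts;
        size_t len = sizeof(pkts);

        /* SYSCTL_ADD_UQUAD exports a 64-bit unsigned counter, so read
         * it into a uint64_t.  Adjust the unit number for your system. */
        if (sysctlbyname("dev.ixv.0.mac.good_pkts_rcvd", &pkts, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("VF good packets received: %ju\n", (uintmax_t)pkts);
        return (0);
}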
1880
1881/************************************************************************
1882 * ixv_print_debug_info
1883 *
1884 * Called from the "debug" sysctl handler (ixv_sysctl_debug) when it is set to 1.
1885 * Provides a way to take a look at important statistics
1886 * maintained by the driver and hardware.
1887 ************************************************************************/
1888 static void
1889 ixv_print_debug_info(struct ixgbe_softc *sc)
1890 {
1891 device_t dev = sc->dev;
1892 struct ixgbe_hw *hw = &sc->hw;
1893
1894 device_printf(dev, "Error Byte Count = %u \n",
1895 IXGBE_READ_REG(hw, IXGBE_ERRBC));
1896
1897 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)sc->link_irq);
1898} /* ixv_print_debug_info */
1899
1900/************************************************************************
1901 * ixv_sysctl_debug
1902 ************************************************************************/
1903static int
1904ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1905{
1906 struct ixgbe_softc *sc;
1907 int error, result;
1908
1909 result = -1;
1910 error = sysctl_handle_int(oidp, &result, 0, req);
1911
1912 if (error || !req->newptr)
1913 return (error);
1914
1915 if (result == 1) {
1916 sc = (struct ixgbe_softc *)arg1;
1917 ixv_print_debug_info(sc);
1918 }
1919
1920 return error;
1921} /* ixv_sysctl_debug */
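The handler above only acts when a new value is supplied and that value is 1, in which case it dumps the debug counters via ixv_print_debug_info(). A small, hypothetical user-space trigger is sketched below; it assumes the OID is registered as "debug" under the device's sysctl tree (the registration itself is elsewhere in the driver) and that the device is unit 0:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int one = 1;

        /* Writing 1 makes the handler call ixv_print_debug_info();
         * "dev.ixv.0.debug" is an assumed OID name, not taken from
         * this listing -- verify it with sysctl(8) first. */
        if (sysctlbyname("dev.ixv.0.debug", NULL, NULL, &one,
            sizeof(one)) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        return (0);
}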
1922
1923/************************************************************************
1924 * ixv_init_device_features
1925 ************************************************************************/
1926 static void
1927 ixv_init_device_features(struct ixgbe_softc *sc)
1928 {
1929 sc->feat_cap = IXGBE_FEATURE_NETMAP |
1930 IXGBE_FEATURE_VF |
1931 IXGBE_FEATURE_LEGACY_TX;
1932
1933 /* A tad short on feature flags for VFs, atm. */
1934 switch (sc->hw.mac.type) {
1935 case ixgbe_mac_82599_vf:
1936 break;
1937 case ixgbe_mac_X540_vf:
1938 break;
1939 case ixgbe_mac_X550_vf:
1940 case ixgbe_mac_X550EM_x_vf:
1941 case ixgbe_mac_X550EM_a_vf:
1942 sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1943 sc->feat_cap |= IXGBE_FEATURE_RSS;
1944 break;
1945 default:
1946 break;
1947 }
1948
1949 /* Enabled by default... */
1950 /* Is a virtual function (VF) */
1951 if (sc->feat_cap & IXGBE_FEATURE_VF)
1952 sc->feat_en |= IXGBE_FEATURE_VF;
1953 /* Netmap */
1954 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
1955 sc->feat_en |= IXGBE_FEATURE_NETMAP;
1956 /* Receive-Side Scaling (RSS) */
1957 if (sc->feat_cap & IXGBE_FEATURE_RSS)
1958 sc->feat_en |= IXGBE_FEATURE_RSS;
1959 /* Needs advanced context descriptor regardless of offloads req'd */
1960 if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1961 sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1962} /* ixv_init_device_features */
1963