FreeBSD kernel IXGBE device code
ix_txrx.c
Go to the documentation of this file.
1/******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35#ifndef IXGBE_STANDALONE_BUILD
36#include "opt_inet.h"
37#include "opt_inet6.h"
38#include "opt_rss.h"
39#endif
40
41#include "ixgbe.h"
42
43/************************************************************************
44 * Local Function prototypes
45 ************************************************************************/
46static int ixgbe_isc_txd_encap(void *, if_pkt_info_t);
47static void ixgbe_isc_txd_flush(void *, uint16_t, qidx_t);
48static int ixgbe_isc_txd_credits_update(void *, uint16_t, bool);
49
50static void ixgbe_isc_rxd_refill(void *, if_rxd_update_t);
51static void ixgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
52static int ixgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
53static int ixgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
54
55static void ixgbe_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
57 if_pkt_info_t);
58
59extern void ixgbe_if_enable_intr(if_ctx_t ctx);
60static int ixgbe_determine_rsstype(uint16_t pkt_info);
61
/*
 * iflib transmit/receive method table: iflib calls into these handlers
 * with the softc (registered at attach) as the opaque first argument.
 */
struct if_txrx ixgbe_txrx = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	/* No legacy (INTx) interrupt handler is provided */
	.ift_legacy_intr = NULL
};
72
73/************************************************************************
74 * ixgbe_tx_ctx_setup
75 *
76 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
77 *
78 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
	u8 ehdrlen;

	/* Assume we can offload until a non-offloadable protocol disproves it */
	offload = true;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet. That means the ehdrlen needs to be non-zero in order
	 * for the host driver not to flag a malicious event. The stack
	 * will most likely populate this for all other reasons of why
	 * this function was called.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/*
		 * For TSO, PAYLEN must be the L4 payload only, so strip
		 * the Ethernet, IP and TCP header lengths off the total.
		 * This is used in the transmit desc in encap.
		 */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = false;	/* neither v4 nor v6: no L3/L4 csum help */

	vlan_macip_lens |= pi->ipi_ip_hlen;

	/* Set the L4 packet type; anything unrecognized disables offload */
	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}
	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* Caller ORs this into each data descriptor's olinfo_status */
	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */
165
166/************************************************************************
167 * ixgbe_isc_txd_encap
168 ************************************************************************/
169static int
170ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
171{
172 struct ixgbe_softc *sc = arg;
173 if_softc_ctx_t scctx = sc->shared;
174 struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
175 struct tx_ring *txr = &que->txr;
176 int nsegs = pi->ipi_nsegs;
177 bus_dma_segment_t *segs = pi->ipi_segs;
178 union ixgbe_adv_tx_desc *txd = NULL;
179 struct ixgbe_adv_tx_context_desc *TXD;
180 int i, j, first, pidx_last;
181 uint32_t olinfo_status, cmd, flags;
182 qidx_t ntxd;
183
186
187 if (pi->ipi_mflags & M_VLANTAG)
189
190 i = first = pi->ipi_pidx;
191 flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
192 ntxd = scctx->isc_ntxd[0];
193
194 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
195 if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
197 pi->ipi_vtag) {
198 /*********************************************
199 * Set up the appropriate offload context
200 * this will consume the first descriptor
201 *********************************************/
202 olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
203 if (pi->ipi_csum_flags & CSUM_TSO) {
205 ++txr->tso_tx;
206 }
207
208 if (++i == scctx->isc_ntxd[0])
209 i = 0;
210 } else {
211 /* Indicate the whole packet as payload when not doing TSO */
212 olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
213 }
214
215 olinfo_status |= IXGBE_ADVTXD_CC;
216 pidx_last = 0;
217 for (j = 0; j < nsegs; j++) {
218 bus_size_t seglen;
219
220 txd = &txr->tx_base[i];
221 seglen = segs[j].ds_len;
222
223 txd->read.buffer_addr = htole64(segs[j].ds_addr);
224 txd->read.cmd_type_len = htole32(cmd | seglen);
225 txd->read.olinfo_status = htole32(olinfo_status);
226
227 pidx_last = i;
228 if (++i == scctx->isc_ntxd[0]) {
229 i = 0;
230 }
231 }
232
233 if (flags) {
234 txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
235 txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
236 }
237 txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);
238
239 txr->bytes += pi->ipi_len;
240 pi->ipi_new_pidx = i;
241
242 ++txr->total_packets;
243
244 return (0);
245} /* ixgbe_isc_txd_encap */
246
247/************************************************************************
248 * ixgbe_isc_txd_flush
249 ************************************************************************/
250static void
251ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
252{
253 struct ixgbe_softc *sc = arg;
254 struct ix_tx_queue *que = &sc->tx_queues[txqid];
255 struct tx_ring *txr = &que->txr;
256
257 IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
258} /* ixgbe_isc_txd_flush */
259
260/************************************************************************
261 * ixgbe_isc_txd_credits_update
262 ************************************************************************/
/*
 * Count how many TX descriptors the hardware has completed.
 *
 * Walks the ring of recorded RS (report-status) slots; each slot whose
 * descriptor has the DD (descriptor-done) bit set means every descriptor
 * up to and including that slot is finished.  With clear==false this only
 * reports whether any work is reclaimable (0 or 1); with clear==true it
 * consumes the completed slots and returns the number of descriptors freed.
 */
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ixgbe_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;
	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	/* Empty RS queue: nothing was marked for writeback */
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/* If clear is false just let caller know that there
	 * are descriptors to reclaim */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		/* Distance from the last processed index, modulo ring size */
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		/* ntxd is a power of two, so the mask wraps the index */
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */
316
317/************************************************************************
318 * ixgbe_isc_rxd_refill
319 ************************************************************************/
320static void
321ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
322{
323 struct ixgbe_softc *sc = arg;
324 struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
325 struct rx_ring *rxr = &que->rxr;
326 uint64_t *paddrs;
327 int i;
328 uint32_t next_pidx, pidx;
329 uint16_t count;
330
331 paddrs = iru->iru_paddrs;
332 pidx = iru->iru_pidx;
333 count = iru->iru_count;
334
335 for (i = 0, next_pidx = pidx; i < count; i++) {
336 rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
337 if (++next_pidx == sc->shared->isc_nrxd[0])
338 next_pidx = 0;
339 }
340} /* ixgbe_isc_rxd_refill */
341
342/************************************************************************
343 * ixgbe_isc_rxd_flush
344 ************************************************************************/
345static void
346ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
347{
348 struct ixgbe_softc *sc = arg;
349 struct ix_rx_queue *que = &sc->rx_queues[qsidx];
350 struct rx_ring *rxr = &que->rxr;
351
352 IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
353} /* ixgbe_isc_rxd_flush */
354
355/************************************************************************
356 * ixgbe_isc_rxd_available
357 ************************************************************************/
358static int
359ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
360{
361 struct ixgbe_softc *sc = arg;
362 struct ix_rx_queue *que = &sc->rx_queues[qsidx];
363 struct rx_ring *rxr = &que->rxr;
364 union ixgbe_adv_rx_desc *rxd;
365 uint32_t staterr;
366 int cnt, i, nrxd;
367
368 nrxd = sc->shared->isc_nrxd[0];
369 for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
370 rxd = &rxr->rx_base[i];
371 staterr = le32toh(rxd->wb.upper.status_error);
372
373 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
374 break;
375 if (++i == nrxd)
376 i = 0;
377 if (staterr & IXGBE_RXD_STAT_EOP)
378 cnt++;
379 }
380 return (cnt);
381} /* ixgbe_isc_rxd_available */
382
383/************************************************************************
384 * ixgbe_isc_rxd_pkt_get
385 *
386 * Routine sends data which has been dma'ed into host memory
387 * to upper layer. Initialize ri structure.
388 *
389 * Returns 0 upon success, errno on failure
390 ************************************************************************/
391
392static int
393ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
394{
395 struct ixgbe_softc *sc = arg;
396 if_softc_ctx_t scctx = sc->shared;
397 struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
398 struct rx_ring *rxr = &que->rxr;
399 union ixgbe_adv_rx_desc *rxd;
400
401 uint16_t pkt_info, len, cidx, i;
402 uint32_t ptype;
403 uint32_t staterr = 0;
404 bool eop;
405
406 i = 0;
407 cidx = ri->iri_cidx;
408 do {
409 rxd = &rxr->rx_base[cidx];
410 staterr = le32toh(rxd->wb.upper.status_error);
411 pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
412
413 /* Error Checking then decrement count */
414 MPASS ((staterr & IXGBE_RXD_STAT_DD) != 0);
415
416 len = le16toh(rxd->wb.upper.length);
417 ptype = le32toh(rxd->wb.lower.lo_dword.data) &
419
420 ri->iri_len += len;
421 rxr->bytes += len;
422
423 rxd->wb.upper.status_error = 0;
424 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
425
426 /* Make sure bad packets are discarded */
427 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
428 if (sc->feat_en & IXGBE_FEATURE_VF)
429 if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS, 1);
430
431 rxr->rx_discarded++;
432 return (EBADMSG);
433 }
434 ri->iri_frags[i].irf_flid = 0;
435 ri->iri_frags[i].irf_idx = cidx;
436 ri->iri_frags[i].irf_len = len;
437 if (++cidx == sc->shared->isc_nrxd[0])
438 cidx = 0;
439 i++;
440 /* even a 16K packet shouldn't consume more than 8 clusters */
441 MPASS(i < 9);
442 } while (!eop);
443
444 rxr->rx_packets++;
445 rxr->packets++;
446 rxr->rx_bytes += ri->iri_len;
447
448 if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
449 ixgbe_rx_checksum(staterr, ri, ptype);
450
451 ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
452 ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
453 if ((sc->feat_en & IXGBE_FEATURE_RSS) == 0) {
454 if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
455 ri->iri_rsstype = M_HASHTYPE_NONE;
456 else
457 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
458 }
459 if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) {
460 ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
461 ri->iri_flags |= M_VLANTAG;
462 }
463
464 ri->iri_nfrags = i;
465 return (0);
466} /* ixgbe_isc_rxd_pkt_get */
467
468/************************************************************************
469 * ixgbe_rx_checksum
470 *
471 * Verify that the hardware indicated that the checksum is valid.
472 * Inform the stack about the status of checksum so that stack
473 * doesn't spend time verifying the checksum.
474 ************************************************************************/
475static void
476ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
477{
478 uint16_t status = (uint16_t)staterr;
479 uint8_t errors = (uint8_t)(staterr >> 24);
480
481 /* If there is a layer 3 or 4 error we are done */
482 if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
483 return;
484
485 /* IP Checksum Good */
486 if (status & IXGBE_RXD_STAT_IPCS)
487 ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
488
489 /* Valid L4E checksum */
490 if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) {
491 /* SCTP header present. */
492 if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
493 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
494 ri->iri_csum_flags |= CSUM_SCTP_VALID;
495 } else {
496 ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
497 ri->iri_csum_data = htons(0xffff);
498 }
499 }
500} /* ixgbe_rx_checksum */
501
502/************************************************************************
503 * ixgbe_determine_rsstype
504 *
505 * Parse the packet type to determine the appropriate hash
506 ************************************************************************/
507static int
509{
512 return M_HASHTYPE_RSS_TCP_IPV4;
514 return M_HASHTYPE_RSS_IPV4;
516 return M_HASHTYPE_RSS_TCP_IPV6;
518 return M_HASHTYPE_RSS_IPV6_EX;
520 return M_HASHTYPE_RSS_IPV6;
522 return M_HASHTYPE_RSS_TCP_IPV6_EX;
524 return M_HASHTYPE_RSS_UDP_IPV4;
526 return M_HASHTYPE_RSS_UDP_IPV6;
528 return M_HASHTYPE_RSS_UDP_IPV6_EX;
529 default:
530 return M_HASHTYPE_OPAQUE;
531 }
532} /* ixgbe_determine_rsstype */
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t)
Definition: ix_txrx.c:80
static void ixgbe_isc_txd_flush(void *, uint16_t, qidx_t)
Definition: ix_txrx.c:251
static int ixgbe_isc_txd_encap(void *, if_pkt_info_t)
Definition: ix_txrx.c:170
static void ixgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t)
static void ixgbe_isc_rxd_refill(void *, if_rxd_update_t)
Definition: ix_txrx.c:321
static int ixgbe_isc_rxd_pkt_get(void *, if_rxd_info_t)
Definition: ix_txrx.c:393
static int ixgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t)
Definition: ix_txrx.c:359
static void ixgbe_rx_checksum(uint32_t, if_rxd_info_t, uint32_t)
Definition: ix_txrx.c:476
static int ixgbe_determine_rsstype(uint16_t pkt_info)
Definition: ix_txrx.c:508
void ixgbe_if_enable_intr(if_ctx_t ctx)
Definition: if_ix.c:3746
static int ixgbe_isc_txd_credits_update(void *, uint16_t, bool)
Definition: ix_txrx.c:264
struct if_txrx ixgbe_txrx
Definition: ix_txrx.c:62
#define CSUM_OFFLOAD
Definition: ixgbe.h:195
#define IXGBE_FEATURE_VF
#define IXGBE_FEATURE_NEEDS_CTXD
#define IXGBE_FEATURE_RSS
uint8_t u8
Definition: ixgbe_osdep.h:143
#define IXGBE_WRITE_REG(a, reg, val)
Definition: ixgbe_osdep.h:227
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK
Definition: ixgbe_type.h:2875
#define IXGBE_RXD_STAT_VP
Definition: ixgbe_type.h:2731
#define IXGBE_ADVTXD_MACLEN_SHIFT
Definition: ixgbe_type.h:3408
#define IXGBE_RXDADV_RSSTYPE_IPV6
Definition: ixgbe_type.h:2834
#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP
Definition: ixgbe_type.h:2830
#define IXGBE_TXD_CMD_EOP
Definition: ixgbe_type.h:2705
#define IXGBE_TXD_POPTS_IXSM
Definition: ixgbe_type.h:2703
#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP
Definition: ixgbe_type.h:2832
#define IXGBE_ADVTXD_CC
Definition: ixgbe_type.h:3395
#define IXGBE_TXD_CMD_RS
Definition: ixgbe_type.h:2708
#define IXGBE_ADVTXD_TUCMD_IPV6
Definition: ixgbe_type.h:3411
#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP
Definition: ixgbe_type.h:2837
#define IXGBE_ADVTXD_DCMD_IFCS
Definition: ixgbe_type.h:3385
#define IXGBE_ADVTXD_TUCMD_IPV4
Definition: ixgbe_type.h:3410
#define IXGBE_TXD_STAT_DD
Definition: ixgbe_type.h:2711
#define IXGBE_ADVTXD_DCMD_DEXT
Definition: ixgbe_type.h:3388
#define IXGBE_ADVTXD_PAYLEN_SHIFT
Definition: ixgbe_type.h:3407
#define IXGBE_ADVTXD_DTYP_DATA
Definition: ixgbe_type.h:3383
#define IXGBE_ADVTXD_DTYP_CTXT
Definition: ixgbe_type.h:3382
#define IXGBE_ADVTXD_TUCMD_L4T_UDP
Definition: ixgbe_type.h:3412
#define IXGBE_TXD_POPTS_TXSM
Definition: ixgbe_type.h:2704
#define IXGBE_ADVTXD_DCMD_TSE
Definition: ixgbe_type.h:3390
#define IXGBE_RXD_STAT_DD
Definition: ixgbe_type.h:2728
#define IXGBE_RXD_STAT_L4CS
Definition: ixgbe_type.h:2735
#define IXGBE_RXDADV_PKTTYPE_SCTP
Definition: ixgbe_type.h:2848
#define IXGBE_RXDADV_RSSTYPE_IPV4
Definition: ixgbe_type.h:2831
#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX
Definition: ixgbe_type.h:2835
#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX
Definition: ixgbe_type.h:2838
#define IXGBE_RXDADV_RSSTYPE_MASK
Definition: ixgbe_type.h:2818
#define IXGBE_ADVTXD_L4LEN_SHIFT
Definition: ixgbe_type.h:3430
#define IXGBE_ADVTXD_MSS_SHIFT
Definition: ixgbe_type.h:3431
#define IXGBE_ADVTXD_DCMD_VLE
Definition: ixgbe_type.h:3389
#define IXGBE_RXD_STAT_IPCS
Definition: ixgbe_type.h:2736
#define IXGBE_RXDADV_PKTTYPE_MASK
Definition: ixgbe_type.h:2819
#define IXGBE_RXD_ERR_IPE
Definition: ixgbe_type.h:2755
#define IXGBE_RXD_ERR_TCPE
Definition: ixgbe_type.h:2754
#define IXGBE_ADVTXD_VLAN_SHIFT
Definition: ixgbe_type.h:3409
#define IXGBE_ADVTXD_TUCMD_L4T_TCP
Definition: ixgbe_type.h:3413
#define IXGBE_RXDADV_RSSTYPE_IPV6_EX
Definition: ixgbe_type.h:2833
#define IXGBE_RXDADV_PKTTYPE_ETQF
Definition: ixgbe_type.h:2856
#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP
Definition: ixgbe_type.h:2836
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP
Definition: ixgbe_type.h:3414
#define IXGBE_RXD_STAT_EOP
Definition: ixgbe_type.h:2729
struct ixgbe_softc * sc
Definition: ixgbe.h:319
struct rx_ring rxr
Definition: ixgbe.h:325
struct tx_ring txr
Definition: ixgbe.h:333
struct ixgbe_softc * sc
Definition: ixgbe.h:331
u32 feat_en
Definition: ixgbe.h:463
struct ixgbe_hw hw
Definition: ixgbe.h:353
struct ix_tx_queue * tx_queues
Definition: ixgbe.h:423
struct ix_rx_queue * rx_queues
Definition: ixgbe.h:424
if_softc_ctx_t shared
Definition: ixgbe.h:356
Definition: ixgbe.h:288
bool vtag_strip
Definition: ixgbe.h:295
u32 packets
Definition: ixgbe.h:300
u64 rx_bytes
Definition: ixgbe.h:306
struct ixgbe_softc * sc
Definition: ixgbe.h:290
u64 rx_packets
Definition: ixgbe.h:305
u64 rx_discarded
Definition: ixgbe.h:307
union ixgbe_adv_rx_desc * rx_base
Definition: ixgbe.h:293
u32 bytes
Definition: ixgbe.h:299
struct ix_rx_queue * que
Definition: ixgbe.h:289
u32 tail
Definition: ixgbe.h:292
Definition: ixgbe.h:262
u32 tail
Definition: ixgbe.h:266
u32 bytes
Definition: ixgbe.h:277
qidx_t tx_rs_pidx
Definition: ixgbe.h:269
u64 tso_tx
Definition: ixgbe.h:280
qidx_t * tx_rsq
Definition: ixgbe.h:267
u64 total_packets
Definition: ixgbe.h:281
qidx_t tx_rs_cidx
Definition: ixgbe.h:268
qidx_t tx_cidx_processed
Definition: ixgbe.h:270
struct ixgbe_softc * sc
Definition: ixgbe.h:263
union ixgbe_adv_tx_desc * tx_base
Definition: ixgbe.h:264
struct ixgbe_adv_rx_desc::@11::@12 lower
struct ixgbe_adv_rx_desc::@11::@13 upper
struct ixgbe_adv_rx_desc::@10 read
union ixgbe_adv_rx_desc::@11::@12::@15 hi_dword
struct ixgbe_adv_rx_desc::@11::@12::@14::@16 hs_rss
union ixgbe_adv_rx_desc::@11::@12::@14 lo_dword
struct ixgbe_adv_rx_desc::@11 wb
struct ixgbe_adv_tx_desc::@9 wb
struct ixgbe_adv_tx_desc::@8 read