FreeBSD kernel CXGB device code
cxgb_sge.c
1/**************************************************************************
2SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
4Copyright (c) 2007-2009, Chelsio Inc.
5All rights reserved.
6
7Redistribution and use in source and binary forms, with or without
8modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Neither the name of the Chelsio Corporation nor the names of its
14 contributors may be used to endorse or promote products derived from
15 this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27POSSIBILITY OF SUCH DAMAGE.
28
29***************************************************************************/
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include "opt_inet6.h"
35#include "opt_inet.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/module.h>
41#include <sys/bus.h>
42#include <sys/conf.h>
43#include <machine/bus.h>
44#include <machine/resource.h>
45#include <sys/rman.h>
46#include <sys/queue.h>
47#include <sys/sysctl.h>
48#include <sys/taskqueue.h>
49
50#include <sys/proc.h>
51#include <sys/sbuf.h>
52#include <sys/sched.h>
53#include <sys/smp.h>
54#include <sys/systm.h>
55#include <sys/syslog.h>
56#include <sys/socket.h>
57#include <sys/sglist.h>
58
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/bpf.h>
62#include <net/ethernet.h>
63#include <net/if_vlan_var.h>
64
65#include <netinet/in_systm.h>
66#include <netinet/in.h>
67#include <netinet/ip.h>
68#include <netinet/ip6.h>
69#include <netinet/tcp.h>
70
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76
77#include <cxgb_include.h>
78#include <sys/mvec.h>
79
80int txq_fills = 0;
82
83#ifdef TCP_OFFLOAD
84CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
85#endif
86
87extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90 "size of per-queue mbuf ring");
91
92static int cxgb_tx_coalesce_force = 0;
93SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
94 &cxgb_tx_coalesce_force, 0,
95 "coalesce small packets into a single work request regardless of ring state");
96
97#define COALESCE_START_DEFAULT TX_ETH_Q_SIZE>>1
98#define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
99#define COALESCE_STOP_DEFAULT TX_ETH_Q_SIZE>>2
100#define COALESCE_STOP_MIN TX_ETH_Q_SIZE>>5
101#define TX_RECLAIM_DEFAULT TX_ETH_Q_SIZE>>5
102#define TX_RECLAIM_MAX TX_ETH_Q_SIZE>>2
103#define TX_RECLAIM_MIN TX_ETH_Q_SIZE>>6
104
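/*
 * For illustration, assuming the stock TX_ETH_Q_SIZE of 1024 descriptors
 * (the value may differ in a given build), the thresholds above work out to:
 *
 *   COALESCE_START_DEFAULT = 1024 >> 1             = 512
 *   COALESCE_START_MAX     = 1024 - (1024 >> 3)    = 896
 *   COALESCE_STOP_DEFAULT  = 1024 >> 2             = 256
 *   COALESCE_STOP_MIN      = 1024 >> 5             = 32
 *   TX_RECLAIM_DEFAULT     = 1024 >> 5             = 32
 *   TX_RECLAIM_MAX         = 1024 >> 2             = 256
 *   TX_RECLAIM_MIN         = 1024 >> 6             = 16
 */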
105
106static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
107SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
108 &cxgb_tx_coalesce_enable_start, 0,
109 "coalesce enable threshold");
110static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
111SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
112 &cxgb_tx_coalesce_enable_stop, 0,
113 "coalesce disable threshold");
114static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
115SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
116 &cxgb_tx_reclaim_threshold, 0,
117 "tx cleaning minimum threshold");
118
119/*
120 * XXX don't re-enable this until TOE stops assuming
121 * we have an m_ext
122 */
123static int recycle_enable = 0;
124
125extern int cxgb_use_16k_clusters;
126extern int nmbjumbop;
127extern int nmbjumbo9;
128extern int nmbjumbo16;
129
130#define USE_GTS 0
131
132#define SGE_RX_SM_BUF_SIZE 1536
133#define SGE_RX_DROP_THRES 16
134#define SGE_RX_COPY_THRES 128
135
136/*
137 * Period of the Tx buffer reclaim timer. This timer does not need to run
138 * frequently as Tx buffers are usually reclaimed by new Tx packets.
139 */
140#define TX_RECLAIM_PERIOD (hz >> 1)
141
142/*
143 * Values for sge_txq.flags
144 */
145enum {
146 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
147 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
148};
149
150 struct tx_desc {
151 	uint64_t flit[TX_DESC_FLITS];
152 } __packed;
153 
154struct rx_desc {
155 uint32_t addr_lo;
156 uint32_t len_gen;
157 uint32_t gen2;
158 uint32_t addr_hi;
159} __packed;
160
161 struct rsp_desc { /* response queue descriptor */
162 	struct rss_header rss_hdr;
163 uint32_t flags;
164 uint32_t len_cq;
165 uint8_t imm_data[47];
166 uint8_t intr_gen;
167} __packed;
168
169#define RX_SW_DESC_MAP_CREATED (1 << 0)
170#define TX_SW_DESC_MAP_CREATED (1 << 1)
171#define RX_SW_DESC_INUSE (1 << 3)
172#define TX_SW_DESC_MAPPED (1 << 4)
173
174#define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
175#define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
176#define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
177#define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
178
179struct tx_sw_desc { /* SW state per Tx descriptor */
180 struct mbuf *m;
181 bus_dmamap_t map;
182 int flags;
183};
184
185struct rx_sw_desc { /* SW state per Rx descriptor */
186 caddr_t rxsd_cl;
187 struct mbuf *m;
188 bus_dmamap_t map;
189 int flags;
190};
191
192struct txq_state {
193 unsigned int compl;
194 unsigned int gen;
195 unsigned int pidx;
196};
197
198 struct refill_fl_cb_arg {
199 	int error;
200 bus_dma_segment_t seg;
201 int nseg;
202};
203
204
205/*
206 * Maps a number of flits to the number of Tx descriptors that can hold them.
207 * The formula is
208 *
209 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
210 *
211 * HW allows up to 4 descriptors to be combined into a WR.
212 */
213static uint8_t flit_desc_map[] = {
214 0,
215#if SGE_NUM_GENBITS == 1
216 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220#elif SGE_NUM_GENBITS == 2
221 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
225#else
226# error "SGE_NUM_GENBITS must be 1 or 2"
227#endif
228};
229
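/*
 * A minimal sketch (illustrative only, not from the original source) of the
 * formula the table above encodes, assuming WR_FLITS is 15 as in the
 * SGE_NUM_GENBITS == 2 case (one flit of each descriptor is reserved for the
 * generation bits); flit_desc_map[] is simply this function tabulated for
 * small flit counts:
 */
#if 0	/* illustrative only */
static unsigned int
flits_to_desc_formula(unsigned int flits)
{
	/* one descriptor holds WR_FLITS flits, each extra one adds WR_FLITS - 1 */
	return (1 + (flits - 2) / (15 - 1));
}
/* e.g. 15 flits -> 1 descriptor, 16 -> 2, 30 -> 3, matching the table above */
#endif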
230#define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
231#define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
232#define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
233#define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
234#define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235#define TXQ_RING_NEEDS_ENQUEUE(qs) \
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237#define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238#define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240#define TXQ_RING_DEQUEUE(qs) \
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
242
244
245static void sge_timer_cb(void *arg);
246static void sge_timer_reclaim(void *arg, int ncount);
247static void sge_txq_reclaim_handler(void *arg, int ncount);
248static void cxgb_start_locked(struct sge_qset *qs);
249
250/*
251 * XXX need to cope with bursty scheduling by looking at a wider
252 * window than we are now for determining the need for coalescing
253 *
254 */
255static __inline uint64_t
256check_pkt_coalesce(struct sge_qset *qs)
257{
258 struct adapter *sc;
259 struct sge_txq *txq;
260 uint8_t *fill;
261
262 if (__predict_false(cxgb_tx_coalesce_force))
263 return (1);
264 txq = &qs->txq[TXQ_ETH];
265 sc = qs->port->adapter;
266 fill = &sc->tunq_fill[qs->idx];
267
272 /*
273 * if the hardware transmit queue is more than 1/8 full
274 * we mark it as coalescing - we drop back from coalescing
275 * when we go below 1/32 full and there are no packets enqueued,
276 * this provides us with some degree of hysteresis
277 */
278 if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
279 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
280 *fill = 0;
281 else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
282 *fill = 1;
283
284 return (sc->tunq_coalesce);
285}
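/*
 * A clarifying note on the logic above: the per-queue mark in
 * sc->tunq_fill[] is kept with some hysteresis around the
 * cxgb_tx_coalesce_enable_start/stop tunables; it is set once the HW ring
 * fills past the start threshold and is only cleared again after the ring
 * drains below the stop threshold with the SW ring empty, so a load
 * hovering near a single threshold does not flap in and out of coalescing.
 */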
286
287#ifdef __LP64__
288static void
289set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
290{
291 uint64_t wr_hilo;
292#if _BYTE_ORDER == _LITTLE_ENDIAN
293 wr_hilo = wr_hi;
294 wr_hilo |= (((uint64_t)wr_lo)<<32);
295#else
296 wr_hilo = wr_lo;
297 wr_hilo |= (((uint64_t)wr_hi)<<32);
298#endif
299 wrp->wrh_hilo = wr_hilo;
300}
301#else
302static void
303set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
304{
305
306 wrp->wrh_hi = wr_hi;
307 wmb();
308 wrp->wrh_lo = wr_lo;
309}
310#endif
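/*
 * A brief rationale (assumed, based on the code above): the work request
 * header should not be observable half-written, so on __LP64__ both 32-bit
 * words are published with a single 64-bit store, while the 32-bit fallback
 * writes wr_hi first and orders it ahead of wr_lo with wmb().
 */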
311
312 struct coalesce_info {
313 	int count;
314 	int nbytes;
315 	int noncoal;
316 };
317
318static int
319coalesce_check(struct mbuf *m, void *arg)
320{
321 struct coalesce_info *ci = arg;
322
323 if ((m->m_next != NULL) ||
324 ((mtod(m, vm_offset_t) & PAGE_MASK) + m->m_len > PAGE_SIZE))
325 ci->noncoal = 1;
326
327 if ((ci->count == 0) || (ci->noncoal == 0 && (ci->count < 7) &&
328 (ci->nbytes + m->m_len <= 10500))) {
329 ci->count++;
330 ci->nbytes += m->m_len;
331 return (1);
332 }
333 return (0);
334}
335
336static struct mbuf *
337cxgb_dequeue(struct sge_qset *qs)
338{
339 struct mbuf *m, *m_head, *m_tail;
340 struct coalesce_info ci;
341
342
343 if (check_pkt_coalesce(qs) == 0)
344 return TXQ_RING_DEQUEUE(qs);
345
346 m_head = m_tail = NULL;
347 ci.count = ci.nbytes = ci.noncoal = 0;
348 do {
349 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
350 if (m_head == NULL) {
351 m_tail = m_head = m;
352 } else if (m != NULL) {
353 m_tail->m_nextpkt = m;
354 m_tail = m;
355 }
356 } while (m != NULL);
357 if (ci.count > 7)
358 panic("trying to coalesce %d packets in to one WR", ci.count);
359 return (m_head);
360}
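/*
 * For illustration: the limits applied in coalesce_check() above mirror
 * what a single CPL_TX_PKT_BATCH work request can describe, assuming one
 * header flit plus two flits per packet must fit in one work request: at
 * most 7 packets, roughly 10500 bytes total, and only single-mbuf packets
 * whose data does not cross a page boundary; that is why cxgb_dequeue()
 * panics if more than 7 packets ever end up chained together.
 */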
361
371static __inline int
372reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
373{
374 struct sge_txq *q = &qs->txq[queue];
375 int reclaim = desc_reclaimable(q);
376
380
381 if (reclaim < reclaim_min)
382 return (0);
383
384 mtx_assert(&qs->lock, MA_OWNED);
385 if (reclaim > 0) {
386 t3_free_tx_desc(qs, reclaim, queue);
387 q->cleaned += reclaim;
388 q->in_use -= reclaim;
389 }
390 if (isset(&qs->txq_stopped, TXQ_ETH))
391 clrbit(&qs->txq_stopped, TXQ_ETH);
392
393 return (reclaim);
394}
395
396#ifdef DEBUGNET
397int
398cxgb_debugnet_poll_tx(struct sge_qset *qs)
399{
400 
401 return (reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH));
402}
403#endif
404
411static __inline int
412should_restart_tx(const struct sge_txq *q)
413{
414 unsigned int r = q->processed - q->cleaned;
415
416 return q->in_use - r < (q->size >> 1);
417}
418
429void
430t3_sge_init(adapter_t *adap, struct sge_params *p)
431{
432 u_int ctrl, ups;
433
434 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
435
438 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
439 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
440#if SGE_NUM_GENBITS == 1
441 ctrl |= F_EGRGENCTRL;
442#endif
443 if (adap->params.rev > 0) {
444 if (!(adap->flags & (USING_MSIX | USING_MSI)))
446 }
447 t3_write_reg(adap, A_SG_CONTROL, ctrl);
449 V_LORCQDRBTHRSH(512));
452 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
454 adap->params.rev < T3_REV_C ? 1000 : 500);
458 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
459 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
460}
461
462
470static __inline unsigned int
471sgl_len(unsigned int n)
472{
473 return ((3 * n) / 2 + (n & 1));
474}
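/*
 * For illustration: each struct sg_ent packs two address/length pairs into
 * three flits, so sgl_len() is ceil(3 * n / 2) for n DMA segments, e.g.
 * sgl_len(1) = 2, sgl_len(2) = 3, sgl_len(3) = 5, sgl_len(8) = 12.
 */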
475
482static int
483get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
484{
485
486 if (resp->rss_hdr.opcode == CPL_RX_DATA) {
487 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
488 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
489 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
490 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
491 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
492 } else
493 m->m_len = IMMED_PKT_SIZE;
494 m->m_ext.ext_buf = NULL;
495 m->m_ext.ext_type = 0;
496 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
497 return (0);
498}
499
500static __inline u_int
501flits_to_desc(u_int n)
502{
503 return (flit_desc_map[n]);
504}
505
506#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
507 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
508 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
509 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
510 F_HIRCQPARITYERROR)
511#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
512#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
513 F_RSPQDISABLED)
514
521void
522t3_sge_err_intr(adapter_t *adapter)
523{
524 unsigned int v, status;
525
527 if (status & SGE_PARERR)
528 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
529 status & SGE_PARERR);
530 if (status & SGE_FRAMINGERR)
531 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
532 status & SGE_FRAMINGERR);
533 if (status & F_RSPQCREDITOVERFOW)
534 CH_ALERT(adapter, "SGE response queue credit overflow\n");
535
536 if (status & F_RSPQDISABLED) {
537 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
538 
539 CH_ALERT(adapter,
540 "packet delivered to disabled response queue (0x%x)\n",
541 (v >> S_RSPQ0DISABLED) & 0xff);
542 }
543
545 if (status & SGE_FATALERR)
546 t3_fatal_err(adapter);
547}
548
549void
550t3_sge_prep(adapter_t *adap, struct sge_params *p)
551{
552 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
553
554 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
555 nqsets *= adap->params.nports;
556
557 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
558
559 while (!powerof2(fl_q_size))
560 fl_q_size--;
561
563 is_offload(adap);
564
565 if (use_16k) {
566 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
567 jumbo_buf_size = MJUM16BYTES;
568 } else {
569 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
570 jumbo_buf_size = MJUM9BYTES;
571 }
572 while (!powerof2(jumbo_q_size))
573 jumbo_q_size--;
574
575 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
576 device_printf(adap->dev,
577 "Insufficient clusters and/or jumbo buffers.\n");
578
579 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
580
581 for (i = 0; i < SGE_QSETS; ++i) {
582 struct qset_params *q = p->qset + i;
583
584 if (adap->params.nports > 2) {
585 q->coalesce_usecs = 50;
586 } else {
587#ifdef INVARIANTS
588 q->coalesce_usecs = 10;
589#else
590 q->coalesce_usecs = 5;
591#endif
592 }
593 q->polling = 0;
594 q->rspq_size = RSPQ_Q_SIZE;
595 q->fl_size = fl_q_size;
596 q->jumbo_size = jumbo_q_size;
597 q->jumbo_buf_size = jumbo_buf_size;
598 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
599 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
600 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
601 q->cong_thres = 0;
602 }
603}
604
605int
606t3_sge_alloc(adapter_t *sc)
607{
608
609 /* The parent tag. */
610 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
611 1, 0, /* algnmnt, boundary */
612 BUS_SPACE_MAXADDR, /* lowaddr */
613 BUS_SPACE_MAXADDR, /* highaddr */
614 NULL, NULL, /* filter, filterarg */
615 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
616 BUS_SPACE_UNRESTRICTED, /* nsegments */
617 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
618 0, /* flags */
619 NULL, NULL, /* lock, lockarg */
620 &sc->parent_dmat)) {
621 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
622 return (ENOMEM);
623 }
624
625 /*
626 * DMA tag for normal sized RX frames
627 */
628 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
629 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
630 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
631 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
632 return (ENOMEM);
633 }
634
635 /*
636 * DMA tag for jumbo sized RX frames.
637 */
638 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
639 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
640 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
641 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
642 return (ENOMEM);
643 }
644
645 /*
646 * DMA tag for TX frames.
647 */
648 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
649 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
650 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
651 NULL, NULL, &sc->tx_dmat)) {
652 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
653 return (ENOMEM);
654 }
655
656 return (0);
657}
658
659int
660t3_sge_free(struct adapter *sc)
661{
662
663 if (sc->tx_dmat != NULL)
664 bus_dma_tag_destroy(sc->tx_dmat);
665
666 if (sc->rx_jumbo_dmat != NULL)
667 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
668
669 if (sc->rx_dmat != NULL)
670 bus_dma_tag_destroy(sc->rx_dmat);
671
672 if (sc->parent_dmat != NULL)
673 bus_dma_tag_destroy(sc->parent_dmat);
674
675 return (0);
676}
677
678void
679t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
680{
681
682 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
683 qs->rspq.polling = 0 /* p->polling */;
684}
685
686#if !defined(__i386__) && !defined(__amd64__)
687static void
688refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
689{
690 struct refill_fl_cb_arg *cb_arg = arg;
691
692 cb_arg->error = error;
693 cb_arg->seg = segs[0];
694 cb_arg->nseg = nseg;
695
696}
697#endif
707static void
708refill_fl(adapter_t *sc, struct sge_fl *q, int n)
709{
710 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
711 struct rx_desc *d = &q->desc[q->pidx];
712 struct refill_fl_cb_arg cb_arg;
713 struct mbuf *m;
714 caddr_t cl;
715 int err;
716
717 cb_arg.error = 0;
718 while (n--) {
719 /*
720 * We allocate an uninitialized mbuf + cluster, mbuf is
721 * initialized after rx.
722 */
723 if (q->zone == zone_pack) {
724 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
725 break;
726 cl = m->m_ext.ext_buf;
727 } else {
728 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
729 break;
730 if ((m = m_gethdr_raw(M_NOWAIT, 0)) == NULL) {
731 uma_zfree(q->zone, cl);
732 break;
733 }
734 }
735 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
736 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
737 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
738 uma_zfree(q->zone, cl);
739 goto done;
740 }
741 sd->flags |= RX_SW_DESC_MAP_CREATED;
742 }
743#if !defined(__i386__) && !defined(__amd64__)
744 err = bus_dmamap_load(q->entry_tag, sd->map,
745 cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
746
747 if (err != 0 || cb_arg.error) {
748 if (q->zone != zone_pack)
749 uma_zfree(q->zone, cl);
750 m_free(m);
751 goto done;
752 }
753#else
754 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
755#endif
756 sd->flags |= RX_SW_DESC_INUSE;
757 sd->rxsd_cl = cl;
758 sd->m = m;
759 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
760 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
761 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
762 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
763
764 d++;
765 sd++;
766
767 if (++q->pidx == q->size) {
768 q->pidx = 0;
769 q->gen ^= 1;
770 sd = q->sdesc;
771 d = q->desc;
772 }
773 q->credits++;
774 q->db_pending++;
775 }
776
777done:
778 if (q->db_pending >= 32) {
779 q->db_pending = 0;
780 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
781 }
782}
783
784
793static void
794free_rx_bufs(adapter_t *sc, struct sge_fl *q)
795{
796 u_int cidx = q->cidx;
797
798 while (q->credits--) {
799 struct rx_sw_desc *d = &q->sdesc[cidx];
800
801 if (d->flags & RX_SW_DESC_INUSE) {
802 bus_dmamap_unload(q->entry_tag, d->map);
803 bus_dmamap_destroy(q->entry_tag, d->map);
804 if (q->zone == zone_pack) {
805 m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
806 uma_zfree(zone_pack, d->m);
807 } else {
808 m_init(d->m, M_NOWAIT, MT_DATA, 0);
809 m_free_raw(d->m);
810 uma_zfree(q->zone, d->rxsd_cl);
811 }
812 }
813
814 d->rxsd_cl = NULL;
815 d->m = NULL;
816 if (++cidx == q->size)
817 cidx = 0;
818 }
819}
820
821static __inline void
822__refill_fl(adapter_t *adap, struct sge_fl *fl)
823{
824 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
825}
826
827static __inline void
828__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
829{
830 uint32_t reclaimable = fl->size - fl->credits;
831
832 if (reclaimable > 0)
833 refill_fl(adap, fl, min(max, reclaimable));
834}
835
845static void
846recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
847{
848 struct rx_desc *from = &q->desc[idx];
849 struct rx_desc *to = &q->desc[q->pidx];
850
851 q->sdesc[q->pidx] = q->sdesc[idx];
852 to->addr_lo = from->addr_lo; // already big endian
853 to->addr_hi = from->addr_hi; // likewise
854 wmb(); /* necessary ? */
855 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
856 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
857 q->credits++;
858
859 if (++q->pidx == q->size) {
860 q->pidx = 0;
861 q->gen ^= 1;
862 }
864}
865
866static void
867alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
868{
869 uint32_t *addr;
870
871 addr = arg;
872 *addr = segs[0].ds_addr;
873}
874
875static int
876alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
877 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
878 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
879{
880 size_t len = nelem * elem_size;
881 void *s = NULL;
882 void *p = NULL;
883 int err;
884
885 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
886 BUS_SPACE_MAXADDR_32BIT,
887 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
888 len, 0, NULL, NULL, tag)) != 0) {
889 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
890 return (ENOMEM);
891 }
892
893 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
894 map)) != 0) {
895 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
896 return (ENOMEM);
897 }
898
899 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
900 bzero(p, len);
901 *(void **)desc = p;
902
903 if (sw_size) {
904 len = nelem * sw_size;
905 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
906 *(void **)sdesc = s;
907 }
908 if (parent_entry_tag == NULL)
909 return (0);
910
911 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
912 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
913 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
914 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
915 NULL, NULL, entry_tag)) != 0) {
916 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
917 return (ENOMEM);
918 }
919 return (0);
920}
921
922static void
923sge_slow_intr_handler(void *arg, int ncount)
924{
925 adapter_t *sc = arg;
926
929 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
930}
931
961static void
962sge_timer_cb(void *arg)
963{
964 adapter_t *sc = arg;
965 if ((sc->flags & USING_MSIX) == 0) {
966
967 struct port_info *pi;
968 struct sge_qset *qs;
969 struct sge_txq *txq;
970 int i, j;
971 int reclaim_ofl, refill_rx;
972
973 if (sc->open_device_map == 0)
974 return;
975
976 for (i = 0; i < sc->params.nports; i++) {
977 pi = &sc->port[i];
978 for (j = 0; j < pi->nqsets; j++) {
979 qs = &sc->sge.qs[pi->first_qset + j];
980 txq = &qs->txq[0];
981 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
982 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
983 (qs->fl[1].credits < qs->fl[1].size));
984 if (reclaim_ofl || refill_rx) {
985 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
986 break;
987 }
988 }
989 }
990 }
991
992 if (sc->params.nports > 2) {
993 int i;
994
995 for_each_port(sc, i) {
996 struct port_info *pi = &sc->port[i];
997
999 F_SELEGRCNTX |
1001 }
1002 }
1003 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1004 sc->open_device_map != 0)
1005 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1006}
1007
1008/*
1009 * This is meant to be a catch-all function to keep sge state private
1010 * to sge.c
1011 *
1012 */
1013int
1014t3_sge_init_adapter(adapter_t *sc)
1015{
1016 callout_init(&sc->sge_timer_ch, 1);
1017 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1018 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1019 return (0);
1020}
1021
1022int
1023t3_sge_reset_adapter(adapter_t *sc)
1024{
1025 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1026 return (0);
1027}
1028
1029int
1030t3_sge_init_port(struct port_info *pi)
1031{
1032 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1033 return (0);
1034}
1035
1045static __inline void
1046refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1047{
1048
1049 /* mbufs are allocated on demand when a rspq entry is processed. */
1050 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1051 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1052}
1053
1054static void
1055sge_txq_reclaim_handler(void *arg, int ncount)
1056{
1057 struct sge_qset *qs = arg;
1058 int i;
1059
1060 for (i = 0; i < 3; i++)
1061 reclaim_completed_tx(qs, 16, i);
1062}
1063
1064static void
1065sge_timer_reclaim(void *arg, int ncount)
1066{
1067 struct port_info *pi = arg;
1068 int i, nqsets = pi->nqsets;
1069 adapter_t *sc = pi->adapter;
1070 struct sge_qset *qs;
1071 struct mtx *lock;
1072
1073 KASSERT((sc->flags & USING_MSIX) == 0,
1074 ("can't call timer reclaim for msi-x"));
1075
1076 for (i = 0; i < nqsets; i++) {
1077 qs = &sc->sge.qs[pi->first_qset + i];
1078
1080 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1081 &sc->sge.qs[0].rspq.lock;
1082
1083 if (mtx_trylock(lock)) {
1084 /* XXX currently assume that we are *NOT* polling */
1085 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1086
1087 if (qs->fl[0].credits < qs->fl[0].size - 16)
1088 __refill_fl(sc, &qs->fl[0]);
1089 if (qs->fl[1].credits < qs->fl[1].size - 16)
1090 __refill_fl(sc, &qs->fl[1]);
1091
1092 if (status & (1 << qs->rspq.cntxt_id)) {
1093 if (qs->rspq.credits) {
1094 refill_rspq(sc, &qs->rspq, 1);
1095 qs->rspq.credits--;
1096 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1097 1 << qs->rspq.cntxt_id);
1098 }
1099 }
1100 mtx_unlock(lock);
1101 }
1102 }
1103}
1104
1112static void
1113init_qset_cntxt(struct sge_qset *qs, u_int id)
1114{
1115
1116 qs->rspq.cntxt_id = id;
1117 qs->fl[0].cntxt_id = 2 * id;
1118 qs->fl[1].cntxt_id = 2 * id + 1;
1119 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1120 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1121 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1122 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1123 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1124
1125 /* XXX: a sane limit is needed instead of INT_MAX */
1126 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1127 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1128 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1129}
1130
1131
1132static void
1133txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1134{
1135 txq->in_use += ndesc;
1136 /*
1137 * XXX we don't handle stopping of queue
1138 * presumably start handles this when we bump against the end
1139 */
1140 txqs->gen = txq->gen;
1141 txq->unacked += ndesc;
1142 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1143 txq->unacked &= 31;
1144 txqs->pidx = txq->pidx;
1145 txq->pidx += ndesc;
1146#ifdef INVARIANTS
1147 if (((txqs->pidx > txq->cidx) &&
1148 (txq->pidx < txqs->pidx) &&
1149 (txq->pidx >= txq->cidx)) ||
1150 ((txqs->pidx < txq->cidx) &&
1151 (txq->pidx >= txq-> cidx)) ||
1152 ((txqs->pidx < txq->cidx) &&
1153 (txq->cidx < txqs->pidx)))
1154 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1155 txqs->pidx, txq->pidx, txq->cidx);
1156#endif
1157 if (txq->pidx >= txq->size) {
1158 txq->pidx -= txq->size;
1159 txq->gen ^= 1;
1160 }
1161
1162}
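/*
 * A clarifying note on the arithmetic above: unacked accumulates the
 * descriptor count, and once it reaches 32 that bit is shifted into the
 * S_WR_COMPL position of the work request header (via txqs->compl) before
 * the counter is wrapped back with unacked &= 31, so the hardware is asked
 * for a completion roughly once every 32 descriptors.
 */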
1163
1172static __inline unsigned int
1173calc_tx_descs(const struct mbuf *m, int nsegs)
1174{
1175 unsigned int flits;
1176
1177 if (m->m_pkthdr.len <= PIO_LEN)
1178 return 1;
1179
1180 flits = sgl_len(nsegs) + 2;
1181 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1182 flits++;
1183
1184 return flits_to_desc(flits);
1185}
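/*
 * A worked example of calc_tx_descs(), assuming the SGE_NUM_GENBITS == 2
 * layout: a non-TSO packet mapped into 4 DMA segments needs
 * sgl_len(4) + 2 = 8 flits, and flits_to_desc(8) = 1 + (8 - 2) / 14 = 1
 * descriptor; a TSO packet with the same SGL needs one more flit for the
 * LSO header but still fits in a single descriptor.
 */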
1186
1197static __inline void
1198make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1199{
1200 int i, idx;
1201
1202 for (idx = 0, i = 0; i < nsegs; i++) {
1203 /*
1204 * firmware doesn't like empty segments
1205 */
1206 if (segs[i].ds_len == 0)
1207 continue;
1208 if (i && idx == 0)
1209 ++sgp;
1210
1211 sgp->len[idx] = htobe32(segs[i].ds_len);
1212 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1213 idx ^= 1;
1214 }
1215
1216 if (idx) {
1217 sgp->len[idx] = 0;
1218 sgp->addr[idx] = 0;
1219 }
1220}
1221
1234static __inline void
1235check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1236{
1237#if USE_GTS
1238 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1239 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1240 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1241#ifdef T3_TRACE
1242 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1243 q->cntxt_id);
1244#endif
1247 }
1248#else
1249 if (mustring || ++q->db_pending >= 32) {
1250 wmb(); /* write descriptors before telling HW */
1251 t3_write_reg(adap, A_SG_KDOORBELL,
1252 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1253 q->db_pending = 0;
1254 }
1255#endif
1256}
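/*
 * A clarifying note: in the normal (non-GTS) path the doorbell write is
 * batched; the chip is only told about new descriptors once 32 are pending,
 * or immediately when the caller passes mustring != 0 (e.g. when a transmit
 * path is about to go idle), which keeps the number of MMIO doorbell writes
 * per packet low under load.
 */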
1257
1258static __inline void
1259wr_gen2(struct tx_desc *d, unsigned int gen)
1260{
1261#if SGE_NUM_GENBITS == 2
1262 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1263#endif
1264}
1265
1283static void
1284write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1285 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1286 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1287{
1288
1289 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1290 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1291
1292 if (__predict_true(ndesc == 1)) {
1293 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1294 V_WR_SGLSFLT(flits)) | wr_hi,
1295 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1296 wr_lo);
1297
1298 wr_gen2(txd, txqs->gen);
1299
1300 } else {
1301 unsigned int ogen = txqs->gen;
1302 const uint64_t *fp = (const uint64_t *)sgl;
1303 struct work_request_hdr *wp = wrp;
1304
1305 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1306 V_WR_SGLSFLT(flits)) | wr_hi;
1307
1308 while (sgl_flits) {
1309 unsigned int avail = WR_FLITS - flits;
1310
1311 if (avail > sgl_flits)
1312 avail = sgl_flits;
1313 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1314 sgl_flits -= avail;
1315 ndesc--;
1316 if (!sgl_flits)
1317 break;
1318
1319 fp += avail;
1320 txd++;
1321 txsd++;
1322 if (++txqs->pidx == txq->size) {
1323 txqs->pidx = 0;
1324 txqs->gen ^= 1;
1325 txd = txq->desc;
1326 txsd = txq->sdesc;
1327 }
1328
1329 /*
1330 * when the head of the mbuf chain
1331 * is freed all clusters will be freed
1332 * with it
1333 */
1334 wrp = (struct work_request_hdr *)txd;
1335 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1336 V_WR_SGLSFLT(1)) | wr_hi;
1337 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1338 sgl_flits + 1)) |
1339 V_WR_GEN(txqs->gen)) | wr_lo;
1340 wr_gen2(txd, txqs->gen);
1341 flits = 1;
1342 }
1343 wrp->wrh_hi |= htonl(F_WR_EOP);
1344 wmb();
1345 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1346 wr_gen2((struct tx_desc *)wp, ogen);
1347 }
1348}
1349
1350/* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1351#define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
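/* The 20 + 20 above assumes option-less IPv4 and TCP headers. */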
1352
1353#define GET_VTAG(cntrl, m) \
1354do { \
1355 if ((m)->m_flags & M_VLANTAG) \
1356 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1357} while (0)
1358
1359static int
1360t3_encap(struct sge_qset *qs, struct mbuf **m)
1361{
1362 adapter_t *sc;
1363 struct mbuf *m0;
1364 struct sge_txq *txq;
1365 struct txq_state txqs;
1366 struct port_info *pi;
1367 unsigned int ndesc, flits, cntrl, mlen;
1368 int err, nsegs, tso_info = 0;
1369
1370 struct work_request_hdr *wrp;
1371 struct tx_sw_desc *txsd;
1372 struct sg_ent *sgp, *sgl;
1373 uint32_t wr_hi, wr_lo, sgl_flits;
1374 bus_dma_segment_t segs[TX_MAX_SEGS];
1375
1376 struct tx_desc *txd;
1377
1378 pi = qs->port;
1379 sc = pi->adapter;
1380 txq = &qs->txq[TXQ_ETH];
1381 txd = &txq->desc[txq->pidx];
1382 txsd = &txq->sdesc[txq->pidx];
1383 sgl = txq->txq_sgl;
1384
1385 prefetch(txd);
1386 m0 = *m;
1387
1388 mtx_assert(&qs->lock, MA_OWNED);
1389 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1390 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1391
1392 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1395
1396 if (m0->m_nextpkt != NULL) {
1397 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1398 ndesc = 1;
1399 mlen = 0;
1400 } else {
1401 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1402 &m0, segs, &nsegs))) {
1403 if (cxgb_debug)
1404 printf("failed ... err=%d\n", err);
1405 return (err);
1406 }
1407 mlen = m0->m_pkthdr.len;
1408 ndesc = calc_tx_descs(m0, nsegs);
1409 }
1410 txq_prod(txq, ndesc, &txqs);
1411
1412 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1413 txsd->m = m0;
1414
1415 if (m0->m_nextpkt != NULL) {
1416 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1417 int i, fidx;
1418
1419 if (nsegs > 7)
1420 panic("trying to coalesce %d packets in to one WR", nsegs);
1421 txq->txq_coalesced += nsegs;
1422 wrp = (struct work_request_hdr *)txd;
1423 flits = nsegs*2 + 1;
1424
1425 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1426 struct cpl_tx_pkt_batch_entry *cbe;
1427 uint64_t flit;
1428 uint32_t *hflit = (uint32_t *)&flit;
1429 int cflags = m0->m_pkthdr.csum_flags;
1430
1431 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1432 GET_VTAG(cntrl, m0);
1433 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1434 if (__predict_false(!(cflags & CSUM_IP)))
1435 cntrl |= F_TXPKT_IPCSUM_DIS;
1436 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1438 cntrl |= F_TXPKT_L4CSUM_DIS;
1439 
1440 hflit[0] = htonl(cntrl);
1441 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 flit |= htobe64(1 << 24);
1443 cbe = &cpl_batch->pkt_entry[i];
1444 cbe->cntrl = hflit[0];
1445 cbe->len = hflit[1];
1446 cbe->addr = htobe64(segs[i].ds_addr);
1447 }
1448
1449 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1450 V_WR_SGLSFLT(flits)) |
1452 wr_lo = htonl(V_WR_LEN(flits) |
1453 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1454 set_wr_hdr(wrp, wr_hi, wr_lo);
1455 wmb();
1456 ETHER_BPF_MTAP(pi->ifp, m0);
1457 wr_gen2(txd, txqs.gen);
1458 check_ring_tx_db(sc, txq, 0);
1459 return (0);
1460 } else if (tso_info) {
1461 uint16_t eth_type;
1462 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1463 struct ether_header *eh;
1464 void *l3hdr;
1465 struct tcphdr *tcp;
1466
1467 txd->flit[2] = 0;
1468 GET_VTAG(cntrl, m0);
1469 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1470 hdr->cntrl = htonl(cntrl);
1471 hdr->len = htonl(mlen | 0x80000000);
1472
1473 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1474 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 m0, mlen, m0->m_pkthdr.tso_segsz,
1476 (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 panic("tx tso packet too small");
1478 }
1479
1480 /* Make sure that ether, ip, tcp headers are all in m0 */
1481 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1482 m0 = m_pullup(m0, TCPPKTHDRSIZE);
1483 if (__predict_false(m0 == NULL)) {
1484 /* XXX panic probably an overreaction */
1485 panic("couldn't fit header into mbuf");
1486 }
1487 }
1488
1489 eh = mtod(m0, struct ether_header *);
1490 eth_type = eh->ether_type;
1491 if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 struct ether_vlan_header *evh = (void *)eh;
1493
1494 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1495 l3hdr = evh + 1;
1496 eth_type = evh->evl_proto;
1497 } else {
1498 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1499 l3hdr = eh + 1;
1500 }
1501
1502 if (eth_type == htons(ETHERTYPE_IP)) {
1503 struct ip *ip = l3hdr;
1504
1505 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1506 tcp = (struct tcphdr *)(ip + 1);
1507 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 struct ip6_hdr *ip6 = l3hdr;
1509
1510 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 ("%s: CSUM_TSO with ip6_nxt %d",
1512 __func__, ip6->ip6_nxt));
1513
1514 tso_info |= F_LSO_IPV6;
1515 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1516 tcp = (struct tcphdr *)(ip6 + 1);
1517 } else
1518 panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1519
1520 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1521 hdr->lso_info = htonl(tso_info);
1522
1523 if (__predict_false(mlen <= PIO_LEN)) {
1524 /*
1525 * pkt not undersized but fits in PIO_LEN
1526 * Indicates a TSO bug at the higher levels.
1527 */
1528 txsd->m = NULL;
1529 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1530 flits = (mlen + 7) / 8 + 3;
1531 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1533 F_WR_SOP | F_WR_EOP | txqs.compl);
1534 wr_lo = htonl(V_WR_LEN(flits) |
1535 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1536 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1537 wmb();
1538 ETHER_BPF_MTAP(pi->ifp, m0);
1539 wr_gen2(txd, txqs.gen);
1540 check_ring_tx_db(sc, txq, 0);
1541 m_freem(m0);
1542 return (0);
1543 }
1544 flits = 3;
1545 } else {
1546 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1547
1548 GET_VTAG(cntrl, m0);
1549 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1550 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1551 cntrl |= F_TXPKT_IPCSUM_DIS;
1552 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1554 cntrl |= F_TXPKT_L4CSUM_DIS;
1555 cpl->cntrl = htonl(cntrl);
1556 cpl->len = htonl(mlen | 0x80000000);
1557
1558 if (mlen <= PIO_LEN) {
1559 txsd->m = NULL;
1560 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1561 flits = (mlen + 7) / 8 + 2;
1562
1563 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1565 F_WR_SOP | F_WR_EOP | txqs.compl);
1566 wr_lo = htonl(V_WR_LEN(flits) |
1567 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1568 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1569 wmb();
1570 ETHER_BPF_MTAP(pi->ifp, m0);
1571 wr_gen2(txd, txqs.gen);
1572 check_ring_tx_db(sc, txq, 0);
1573 m_freem(m0);
1574 return (0);
1575 }
1576 flits = 2;
1577 }
1578 wrp = (struct work_request_hdr *)txd;
1579 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1580 make_sgl(sgp, segs, nsegs);
1581
1582 sgl_flits = sgl_len(nsegs);
1583
1584 ETHER_BPF_MTAP(pi->ifp, m0);
1585
1586 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1588 wr_lo = htonl(V_WR_TID(txq->token));
1589 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1590 sgl_flits, wr_hi, wr_lo);
1591 check_ring_tx_db(sc, txq, 0);
1592
1593 return (0);
1594}
1595
1596#ifdef DEBUGNET
1597int
1598cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m)
1599{
1600 int error;
1601
1602 error = t3_encap(qs, m);
1603 if (error == 0)
1604 check_ring_tx_db(qs->port->adapter, &qs->txq[TXQ_ETH], 1);
1605 else if (*m != NULL) {
1606 m_freem(*m);
1607 *m = NULL;
1608 }
1609 return (error);
1610}
1611#endif
1612
1613void
1614cxgb_tx_watchdog(void *arg)
1615{
1616 struct sge_qset *qs = arg;
1617 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1618
1619 if (qs->coalescing != 0 &&
1620 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1621 TXQ_RING_EMPTY(qs))
1622 qs->coalescing = 0;
1623 else if (qs->coalescing == 0 &&
1624 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1625 qs->coalescing = 1;
1626 if (TXQ_TRYLOCK(qs)) {
1627 qs->qs_flags |= QS_FLUSHING;
1628 cxgb_start_locked(qs);
1629 qs->qs_flags &= ~QS_FLUSHING;
1630 TXQ_UNLOCK(qs);
1631 }
1632 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1633 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1634 qs, txq->txq_watchdog.c_cpu);
1635}
1636
1637static void
1638cxgb_tx_timeout(void *arg)
1639{
1640 struct sge_qset *qs = arg;
1641 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1642
1643 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1644 qs->coalescing = 1;
1645 if (TXQ_TRYLOCK(qs)) {
1646 qs->qs_flags |= QS_TIMEOUT;
1647 cxgb_start_locked(qs);
1648 qs->qs_flags &= ~QS_TIMEOUT;
1649 TXQ_UNLOCK(qs);
1650 }
1651}
1652
1653static void
1654cxgb_start_locked(struct sge_qset *qs)
1655{
1656 struct mbuf *m_head = NULL;
1657 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1658 struct port_info *pi = qs->port;
1659 struct ifnet *ifp = pi->ifp;
1660
1661 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1662 reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH);
1663 
1664 if (!pi->link_config.link_ok) {
1665 TXQ_RING_FLUSH(qs);
1666 return;
1667 }
1668 TXQ_LOCK_ASSERT(qs);
1669 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1670 pi->link_config.link_ok) {
1672
1673 if (txq->size - txq->in_use <= TX_MAX_DESC)
1674 break;
1675
1676 if ((m_head = cxgb_dequeue(qs)) == NULL)
1677 break;
1678 /*
1679 * Encapsulation can modify our pointer, and/or make it
1680 * NULL on failure. In that event, we can't requeue.
1681 */
1682 if (t3_encap(qs, &m_head) || m_head == NULL)
1683 break;
1684
1685 m_head = NULL;
1686 }
1687
1688 if (txq->db_pending)
1689 check_ring_tx_db(pi->adapter, txq, 1);
1690
1691 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1692 pi->link_config.link_ok)
1693 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1694 qs, txq->txq_timer.c_cpu);
1695 if (m_head != NULL)
1696 m_freem(m_head);
1697}
1698
1699static int
1700cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1701{
1702 struct port_info *pi = qs->port;
1703 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1704 struct buf_ring *br = txq->txq_mr;
1705 int error, avail;
1706
1707 avail = txq->size - txq->in_use;
1708 TXQ_LOCK_ASSERT(qs);
1709
1710 /*
1711 * We can only do a direct transmit if the following are true:
1712 * - we aren't coalescing (ring < 3/4 full)
1713 * - the link is up -- checked in caller
1714 * - there are no packets enqueued already
1715 * - there is space in hardware transmit queue
1716 */
1717 if (check_pkt_coalesce(qs) == 0 &&
1718 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1719 if (t3_encap(qs, &m)) {
1720 if (m != NULL &&
1721 (error = drbr_enqueue(ifp, br, m)) != 0)
1722 return (error);
1723 } else {
1724 if (txq->db_pending)
1725 check_ring_tx_db(pi->adapter, txq, 1);
1726
1727 /*
1728 * We've bypassed the buf ring so we need to update
1729 * the stats directly
1730 */
1731 txq->txq_direct_packets++;
1732 txq->txq_direct_bytes += m->m_pkthdr.len;
1733 }
1734 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1735 return (error);
1736
1738 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1739 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1740 cxgb_start_locked(qs);
1741 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1742 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1743 qs, txq->txq_timer.c_cpu);
1744 return (0);
1745}
1746
1747int
1748cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1749{
1750 struct sge_qset *qs;
1751 struct port_info *pi = ifp->if_softc;
1752 int error, qidx = pi->first_qset;
1753
1754 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1755 ||(!pi->link_config.link_ok)) {
1756 m_freem(m);
1757 return (0);
1758 }
1759
1760 /* check if flowid is set */
1761 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1762 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1763
1764 qs = &pi->adapter->sge.qs[qidx];
1765
1766 if (TXQ_TRYLOCK(qs)) {
1767 /* XXX running */
1768 error = cxgb_transmit_locked(ifp, qs, m);
1769 TXQ_UNLOCK(qs);
1770 } else
1771 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1772 return (error);
1773}
1774
1775void
1776cxgb_qflush(struct ifnet *ifp)
1777{
1778 /*
1779 * flush any enqueued mbufs in the buf_rings
1780 * and in the transmit queues
1781 * no-op for now
1782 */
1783 return;
1784}
1785
1798static __inline void
1799write_imm(struct tx_desc *d, caddr_t src,
1800 unsigned int len, unsigned int gen)
1801{
1802 struct work_request_hdr *from = (struct work_request_hdr *)src;
1803 struct work_request_hdr *to = (struct work_request_hdr *)d;
1804 uint32_t wr_hi, wr_lo;
1805
1806 KASSERT(len <= WR_LEN && len >= sizeof(*from),
1807 ("%s: invalid len %d", __func__, len));
1808
1809 memcpy(&to[1], &from[1], len - sizeof(*from));
1810 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1811 V_WR_BCNTLFLT(len & 7));
1812 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1813 set_wr_hdr(to, wr_hi, wr_lo);
1814 wmb();
1815 wr_gen2(d, gen);
1816}
1817
1836static __inline int
1837check_desc_avail(adapter_t *adap, struct sge_txq *q,
1838 struct mbuf *m, unsigned int ndesc,
1839 unsigned int qid)
1840{
1841 /*
1842 * XXX We currently only use this for checking the control queue;
1843 * the control queue is only used for binding qsets, which happens
1844 * at init time, so we are guaranteed enough descriptors.
1845 */
1846 if (__predict_false(mbufq_len(&q->sendq))) {
1847addq_exit: (void )mbufq_enqueue(&q->sendq, m);
1848 return 1;
1849 }
1850 if (__predict_false(q->size - q->in_use < ndesc)) {
1851
1852 struct sge_qset *qs = txq_to_qset(q, qid);
1853
1854 setbit(&qs->txq_stopped, qid);
1855 if (should_restart_tx(q) &&
1856 test_and_clear_bit(qid, &qs->txq_stopped))
1857 return 2;
1858
1859 q->stops++;
1860 goto addq_exit;
1861 }
1862 return 0;
1863}
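/*
 * A clarifying note on the return values above: 0 means the work request
 * can be written immediately, 1 means the mbuf was parked on the queue's
 * sendq (the caller gives up for now and the WR is sent when the queue is
 * restarted), and 2 means the queue was stopped but can be restarted right
 * away, so callers jump back and retry the reclaim/availability check.
 */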
1864
1865
1874static __inline void
1875reclaim_completed_tx_imm(struct sge_txq *q)
1876{
1877 unsigned int reclaim = q->processed - q->cleaned;
1878
1879 q->in_use -= reclaim;
1880 q->cleaned += reclaim;
1881}
1882
1893static int
1894ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1895{
1896 int ret;
1897 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1898 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1899
1900 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1901
1902 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1903 wrp->wrh_lo = htonl(V_WR_TID(q->token));
1904
1905 TXQ_LOCK(qs);
1906again: reclaim_completed_tx_imm(q);
1907
1908 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1909 if (__predict_false(ret)) {
1910 if (ret == 1) {
1911 TXQ_UNLOCK(qs);
1912 return (ENOSPC);
1913 }
1914 goto again;
1915 }
1916 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1917
1918 q->in_use++;
1919 if (++q->pidx >= q->size) {
1920 q->pidx = 0;
1921 q->gen ^= 1;
1922 }
1923 TXQ_UNLOCK(qs);
1924 wmb();
1925 t3_write_reg(adap, A_SG_KDOORBELL,
1926 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1927 
1928 m_free(m);
1929 return (0);
1930}
1931
1932
1939static void
1940restart_ctrlq(void *data, int npending)
1941{
1942 struct mbuf *m;
1943 struct sge_qset *qs = (struct sge_qset *)data;
1944 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1945 adapter_t *adap = qs->port->adapter;
1946
1947 TXQ_LOCK(qs);
1948again: reclaim_completed_tx_imm(q);
1949
1950 while (q->in_use < q->size &&
1951 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1952
1953 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1954 m_free(m);
1955
1956 if (++q->pidx >= q->size) {
1957 q->pidx = 0;
1958 q->gen ^= 1;
1959 }
1960 q->in_use++;
1961 }
1962 if (mbufq_len(&q->sendq)) {
1963 setbit(&qs->txq_stopped, TXQ_CTRL);
1964
1965 if (should_restart_tx(q) &&
1966 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1967 goto again;
1968 q->stops++;
1969 }
1970 TXQ_UNLOCK(qs);
1971 wmb();
1972 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1973}
1974
1975
1976/*
1977 * Send a management message through control queue 0
1978 */
1979int
1980t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1981{
1982 return ctrl_xmit(adap, &adap->sge.qs[0], m);
1983}
1984
1994static void
1995t3_free_qset(adapter_t *sc, struct sge_qset *q)
1996{
1997 int i;
1998
2000 if (q->txq[TXQ_ETH].txq_mr != NULL)
2001 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
2002 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
2003 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
2004 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
2005 }
2006
2007 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2008 if (q->fl[i].desc) {
2009 mtx_lock_spin(&sc->sge.reg_lock);
2010 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
2011 mtx_unlock_spin(&sc->sge.reg_lock);
2012 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
2013 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
2014 q->fl[i].desc_map);
2015 bus_dma_tag_destroy(q->fl[i].desc_tag);
2016 bus_dma_tag_destroy(q->fl[i].entry_tag);
2017 }
2018 if (q->fl[i].sdesc) {
2019 free_rx_bufs(sc, &q->fl[i]);
2020 free(q->fl[i].sdesc, M_DEVBUF);
2021 }
2022 }
2023
2024 mtx_unlock(&q->lock);
2025 MTX_DESTROY(&q->lock);
2026 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2027 if (q->txq[i].desc) {
2028 mtx_lock_spin(&sc->sge.reg_lock);
2029 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2030 mtx_unlock_spin(&sc->sge.reg_lock);
2031 bus_dmamap_unload(q->txq[i].desc_tag,
2032 q->txq[i].desc_map);
2033 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2034 q->txq[i].desc_map);
2035 bus_dma_tag_destroy(q->txq[i].desc_tag);
2036 bus_dma_tag_destroy(q->txq[i].entry_tag);
2037 }
2038 if (q->txq[i].sdesc) {
2039 free(q->txq[i].sdesc, M_DEVBUF);
2040 }
2041 }
2042
2043 if (q->rspq.desc) {
2044 mtx_lock_spin(&sc->sge.reg_lock);
2045 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2046 mtx_unlock_spin(&sc->sge.reg_lock);
2047
2048 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2049 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2050 q->rspq.desc_map);
2051 bus_dma_tag_destroy(q->rspq.desc_tag);
2052 MTX_DESTROY(&q->rspq.lock);
2053 }
2054
2055#if defined(INET6) || defined(INET)
2056 tcp_lro_free(&q->lro.ctrl);
2057#endif
2058
2059 bzero(q, sizeof(*q));
2060}
2061
2068void
2069t3_free_sge_resources(adapter_t *sc, int nqsets)
2070{
2071 int i;
2072
2073 for (i = 0; i < nqsets; ++i) {
2074 TXQ_LOCK(&sc->sge.qs[i]);
2075 t3_free_qset(sc, &sc->sge.qs[i]);
2076 }
2077}
2078
2086void
2087t3_sge_start(adapter_t *sc)
2088{
2089 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2090}
2091
2105void
2106t3_sge_stop(adapter_t *sc)
2107{
2108 
2109 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2110}
2111
2125void
2126t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2127{
2128 struct tx_sw_desc *txsd;
2129 unsigned int cidx, mask;
2130 struct sge_txq *q = &qs->txq[queue];
2131
2132#ifdef T3_TRACE
2133 T3_TRACE2(sc->tb[q->cntxt_id & 7],
2134 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2135#endif
2136 cidx = q->cidx;
2137 mask = q->size - 1;
2138 txsd = &q->sdesc[cidx];
2139
2140 mtx_assert(&qs->lock, MA_OWNED);
2141 while (reclaimable--) {
2142 prefetch(q->sdesc[(cidx + 1) & mask].m);
2143 prefetch(q->sdesc[(cidx + 2) & mask].m);
2144
2145 if (txsd->m != NULL) {
2146 if (txsd->flags & TX_SW_DESC_MAPPED) {
2147 bus_dmamap_unload(q->entry_tag, txsd->map);
2148 txsd->flags &= ~TX_SW_DESC_MAPPED;
2149 }
2150 m_freem_list(txsd->m);
2151 txsd->m = NULL;
2152 } else
2153 q->txq_skipped++;
2154
2155 ++txsd;
2156 if (++cidx == q->size) {
2157 cidx = 0;
2158 txsd = q->sdesc;
2159 }
2160 }
2161 q->cidx = cidx;
2162
2163}
2164
2173static __inline int
2174is_new_response(const struct rsp_desc *r,
2175 const struct sge_rspq *q)
2176{
2177 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2178}
2179
2180#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2181#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2182 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2183 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2184 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2185
2186/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2187#define NOMEM_INTR_DELAY 2500
2188
2189#ifdef TCP_OFFLOAD
2202static void
2203write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2204 unsigned int pidx, unsigned int gen, unsigned int ndesc)
2205{
2206 unsigned int sgl_flits, flits;
2207 int i, idx, nsegs, wrlen;
2208 struct work_request_hdr *from;
2209 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2210 struct tx_desc *d = &q->desc[pidx];
2211 struct txq_state txqs;
2212 struct sglist_seg *segs;
2213 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2214 struct sglist *sgl;
2215
2216 from = (void *)(oh + 1); /* Start of WR within mbuf */
2217 wrlen = m->m_len - sizeof(*oh);
2218
2219 if (!(oh->flags & F_HDR_SGL)) {
2220 write_imm(d, (caddr_t)from, wrlen, gen);
2221
2222 /*
2223 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2224 * t3_push_frames and freed in wr_ack. Others, like those sent
2225 * down by close_conn, t3_send_reset, etc. should be freed here.
2226 */
2227 if (!(oh->flags & F_HDR_DF))
2228 m_free(m);
2229 return;
2230 }
2231
2232 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2233
2234 sgl = oh->sgl;
2235 flits = wrlen / 8;
2236 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2237
2238 nsegs = sgl->sg_nseg;
2239 segs = sgl->sg_segs;
2240 for (idx = 0, i = 0; i < nsegs; i++) {
2241 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2242 if (i && idx == 0)
2243 ++sgp;
2244 sgp->len[idx] = htobe32(segs[i].ss_len);
2245 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2246 idx ^= 1;
2247 }
2248 if (idx) {
2249 sgp->len[idx] = 0;
2250 sgp->addr[idx] = 0;
2251 }
2252
2253 sgl_flits = sgl_len(nsegs);
2254 txqs.gen = gen;
2255 txqs.pidx = pidx;
2256 txqs.compl = 0;
2257
2258 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2259 from->wrh_hi, from->wrh_lo);
2260}
2261
2270static int
2271ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2272{
2273 int ret;
2274 unsigned int ndesc;
2275 unsigned int pidx, gen;
2276 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2277 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2278
2279 ndesc = G_HDR_NDESC(oh->flags);
2280
2281 TXQ_LOCK(qs);
2282again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2283 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2284 if (__predict_false(ret)) {
2285 if (ret == 1) {
2286 TXQ_UNLOCK(qs);
2287 return (EINTR);
2288 }
2289 goto again;
2290 }
2291
2292 gen = q->gen;
2293 q->in_use += ndesc;
2294 pidx = q->pidx;
2295 q->pidx += ndesc;
2296 if (q->pidx >= q->size) {
2297 q->pidx -= q->size;
2298 q->gen ^= 1;
2299 }
2300
2301 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2302 check_ring_tx_db(adap, q, 1);
2303 TXQ_UNLOCK(qs);
2304
2305 return (0);
2306}
2307
2314static void
2315restart_offloadq(void *data, int npending)
2316{
2317 struct mbuf *m;
2318 struct sge_qset *qs = data;
2319 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2320 adapter_t *adap = qs->port->adapter;
2321
2322 TXQ_LOCK(qs);
2323again:
2324 while ((m = mbufq_first(&q->sendq)) != NULL) {
2325 unsigned int gen, pidx;
2326 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2327 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2328
2329 if (__predict_false(q->size - q->in_use < ndesc)) {
2330 setbit(&qs->txq_stopped, TXQ_OFLD);
2331 if (should_restart_tx(q) &&
2332 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2333 goto again;
2334 q->stops++;
2335 break;
2336 }
2337
2338 gen = q->gen;
2339 q->in_use += ndesc;
2340 pidx = q->pidx;
2341 q->pidx += ndesc;
2342 if (q->pidx >= q->size) {
2343 q->pidx -= q->size;
2344 q->gen ^= 1;
2345 }
2346
2347 (void)mbufq_dequeue(&q->sendq);
2348 TXQ_UNLOCK(qs);
2349 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2350 TXQ_LOCK(qs);
2351 }
2352#if USE_GTS
2353 set_bit(TXQ_RUNNING, &q->flags);
2354 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2355#endif
2356 TXQ_UNLOCK(qs);
2357 wmb();
2358 t3_write_reg(adap, A_SG_KDOORBELL,
2359 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2360}
2361
2370int
2371t3_offload_tx(struct adapter *sc, struct mbuf *m)
2372{
2373 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2374 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2375
2376 if (oh->flags & F_HDR_CTRL) {
2377 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
2378 return (ctrl_xmit(sc, qs, m));
2379 } else
2380 return (ofld_xmit(sc, qs, m));
2381}
2382#endif
2383
2384static void
2385restart_tx(struct sge_qset *qs)
2386{
2387 struct adapter *sc = qs->port->adapter;
2388
2389 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2390 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2391 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2392 qs->txq[TXQ_OFLD].restarts++;
2393 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2394 }
2395
2396 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2397 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2398 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2399 qs->txq[TXQ_CTRL].restarts++;
2400 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2401 }
2402}
2403
2419int
2420t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2421 const struct qset_params *p, int ntxq, struct port_info *pi)
2422{
2423 struct sge_qset *q = &sc->sge.qs[id];
2424 int i, ret = 0;
2425
2426 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2427 q->port = pi;
2428 q->adap = sc;
2429
2430 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2431 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2432 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2433 goto err;
2434 }
2435 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2436 M_NOWAIT | M_ZERO)) == NULL) {
2437 device_printf(sc->dev, "failed to allocate ifq\n");
2438 goto err;
2439 }
2440 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2441 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2442 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2443 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2444 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2445
2446 init_qset_cntxt(q, id);
2447 q->idx = id;
2448 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2449 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2450 &q->fl[0].desc, &q->fl[0].sdesc,
2451 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2452 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2453 printf("error %d from alloc ring fl0\n", ret);
2454 goto err;
2455 }
2456
2457 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2458 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2459 &q->fl[1].desc, &q->fl[1].sdesc,
2460 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2461 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2462 printf("error %d from alloc ring fl1\n", ret);
2463 goto err;
2464 }
2465
2466 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2467 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2468 &q->rspq.desc_tag, &q->rspq.desc_map,
2469 NULL, NULL)) != 0) {
2470 printf("error %d from alloc ring rspq\n", ret);
2471 goto err;
2472 }
2473
2474 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2475 device_get_unit(sc->dev), irq_vec_idx);
2476 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2477
2478 for (i = 0; i < ntxq; ++i) {
2479 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2480
2481 if ((ret = alloc_ring(sc, p->txq_size[i],
2482 sizeof(struct tx_desc), sz,
2483 &q->txq[i].phys_addr, &q->txq[i].desc,
2484 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2485 &q->txq[i].desc_map,
2486 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2487 printf("error %d from alloc ring tx %i\n", ret, i);
2488 goto err;
2489 }
2490 mbufq_init(&q->txq[i].sendq, INT_MAX);
2491 q->txq[i].gen = 1;
2492 q->txq[i].size = p->txq_size[i];
2493 }
2494
2495#ifdef TCP_OFFLOAD
2496 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2497#endif
2498 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2499 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2500 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2501
2502 q->fl[0].gen = q->fl[1].gen = 1;
2503 q->fl[0].size = p->fl_size;
2504 q->fl[1].size = p->jumbo_size;
2505
2506 q->rspq.gen = 1;
2507 q->rspq.cidx = 0;
2508 q->rspq.size = p->rspq_size;
2509
2510 q->txq[TXQ_ETH].stop_thres = nports *
2512
2513 q->fl[0].buf_size = MCLBYTES;
2514 q->fl[0].zone = zone_pack;
2515 q->fl[0].type = EXT_PACKET;
2516
2517 if (p->jumbo_buf_size == MJUM16BYTES) {
2518 q->fl[1].zone = zone_jumbo16;
2519 q->fl[1].type = EXT_JUMBO16;
2520 } else if (p->jumbo_buf_size == MJUM9BYTES) {
2521 q->fl[1].zone = zone_jumbo9;
2522 q->fl[1].type = EXT_JUMBO9;
2523 } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2524 q->fl[1].zone = zone_jumbop;
2525 q->fl[1].type = EXT_JUMBOP;
2526 } else {
2527 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2528 ret = EDOOFUS;
2529 goto err;
2530 }
2531 q->fl[1].buf_size = p->jumbo_buf_size;
2532
2533 /* Allocate and setup the lro_ctrl structure */
2534 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2535#if defined(INET6) || defined(INET)
2536 ret = tcp_lro_init(&q->lro.ctrl);
2537 if (ret) {
2538 printf("error %d from tcp_lro_init\n", ret);
2539 goto err;
2540 }
2541#endif
2542 q->lro.ctrl.ifp = pi->ifp;
2543
2544 mtx_lock_spin(&sc->sge.reg_lock);
2545 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2546 q->rspq.phys_addr, q->rspq.size,
2547 q->fl[0].buf_size, 1, 0);
2548 if (ret) {
2549 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2550 goto err_unlock;
2551 }
2552
2553 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2554 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2555 q->fl[i].phys_addr, q->fl[i].size,
2556 q->fl[i].buf_size, p->cong_thres, 1,
2557 0);
2558 if (ret) {
2559 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2560 goto err_unlock;
2561 }
2562 }
2563
2564 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2565 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2566 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2567 1, 0);
2568 if (ret) {
2569 printf("error %d from t3_sge_init_ecntxt\n", ret);
2570 goto err_unlock;
2571 }
2572
2573 if (ntxq > 1) {
2574 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2575 USE_GTS, SGE_CNTXT_OFLD, id,
2576 q->txq[TXQ_OFLD].phys_addr,
2577 q->txq[TXQ_OFLD].size, 0, 1, 0);
2578 if (ret) {
2579 printf("error %d from t3_sge_init_ecntxt\n", ret);
2580 goto err_unlock;
2581 }
2582 }
2583
2584 if (ntxq > 2) {
2585 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2586 SGE_CNTXT_CTRL, id,
2587 q->txq[TXQ_CTRL].phys_addr,
2588 q->txq[TXQ_CTRL].size,
2589 q->txq[TXQ_CTRL].token, 1, 0);
2590 if (ret) {
2591 printf("error %d from t3_sge_init_ecntxt\n", ret);
2592 goto err_unlock;
2593 }
2594 }
2595
2596 mtx_unlock_spin(&sc->sge.reg_lock);
2597 t3_update_qset_coalesce(q, p);
2598
2599 refill_fl(sc, &q->fl[0], q->fl[0].size);
2600 refill_fl(sc, &q->fl[1], q->fl[1].size);
2601 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2602
2603 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2604 V_NEWTIMER(q->rspq.holdoff_tmr));
2605
2606 return (0);
2607
2608err_unlock:
2609 mtx_unlock_spin(&sc->sge.reg_lock);
2610err:
2611 TXQ_LOCK(q);
2612 t3_free_qset(sc, q);
2613
2614 return (ret);
2615}
2616
2617/*
2618 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2619 * ethernet data. Hardware assistance with various checksums and any vlan tag
2620 * will also be taken into account here.
2621 */
2622void
2623t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2624{
2625 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2626 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2627 struct ifnet *ifp = pi->ifp;
2628
2629 if (cpl->vlan_valid) {
2630 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2631 m->m_flags |= M_VLANTAG;
2632 }
2633
2634 m->m_pkthdr.rcvif = ifp;
2635 /*
2636 * adjust after conversion to mbuf chain
2637 */
2638 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2639 m->m_len -= (sizeof(*cpl) + ethpad);
2640 m->m_data += (sizeof(*cpl) + ethpad);
2641
2642 if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2643 struct ether_header *eh = mtod(m, void *);
2644 uint16_t eh_type;
2645
2646 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2647 struct ether_vlan_header *evh = mtod(m, void *);
2648
2649 eh_type = evh->evl_proto;
2650 } else
2651 eh_type = eh->ether_type;
2652
2653 if (ifp->if_capenable & IFCAP_RXCSUM &&
2654 eh_type == htons(ETHERTYPE_IP)) {
2655 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2656 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2657 m->m_pkthdr.csum_data = 0xffff;
2658 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2659 eh_type == htons(ETHERTYPE_IPV6)) {
2660 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2661 CSUM_PSEUDO_HDR);
2662 m->m_pkthdr.csum_data = 0xffff;
2663 }
2664 }
2665}
2666
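/*
 * get_packet - return the next ingress packet buffer from a free list.
 * Selects the free list indicated by the response descriptor, syncs and
 * unmaps the cluster, and either copies small single-descriptor packets
 * into a fresh mbuf (recycling the original buffer) or hands the cluster
 * itself up.  SOP/EOP flags drive assembly of multi-descriptor packets
 * into the chain tracked by mh; the return value is 1 once a complete
 * packet is ready.
 */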
2683static int
2684get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2685 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2686{
2687
2688 unsigned int len_cq = ntohl(r->len_cq);
2689 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2690 int mask, cidx = fl->cidx;
2691 struct rx_sw_desc *sd = &fl->sdesc[cidx];
2692 uint32_t len = G_RSPD_LEN(len_cq);
2693 uint32_t flags = M_EXT;
2694 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2695 caddr_t cl;
2696 struct mbuf *m;
2697 int ret = 0;
2698
2699 mask = fl->size - 1;
2700 prefetch(fl->sdesc[(cidx + 1) & mask].m);
2701 prefetch(fl->sdesc[(cidx + 2) & mask].m);
2702 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2703 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2704
2705 fl->credits--;
2706 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2707
2708 if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2709 sopeop == RSPQ_SOP_EOP) {
2710 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2711 goto skip_recycle;
2712 cl = mtod(m, void *);
2713 memcpy(cl, sd->rxsd_cl, len);
2714 recycle_rx_buf(adap, fl, fl->cidx);
2715 m->m_pkthdr.len = m->m_len = len;
2716 m->m_flags = 0;
2717 mh->mh_head = mh->mh_tail = m;
2718 ret = 1;
2719 goto done;
2720 } else {
2721 skip_recycle:
2722 bus_dmamap_unload(fl->entry_tag, sd->map);
2723 cl = sd->rxsd_cl;
2724 m = sd->m;
2725
2726 if ((sopeop == RSPQ_SOP_EOP) ||
2727 (sopeop == RSPQ_SOP))
2728 flags |= M_PKTHDR;
2729 m_init(m, M_NOWAIT, MT_DATA, flags);
2730 if (fl->zone == zone_pack) {
2731 /*
2732 * restore clobbered data pointer
2733 */
2734 m->m_data = m->m_ext.ext_buf;
2735 } else {
2736 m_cljset(m, cl, fl->type);
2737 }
2738 m->m_len = len;
2739 }
2740 switch(sopeop) {
2741 case RSPQ_SOP_EOP:
2742 ret = 1;
2743 /* FALLTHROUGH */
2744 case RSPQ_SOP:
2745 mh->mh_head = mh->mh_tail = m;
2746 m->m_pkthdr.len = len;
2747 break;
2748 case RSPQ_EOP:
2749 ret = 1;
2750 /* FALLTHROUGH */
2751 case RSPQ_NSOP_NEOP:
2752 if (mh->mh_tail == NULL) {
2753 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2754 m_freem(m);
2755 m = NULL;
2756 break;
2757 }
2758 mh->mh_tail->m_next = m;
2759 mh->mh_tail = m;
2760 mh->mh_head->m_pkthdr.len += len;
2761 break;
2762 }
2763 if (cxgb_debug && m != NULL)
2764 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2765done:
2766 if (++fl->cidx == fl->size)
2767 fl->cidx = 0;
2768
2769 return (ret);
2770}
2771
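/*
 * handle_rsp_cntrl_info - handle control information carried in a response.
 * Tx credits returned for the Ethernet, offload, and control queues are
 * accumulated in each queue's 'processed' counter; with USE_GTS enabled the
 * corresponding TXQ_RUNNING bits are cleared as well.
 */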
2781static __inline void
2782handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2783{
2784 unsigned int credits;
2785
2786#if USE_GTS
2787 if (flags & F_RSPD_TXQ0_GTS)
2788 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2789#endif
2790 credits = G_RSPD_TXQ0_CR(flags);
2791 if (credits)
2792 qs->txq[TXQ_ETH].processed += credits;
2793
2794 credits = G_RSPD_TXQ2_CR(flags);
2795 if (credits)
2796 qs->txq[TXQ_CTRL].processed += credits;
2797
2798# if USE_GTS
2799 if (flags & F_RSPD_TXQ1_GTS)
2800 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2801# endif
2802 credits = G_RSPD_TXQ1_CR(flags);
2803 if (credits)
2804 qs->txq[TXQ_OFLD].processed += credits;
2805
2806}
2807
2808static void
2809check_ring_db(adapter_t *adap, struct sge_qset *qs,
2810 unsigned int sleeping)
2811{
2812 ;
2813}
2814
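/*
 * process_responses - process responses from an SGE response queue up to
 * the supplied budget (a negative budget is effectively unlimited).
 * Responses include received packets as well as Tx credits and other
 * events for the queues belonging to this response queue's queue set.
 */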
2830static int
2831process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2832{
2833 struct sge_rspq *rspq = &qs->rspq;
2834 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2835 int budget_left = budget;
2836 unsigned int sleeping = 0;
2837#if defined(INET6) || defined(INET)
2838 int lro_enabled = qs->lro.enabled;
2839 int skip_lro;
2840 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2841#endif
2842 struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2843#ifdef DEBUG
2844 static int last_holdoff = 0;
2845 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2846 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2847 last_holdoff = rspq->holdoff_tmr;
2848 }
2849#endif
2850 rspq->next_holdoff = rspq->holdoff_tmr;
2851
2852 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2853 int eth, eop = 0, ethpad = 0;
2854 uint32_t flags = ntohl(r->flags);
2855 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2856 uint8_t opcode = r->rss_hdr.opcode;
2857
2858 eth = (opcode == CPL_RX_PKT);
2859
2860 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2861 struct mbuf *m;
2862
2863 if (cxgb_debug)
2864 printf("async notification\n");
2865
2866 if (mh->mh_head == NULL) {
2867 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2868 m = mh->mh_head;
2869 } else {
2870 m = m_gethdr(M_NOWAIT, MT_DATA);
2871 }
2872 if (m == NULL)
2873 goto no_mem;
2874
2875 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2876 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2877 *mtod(m, uint8_t *) = CPL_ASYNC_NOTIF;
2878 opcode = CPL_ASYNC_NOTIF;
2879 eop = 1;
2880 rspq->async_notif++;
2881 goto skip;
2882 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2883 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2884
2885 if (m == NULL) {
2886 no_mem:
2887 rspq->next_holdoff = NOMEM_INTR_DELAY;
2888 budget_left--;
2889 break;
2890 }
2891 if (mh->mh_head == NULL)
2892 mh->mh_head = m;
2893 else
2894 mh->mh_tail->m_next = m;
2895 mh->mh_tail = m;
2896
2897 get_imm_packet(adap, r, m);
2898 mh->mh_head->m_pkthdr.len += m->m_len;
2899 eop = 1;
2900 rspq->imm_data++;
2901 } else if (r->len_cq) {
2902 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2903
2904 eop = get_packet(adap, drop_thresh, qs, mh, r);
2905 if (eop) {
2906 if (r->rss_hdr.hash_type && !adap->timestamp) {
2907 M_HASHTYPE_SET(mh->mh_head,
2908 M_HASHTYPE_OPAQUE_HASH);
2909 mh->mh_head->m_pkthdr.flowid = rss_hash;
2910 }
2911 }
2912
2913 ethpad = 2;
2914 } else {
2915 rspq->pure_rsps++;
2916 }
2917 skip:
2918 if (flags & RSPD_CTRL_MASK) {
2919 sleeping |= flags & RSPD_GTS_MASK;
2920 handle_rsp_cntrl_info(qs, flags);
2921 }
2922
2923 if (!eth && eop) {
2924 rspq->offload_pkts++;
2925#ifdef TCP_OFFLOAD
2926 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2927#else
2928 m_freem(mh->mh_head);
2929#endif
2930 mh->mh_head = NULL;
2931 } else if (eth && eop) {
2932 struct mbuf *m = mh->mh_head;
2933
2934 t3_rx_eth(adap, m, ethpad);
2935
2936 /*
2937 * The T304 sends incoming packets on any qset. If LRO
2938 * is also enabled, we could end up sending packet up
2939 * lro_ctrl->ifp's input. That is incorrect.
2940 *
2941 * The mbuf's rcvif was derived from the cpl header and
2942 * is accurate. Skip LRO and just use that.
2943 */
2944#if defined(INET6) || defined(INET)
2945 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2946
2947 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2948 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2949 ) {
2950 /* successfully queue'd for LRO */
2951 } else
2952#endif
2953 {
2954 /*
2955 * LRO not enabled, packet unsuitable for LRO,
2956 * or unable to queue. Pass it up right now in
2957 * either case.
2958 */
2959 struct ifnet *ifp = m->m_pkthdr.rcvif;
2960 (*ifp->if_input)(ifp, m);
2961 }
2962 mh->mh_head = NULL;
2963
2964 }
2965
2966 r++;
2967 if (__predict_false(++rspq->cidx == rspq->size)) {
2968 rspq->cidx = 0;
2969 rspq->gen ^= 1;
2970 r = rspq->desc;
2971 }
2972
2973 if (++rspq->credits >= 64) {
2974 refill_rspq(adap, rspq, rspq->credits);
2975 rspq->credits = 0;
2976 }
2977 __refill_fl_lt(adap, &qs->fl[0], 32);
2978 __refill_fl_lt(adap, &qs->fl[1], 32);
2979 --budget_left;
2980 }
2981
2982#if defined(INET6) || defined(INET)
2983 /* Flush LRO */
2984 tcp_lro_flush_all(lro_ctrl);
2985#endif
2986
2987 if (sleeping)
2988 check_ring_db(adap, qs, sleeping);
2989
2990 mb(); /* commit Tx queue processed updates */
2991 if (__predict_false(qs->txq_stopped > 1))
2992 restart_tx(qs);
2993
2994 __refill_fl_lt(adap, &qs->fl[0], 512);
2995 __refill_fl_lt(adap, &qs->fl[1], 512);
2996 budget -= budget_left;
2997 return (budget);
2998}
2999
3000/*
3001 * A helper function that processes responses and issues GTS.
3002 */
3003static __inline int
3004process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3005{
3006 int work;
3007 static int last_holdoff = 0;
3008
3009 work = process_responses(adap, rspq_to_qset(rq), -1);
3010
3011 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3012 printf("next_holdoff=%d\n", rq->next_holdoff);
3013 last_holdoff = rq->next_holdoff;
3014 }
3015 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3016 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3017
3018 return (work);
3019}
3020
3021#ifdef DEBUGNET
3022int
3023cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs)
3024{
3025
3026 return (process_responses_gts(adap, &qs->rspq));
3027}
3028#endif
3029
3030/*
3031 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3032 * Handles data events from SGE response queues as well as error and other
3033 * async events as they all use the same interrupt pin. We use one SGE
3034 * response queue per port in this mode and protect all response queues with
3035 * queue 0's lock.
3036 */
3037void
3038t3b_intr(void *data)
3039{
3040 uint32_t i, map;
3041 adapter_t *adap = data;
3042 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3043
3044 t3_write_reg(adap, A_PL_CLI, 0);
3045 map = t3_read_reg(adap, A_SG_DATA_INTR);
3046
3047 if (!map)
3048 return;
3049
3050 if (__predict_false(map & F_ERRINTR)) {
3051 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3052 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3053 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3054 }
3055
3056 mtx_lock(&q0->lock);
3057 for_each_port(adap, i)
3058 if (map & (1 << i))
3059 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3060 mtx_unlock(&q0->lock);
3061}
3062
3063/*
3064 * The MSI interrupt handler. This needs to handle data events from SGE
3065 * response queues as well as error and other async events as they all use
3066 * the same MSI vector. We use one SGE response queue per port in this mode
3067 * and protect all response queues with queue 0's lock.
3068 */
3069void
3070t3_intr_msi(void *data)
3071{
3072 adapter_t *adap = data;
3073 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3074 int i, new_packets = 0;
3075
3076 mtx_lock(&q0->lock);
3077
3078 for_each_port(adap, i)
3079 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3080 new_packets = 1;
3081 mtx_unlock(&q0->lock);
3082 if (new_packets == 0) {
3083 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3084 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3085 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3086 }
3087}
3088
3089void
3090t3_intr_msix(void *data)
3091{
3092 struct sge_qset *qs = data;
3093 adapter_t *adap = qs->port->adapter;
3094 struct sge_rspq *rspq = &qs->rspq;
3095
3096 if (process_responses_gts(adap, rspq) == 0)
3097 rspq->unhandled_irqs++;
3098}
3099
3100#define QDUMP_SBUF_SIZE 32 * 400
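/*
 * The sysctl handlers below dump raw response and transmit descriptors into
 * an sbuf for debugging; the dump_start/dump_count sysctls select the window
 * of entries to show.
 */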
3101static int
3102t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3103{
3104 struct sge_rspq *rspq;
3105 struct sge_qset *qs;
3106 int i, err, dump_end, idx;
3107 struct sbuf *sb;
3108 struct rsp_desc *rspd;
3109 uint32_t data[4];
3110
3111 rspq = arg1;
3112 qs = rspq_to_qset(rspq);
3113 if (rspq->rspq_dump_count == 0)
3114 return (0);
3115 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3116 log(LOG_WARNING,
3117 "dump count is too large %d\n", rspq->rspq_dump_count);
3118 rspq->rspq_dump_count = 0;
3119 return (EINVAL);
3120 }
3121 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3122 log(LOG_WARNING,
3123 "dump start of %d is greater than queue size\n",
3124 rspq->rspq_dump_start);
3125 rspq->rspq_dump_start = 0;
3126 return (EINVAL);
3127 }
3128 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3129 if (err)
3130 return (err);
3131 err = sysctl_wire_old_buffer(req, 0);
3132 if (err)
3133 return (err);
3134 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3135
3136 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3137 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3138 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3139 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3140 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3141
3142 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3143 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3144
3145 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3146 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3147 idx = i & (RSPQ_Q_SIZE-1);
3148
3149 rspd = &rspq->desc[idx];
3150 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3151 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3152 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3153 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3154 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3155 be32toh(rspd->len_cq), rspd->intr_gen);
3156 }
3157
3158 err = sbuf_finish(sb);
3159 sbuf_delete(sb);
3160 return (err);
3161}
3162
3163static int
3164t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3165{
3166 struct sge_txq *txq;
3167 struct sge_qset *qs;
3168 int i, j, err, dump_end;
3169 struct sbuf *sb;
3170 struct tx_desc *txd;
3171 uint32_t *WR, wr_hi, wr_lo, gen;
3172 uint32_t data[4];
3173
3174 txq = arg1;
3175 qs = txq_to_qset(txq, TXQ_ETH);
3176 if (txq->txq_dump_count == 0) {
3177 return (0);
3178 }
3179 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3180 log(LOG_WARNING,
3181 "dump count is too large %d\n", txq->txq_dump_count);
3182 txq->txq_dump_count = 1;
3183 return (EINVAL);
3184 }
3185 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3186 log(LOG_WARNING,
3187 "dump start of %d is greater than queue size\n",
3188 txq->txq_dump_start);
3189 txq->txq_dump_start = 0;
3190 return (EINVAL);
3191 }
3192 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3193 if (err)
3194 return (err);
3195 err = sysctl_wire_old_buffer(req, 0);
3196 if (err)
3197 return (err);
3198 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3199
3200 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3201 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3202 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3203 sbuf_printf(sb, " TUN=%u TOE=%u generation%u uP token=%u valid=%u\n",
3204 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3205 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3206 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3207 txq->txq_dump_start,
3208 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3209
3210 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3211 for (i = txq->txq_dump_start; i < dump_end; i++) {
3212 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3213 WR = (uint32_t *)txd->flit;
3214 wr_hi = ntohl(WR[0]);
3215 wr_lo = ntohl(WR[1]);
3216 gen = G_WR_GEN(wr_lo);
3217
3218 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3219 wr_hi, wr_lo, gen);
3220 for (j = 2; j < 30; j += 4)
3221 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3222 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3223
3224 }
3225 err = sbuf_finish(sb);
3226 sbuf_delete(sb);
3227 return (err);
3228}
3229
3230static int
3231t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3232{
3233 struct sge_txq *txq;
3234 struct sge_qset *qs;
3235 int i, j, err, dump_end;
3236 struct sbuf *sb;
3237 struct tx_desc *txd;
3238 uint32_t *WR, wr_hi, wr_lo, gen;
3239
3240 txq = arg1;
3241 qs = txq_to_qset(txq, TXQ_CTRL);
3242 if (txq->txq_dump_count == 0) {
3243 return (0);
3244 }
3245 if (txq->txq_dump_count > 256) {
3246 log(LOG_WARNING,
3247 "dump count is too large %d\n", txq->txq_dump_count);
3248 txq->txq_dump_count = 1;
3249 return (EINVAL);
3250 }
3251 if (txq->txq_dump_start > 255) {
3252 log(LOG_WARNING,
3253 "dump start of %d is greater than queue size\n",
3254 txq->txq_dump_start);
3255 txq->txq_dump_start = 0;
3256 return (EINVAL);
3257 }
3258
3259 err = sysctl_wire_old_buffer(req, 0);
3260 if (err != 0)
3261 return (err);
3262 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3263 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3264 txq->txq_dump_start,
3265 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3266
3267 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3268 for (i = txq->txq_dump_start; i < dump_end; i++) {
3269 txd = &txq->desc[i & (255)];
3270 WR = (uint32_t *)txd->flit;
3271 wr_hi = ntohl(WR[0]);
3272 wr_lo = ntohl(WR[1]);
3273 gen = G_WR_GEN(wr_lo);
3274
3275 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3276 wr_hi, wr_lo, gen);
3277 for (j = 2; j < 30; j += 4)
3278 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3279 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3280
3281 }
3282 err = sbuf_finish(sb);
3283 sbuf_delete(sb);
3284 return (err);
3285}
3286
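/*
 * Sysctl handler: set the interrupt coalescing timer (in microseconds) for
 * all configured queue sets; each per-qset update is performed under the
 * appropriate response queue lock.
 */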
3287static int
3288t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3289{
3290 adapter_t *sc = arg1;
3291 struct qset_params *qsp = &sc->params.sge.qset[0];
3292 int coalesce_usecs;
3293 struct sge_qset *qs;
3294 int i, j, err, nqsets = 0;
3295 struct mtx *lock;
3296
3297 if ((sc->flags & FULL_INIT_DONE) == 0)
3298 return (ENXIO);
3299
3300 coalesce_usecs = qsp->coalesce_usecs;
3301 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3302
3303 if (err != 0) {
3304 return (err);
3305 }
3306 if (coalesce_usecs == qsp->coalesce_usecs)
3307 return (0);
3308
3309 for (i = 0; i < sc->params.nports; i++)
3310 for (j = 0; j < sc->port[i].nqsets; j++)
3311 nqsets++;
3312
3313 coalesce_usecs = max(1, coalesce_usecs);
3314
3315 for (i = 0; i < nqsets; i++) {
3316 qs = &sc->sge.qs[i];
3317 qsp = &sc->params.sge.qset[i];
3318 qsp->coalesce_usecs = coalesce_usecs;
3319
3320 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3321 &sc->sge.qs[0].rspq.lock;
3322
3323 mtx_lock(lock);
3324 t3_update_qset_coalesce(qs, qsp);
3325 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3326 V_NEWTIMER(qs->rspq.holdoff_tmr));
3327 mtx_unlock(lock);
3328 }
3329
3330 return (0);
3331}
3332
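/*
 * Sysctl handler: toggle delivery of an RX packet timestamp in place of the
 * RSS hash (controlled via F_ENABLERXPKTTMSTPRSS).
 */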
3333static int
3334t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3335{
3336 adapter_t *sc = arg1;
3337 int rc, timestamp;
3338
3339 if ((sc->flags & FULL_INIT_DONE) == 0)
3340 return (ENXIO);
3341
3342 timestamp = sc->timestamp;
3343 rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3344
3345 if (rc != 0)
3346 return (rc);
3347
3348 if (timestamp != sc->timestamp) {
3349 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3350 timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3351 sc->timestamp = timestamp;
3352 }
3353
3354 return (0);
3355}
3356
3357void
3358t3_add_attach_sysctls(adapter_t *sc)
3359{
3360 struct sysctl_ctx_list *ctx;
3361 struct sysctl_oid_list *children;
3362
3363 ctx = device_get_sysctl_ctx(sc->dev);
3364 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3365
3366 /* random information */
3367 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3368 "firmware_version",
3369 CTLFLAG_RD, sc->fw_version,
3370 0, "firmware version");
3371 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3372 "hw_revision",
3373 CTLFLAG_RD, &sc->params.rev,
3374 0, "chip model");
3375 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3376 "port_types",
3377 CTLFLAG_RD, sc->port_types,
3378 0, "type of ports");
3379 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3380 "enable_debug",
3381 CTLFLAG_RW, &cxgb_debug,
3382 0, "enable verbose debugging output");
3383 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3384 CTLFLAG_RD, &sc->tunq_coalesce,
3385 "#tunneled packets freed");
3386 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3387 "txq_overrun",
3388 CTLFLAG_RD, &txq_fills,
3389 0, "#times txq overrun");
3390 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3391 "core_clock",
3392 CTLFLAG_RD, &sc->params.vpd.cclk,
3393 0, "core clock frequency (in KHz)");
3394}
3395
3396
3397static const char *rspq_name = "rspq";
3398static const char *txq_names[] =
3399{
3400 "txq_eth",
3401 "txq_ofld",
3402 "txq_ctrl"
3403};
3404
3405static int
3406sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3407{
3408 struct port_info *p = arg1;
3409 uint64_t *parg;
3410
3411 if (!p)
3412 return (EINVAL);
3413
3414 cxgb_refresh_stats(p);
3415 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3416
3417 return (sysctl_handle_64(oidp, parg, 0, req));
3418}
3419
3420void
3421t3_add_configured_sysctls(adapter_t *sc)
3422{
3423 struct sysctl_ctx_list *ctx;
3424 struct sysctl_oid_list *children;
3425 int i, j;
3426
3427 ctx = device_get_sysctl_ctx(sc->dev);
3428 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3429
3430 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3431 "intr_coal",
3432 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3433 0, t3_set_coalesce_usecs,
3434 "I", "interrupt coalescing timer (us)");
3435
3436 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3437 "pkt_timestamp",
3438 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3439 0, t3_pkt_timestamp,
3440 "I", "provide packet timestamp instead of connection hash");
3441
3442 for (i = 0; i < sc->params.nports; i++) {
3443 struct port_info *pi = &sc->port[i];
3444 struct sysctl_oid *poid;
3445 struct sysctl_oid_list *poidlist;
3446 struct mac_stats *mstats = &pi->mac.stats;
3447
3448 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3449 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3450 pi->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3451 "port statistics");
3452 poidlist = SYSCTL_CHILDREN(poid);
3453 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3454 "nqsets", CTLFLAG_RD, &pi->nqsets,
3455 0, "#queue sets");
3456
3457 for (j = 0; j < pi->nqsets; j++) {
3458 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3459 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3460 *ctrlqpoid, *lropoid;
3461 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3462 *txqpoidlist, *ctrlqpoidlist,
3463 *lropoidlist;
3464 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3465
3466 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3467
3468 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3469 qs->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3470 "qset statistics");
3471 qspoidlist = SYSCTL_CHILDREN(qspoid);
3472
3473 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3474 CTLFLAG_RD, &qs->fl[0].empty, 0,
3475 "freelist #0 empty");
3476 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3477 CTLFLAG_RD, &qs->fl[1].empty, 0,
3478 "freelist #1 empty");
3479
3480 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3481 rspq_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3482 "rspq statistics");
3483 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3484
3485 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3486 txq_names[0], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3487 "txq statistics");
3488 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3489
3490 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3491 txq_names[2], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3492 "ctrlq statistics");
3493 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3494
3495 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3496 "lro_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3497 "LRO statistics");
3498 lropoidlist = SYSCTL_CHILDREN(lropoid);
3499
3500 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3501 CTLFLAG_RD, &qs->rspq.size,
3502 0, "#entries in response queue");
3503 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3504 CTLFLAG_RD, &qs->rspq.cidx,
3505 0, "consumer index");
3506 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3507 CTLFLAG_RD, &qs->rspq.credits,
3508 0, "#credits");
3509 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3510 CTLFLAG_RD, &qs->rspq.starved,
3511 0, "#times starved");
3512 SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3513 CTLFLAG_RD, &qs->rspq.phys_addr,
3514 "physical_address_of the queue");
3515 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3516 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3517 0, "start rspq dump entry");
3518 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3519 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3520 0, "#rspq entries to dump");
3521 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3522 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3523 &qs->rspq, 0, t3_dump_rspq, "A",
3524 "dump of the response queue");
3525
3526 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3527 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3528 "#tunneled packets dropped");
3529 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3530 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3531 0, "#tunneled packets waiting to be sent");
3532#if 0
3533 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3534 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3535 0, "#tunneled packets queue producer index");
3536 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3537 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3538 0, "#tunneled packets queue consumer index");
3539#endif
3540 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3541 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3542 0, "#tunneled packets processed by the card");
3543 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3544 CTLFLAG_RD, &txq->cleaned,
3545 0, "#tunneled packets cleaned");
3546 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3547 CTLFLAG_RD, &txq->in_use,
3548 0, "#tunneled packet slots in use");
3549 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
3550 CTLFLAG_RD, &txq->txq_frees,
3551 "#tunneled packets freed");
3552 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3553 CTLFLAG_RD, &txq->txq_skipped,
3554 0, "#tunneled packet descriptors skipped");
3555 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3556 CTLFLAG_RD, &txq->txq_coalesced,
3557 "#tunneled packets coalesced");
3558 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3559 CTLFLAG_RD, &txq->txq_enqueued,
3560 0, "#tunneled packets enqueued to hardware");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3562 CTLFLAG_RD, &qs->txq_stopped,
3563 0, "tx queues stopped");
3564 SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3565 CTLFLAG_RD, &txq->phys_addr,
3566 "physical_address_of the queue");
3567 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3568 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3569 0, "txq generation");
3570 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3571 CTLFLAG_RD, &txq->cidx,
3572 0, "hardware queue cidx");
3573 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3574 CTLFLAG_RD, &txq->pidx,
3575 0, "hardware queue pidx");
3576 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3577 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3578 0, "txq start idx for dump");
3579 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3580 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3581 0, "txq #entries to dump");
3582 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3583 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3584 &qs->txq[TXQ_ETH], 0, t3_dump_txq_eth, "A",
3585 "dump of the transmit queue");
3586
3587 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3588 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3589 0, "ctrlq start idx for dump");
3590 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3591 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3592 0, "ctrl #entries to dump");
3593 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3594 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3595 &qs->txq[TXQ_CTRL], 0, t3_dump_txq_ctrl, "A",
3596 "dump of the transmit queue");
3597
3598 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
3599 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3600 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3601 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3602 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3603 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3604 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3605 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3606 }
3607
3608 /* Now add a node for mac stats. */
3609 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3610 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC statistics");
3611 poidlist = SYSCTL_CHILDREN(poid);
3612
3613 /*
3614 * We (ab)use the length argument (arg2) to pass on the offset
3615 * of the data that we are interested in. This is only required
3616 * for the quad counters that are updated from the hardware (we
3617 * make sure that we return the latest value).
3618 * sysctl_handle_macstat first updates *all* the counters from
3619 * the hardware, and then returns the latest value of the
3620 * requested counter. Best would be to update only the
3621 * requested counter from hardware, but t3_mac_update_stats()
3622 * hides all the register details and we don't want to dive into
3623 * all that here.
3624 */
3625#define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3626 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, \
3627 offsetof(struct mac_stats, a), sysctl_handle_macstat, "QU", 0)
3628 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3629 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3630 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3631 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3632 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3633 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3634 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3635 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3636 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3637 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3638 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3639 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3640 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3641 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3642 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3643 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3644 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3645 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3646 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3647 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3648 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3649 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3650 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3651 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3652 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3653 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3654 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3655 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3656 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3657 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3658 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3659 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3660 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3661 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3662 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3663 CXGB_SYSCTL_ADD_QUAD(rx_short);
3664 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3665 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3666 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3667 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3668 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3669 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3670 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3671 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3672 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3673 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3674#undef CXGB_SYSCTL_ADD_QUAD
3675
3676#define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3677 CTLFLAG_RD, &mstats->a, 0)
3678 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3679 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3680 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3681 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3682 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3683 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3684 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3685 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3686 CXGB_SYSCTL_ADD_ULONG(num_resets);
3687 CXGB_SYSCTL_ADD_ULONG(link_faults);
3688#undef CXGB_SYSCTL_ADD_ULONG
3689 }
3690}
3691
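/*
 * t3_get_desc - dump an SGE descriptor for debugging purposes.
 * qnum selects the queue: 0..2 are the Tx queues, 3 is the response queue,
 * and 4..5 are the free lists.  Returns the descriptor size on success or
 * EINVAL if the queue or index is out of range.
 */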
3702int
3703t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3704 unsigned char *data)
3705{
3706 if (qnum >= 6)
3707 return (EINVAL);
3708
3709 if (qnum < 3) {
3710 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3711 return -EINVAL;
3712 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3713 return sizeof(struct tx_desc);
3714 }
3715
3716 if (qnum == 3) {
3717 if (!qs->rspq.desc || idx >= qs->rspq.size)
3718 return (EINVAL);
3719 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3720 return sizeof(struct rsp_desc);
3721 }
3722
3723 qnum -= 4;
3724 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3725 return (EINVAL);
3726 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3727 return sizeof(struct rx_desc);
3728}
Definition: cxgb_adapter.h:194
char lockbuf[RSPQ_NAME_LEN]
Definition: cxgb_adapter.h:196
bus_dmamap_t desc_map
Definition: cxgb_adapter.h:190
uint32_t rspq_dump_count
Definition: cxgb_adapter.h:198
uint32_t credits
Definition: cxgb_adapter.h:173
uint32_t cntxt_id
Definition: cxgb_adapter.h:182
uint32_t offload_pkts
Definition: cxgb_adapter.h:183
uint32_t starved
Definition: cxgb_adapter.h:186
struct t3_mbuf_hdr rspq_mh
Definition: cxgb_adapter.h:192
uint32_t unhandled_irqs
Definition: cxgb_adapter.h:185
uint32_t gen
Definition: cxgb_adapter.h:176
uint32_t next_holdoff
Definition: cxgb_adapter.h:179
uint32_t processed
Definition: cxgb_adapter.h:233
uint32_t txq_skipped
Definition: cxgb_adapter.h:260
uint64_t txq_direct_bytes
Definition: cxgb_adapter.h:265
uint32_t txq_dump_start
Definition: cxgb_adapter.h:262
uint32_t token
Definition: cxgb_adapter.h:243
bus_dmamap_t desc_map
Definition: cxgb_adapter.h:251
uint64_t txq_frees
Definition: cxgb_adapter.h:266
bus_dma_tag_t desc_tag
Definition: cxgb_adapter.h:250
struct tx_sw_desc * sdesc
Definition: cxgb_adapter.h:242
struct mbufq sendq
Definition: cxgb_adapter.h:253
uint32_t unacked
Definition: cxgb_adapter.h:239
uint32_t gen
Definition: cxgb_adapter.h:238
uint64_t restarts
Definition: cxgb_adapter.h:249
uint64_t txq_coalesced
Definition: cxgb_adapter.h:259
struct callout txq_watchdog
Definition: cxgb_adapter.h:258
struct tx_desc * desc
Definition: cxgb_adapter.h:241
bus_dma_tag_t entry_tag
Definition: cxgb_adapter.h:252
bus_addr_t phys_addr
Definition: cxgb_adapter.h:244
uint32_t size
Definition: cxgb_adapter.h:232
uint64_t stops
Definition: cxgb_adapter.h:248
struct callout txq_timer
Definition: cxgb_adapter.h:257
uint32_t cntxt_id
Definition: cxgb_adapter.h:247
uint32_t in_use
Definition: cxgb_adapter.h:231
uint32_t txq_enqueued
Definition: cxgb_adapter.h:261
uint32_t txq_dump_count
Definition: cxgb_adapter.h:263
struct sg_ent txq_sgl[TX_MAX_SEGS/2+1]
Definition: cxgb_adapter.h:267
struct buf_ring * txq_mr
Definition: cxgb_adapter.h:255
uint32_t stop_thres
Definition: cxgb_adapter.h:235
uint64_t txq_direct_packets
Definition: cxgb_adapter.h:264
uint32_t cidx
Definition: cxgb_adapter.h:236
struct task qresume_task
Definition: cxgb_adapter.h:245
uint32_t pidx
Definition: cxgb_adapter.h:237
uint64_t flags
Definition: cxgb_adapter.h:230
uint32_t db_pending
Definition: cxgb_adapter.h:240
struct task qreclaim_task
Definition: cxgb_adapter.h:246
uint32_t cleaned
Definition: cxgb_adapter.h:234
struct ifaltq * txq_ifq
Definition: cxgb_adapter.h:256
struct mtx reg_lock
Definition: cxgb_adapter.h:297
struct sge_qset qs[SGE_QSETS]
Definition: cxgb_adapter.h:296
struct mbuf * mh_head
Definition: cxgb_osdep.h:62
struct mbuf * mh_tail
Definition: cxgb_osdep.h:63
uint64_t flit[TX_DESC_FLITS]
Definition: cxgb_sge.c:151
bus_dmamap_t map
Definition: cxgb_sge.c:181
struct mbuf * m
Definition: cxgb_sge.c:180
int flags
Definition: cxgb_sge.c:182
unsigned int compl
Definition: cxgb_sge.c:193
unsigned int gen
Definition: cxgb_sge.c:194
unsigned int pidx
Definition: cxgb_sge.c:195
unsigned int cclk
Definition: cxgb_common.h:351
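
The busdma helpers listed in this index are the DMA-mapping entry points the SGE transmit path relies on. The following minimal sketch illustrates how busdma_map_sg_collapse() could be called given only its declared signature; the wrapper name example_map_tx_mbuf is hypothetical, and the txq->entry_tag and txsd->map fields are taken from the sge_txq and tx_sw_desc entries above. This is not code from cxgb_sge.c itself.

/*
 * Illustrative sketch only (assumes the cxgb_sge.c context, where
 * struct sge_txq and struct tx_sw_desc are visible).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#include <cxgb_include.h>
#include <sys/mvec.h>

static int
example_map_tx_mbuf(struct sge_txq *txq, struct tx_sw_desc *txsd,
    struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
{
	int err;

	/*
	 * Map (and, if needed, collapse) the mbuf chain into DMA
	 * segments.  The mbuf is passed by reference because the helper
	 * may replace it with a collapsed chain.
	 */
	err = busdma_map_sg_collapse(txq->entry_tag, txsd->map, m, segs, nsegs);
	if (err != 0)
		return (err);

	/* segs[0 .. *nsegs - 1] now describe the packet for the SGL. */
	return (0);
}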