39#include <sys/kernel.h>
40#include <sys/module.h>
43#include <machine/bus.h>
44#include <machine/resource.h>
47#include <sys/sysctl.h>
48#include <sys/taskqueue.h>
55#include <sys/syslog.h>
56#include <sys/socket.h>
57#include <sys/sglist.h>
60#include <net/if_var.h>
62#include <net/ethernet.h>
63#include <net/if_vlan_var.h>
65#include <netinet/in_systm.h>
66#include <netinet/in.h>
67#include <netinet/ip.h>
68#include <netinet/ip6.h>
69#include <netinet/tcp.h>
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
90 "size of per-queue mbuf ring");
93SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
95 "coalesce small packets into a single work request regardless of ring state");
/*
 * Tx coalescing thresholds and Tx-descriptor reclaim watermarks, all
 * expressed as fractions of the Ethernet Tx queue size.
 *
 * Each expansion is parenthesized so the macro cannot regroup when it
 * appears inside a larger expression: without the parentheses,
 * e.g. `COALESCE_STOP_DEFAULT + 1` would expand to
 * `TX_ETH_Q_SIZE >> 2 + 1` == `TX_ETH_Q_SIZE >> 3`, which is not the
 * intended value (CERT C PRE02-C).
 */
#define COALESCE_START_DEFAULT	(TX_ETH_Q_SIZE >> 1)
#define COALESCE_START_MAX	(TX_ETH_Q_SIZE - (TX_ETH_Q_SIZE >> 3))
#define COALESCE_STOP_DEFAULT	(TX_ETH_Q_SIZE >> 2)
#define COALESCE_STOP_MIN	(TX_ETH_Q_SIZE >> 5)
#define TX_RECLAIM_DEFAULT	(TX_ETH_Q_SIZE >> 5)
#define TX_RECLAIM_MAX		(TX_ETH_Q_SIZE >> 2)
#define TX_RECLAIM_MIN		(TX_ETH_Q_SIZE >> 6)
107SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
109 "coalesce enable threshold");
111SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
113 "coalesce disable threshold");
115SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
117 "tx cleaning minimum threshold");
/*
 * Receive-path tuning constants:
 *  - SGE_RX_SM_BUF_SIZE: byte size of a small RX buffer.
 *  - SGE_RX_DROP_THRES:  NOTE(review): presumably the free-list
 *    occupancy below which arriving packets are dropped -- confirm
 *    against the full source.
 *  - SGE_RX_COPY_THRES:  NOTE(review): looks like the payload size at
 *    or below which data is copied into a fresh mbuf instead of
 *    handing off the cluster -- confirm against get_packet().
 */
132#define SGE_RX_SM_BUF_SIZE 1536
133#define SGE_RX_DROP_THRES 16
134#define SGE_RX_COPY_THRES 128
/* Tx reclaim callout period: hz/2 ticks (0.5 s at the system tick rate). */
140#define TX_RECLAIM_PERIOD (hz >> 1)
/* State bits kept in the software rx/tx descriptor flags word. */
169#define RX_SW_DESC_MAP_CREATED (1 << 0)
170#define TX_SW_DESC_MAP_CREATED (1 << 1)
171#define RX_SW_DESC_INUSE (1 << 3)
172#define TX_SW_DESC_MAPPED (1 << 4)
/*
 * The four start-of-packet/end-of-packet combinations of a response
 * descriptor, built from the hardware SOP/EOP field encoders.
 */
174#define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
175#define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
176#define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
177#define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
215#if SGE_NUM_GENBITS == 1
216 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220#elif SGE_NUM_GENBITS == 2
221 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
226# error "SGE_NUM_GENBITS must be 1 or 2"
/*
 * Convenience accessors for a qset: the qset mutex (qs->lock) and the
 * Ethernet Tx queue's software buf_ring (txq[TXQ_ETH].txq_mr).  All of
 * the TXQ_RING_* macros operate through the owning port's ifp and that
 * single ring.
 */
230#define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
231#define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
232#define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
233#define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
234#define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235#define TXQ_RING_NEEDS_ENQUEUE(qs) \
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237#define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238#define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240#define TXQ_RING_DEQUEUE(qs) \
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
255static __inline uint64_t
266 fill = &sc->tunq_fill[qs->
idx];
284 return (sc->tunq_coalesce);
292#if _BYTE_ORDER == _LITTLE_ENDIAN
294 wr_hilo |= (((uint64_t)wr_lo)<<32);
297 wr_hilo |= (((uint64_t)wr_hi)<<32);
299 wrp->wrh_hilo = wr_hilo;
323 if ((m->m_next != NULL) ||
324 ((mtod(m, vm_offset_t) & PAGE_MASK) + m->m_len > PAGE_SIZE))
328 (ci->
nbytes + m->m_len <= 10500))) {
339 struct mbuf *m, *m_head, *m_tail;
346 m_head = m_tail = NULL;
350 if (m_head == NULL) {
352 }
else if (m != NULL) {
353 m_tail->m_nextpkt = m;
358 panic(
"trying to coalesce %d packets in to one WR", ci.
count);
381 if (reclaim < reclaim_min)
384 mtx_assert(&qs->
lock, MA_OWNED);
398cxgb_debugnet_poll_tx(
struct sge_qset *qs)
440#if SGE_NUM_GENBITS == 1
470static __inline
unsigned int
473 return ((3 * n) / 2 + (n & 1));
488 m->m_len =
sizeof(*cpl) + ntohs(cpl->
len);
491 m->m_len =
sizeof(*cpl) + ntohs(cpl->
len);
494 m->m_ext.ext_buf = NULL;
495 m->m_ext.ext_type = 0;
496 memcpy(mtod(m, uint8_t *), resp->
imm_data, m->m_len);
506#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
507 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
508 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
509 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
511#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
512#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
524 unsigned int v, status;
540 "packet delivered to disabled response queue (0x%x)\n",
552 int i,
nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
559 while (!powerof2(fl_q_size))
567 jumbo_buf_size = MJUM16BYTES;
570 jumbo_buf_size = MJUM9BYTES;
572 while (!powerof2(jumbo_q_size))
576 device_printf(adap->
dev,
577 "Insufficient clusters and/or jumbo buffers.\n");
610 if (bus_dma_tag_create( bus_get_dma_tag(sc->
dev),
615 BUS_SPACE_MAXSIZE_32BIT,
616 BUS_SPACE_UNRESTRICTED,
617 BUS_SPACE_MAXSIZE_32BIT,
621 device_printf(sc->
dev,
"Cannot allocate parent DMA tag\n");
628 if (bus_dma_tag_create(sc->
parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
629 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
630 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->
rx_dmat)) {
631 device_printf(sc->
dev,
"Cannot allocate RX DMA tag\n");
638 if (bus_dma_tag_create(sc->
parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
639 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
641 device_printf(sc->
dev,
"Cannot allocate RX jumbo DMA tag\n");
648 if (bus_dma_tag_create(sc->
parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
652 device_printf(sc->
dev,
"Cannot allocate TX DMA tag\n");
664 bus_dma_tag_destroy(sc->
tx_dmat);
670 bus_dma_tag_destroy(sc->
rx_dmat);
686#if !defined(__i386__) && !defined(__amd64__)
693 cb_arg->
seg = segs[0];
723 if (q->
zone == zone_pack) {
724 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
726 cl = m->m_ext.ext_buf;
728 if ((cl = m_cljget(NULL, M_NOWAIT, q->
buf_size)) == NULL)
730 if ((m = m_gethdr_raw(M_NOWAIT, 0)) == NULL) {
731 uma_zfree(q->
zone, cl);
736 if ((err = bus_dmamap_create(q->
entry_tag, 0, &sd->
map))) {
737 log(LOG_WARNING,
"bus_dmamap_create failed %d\n", err);
738 uma_zfree(q->
zone, cl);
743#if !defined(__i386__) && !defined(__amd64__)
747 if (err != 0 || cb_arg.
error) {
748 if (q->
zone != zone_pack)
749 uma_zfree(q->
zone, cl);
754 cb_arg.
seg.ds_addr = pmap_kextract((vm_offset_t)cl);
759 d->
addr_lo = htobe32(cb_arg.
seg.ds_addr & 0xffffffff);
760 d->
addr_hi = htobe32(((uint64_t)cb_arg.
seg.ds_addr >>32) & 0xffffffff);
796 u_int cidx = q->
cidx;
804 if (q->
zone == zone_pack) {
805 m_init(d->
m, M_NOWAIT, MT_DATA, M_EXT);
806 uma_zfree(zone_pack, d->
m);
808 m_init(d->
m, M_NOWAIT, MT_DATA, 0);
816 if (++cidx == q->
size)
833 refill_fl(adap, fl, min(max, reclaimable));
872 *addr = segs[0].ds_addr;
877 bus_addr_t *phys,
void *desc,
void *sdesc, bus_dma_tag_t *tag,
878 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
880 size_t len = nelem * elem_size;
885 if ((err = bus_dma_tag_create(sc->
parent_dmat, PAGE_SIZE, 0,
886 BUS_SPACE_MAXADDR_32BIT,
887 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
888 len, 0, NULL, NULL, tag)) != 0) {
889 device_printf(sc->
dev,
"Cannot allocate descriptor tag\n");
893 if ((err = bus_dmamem_alloc(*tag, (
void **)&p, BUS_DMA_NOWAIT,
895 device_printf(sc->
dev,
"Cannot allocate descriptor memory\n");
904 len = nelem * sw_size;
905 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
908 if (parent_entry_tag == NULL)
911 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
912 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
915 NULL, NULL, entry_tag)) != 0) {
916 device_printf(sc->
dev,
"Cannot allocate descriptor entry tag\n");
971 int reclaim_ofl, refill_rx;
978 for (j = 0; j < pi->
nqsets; j++) {
984 if (reclaim_ofl || refill_rx) {
1060 for (i = 0; i < 3; i++)
1074 (
"can't call timer reclaim for msi-x"));
1076 for (i = 0; i <
nqsets; i++) {
1083 if (mtx_trylock(
lock)) {
1151 (txq->
pidx >= txq-> cidx)) ||
1154 panic(
"txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1172static __inline
unsigned int
1177 if (m->m_pkthdr.len <=
PIO_LEN)
1181 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1202 for (idx = 0, i = 0; i < nsegs; i++) {
1206 if (segs[i].ds_len == 0)
1211 sgp->
len[idx] = htobe32(segs[i].ds_len);
1212 sgp->
addr[idx] = htobe64(segs[i].ds_addr);
1242 T3_TRACE1(adap->tb[q->
cntxt_id & 7],
"doorbell Tx, cntxt %d",
1261#if SGE_NUM_GENBITS == 2
1285 const struct sge_txq *txq,
const struct sg_ent *sgl,
unsigned int flits,
1286 unsigned int sgl_flits,
unsigned int wr_hi,
unsigned int wr_lo)
1292 if (__predict_true(ndesc == 1)) {
1301 unsigned int ogen = txqs->
gen;
1302 const uint64_t *fp = (
const uint64_t *)sgl;
1309 unsigned int avail =
WR_FLITS - flits;
1311 if (avail > sgl_flits)
1313 memcpy(&txd->
flit[flits], fp, avail *
sizeof(*fp));
/* Ethernet header plus minimal IPv4 (20 bytes) and TCP (20 bytes) headers. */
1351#define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1353#define GET_VTAG(cntrl, m) \
1355 if ((m)->m_flags & M_VLANTAG) \
1356 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1367 unsigned int ndesc, flits, cntrl, mlen;
1368 int err, nsegs, tso_info = 0;
1372 struct sg_ent *sgp, *sgl;
1373 uint32_t wr_hi, wr_lo, sgl_flits;
1388 mtx_assert(&qs->
lock, MA_OWNED);
1390 KASSERT(m0->m_flags & M_PKTHDR, (
"not packet header\n"));
1392 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 tso_info =
V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1396 if (m0->m_nextpkt != NULL) {
1402 &m0, segs, &nsegs))) {
1404 printf(
"failed ... err=%d\n", err);
1407 mlen = m0->m_pkthdr.len;
1412 KASSERT(m0->m_pkthdr.len, (
"empty packet nsegs=%d", nsegs));
1415 if (m0->m_nextpkt != NULL) {
1420 panic(
"trying to coalesce %d packets in to one WR", nsegs);
1423 flits = nsegs*2 + 1;
1425 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1428 uint32_t *hflit = (uint32_t *)&flit;
1429 int cflags = m0->m_pkthdr.csum_flags;
1434 if (__predict_false(!(cflags & CSUM_IP)))
1436 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1440 hflit[0] = htonl(
cntrl);
1441 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 flit |= htobe64(1 << 24);
1444 cbe->
cntrl = hflit[0];
1445 cbe->
len = hflit[1];
1446 cbe->
addr = htobe64(segs[i].ds_addr);
1456 ETHER_BPF_MTAP(pi->
ifp, m0);
1460 }
else if (tso_info) {
1463 struct ether_header *eh;
1470 hdr->
cntrl = htonl(cntrl);
1471 hdr->
len = htonl(mlen | 0x80000000);
1474 printf(
"mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 m0, mlen, m0->m_pkthdr.tso_segsz,
1476 (
int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 panic(
"tx tso packet too small");
1483 if (__predict_false(m0 == NULL)) {
1485 panic(
"couldn't fit header into mbuf");
1489 eh = mtod(m0,
struct ether_header *);
1490 eth_type = eh->ether_type;
1491 if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 struct ether_vlan_header *evh = (
void *)eh;
1496 eth_type = evh->evl_proto;
1502 if (eth_type == htons(ETHERTYPE_IP)) {
1503 struct ip *ip = l3hdr;
1506 tcp = (
struct tcphdr *)(ip + 1);
1507 }
else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 struct ip6_hdr *ip6 = l3hdr;
1510 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 (
"%s: CSUM_TSO with ip6_nxt %d",
1512 __func__, ip6->ip6_nxt));
1516 tcp = (
struct tcphdr *)(ip6 + 1);
1518 panic(
"%s: CSUM_TSO but neither ip nor ip6", __func__);
1523 if (__predict_false(mlen <=
PIO_LEN)) {
1529 m_copydata(m0, 0, mlen, (caddr_t)&txd->
flit[3]);
1530 flits = (mlen + 7) / 8 + 3;
1538 ETHER_BPF_MTAP(pi->
ifp, m0);
1550 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1552 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1556 cpl->
len = htonl(mlen | 0x80000000);
1560 m_copydata(m0, 0, mlen, (caddr_t)&txd->
flit[2]);
1561 flits = (mlen + 7) / 8 + 2;
1570 ETHER_BPF_MTAP(pi->
ifp, m0);
1579 sgp = (ndesc == 1) ? (
struct sg_ent *)&txd->
flit[flits] : sgl;
1584 ETHER_BPF_MTAP(pi->
ifp, m0);
1586 KASSERT(ndesc <= 4, (
"ndesc too large %d", ndesc));
1598cxgb_debugnet_encap(
struct sge_qset *qs,
struct mbuf **m)
1605 else if (*m != NULL) {
1632 if (qs->
port->
ifp->if_drv_flags & IFF_DRV_RUNNING)
1656 struct mbuf *m_head = NULL;
1659 struct ifnet *
ifp = pi->
ifp;
1682 if (
t3_encap(qs, &m_head) || m_head == NULL)
1704 struct buf_ring *br = txq->
txq_mr;
1721 (error = drbr_enqueue(
ifp, br, m)) != 0)
1734 }
else if ((error = drbr_enqueue(
ifp, br, m)) != 0)
1754 if ((
ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1761 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1800 unsigned int len,
unsigned int gen)
1806 KASSERT(len <= WR_LEN && len >=
sizeof(*from),
1807 (
"%s: invalid len %d", __func__, len));
1809 memcpy(&to[1], &from[1], len -
sizeof(*from));
1838 struct mbuf *m,
unsigned int ndesc,
1846 if (__predict_false(mbufq_len(&q->
sendq))) {
1847addq_exit: (void )mbufq_enqueue(&q->
sendq, m);
1850 if (__predict_false(q->
size - q->
in_use < ndesc)) {
1900 KASSERT(m->m_len <=
WR_LEN, (
"%s: bad tx data", __func__));
1909 if (__predict_false(ret)) {
1951 (m = mbufq_dequeue(&q->
sendq)) != NULL) {
1962 if (mbufq_len(&q->
sendq)) {
2020 free(q->
fl[i].
sdesc, M_DEVBUF);
2024 mtx_unlock(&q->
lock);
2055#if defined(INET6) || defined(INET)
2059 bzero(q,
sizeof(*q));
2073 for (i = 0; i <
nqsets; ++i) {
2129 unsigned int cidx, mask;
2134 "reclaiming %u Tx descriptors at cidx %u", reclaimable,
cidx);
2140 mtx_assert(&qs->
lock, MA_OWNED);
2141 while (reclaimable--) {
2145 if (txsd->
m != NULL) {
2148 txsd->
flags &= ~TX_SW_DESC_MAPPED;
/*
 * Bits of the response-descriptor control word that report Tx-queue
 * events: the GTS bits for Tx queues 0 and 1, plus the credit-return
 * count fields for Tx queues 0-2.
 */
2180#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2181#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2182 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2183 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2184 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
/*
 * Interrupt holdoff applied on mbuf-allocation failure.
 * NOTE(review): units (presumably microseconds) not visible here --
 * confirm against the code that programs this value.
 */
2187#define NOMEM_INTR_DELAY 2500
2204 unsigned int pidx,
unsigned int gen,
unsigned int ndesc)
2206 unsigned int sgl_flits, flits;
2207 int i, idx, nsegs, wrlen;
2212 struct sglist_seg *segs;
2213 struct ofld_hdr *oh = mtod(m,
struct ofld_hdr *);
2216 from = (
void *)(oh + 1);
2217 wrlen = m->m_len -
sizeof(*oh);
2219 if (!(oh->flags & F_HDR_SGL)) {
2220 write_imm(d, (caddr_t)from, wrlen, gen);
2227 if (!(oh->flags & F_HDR_DF))
2232 memcpy(&d->
flit[1], &from[1], wrlen -
sizeof(*from));
2236 sgp = (ndesc == 1) ? (
struct sg_ent *)&d->
flit[flits] : t3sgl;
2238 nsegs = sgl->sg_nseg;
2239 segs = sgl->sg_segs;
2240 for (idx = 0, i = 0; i < nsegs; i++) {
2241 KASSERT(segs[i].ss_len, (
"%s: 0 len in sgl", __func__));
2244 sgp->
len[idx] = htobe32(segs[i].ss_len);
2245 sgp->
addr[idx] = htobe64(segs[i].ss_paddr);
2259 from->wrh_hi, from->wrh_lo);
2275 unsigned int pidx, gen;
2277 struct ofld_hdr *oh = mtod(m,
struct ofld_hdr *);
2279 ndesc = G_HDR_NDESC(oh->flags);
2284 if (__predict_false(ret)) {
2301 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2315restart_offloadq(
void *data,
int npending)
2324 while ((m = mbufq_first(&q->
sendq)) != NULL) {
2326 struct ofld_hdr *oh = mtod(m,
struct ofld_hdr *);
2327 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2329 if (__predict_false(q->
size - q->
in_use < ndesc)) {
2347 (void)mbufq_dequeue(&q->
sendq);
2349 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2371t3_offload_tx(
struct adapter *sc,
struct mbuf *m)
2373 struct ofld_hdr *oh = mtod(m,
struct ofld_hdr *);
2376 if (oh->flags & F_HDR_CTRL) {
2377 m_adj(m,
sizeof (*oh));
2380 return (ofld_xmit(sc, qs, m));
2431 M_DEVBUF, M_WAITOK, &q->
lock)) == NULL) {
2432 device_printf(sc->
dev,
"failed to allocate mbuf ring\n");
2436 M_NOWAIT | M_ZERO)) == NULL) {
2437 device_printf(sc->
dev,
"failed to allocate ifq\n");
2453 printf(
"error %d from alloc ring fl0\n", ret);
2462 printf(
"error %d from alloc ring fl1\n", ret);
2469 NULL, NULL)) != 0) {
2470 printf(
"error %d from alloc ring rspq\n", ret);
2475 device_get_unit(sc->
dev), irq_vec_idx);
2478 for (i = 0; i < ntxq; ++i) {
2487 printf(
"error %d from alloc ring tx %i\n", ret, i);
2490 mbufq_init(&q->
txq[i].
sendq, INT_MAX);
2514 q->
fl[0].
zone = zone_pack;
2515 q->
fl[0].
type = EXT_PACKET;
2518 q->
fl[1].
zone = zone_jumbo16;
2519 q->
fl[1].
type = EXT_JUMBO16;
2521 q->
fl[1].
zone = zone_jumbo9;
2522 q->
fl[1].
type = EXT_JUMBO9;
2524 q->
fl[1].
zone = zone_jumbop;
2525 q->
fl[1].
type = EXT_JUMBOP;
2527 KASSERT(0, (
"can't deal with jumbo_buf_size %d.", p->
jumbo_buf_size));
2535#if defined(INET6) || defined(INET)
2536 ret = tcp_lro_init(&q->
lro.
ctrl);
2538 printf(
"error %d from tcp_lro_init\n", ret);
2549 printf(
"error %d from t3_sge_init_rspcntxt\n", ret);
2559 printf(
"error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2569 printf(
"error %d from t3_sge_init_ecntxt\n", ret);
2579 printf(
"error %d from t3_sge_init_ecntxt\n", ret);
2591 printf(
"error %d from t3_sge_init_ecntxt\n", ret);
2627 struct ifnet *
ifp = pi->
ifp;
2630 m->m_pkthdr.ether_vtag = ntohs(cpl->
vlan);
2631 m->m_flags |= M_VLANTAG;
2634 m->m_pkthdr.rcvif =
ifp;
2638 m->m_pkthdr.len -= (
sizeof(*cpl) + ethpad);
2639 m->m_len -= (
sizeof(*cpl) + ethpad);
2640 m->m_data += (
sizeof(*cpl) + ethpad);
2643 struct ether_header *eh = mtod(m,
void *);
2646 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2647 struct ether_vlan_header *evh = mtod(m,
void *);
2649 eh_type = evh->evl_proto;
2651 eh_type = eh->ether_type;
2653 if (
ifp->if_capenable & IFCAP_RXCSUM &&
2654 eh_type == htons(ETHERTYPE_IP)) {
2655 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2656 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2657 m->m_pkthdr.csum_data = 0xffff;
2658 }
else if (
ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2659 eh_type == htons(ETHERTYPE_IPV6)) {
2660 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2662 m->m_pkthdr.csum_data = 0xffff;
2688 unsigned int len_cq = ntohl(r->
len_cq);
2693 uint32_t
flags = M_EXT;
2699 mask = fl->
size - 1;
2706 bus_dmamap_sync(fl->
entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2710 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2712 cl = mtod(m,
void *);
2713 memcpy(cl, sd->rxsd_cl, len);
2715 m->m_pkthdr.len = m->m_len = len;
2722 bus_dmamap_unload(fl->
entry_tag, sd->map);
2729 m_init(m, M_NOWAIT, MT_DATA,
flags);
2730 if (fl->
zone == zone_pack) {
2734 m->m_data = m->m_ext.ext_buf;
2736 m_cljset(m, cl, fl->
type);
2746 m->m_pkthdr.len = len;
2753 log(LOG_ERR,
"discarding intermediate descriptor entry\n");
2760 mh->
mh_head->m_pkthdr.len += len;
2764 printf(
"len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2784 unsigned int credits;
2810 unsigned int sleeping)
2835 int budget_left = budget;
2836 unsigned int sleeping = 0;
2837#if defined(INET6) || defined(INET)
2840 struct lro_ctrl *lro_ctrl = &qs->
lro.
ctrl;
2844 static int last_holdoff = 0;
2853 int eth, eop = 0, ethpad = 0;
2864 printf(
"async notification\n");
2867 mh->
mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2870 m = m_gethdr(M_NOWAIT, MT_DATA);
2883 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2898 mh->
mh_head->m_pkthdr.len += m->m_len;
2904 eop =
get_packet(adap, drop_thresh, qs, mh, r);
2908 M_HASHTYPE_OPAQUE_HASH);
2909 mh->
mh_head->m_pkthdr.flowid = rss_hash;
2926 adap->cpl_handler[opcode](qs, r, mh->
mh_head);
2931 }
else if (eth && eop) {
2944#if defined(INET6) || defined(INET)
2945 skip_lro = __predict_false(qs->
port->
ifp != m->m_pkthdr.rcvif);
2947 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2948 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2959 struct ifnet *
ifp = m->m_pkthdr.rcvif;
2960 (*
ifp->if_input)(
ifp, m);
2967 if (__predict_false(++rspq->
cidx == rspq->
size)) {
2982#if defined(INET6) || defined(INET)
2984 tcp_lro_flush_all(lro_ctrl);
2996 budget -= budget_left;
3007 static int last_holdoff = 0;
3056 mtx_lock(&q0->
lock);
3060 mtx_unlock(&q0->
lock);
3074 int i, new_packets = 0;
3076 mtx_lock(&q0->
lock);
3081 mtx_unlock(&q0->
lock);
3082 if (new_packets == 0) {
/*
 * Size of the sbuf used by the queue-dump sysctl handlers (presumably
 * ~32 characters per entry for up to 400 entries).  Parenthesized so
 * the expansion cannot regroup inside a larger expression -- without
 * the parentheses, `x / QDUMP_SBUF_SIZE` would expand to
 * `x / 32 * 400` (CERT C PRE02-C).
 */
#define QDUMP_SBUF_SIZE (32 * 400)
3106 int i, err, dump_end,
idx;
3123 "dump start of %d is greater than queue size\n",
3131 err = sysctl_wire_old_buffer(req, 0);
3136 sbuf_printf(sb,
" \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3137 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3138 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3139 sbuf_printf(sb,
" generation=%u CQ mode=%u FL threshold=%u\n",
3140 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3149 rspd = &rspq->
desc[idx];
3150 sbuf_printf(sb,
"\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3151 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3152 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3153 sbuf_printf(sb,
"\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3154 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3155 be32toh(rspd->len_cq), rspd->intr_gen);
3158 err = sbuf_finish(sb);
3168 int i, j, err, dump_end;
3171 uint32_t *WR, wr_hi, wr_lo, gen;
3187 "dump start of %d is greater than queue size\n",
3195 err = sysctl_wire_old_buffer(req, 0);
3200 sbuf_printf(sb,
" \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3201 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3202 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3203 sbuf_printf(sb,
" TUN=%u TOE=%u generation%u uP token=%u valid=%u\n",
3204 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3205 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3206 sbuf_printf(sb,
" qid=%d start=%d -> end=%d\n", qs->
idx,
3213 WR = (uint32_t *)txd->
flit;
3214 wr_hi = ntohl(WR[0]);
3215 wr_lo = ntohl(WR[1]);
3218 sbuf_printf(sb,
" wr_hi %08x wr_lo %08x gen %d\n",
3220 for (j = 2; j < 30; j += 4)
3221 sbuf_printf(sb,
"\t%08x %08x %08x %08x \n",
3222 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3225 err = sbuf_finish(sb);
3235 int i, j, err, dump_end;
3238 uint32_t *WR, wr_hi, wr_lo, gen;
3253 "dump start of %d is greater than queue size\n",
3259 err = sysctl_wire_old_buffer(req, 0);
3263 sbuf_printf(sb,
" qid=%d start=%d -> end=%d\n", qs->
idx,
3269 txd = &txq->
desc[i & (255)];
3270 WR = (uint32_t *)txd->
flit;
3271 wr_hi = ntohl(WR[0]);
3272 wr_lo = ntohl(WR[1]);
3275 sbuf_printf(sb,
" wr_hi %08x wr_lo %08x gen %d\n",
3277 for (j = 2; j < 30; j += 4)
3278 sbuf_printf(sb,
"\t%08x %08x %08x %08x \n",
3279 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3282 err = sbuf_finish(sb);
3294 int i, j, err,
nqsets = 0;
3301 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3313 coalesce_usecs = max(1, coalesce_usecs);
3315 for (i = 0; i <
nqsets; i++) {
3316 qs = &sc->
sge.
qs[i];
3343 rc = sysctl_handle_int(oidp, ×tamp, arg2, req);
3360 struct sysctl_ctx_list *ctx;
3361 struct sysctl_oid_list *children;
3363 ctx = device_get_sysctl_ctx(sc->
dev);
3364 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->
dev));
3367 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3370 0,
"firmware version");
3371 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3375 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3378 0,
"type of ports");
3379 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3382 0,
"enable verbose debugging output");
3383 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
"tunq_coalesce",
3384 CTLFLAG_RD, &sc->tunq_coalesce,
3385 "#tunneled packets freed");
3386 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3389 0,
"#times txq overrun");
3390 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3393 0,
"core clock frequency (in KHz)");
3415 parg = (uint64_t *) ((uint8_t *)&p->
mac.
stats + arg2);
3417 return (sysctl_handle_64(oidp, parg, 0, req));
3423 struct sysctl_ctx_list *ctx;
3424 struct sysctl_oid_list *children;
3427 ctx = device_get_sysctl_ctx(sc->
dev);
3428 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->
dev));
3430 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3432 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3434 "I",
"interrupt coalescing timer (us)");
3436 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3438 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3440 "I",
"provide packet timestamp instead of connection hash");
3444 struct sysctl_oid *poid;
3445 struct sysctl_oid_list *poidlist;
3449 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3450 pi->
namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3452 poidlist = SYSCTL_CHILDREN(poid);
3453 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3454 "nqsets", CTLFLAG_RD, &pi->
nqsets,
3457 for (j = 0; j < pi->
nqsets; j++) {
3459 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3460 *ctrlqpoid, *lropoid;
3461 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3462 *txqpoidlist, *ctrlqpoidlist,
3468 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3469 qs->
namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3471 qspoidlist = SYSCTL_CHILDREN(qspoid);
3473 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO,
"fl0_empty",
3474 CTLFLAG_RD, &qs->
fl[0].
empty, 0,
3475 "freelist #0 empty");
3476 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO,
"fl1_empty",
3477 CTLFLAG_RD, &qs->
fl[1].
empty, 0,
3478 "freelist #1 empty");
3480 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3481 rspq_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3483 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3485 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3486 txq_names[0], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3488 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3490 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3491 txq_names[2], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3492 "ctrlq statistics");
3493 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3495 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3496 "lro_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3498 lropoidlist = SYSCTL_CHILDREN(lropoid);
3500 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"size",
3502 0,
"#entries in response queue");
3503 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"cidx",
3505 0,
"consumer index");
3506 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"credits",
3509 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"starved",
3511 0,
"#times starved");
3512 SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO,
"phys_addr",
3514 "physical_address_of the queue");
3515 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"dump_start",
3517 0,
"start rspq dump entry");
3518 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO,
"dump_count",
3520 0,
"#rspq entries to dump");
3521 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO,
"qdump",
3522 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3524 "dump of the response queue");
3526 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO,
"dropped",
3528 "#tunneled packets dropped");
3529 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"sendqlen",
3531 0,
"#tunneled packets waiting to be sent");
3533 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"queue_pidx",
3535 0,
"#tunneled packets queue producer index");
3536 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"queue_cidx",
3538 0,
"#tunneled packets queue consumer index");
3540 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"processed",
3542 0,
"#tunneled packets processed by the card");
3543 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"cleaned",
3545 0,
"#tunneled packets cleaned");
3546 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"in_use",
3547 CTLFLAG_RD, &txq->
in_use,
3548 0,
"#tunneled packet slots in use");
3549 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO,
"frees",
3551 "#tunneled packets freed");
3552 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"skipped",
3554 0,
"#tunneled packet descriptors skipped");
3555 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO,
"coalesced",
3557 "#tunneled packets coalesced");
3558 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"enqueued",
3560 0,
"#tunneled packets enqueued to hardware");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"stopped_flags",
3563 0,
"tx queues stopped");
3564 SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO,
"phys_addr",
3566 "physical_address_of the queue");
3567 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"qgen",
3569 0,
"txq generation");
3570 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"hw_cidx",
3571 CTLFLAG_RD, &txq->
cidx,
3572 0,
"hardware queue cidx");
3573 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"hw_pidx",
3574 CTLFLAG_RD, &txq->
pidx,
3575 0,
"hardware queue pidx");
3576 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"dump_start",
3578 0,
"txq start idx for dump");
3579 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO,
"dump_count",
3581 0,
"txq #entries to dump");
3582 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO,
"qdump",
3583 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3585 "dump of the transmit queue");
3587 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO,
"dump_start",
3589 0,
"ctrlq start idx for dump");
3590 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO,
"dump_count",
3592 0,
"ctrl #entries to dump");
3593 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO,
"qdump",
3594 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3596 "dump of the transmit queue");
3598 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO,
"lro_queued",
3599 CTLFLAG_RD, &qs->
lro.
ctrl.lro_queued, 0, NULL);
3600 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO,
"lro_flushed",
3601 CTLFLAG_RD, &qs->
lro.
ctrl.lro_flushed, 0, NULL);
3602 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO,
"lro_bad_csum",
3603 CTLFLAG_RD, &qs->
lro.
ctrl.lro_bad_csum, 0, NULL);
3604 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO,
"lro_cnt",
3605 CTLFLAG_RD, &qs->
lro.
ctrl.lro_cnt, 0, NULL);
3609 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
"mac_stats",
3610 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"MAC statistics");
3611 poidlist = SYSCTL_CHILDREN(poid);
3625#define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3626 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, \
3627 offsetof(struct mac_stats, a), sysctl_handle_macstat, "QU", 0)
3674#undef CXGB_SYSCTL_ADD_QUAD
3676#define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3677 CTLFLAG_RD, &mstats->a, 0)
3688#undef CXGB_SYSCTL_ADD_ULONG
3704 unsigned char *data)
3713 return sizeof(
struct tx_desc);
3726 memcpy(data, &qs->
fl[qnum].
desc[idx],
sizeof(
struct rx_desc));
3727 return sizeof(
struct rx_desc);
static __inline struct sge_qset * txq_to_qset(struct sge_txq *q, int qidx)
static __inline struct sge_qset * rspq_to_qset(struct sge_rspq *q)
static __inline void t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
static __inline uint32_t t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
void cxgb_refresh_stats(struct port_info *)
#define desc_reclaimable(q)
void t3_set_reg_field(adapter_t *adap, unsigned int addr, u32 mask, u32 val)
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
int t3_slow_intr_handler(adapter_t *adapter)
static unsigned int core_ticks_per_usec(const adapter_t *adap)
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable, enum sge_context_type type, int respq, u64 base_addr, unsigned int size, unsigned int token, int gen, unsigned int cidx)
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx, u64 base_addr, unsigned int size, unsigned int fl_thres, int gen, unsigned int cidx)
int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
static int is_offload(const adapter_t *adap)
int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
#define for_each_port(adapter, iter)
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
void t3_fatal_err(adapter_t *adapter)
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable, u64 base_addr, unsigned int size, unsigned int esize, unsigned int cong_thres, int gen, unsigned int cidx)
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
#define FW_CTRL_SGEEC_START
#define FW_TUNNEL_TID_START
#define FW_TUNNEL_SGEEC_START
#define FW_CTRL_TID_START
#define FW_WROPCODE_TUNNEL_TX_PKT
#define FW_OFLD_SGEEC_START
#define test_and_clear_bit(bit, p)
#define CH_ALERT(adap, fmt,...)
#define A_SG_EGR_RCQ_DRB_THRSH
#define A_SG_LO_DRB_LO_THRSH
#define F_BIGENDIANINGRESS
#define A_SG_CMDQ_CREDIT_TH
#define A_SG_RSPQ_CREDIT_RETURN
#define A_SG_HI_DRB_HI_THRSH
#define F_ENABLERXPKTTMSTPRSS
#define A_SG_LO_DRB_HI_THRSH
#define V_USERSPACESIZE(x)
#define F_ISCSICOALESCING
#define V_HIRCQDRBTHRSH(x)
#define V_LORCQDRBTHRSH(x)
#define F_RSPQCREDITOVERFOW
#define A_SG_RSPQ_FL_STATUS
#define A_SG_DRB_PRI_THRESH
#define V_HOSTPAGESIZE(x)
#define A_SG_HI_DRB_LO_THRSH
#define TX_SW_DESC_MAPPED
static __inline uint64_t check_pkt_coalesce(struct sge_qset *qs)
static int alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size, bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag, bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
static __inline unsigned int sgl_len(unsigned int n)
int cxgb_txq_buf_ring_size
int t3_sge_free(struct adapter *sc)
static __inline int process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
int t3_sge_alloc(adapter_t *sc)
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
void t3_intr_msi(void *data)
void t3_sge_prep(adapter_t *adap, struct sge_params *p)
void t3_sge_err_intr_handler(adapter_t *adapter)
void t3_add_configured_sysctls(adapter_t *sc)
#define TXQ_RING_NEEDS_ENQUEUE(qs)
static void sge_txq_reclaim_handler(void *arg, int ncount)
static __inline void make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
static int cxgb_tx_coalesce_force
static __inline void __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
static void refill_fl(adapter_t *sc, struct sge_fl *q, int n)
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0, "size of per-queue mbuf ring")
static int t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
#define TXQ_RING_DEQUEUE_COND(qs, func, arg)
static void init_qset_cntxt(struct sge_qset *qs, u_int id)
static int cxgb_tx_coalesce_enable_start
static __inline void handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
#define TXQ_LOCK_ASSERT(qs)
static int cxgb_tx_coalesce_enable_stop
static void restart_ctrlq(void *data, int npending)
#define TX_RECLAIM_PERIOD
static __inline void __refill_fl(adapter_t *adap, struct sge_fl *fl)
static __inline void refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
static void cxgb_tx_timeout(void *arg)
int t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
#define TXQ_RING_FLUSH(qs)
static int sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
static int t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
static void t3_free_qset(adapter_t *sc, struct sge_qset *q)
static const char * txq_names[]
static __inline void check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
#define RX_SW_DESC_MAP_CREATED
static void write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs, const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits, unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
void t3_intr_msix(void *data)
static __inline int reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
static int recycle_enable
static __inline void wr_gen2(struct tx_desc *d, unsigned int gen)
void t3_add_attach_sysctls(adapter_t *sc)
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, unsigned char *data)
static int t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
static int process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
void cxgb_tx_watchdog(void *arg)
int t3_sge_init_port(struct port_info *pi)
void t3_free_sge_resources(adapter_t *sc, int nqsets)
static int cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
static void alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
static __inline u_int flits_to_desc(u_int n)
void cxgb_qflush(struct ifnet *ifp)
#define COALESCE_STOP_MIN
void t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
static int ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
#define CXGB_SYSCTL_ADD_QUAD(a)
#define COALESCE_START_DEFAULT
#define COALESCE_START_MAX
static void recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
int cxgb_use_16k_clusters
static __inline int should_restart_tx(const struct sge_txq *q)
static void sge_timer_cb(void *arg)
static int get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
static int t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
static __inline int is_new_response(const struct rsp_desc *r, const struct sge_rspq *q)
static __inline void reclaim_completed_tx_imm(struct sge_txq *q)
static void refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
void t3_sge_stop(adapter_t *sc)
#define TXQ_RING_EMPTY(qs)
static int cxgb_tx_reclaim_threshold
static void free_rx_bufs(adapter_t *sc, struct sge_fl *q)
static void cxgb_start_locked(struct sge_qset *qs)
static void restart_tx(struct sge_qset *qs)
static void sge_slow_intr_handler(void *arg, int ncount)
static void set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
void t3_sge_init(adapter_t *adap, struct sge_params *p)
static uint8_t flit_desc_map[]
static int t3_encap(struct sge_qset *qs, struct mbuf **m)
static __inline int check_desc_avail(adapter_t *adap, struct sge_txq *q, struct mbuf *m, unsigned int ndesc, unsigned int qid)
static int coalesce_check(struct mbuf *m, void *arg)
static __inline unsigned int calc_tx_descs(const struct mbuf *m, int nsegs)
#define TXQ_RING_DEQUEUE(qs)
int t3_sge_init_adapter(adapter_t *sc)
struct sysctl_oid_list sysctl__hw_cxgb_children
#define TX_RECLAIM_DEFAULT
void t3_sge_start(adapter_t *sc)
static void txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
void t3b_intr(void *data)
static int get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs, struct t3_mbuf_hdr *mh, struct rsp_desc *r)
#define COALESCE_STOP_DEFAULT
void t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
static void check_ring_db(adapter_t *adap, struct sge_qset *qs, unsigned int sleeping)
static const char * rspq_name
#define SGE_RX_DROP_THRES
static struct mbuf * cxgb_dequeue(struct sge_qset *qs)
static void sge_timer_reclaim(void *arg, int ncount)
static __inline void write_imm(struct tx_desc *d, caddr_t src, unsigned int len, unsigned int gen)
int cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
#define GET_VTAG(cntrl, m)
int t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, const struct qset_params *p, int ntxq, struct port_info *pi)
#define CXGB_SYSCTL_ADD_ULONG(a)
static int t3_dump_rspq(SYSCTL_HANDLER_ARGS)
int t3_sge_reset_adapter(adapter_t *sc)
#define SGE_RX_COPY_THRES
#define G_RSPD_TXQ1_CR(x)
#define F_RSPD_ASYNC_NOTIF
#define G_RSPD_TXQ2_CR(x)
#define F_RSPD_IMM_DATA_VALID
#define G_RSPD_TXQ0_CR(x)
#define G_RSPD_SOP_EOP(x)
#define V_LSO_TCPHDR_WORDS(x)
#define F_TXPKT_L4CSUM_DIS
#define V_LSO_IPHDR_WORDS(x)
#define V_TXPKT_OPCODE(x)
#define V_LSO_ETH_TYPE(x)
#define F_TXPKT_IPCSUM_DIS
static __inline void m_freem_list(struct mbuf *m)
int busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
void busdma_map_sg_vec(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs)
char port_types[MAX_NPORTS+1]
unsigned int slow_intr_mask
struct port_info port[MAX_NPORTS]
struct callout sge_timer_ch
struct adapter_params params
struct task slow_intr_task
bus_dma_tag_t parent_dmat
bus_dma_tag_t rx_jumbo_dmat
struct cpl_tx_pkt_batch_entry pkt_entry[7]
struct task timer_reclaim_task
char namebuf[PORT_NAME_LEN]
struct link_config link_config
unsigned int coalesce_usecs
unsigned int jumbo_buf_size
unsigned int txq_size[SGE_TXQ_PER_SET]
struct rss_header rss_hdr
struct rx_sw_desc * sdesc
struct qset_params qset[SGE_QSETS]
unsigned int max_pkt_size
char namebuf[QS_NAME_LEN]
struct sge_fl fl[SGE_RXQ_PER_SET]
struct sge_txq txq[SGE_TXQ_PER_SET]
char lockbuf[RSPQ_NAME_LEN]
struct t3_mbuf_hdr rspq_mh
uint64_t txq_direct_bytes
struct tx_sw_desc * sdesc
struct callout txq_watchdog
struct sg_ent txq_sgl[TX_MAX_SEGS/2+1]
uint64_t txq_direct_packets
struct task qreclaim_task
struct sge_qset qs[SGE_QSETS]
uint64_t flit[TX_DESC_FLITS]