FreeBSD kernel CXGBE device code
t4_main.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include <netinet/in.h>
#include <netinet/ip.h>
#ifdef KERN_TLS
#include <netinet/tcp_seq.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_clip.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
#include "t4_smt.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location(device_t, device_t, struct sbuf *);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static int t4_suspend(device_t);
static int t4_resume(device_t);
static int t4_reset_prepare(device_t, device_t);
static int t4_reset_post(device_t, device_t);
static device_method_t t4_methods[] = {
    DEVMETHOD(device_probe, t4_probe),
    DEVMETHOD(device_attach, t4_attach),
    DEVMETHOD(device_detach, t4_detach),
    DEVMETHOD(device_suspend, t4_suspend),
    DEVMETHOD(device_resume, t4_resume),

    DEVMETHOD(bus_child_location, t4_child_location),
    DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
    DEVMETHOD(bus_reset_post, t4_reset_post),

    DEVMETHOD(t4_is_main_ready, t4_ready),
    DEVMETHOD(t4_read_port_device, t4_read_port_device),

    DEVMETHOD_END
};
static driver_t t4_driver = {
124 "t4nex",
126 sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
    DEVMETHOD(device_probe, cxgbe_probe),
    DEVMETHOD(device_attach, cxgbe_attach),
    DEVMETHOD(device_detach, cxgbe_detach),
    { 0, 0 }
};
static driver_t cxgbe_driver = {
    "cxgbe",
    cxgbe_methods,
    sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
    DEVMETHOD(device_probe, vcxgbe_probe),
    DEVMETHOD(device_attach, vcxgbe_attach),
    DEVMETHOD(device_detach, vcxgbe_detach),
    { 0, 0 }
};
static driver_t vcxgbe_driver = {
    "vcxgbe",
    vcxgbe_methods,
    sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
    .d_version = D_VERSION,
    .d_ioctl = t4_ioctl,
    .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
    DEVMETHOD(device_probe, t5_probe),
    DEVMETHOD(device_attach, t4_attach),
    DEVMETHOD(device_detach, t4_detach),
    DEVMETHOD(device_suspend, t4_suspend),
    DEVMETHOD(device_resume, t4_resume),

    DEVMETHOD(bus_child_location, t4_child_location),
    DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
    DEVMETHOD(bus_reset_post, t4_reset_post),

    DEVMETHOD(t4_is_main_ready, t4_ready),
    DEVMETHOD(t4_read_port_device, t4_read_port_device),

    DEVMETHOD_END
};
static driver_t t5_driver = {
    "t5nex",
    t5_methods,
    sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
    "cxl",
    cxgbe_methods,
    sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
    "vcxl",
    vcxgbe_methods,
    sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
    DEVMETHOD(device_probe, t6_probe),
    DEVMETHOD(device_attach, t4_attach),
    DEVMETHOD(device_detach, t4_detach),
    DEVMETHOD(device_suspend, t4_suspend),
    DEVMETHOD(device_resume, t4_resume),

    DEVMETHOD(bus_child_location, t4_child_location),
    DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
    DEVMETHOD(bus_reset_post, t4_reset_post),

    DEVMETHOD(t4_is_main_ready, t4_ready),
    DEVMETHOD(t4_read_port_device, t4_read_port_device),

    DEVMETHOD_END
};
static driver_t t6_driver = {
    "t6nex",
    t6_methods,
    sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
    "cc",
    cxgbe_methods,
    sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
    "vcc",
    vcxgbe_methods,
    sizeof(struct vi_info)
};

/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
#endif

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
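/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * thread that needs both the list lock and an adapter's lock must follow the
 * order documented above, e.g.
 *
 *     sx_slock(&t4_list_lock);
 *     ADAPTER_LOCK(sc);
 *     ...
 *     ADAPTER_UNLOCK(sc);
 *     sx_sunlock(&t4_list_lock);
 */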
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE parameters");

/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
    "Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);  /* Old name, undocumented */
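
/*
 * Illustrative example (editor's addition, not in the original source): the
 * -NTXQ default above means "let tweak_tunables() pick a value, up to 16".
 * Because these sysctls are CTLFLAG_RDTUN, a fixed count can be requested
 * from /boot/loader.conf instead, e.g.:
 *
 *     hw.cxgbe.ntxq="8"
 */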

#define NRXQ 8
int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
    "Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);  /* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
    "Number of TX queues per VI");

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
    "Number of RX queues per VI");

static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
    0, "Reserve TX queue 0 of each VI for non-flowid packets");

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
    "Number of offload TX queues per port");

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
    "Number of offload RX queues per port");

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
    "Number of offload TX queues per VI");

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
    "Number of offload RX queues per VI");

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
    &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
    &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
    &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
    &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
    &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
    &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
    &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
    &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[15], 0, "");

static int t4_toe_tls_rx_timeout = 5;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, tls_rx_timeout, CTLFLAG_RDTUN,
    &t4_toe_tls_rx_timeout, 0,
    "Timeout in seconds to downgrade TLS sockets to plain TOE");
#endif

#ifdef DEV_NETMAP
#define NN_MAIN_VI (1 << 0)  /* Native netmap on the main VI */
#define NN_EXTRA_VI (1 << 1)  /* Native netmap on the extra VI(s) */
static int t4_native_netmap = NN_EXTRA_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
    0, "Native netmap support.  bit 0 = main VI, bit 1 = extra VIs");

#define NNMTXQ 8
static int t4_nnmtxq = -NNMTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
    "Number of netmap TX queues");

#define NNMRXQ 8
static int t4_nnmrxq = -NNMRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
    "Number of netmap RX queues");

#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
    "Number of netmap TX queues per VI");

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
    "Number of netmap RX queues per VI");
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
    0, "Holdoff timer index");
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);  /* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
    0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);  /* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
    "Number of descriptors in each TX queue");

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
    "Number of descriptors in each RX queue");

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
    0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");

/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF "default"
#define BUILTIN_CF "built-in"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
    sizeof(t4_cfg_file), "Firmware configuration file");

/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *     mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *     negotiated settings will override rx_pause/tx_pause.
 *     Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
    &t4_pause_settings, 0,
    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");

/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5)
 * 0 to disable FEC.
 */
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");

/*
 * Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
 * issues to the firmware.  If the firmware doesn't support FORCE_FEC then the
 * driver runs as if this is set to 0.
 * -1 to set FORCE_FEC iff requested_fec != AUTO.  Multiple FEC bits are okay.
 *  0 to never set FORCE_FEC.  requested_fec = AUTO means use the hint from the
 *    transceiver.  Multiple FEC bits may not be okay but will be passed on to
 *    the firmware anyway (may result in l1cfg errors with old firmwares).
 *  1 to always set FORCE_FEC.  Multiple FEC bits are okay.  requested_fec = AUTO
 *    means set all FEC bits that are valid for the speed.
 */
static int t4_force_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0,
    "Controls the use of FORCE_FEC bit in L1 configuration.");

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
    "Link autonegotiation");

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).  '-n' is the same as 'n' except the firmware
 * version used in the checks is read from the firmware bundled with the driver.
 */
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
    "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
    &t4_nbmcaps_allowed, 0, "Default NBM capabilities");

static int t4_linkcaps_allowed = 0;  /* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
    &t4_linkcaps_allowed, 0, "Default link capabilities");

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
    &t4_switchcaps_allowed, 0, "Default switch capabilities");

#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
    &t4_niccaps_allowed, 0, "Default NIC capabilities");

static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
    &t4_toecaps_allowed, 0, "Default TCP offload capabilities");

static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
    &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");

static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
    &t4_cryptocaps_allowed, 0, "Default crypto capabilities");

static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
    &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");

static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
    &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");

static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
    0, "Use WC instead of UC for BAR2");

static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
    "Number of VIs per port");

/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 *  0: disable RO.
 *  1: enable RO.
 *  2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
    &pcie_relaxed_ordering, 0,
    "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");

static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN,
    &t4_panic_on_fatal_err, 0, "panic on fatal errors");

static int t4_reset_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
    &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");

static int t4_tx_vm_wr = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
    "Use VM work requests to transmit packets.");

/*
 * Set to non-zero to enable the attack filter.  A packet that matches any of
 * these conditions will get dropped on ingress:
 * 1) IP && source address == destination address.
 * 2) TCP/IP && source address is not a unicast address.
 * 3) TCP/IP && destination address is not a unicast address.
 * 4) IP && source address is loopback (127.x.y.z).
 * 5) IP && destination address is loopback (127.x.y.z).
 * 6) IPv6 && source address == destination address.
 * 7) IPv6 && source address is not a unicast address.
 * 8) IPv6 && source address is loopback (::1/128).
 * 9) IPv6 && destination address is loopback (::1/128).
 * 10) IPv6 && source address is unspecified (::/128).
 * 11) IPv6 && destination address is unspecified (::/128).
 * 12) TCP/IPv6 && source address is multicast (ff00::/8).
 * 13) TCP/IPv6 && destination address is multicast (ff00::/8).
 */
static int t4_attack_filter = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
    &t4_attack_filter, 0, "Drop suspicious traffic");

static int t4_drop_ip_fragments = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
    &t4_drop_ip_fragments, 0, "Drop IP fragments");

static int t4_drop_pkts_with_l2_errors = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l2_errors, 0,
    "Drop all frames with Layer 2 length or checksum errors");

static int t4_drop_pkts_with_l3_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l3_errors, 0,
    "Drop all frames with IP version, length, or checksum errors");

static int t4_drop_pkts_with_l4_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l4_errors, 0,
    "Drop all frames with Layer 4 length, checksum, or other errors");

#ifdef TCP_OFFLOAD
/*
 * TOE tunables.
 */
static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
    &t4_cop_managed_offloading, 0,
    "COP (Connection Offload Policy) controls all TOE offload");
#endif

#ifdef KERN_TLS
/*
 * This enables KERN_TLS for all adapters if set.
 */
static int t4_kern_tls = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
    "Enable KERN_TLS mode for all supported adapters");

SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) KERN_TLS parameters");

static int t4_tls_inline_keys = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
    &t4_tls_inline_keys, 0,
    "Always pass TLS keys in work requests (1) or attempt to store TLS keys "
    "in card memory.");

static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
    0, "Attempt to combine TCB field updates with TLS record work requests.");
#endif

/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
    FW_VI_FUNC_ETH,
    FW_VI_FUNC_OFLD,
    FW_VI_FUNC_IWARP,
    FW_VI_FUNC_OPENISCSI,
    FW_VI_FUNC_OPENFCOE,
    FW_VI_FUNC_FOISCSI,
    FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
    uint16_t intr_type;  /* INTx, MSI, or MSI-X */
    uint16_t num_vis;  /* number of VIs for each port */
    uint16_t nirq;  /* Total # of vectors */
    uint16_t ntxq;  /* # of NIC txq's for each port */
    uint16_t nrxq;  /* # of NIC rxq's for each port */
    uint16_t nofldtxq;  /* # of TOE/ETHOFLD txq's for each port */
    uint16_t nofldrxq;  /* # of TOE rxq's for each port */
    uint16_t nnmtxq;  /* # of netmap txq's */
    uint16_t nnmrxq;  /* # of netmap rxq's */

    /* The vcxgbe/vcxl interfaces use these and not the ones above. */
    uint16_t ntxq_vi;  /* # of NIC txq's */
    uint16_t nrxq_vi;  /* # of NIC rxq's */
    uint16_t nofldtxq_vi;  /* # of TOE txq's */
    uint16_t nofldrxq_vi;  /* # of TOE rxq's */
    uint16_t nnmtxq_vi;  /* # of netmap txq's */
    uint16_t nnmrxq_vi;  /* # of netmap rxq's */
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int adapter_full_init(struct adapter *);
static void adapter_full_uninit(struct adapter *);
static int vi_full_init(struct vi_info *);
static void vi_full_uninit(struct vi_info *);
static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *);
static void quiesce_txq(struct sge_txq *);
static void quiesce_wrq(struct sge_wrq *);
static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
static void quiesce_vi(struct vi_info *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void t4_init_atid_table(struct adapter *);
static void t4_free_atid_table(struct adapter *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct vi_info *);
static void cxgbe_refresh_stats(struct vi_info *);
static void cxgbe_tick(void *);
static void vi_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_link_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
static int sysctl_reset(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls(SYSCTL_HANDLER_ARGS);
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
static int hold_clip_addr(struct adapter *, struct t4_clip_addr *);
static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, bool);
static void t4_async_event(struct adapter *);
#endif
#ifdef KERN_TLS
static int ktls_capability(struct adapter *, bool);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
static uint64_t vi_get_counter(struct ifnet *, ift_counter);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static void enable_vxlan_rx(struct adapter *);
static void reset_adapter_task(void *, int);
static void fatal_error_task(void *, int);
static void dump_devlog(struct adapter *);
static void dump_cim_regs(struct adapter *);
static void dump_cimla(struct adapter *);

struct {
    uint16_t device;
    char *desc;
} t4_pciids[] = {
    {0xa000, "Chelsio Terminator 4 FPGA"},
    {0x4400, "Chelsio T440-dbg"},
    {0x4401, "Chelsio T420-CR"},
    {0x4402, "Chelsio T422-CR"},
    {0x4403, "Chelsio T440-CR"},
    {0x4404, "Chelsio T420-BCH"},
    {0x4405, "Chelsio T440-BCH"},
    {0x4406, "Chelsio T440-CH"},
    {0x4407, "Chelsio T420-SO"},
    {0x4408, "Chelsio T420-CX"},
    {0x4409, "Chelsio T420-BT"},
    {0x440a, "Chelsio T404-BT"},
    {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
    {0xb000, "Chelsio Terminator 5 FPGA"},
    {0x5400, "Chelsio T580-dbg"},
    {0x5401, "Chelsio T520-CR"},  /* 2 x 10G */
    {0x5402, "Chelsio T522-CR"},  /* 2 x 10G, 2 x 1G */
    {0x5403, "Chelsio T540-CR"},  /* 4 x 10G */
    {0x5407, "Chelsio T520-SO"},  /* 2 x 10G, nomem */
    {0x5409, "Chelsio T520-BT"},  /* 2 x 10GBaseT */
    {0x540a, "Chelsio T504-BT"},  /* 4 x 1G */
    {0x540d, "Chelsio T580-CR"},  /* 2 x 40G */
    {0x540e, "Chelsio T540-LP-CR"},  /* 4 x 10G */
    {0x5410, "Chelsio T580-LP-CR"},  /* 2 x 40G */
    {0x5411, "Chelsio T520-LL-CR"},  /* 2 x 10G */
    {0x5412, "Chelsio T560-CR"},  /* 1 x 40G, 2 x 10G */
    {0x5414, "Chelsio T580-LP-SO-CR"},  /* 2 x 40G, nomem */
    {0x5415, "Chelsio T502-BT"},  /* 2 x 1G */
    {0x5418, "Chelsio T540-BT"},  /* 4 x 10GBaseT */
    {0x5419, "Chelsio T540-LP-BT"},  /* 4 x 10GBaseT */
    {0x541a, "Chelsio T540-SO-BT"},  /* 4 x 10GBaseT, nomem */
    {0x541b, "Chelsio T540-SO-CR"},  /* 4 x 10G, nomem */

    /* Custom */
    {0x5483, "Custom T540-CR"},
    {0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
    {0xc006, "Chelsio Terminator 6 FPGA"},  /* T6 PE10K6 FPGA (PF0) */
    {0x6400, "Chelsio T6-DBG-25"},  /* 2 x 10/25G, debug */
    {0x6401, "Chelsio T6225-CR"},  /* 2 x 10/25G */
    {0x6402, "Chelsio T6225-SO-CR"},  /* 2 x 10/25G, nomem */
    {0x6403, "Chelsio T6425-CR"},  /* 4 x 10/25G */
    {0x6404, "Chelsio T6425-SO-CR"},  /* 4 x 10/25G, nomem */
    {0x6405, "Chelsio T6225-OCP-SO"},  /* 2 x 10/25G, nomem */
    {0x6406, "Chelsio T62100-OCP-SO"},  /* 2 x 40/50/100G, nomem */
    {0x6407, "Chelsio T62100-LP-CR"},  /* 2 x 40/50/100G */
    {0x6408, "Chelsio T62100-SO-CR"},  /* 2 x 40/50/100G, nomem */
    {0x6409, "Chelsio T6210-BT"},  /* 2 x 10GBASE-T */
    {0x640d, "Chelsio T62100-CR"},  /* 2 x 40/50/100G */
    {0x6410, "Chelsio T6-DBG-100"},  /* 2 x 40/50/100G, debug */
    {0x6411, "Chelsio T6225-LL-CR"},  /* 2 x 10/25G */
    {0x6414, "Chelsio T61100-OCP-SO"},  /* 1 x 40/50/100G, nomem */
    {0x6415, "Chelsio T6201-BT"},  /* 2 x 1000BASE-T */

    /* Custom */
    {0x6480, "Custom T6225-CR"},
    {0x6481, "Custom T62100-CR"},
    {0x6482, "Custom T6225-CR"},
    {0x6483, "Custom T62100-CR"},
    {0x6484, "Custom T64100-CR"},
    {0x6485, "Custom T6240-SO"},
    {0x6486, "Custom T6225-SO-CR"},
    {0x6487, "Custom T6225-CR"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
    int i;
    uint16_t v = pci_get_vendor(dev);
    uint16_t d = pci_get_device(dev);
    uint8_t f = pci_get_function(dev);

    if (v != PCI_VENDOR_ID_CHELSIO)
        return (ENXIO);

    /* Attach only to PF0 of the FPGA */
    if (d == 0xa000 && f != 0)
        return (ENXIO);

    for (i = 0; i < nitems(t4_pciids); i++) {
        if (d == t4_pciids[i].device) {
            device_set_desc(dev, t4_pciids[i].desc);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}

static int
t5_probe(device_t dev)
{
    int i;
    uint16_t v = pci_get_vendor(dev);
    uint16_t d = pci_get_device(dev);
    uint8_t f = pci_get_function(dev);

    if (v != PCI_VENDOR_ID_CHELSIO)
        return (ENXIO);

    /* Attach only to PF0 of the FPGA */
    if (d == 0xb000 && f != 0)
        return (ENXIO);

    for (i = 0; i < nitems(t5_pciids); i++) {
        if (d == t5_pciids[i].device) {
            device_set_desc(dev, t5_pciids[i].desc);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}

static int
t6_probe(device_t dev)
{
    int i;
    uint16_t v = pci_get_vendor(dev);
    uint16_t d = pci_get_device(dev);

    if (v != PCI_VENDOR_ID_CHELSIO)
        return (ENXIO);

    for (i = 0; i < nitems(t6_pciids); i++) {
        if (d == t6_pciids[i].device) {
            device_set_desc(dev, t6_pciids[i].desc);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
    device_t root_port;
    uint32_t v;

    /*
     * The T5 chips do not properly echo the No Snoop and Relaxed
     * Ordering attributes when replying to a TLP from a Root
     * Port.  As a workaround, find the parent Root Port and
     * disable No Snoop and Relaxed Ordering.  Note that this
     * affects all devices under this root port.
     */
    root_port = pci_find_pcie_root_port(dev);
    if (root_port == NULL) {
        device_printf(dev, "Unable to find parent root port\n");
        return;
    }

    v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
        PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
    if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
        0)
        device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
            device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
    {
        .nexus_name = "t4nex",
        .ifnet_name = "cxgbe",
        .vi_ifnet_name = "vcxgbe",
        .pf03_drv_name = "t4iov",
        .vf_nexus_name = "t4vf",
        .vf_ifnet_name = "cxgbev"
    }, {
        .nexus_name = "t5nex",
        .ifnet_name = "cxl",
        .vi_ifnet_name = "vcxl",
        .pf03_drv_name = "t5iov",
        .vf_nexus_name = "t5vf",
        .vf_ifnet_name = "cxlv"
    }, {
        .nexus_name = "t6nex",
        .ifnet_name = "cc",
        .vi_ifnet_name = "vcc",
        .pf03_drv_name = "t6iov",
        .vf_nexus_name = "t6vf",
        .vf_ifnet_name = "ccv"
    }
};

void
t4_init_devnames(struct adapter *sc)
{
    int id;

    id = chip_id(sc);
    if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
        sc->names = &devnames[id - CHELSIO_T4];
    else {
        device_printf(sc->dev, "chip id %d is not supported.\n", id);
        sc->names = NULL;
    }
}

static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
    const char *parent, *name;
    long value;
    int line, unit;

    line = 0;
    parent = device_get_nameunit(sc->dev);
    name = sc->names->ifnet_name;
    while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
        if (resource_long_value(name, unit, "port", &value) == 0 &&
            value == pi->port_id)
            return (unit);
    }
    return (-1);
}
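
/*
 * Illustrative example (editor's addition, not in the original source):
 * t4_ifnet_unit() honors device.hints wiring, so an ifnet unit can be pinned
 * to a physical port.  E.g. to make cxgbe3 attach to port 1 of t4nex0, add
 * to /boot/device.hints:
 *
 *     hint.cxgbe.3.at="t4nex0"
 *     hint.cxgbe.3.port="1"
 */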

static int
t4_attach(device_t dev)
{
    struct adapter *sc;
    int rc = 0, i, j, rqidx, tqidx, nports;
    struct make_dev_args mda;
    struct intrs_and_queues iaq;
    struct sge *s;
    uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
    int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
    int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
    int nm_rqidx, nm_tqidx;
#endif
    int num_vis;

    sc = device_get_softc(dev);
    sc->dev = dev;
    sysctl_ctx_init(&sc->ctx);
    TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

    if ((pci_get_device(dev) & 0xff00) == 0x5400)
        t5_attribute_workaround(dev);
    pci_enable_busmaster(dev);
    if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
        uint32_t v;

        pci_set_max_read_req(dev, 4096);
        v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
        sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
        if (pcie_relaxed_ordering == 0 &&
            (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
            v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
            pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
        } else if (pcie_relaxed_ordering == 1 &&
            (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
            v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
            pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
        }
    }

    sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
    sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
    sc->traceq = -1;
1155 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
1156 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
1157 device_get_nameunit(dev));
1158
1159 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
1160 device_get_nameunit(dev));
1161 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
1162 t4_add_adapter(sc);
1163
1164 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
1165 TAILQ_INIT(&sc->sfl);
1166 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
1167
1168 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
1169
1170 sc->policy = NULL;
1171 rw_init(&sc->policy_lock, "connection offload policy");
1172
1173 callout_init(&sc->ktls_tick, 1);
1174
1175 refcount_init(&sc->vxlan_refcount, 0);
1176
1177 TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc);
1178 TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc);
1179
1180 sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx,
1181 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq",
1182 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
1183 sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx,
1184 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq",
1185 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
1186
1187 rc = t4_map_bars_0_and_4(sc);
1188 if (rc != 0)
1189 goto done; /* error message displayed already */
1190
1191 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
1192
1193 /* Prepare the adapter for operation. */
1194 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
1195 rc = -t4_prep_adapter(sc, buf);
1196 free(buf, M_CXGBE);
1197 if (rc != 0) {
1198 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
1199 goto done;
1200 }
1201
1202 /*
1203 * This is the real PF# to which we're attaching. Works from within PCI
1204 * passthrough environments too, where pci_get_function() could return a
1205 * different PF# depending on the passthrough configuration. We need to
1206 * use the real PF# in all our communication with the firmware.
1207 */
1208 j = t4_read_reg(sc, A_PL_WHOAMI);
1209 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
1210 sc->mbox = sc->pf;
1211
1212 t4_init_devnames(sc);
1213 if (sc->names == NULL) {
1214 rc = ENOTSUP;
1215 goto done; /* error message displayed already */
1216 }
1217
1218 /*
1219 * Do this really early, with the memory windows set up even before the
1220 * character device. The userland tool's register i/o and mem read
1221 * will work even in "recovery mode".
1222 */
1223 setup_memwin(sc);
    if (t4_init_devlog_params(sc, 0) == 0)
        fixup_devlog_params(sc);
    make_dev_args_init(&mda);
    mda.mda_devsw = &t4_cdevsw;
    mda.mda_uid = UID_ROOT;
    mda.mda_gid = GID_WHEEL;
    mda.mda_mode = 0600;
    mda.mda_si_drv1 = sc;
    rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
    if (rc != 0)
        device_printf(dev, "failed to create nexus char device: %d.\n",
            rc);

    /* Go no further if recovery mode has been requested. */
    if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
        device_printf(dev, "recovery mode.\n");
        goto done;
    }

#if defined(__i386__)
    if ((cpu_feature & CPUID_CX8) == 0) {
        device_printf(dev, "64 bit atomics not available.\n");
        rc = ENOTSUP;
        goto done;
    }
#endif

    /* Contact the firmware and try to become the master driver. */
    rc = contact_firmware(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */
    MPASS(sc->flags & FW_OK);

    rc = get_params__pre_init(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */

    if (sc->flags & MASTER_PF) {
        rc = partition_resources(sc);
        if (rc != 0)
            goto done;  /* error message displayed already */
        t4_intr_clear(sc);
    }

    rc = get_params__post_init(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */

    rc = set_params__post_init(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */

    rc = t4_map_bar_2(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */

    rc = t4_create_dma_tag(sc);
    if (rc != 0)
        goto done;  /* error message displayed already */

    /*
     * First pass over all the ports - allocate VIs and initialize some
     * basic parameters like mac address, port type, etc.
     */
    for_each_port(sc, i) {
        struct port_info *pi;

        pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
        sc->port[i] = pi;

        /* These must be set before t4_port_init */
        pi->adapter = sc;
        pi->port_id = i;
        /*
         * XXX: vi[0] is special so we can't delay this allocation until
         * pi->nvi's final value is known.
         */
        pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
            M_ZERO | M_WAITOK);

        /*
         * Allocate the "main" VI and initialize parameters
         * like mac addr.
         */
        rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
        if (rc != 0) {
            device_printf(dev, "unable to initialize port %d: %d\n",
                i, rc);
            free(pi->vi, M_CXGBE);
            free(pi, M_CXGBE);
            sc->port[i] = NULL;
            goto done;
        }

        if (is_bt(pi->port_type))
            setbit(&sc->bt_map, pi->tx_chan);
        else
            MPASS(!isset(&sc->bt_map, pi->tx_chan));

        snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
            device_get_nameunit(dev), i);
        mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
        sc->chan_map[pi->tx_chan] = i;

        /*
         * The MPS counter for FCS errors doesn't work correctly on the
         * T6 so we use the MAC counter here.  Which MAC is in use
         * depends on the link settings which will be known when the
         * link comes up.
         */
        if (is_t6(sc)) {
            pi->fcs_reg = -1;
        } else if (is_t4(sc)) {
1337 pi->fcs_reg = PORT_REG(pi->tx_chan,
1339 } else {
1340 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
1342 }
1343 pi->fcs_base = 0;
1344
1345 /* All VIs on this port share this media. */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);

        PORT_LOCK(pi);
        init_link_config(pi);
        fixup_link_config(pi);
        build_medialist(pi);
        if (fixed_ifmedia(pi))
            pi->flags |= FIXED_IFMEDIA;
        PORT_UNLOCK(pi);

        pi->dev = device_add_child(dev, sc->names->ifnet_name,
            t4_ifnet_unit(sc, pi));
        if (pi->dev == NULL) {
            device_printf(dev,
                "failed to add device for port %d.\n", i);
            rc = ENXIO;
            goto done;
        }
        pi->vi[0].dev = pi->dev;
        device_set_softc(pi->dev, pi);
    }

    /*
     * Interrupt type, # of interrupts, # of rx/tx queues, etc.
     */
    nports = sc->params.nports;
    rc = cfg_itype_and_nqueues(sc, &iaq);
    if (rc != 0)
        goto done;  /* error message displayed already */

    num_vis = iaq.num_vis;
    sc->intr_type = iaq.intr_type;
    sc->intr_count = iaq.nirq;

    s = &sc->sge;
    s->nrxq = nports * iaq.nrxq;
    s->ntxq = nports * iaq.ntxq;
    if (num_vis > 1) {
        s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
        s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
    }
    s->neq = s->ntxq + s->nrxq;  /* the free list in an rxq is an eq */
    s->neq += nports;  /* ctrl queues: 1 per port */
    s->niq = s->nrxq + 1;  /* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
    if (is_offload(sc) || is_ethoffload(sc)) {
        s->nofldtxq = nports * iaq.nofldtxq;
        if (num_vis > 1)
            s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
        s->neq += s->nofldtxq;

        s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq),
            M_CXGBE, M_ZERO | M_WAITOK);
    }
#endif
#ifdef TCP_OFFLOAD
    if (is_offload(sc)) {
        s->nofldrxq = nports * iaq.nofldrxq;
        if (num_vis > 1)
            s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
        s->neq += s->nofldrxq;  /* free list */
        s->niq += s->nofldrxq;

        s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
            M_CXGBE, M_ZERO | M_WAITOK);
    }
#endif
#ifdef DEV_NETMAP
    s->nnmrxq = 0;
    s->nnmtxq = 0;
    if (t4_native_netmap & NN_MAIN_VI) {
        s->nnmrxq += nports * iaq.nnmrxq;
        s->nnmtxq += nports * iaq.nnmtxq;
    }
    if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
        s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
        s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
    }
    s->neq += s->nnmtxq + s->nnmrxq;
    s->niq += s->nnmrxq;

    s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
        M_CXGBE, M_ZERO | M_WAITOK);
    s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
        M_CXGBE, M_ZERO | M_WAITOK);
#endif
    MPASS(s->niq <= s->iqmap_sz);
    MPASS(s->neq <= s->eqmap_sz);

    s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
        M_ZERO | M_WAITOK);
    s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
        M_ZERO | M_WAITOK);
    s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
        M_ZERO | M_WAITOK);
    s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
        M_ZERO | M_WAITOK);
    s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
        M_ZERO | M_WAITOK);

    sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
        M_ZERO | M_WAITOK);

    t4_init_l2t(sc, M_WAITOK);
    t4_init_smt(sc, M_WAITOK);
    t4_init_tx_sched(sc);
    t4_init_atid_table(sc);
#ifdef RATELIMIT
    t4_init_etid_table(sc);
#endif
#ifdef INET6
    t4_init_clip_table(sc);
#endif
1460 if (sc->vres.key.size != 0)
1461 sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
1462 sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
1463
1464 /*
1465 * Second pass over the ports. This time we know the number of rx and
1466 * tx queues that each port should get.
1467 */
1468 rqidx = tqidx = 0;
1469#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1470 ofld_tqidx = 0;
1471#endif
1472#ifdef TCP_OFFLOAD
1473 ofld_rqidx = 0;
1474#endif
1475#ifdef DEV_NETMAP
1476 nm_rqidx = nm_tqidx = 0;
1477#endif
1478 for_each_port(sc, i) {
1479 struct port_info *pi = sc->port[i];
1480 struct vi_info *vi;
1481
1482 if (pi == NULL)
1483 continue;
1484
1485 pi->nvi = num_vis;
1486 for_each_vi(pi, j, vi) {
1487 vi->pi = pi;
1488 vi->adapter = sc;
1489 vi->first_intr = -1;
1490 vi->qsize_rxq = t4_qsize_rxq;
1491 vi->qsize_txq = t4_qsize_txq;
1492
1493 vi->first_rxq = rqidx;
1494 vi->first_txq = tqidx;
1495 vi->tmr_idx = t4_tmr_idx;
1496 vi->pktc_idx = t4_pktc_idx;
1497 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
1498 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
1499
1500 rqidx += vi->nrxq;
1501 tqidx += vi->ntxq;
1502
1503 if (j == 0 && vi->ntxq > 1)
1504 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
1505 else
1506 vi->rsrv_noflowq = 0;
1507
1508#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1509 vi->first_ofld_txq = ofld_tqidx;
1510 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
1511 ofld_tqidx += vi->nofldtxq;
1512#endif
1513#ifdef TCP_OFFLOAD
1514 vi->ofld_tmr_idx = t4_tmr_idx_ofld;
1515 vi->ofld_pktc_idx = t4_pktc_idx_ofld;
1516 vi->first_ofld_rxq = ofld_rqidx;
1517 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
1518
1519 ofld_rqidx += vi->nofldrxq;
1520#endif
1521#ifdef DEV_NETMAP
1522 vi->first_nm_rxq = nm_rqidx;
1523 vi->first_nm_txq = nm_tqidx;
1524 if (j == 0) {
1525 vi->nnmrxq = iaq.nnmrxq;
1526 vi->nnmtxq = iaq.nnmtxq;
1527 } else {
1528 vi->nnmrxq = iaq.nnmrxq_vi;
1529 vi->nnmtxq = iaq.nnmtxq_vi;
1530 }
1531 nm_rqidx += vi->nnmrxq;
1532 nm_tqidx += vi->nnmtxq;
1533#endif
1534 }
1535 }
1536
1537 rc = t4_setup_intr_handlers(sc);
1538 if (rc != 0) {
1539 device_printf(dev,
1540 "failed to setup interrupt handlers: %d\n", rc);
1541 goto done;
1542 }
1543
1544 rc = bus_generic_probe(dev);
1545 if (rc != 0) {
1546 device_printf(dev, "failed to probe child drivers: %d\n", rc);
1547 goto done;
1548 }
1549
1550 /*
1551 * Ensure thread-safe mailbox access (in debug builds).
1552 *
1553 * So far this was the only thread accessing the mailbox but various
1554 * ifnets and sysctls are about to be created and their handlers/ioctls
1555 * will access the mailbox from different threads.
1556 */
1557 sc->flags |= CHK_MBOX_ACCESS;
1558
1559 rc = bus_generic_attach(dev);
1560 if (rc != 0) {
1561 device_printf(dev,
1562 "failed to attach all child ports: %d\n", rc);
1563 goto done;
1564 }
1565
1566 device_printf(dev,
1567 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1568 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1569 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1570 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1571 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1572
1573 t4_set_desc(sc);
1574
1575 notify_siblings(dev, 0);
1576
1577done:
1578 if (rc != 0 && sc->cdev) {
1579 /* cdev was created and so cxgbetool works; recover that way. */
1580 device_printf(dev,
1581 "error during attach, adapter is now in recovery mode.\n");
1582 rc = 0;
1583 }
1584
    if (rc != 0)
        t4_detach_common(dev);
    else
        t4_sysctls(sc);

    return (rc);
}

static int
t4_child_location(device_t bus, device_t dev, struct sbuf *sb)
{
    struct adapter *sc;
    struct port_info *pi;
    int i;

    sc = device_get_softc(bus);
    for_each_port(sc, i) {
        pi = sc->port[i];
        if (pi != NULL && pi->dev == dev) {
            sbuf_printf(sb, "port=%d", pi->port_id);
            break;
        }
    }
    return (0);
}

static int
t4_ready(device_t dev)
{
    struct adapter *sc;

    sc = device_get_softc(dev);
    if (sc->flags & FW_OK)
        return (0);
    return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
    struct adapter *sc;
    struct port_info *pi;

    sc = device_get_softc(dev);
    if (port < 0 || port >= MAX_NPORTS)
        return (EINVAL);
    pi = sc->port[port];
    if (pi == NULL || pi->dev == NULL)
        return (ENXIO);
    *child = pi->dev;
    return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
    device_t sibling;
    int error, i;

    error = 0;
    for (i = 0; i < PCI_FUNCMAX; i++) {
        if (i == pci_get_function(dev))
            continue;
        sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
            pci_get_slot(dev), i);
        if (sibling == NULL || !device_is_attached(sibling))
            continue;
        if (detaching)
            error = T4_DETACH_CHILD(sibling);
        else
            (void)T4_ATTACH_CHILD(sibling);
        if (error)
            break;
    }
    return (error);
}

/*
 * Idempotent
 */
1665static int
1666t4_detach(device_t dev)
1667{
1668 int rc;
1669
1670 rc = notify_siblings(dev, 1);
1671 if (rc) {
1672 device_printf(dev,
1673 "failed to detach sibling devices: %d\n", rc);
1674 return (rc);
1675 }
1676
1677 return (t4_detach_common(dev));
1678}
1679
1680int
1681t4_detach_common(device_t dev)
1682{
1683 struct adapter *sc;
1684 struct port_info *pi;
1685 int i, rc;
1686
1687 sc = device_get_softc(dev);
1688
1689 if (sc->cdev) {
1690 destroy_dev(sc->cdev);
1691 sc->cdev = NULL;
1692 }
1693
1694 sx_xlock(&t4_list_lock);
1695 SLIST_REMOVE(&t4_list, sc, adapter, link);
1696 sx_xunlock(&t4_list_lock);
1697
1698 sc->flags &= ~CHK_MBOX_ACCESS;
1699 if (sc->flags & FULL_INIT_DONE) {
1700 if (!(sc->flags & IS_VF))
1701 t4_intr_disable(sc);
1702 }
1703
1704 if (device_is_attached(dev)) {
1705 rc = bus_generic_detach(dev);
1706 if (rc) {
1707 device_printf(dev,
1708 "failed to detach child devices: %d\n", rc);
1709 return (rc);
1710 }
1711 }
1712
1713 for (i = 0; i < sc->intr_count; i++)
1714 t4_free_irq(sc, &sc->irq[i]);
1715
1716 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1717 t4_free_tx_sched(sc);
1718
1719 for (i = 0; i < MAX_NPORTS; i++) {
1720 pi = sc->port[i];
1721 if (pi) {
1722 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1723 if (pi->dev)
1724 device_delete_child(dev, pi->dev);
1725
1726 mtx_destroy(&pi->pi_lock);
1727 free(pi->vi, M_CXGBE);
1728 free(pi, M_CXGBE);
1729 }
1730 }
1731
1732 device_delete_children(dev);
1733 sysctl_ctx_free(&sc->ctx);
1734 adapter_full_uninit(sc);
1735
1736 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1737 t4_fw_bye(sc, sc->mbox);
1738
1739 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1740 pci_release_msi(dev);
1741
1742 if (sc->regs_res)
1743 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1744 sc->regs_res);
1745
1746 if (sc->udbs_res)
1747 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1748 sc->udbs_res);
1749
1750 if (sc->msix_res)
1751 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1752 sc->msix_res);
1753
1754 if (sc->l2t)
1755 t4_free_l2t(sc->l2t);
1756 if (sc->smt)
1757 t4_free_smt(sc->smt);
1758 t4_free_atid_table(sc);
1759#ifdef RATELIMIT
1760 t4_free_etid_table(sc);
1761#endif
1762 if (sc->key_map)
1763 vmem_destroy(sc->key_map);
1764#ifdef INET6
1765 t4_destroy_clip_table(sc);
1766#endif
1767
1768#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1769 free(sc->sge.ofld_txq, M_CXGBE);
1770#endif
1771#ifdef TCP_OFFLOAD
1772 free(sc->sge.ofld_rxq, M_CXGBE);
1773#endif
1774#ifdef DEV_NETMAP
1775 free(sc->sge.nm_rxq, M_CXGBE);
1776 free(sc->sge.nm_txq, M_CXGBE);
1777#endif
1778 free(sc->irq, M_CXGBE);
1779 free(sc->sge.rxq, M_CXGBE);
1780 free(sc->sge.txq, M_CXGBE);
1781 free(sc->sge.ctrlq, M_CXGBE);
1782 free(sc->sge.iqmap, M_CXGBE);
1783 free(sc->sge.eqmap, M_CXGBE);
1784 free(sc->tids.ftid_tab, M_CXGBE);
1785 free(sc->tids.hpftid_tab, M_CXGBE);
1786 free_hftid_hash(&sc->tids);
1787 free(sc->tids.tid_tab, M_CXGBE);
1788 free(sc->tt.tls_rx_ports, M_CXGBE);
1790
1791 callout_drain(&sc->ktls_tick);
1792 callout_drain(&sc->sfl_callout);
1793 if (mtx_initialized(&sc->tids.ftid_lock)) {
1794 mtx_destroy(&sc->tids.ftid_lock);
1795 cv_destroy(&sc->tids.ftid_cv);
1796 }
1797 if (mtx_initialized(&sc->tids.atid_lock))
1798 mtx_destroy(&sc->tids.atid_lock);
1799 if (mtx_initialized(&sc->ifp_lock))
1800 mtx_destroy(&sc->ifp_lock);
1801
1802 if (rw_initialized(&sc->policy_lock)) {
1803 rw_destroy(&sc->policy_lock);
1804#ifdef TCP_OFFLOAD
1805 if (sc->policy != NULL)
1806 free_offload_policy(sc->policy);
1807#endif
1808 }
1809
1810 for (i = 0; i < NUM_MEMWIN; i++) {
1811 struct memwin *mw = &sc->memwin[i];
1812
1813 if (rw_initialized(&mw->mw_lock))
1814 rw_destroy(&mw->mw_lock);
1815 }
1816
1817 mtx_destroy(&sc->sfl_lock);
1818 mtx_destroy(&sc->reg_lock);
1819 mtx_destroy(&sc->sc_lock);
1820
1821 bzero(sc, sizeof(*sc));
1822
1823 return (0);
1824}
1825
1826static inline bool
1827ok_to_reset(struct adapter *sc)
1828{
1829 struct tid_info *t = &sc->tids;
1830 struct port_info *pi;
1831 struct vi_info *vi;
1832 int i, j;
1833 const int caps = IFCAP_TOE | IFCAP_TXTLS | IFCAP_NETMAP | IFCAP_TXRTLMT;
1834
1835 ASSERT_SYNCHRONIZED_OP(sc);
1836 MPASS(!(sc->flags & IS_VF));
1837
1838 for_each_port(sc, i) {
1839 pi = sc->port[i];
1840 for_each_vi(pi, j, vi) {
1841 if (vi->ifp->if_capenable & caps)
1842 return (false);
1843 }
1844 }
1845
1846 if (atomic_load_int(&t->tids_in_use) > 0)
1847 return (false);
1848 if (atomic_load_int(&t->stids_in_use) > 0)
1849 return (false);
1850 if (atomic_load_int(&t->atids_in_use) > 0)
1851 return (false);
1852 if (atomic_load_int(&t->ftids_in_use) > 0)
1853 return (false);
1854 if (atomic_load_int(&t->hpftids_in_use) > 0)
1855 return (false);
1856 if (atomic_load_int(&t->etids_in_use) > 0)
1857 return (false);
1858
1859 return (true);
1860}
1861
1862static inline int
1863stop_adapter(struct adapter *sc)
1864{
1865 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED)))
1866 return (1); /* Already stopped. */
1867 return (t4_shutdown_adapter(sc));
1868}
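/*
 * Note: the atomic test-and-set on ADAP_STOPPED is what makes stop_adapter
 * safe to call from multiple paths (suspend, fatal error handling); only the
 * first caller reaches t4_shutdown_adapter, every later caller sees the bit
 * already set and returns 1 immediately.
 */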
1869
1870static int
1871t4_suspend(device_t dev)
1872{
1873 struct adapter *sc = device_get_softc(dev);
1874 struct port_info *pi;
1875 struct vi_info *vi;
1876 struct ifnet *ifp;
1877 struct sge_rxq *rxq;
1878 struct sge_txq *txq;
1879 struct sge_wrq *wrq;
1880#ifdef TCP_OFFLOAD
1881 struct sge_ofld_rxq *ofld_rxq;
1882#endif
1883#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1884 struct sge_ofld_txq *ofld_txq;
1885#endif
1886 int rc, i, j, k;
1887
1888 CH_ALERT(sc, "suspend requested\n");
1889
1890 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
1891 if (rc != 0)
1892 return (ENXIO);
1893
1894 /* XXX: Can the kernel call suspend repeatedly without resume? */
1895 MPASS(!hw_off_limits(sc));
1896
1897 if (!ok_to_reset(sc)) {
1898 /* XXX: should list what resource is preventing suspend. */
1899 CH_ERR(sc, "not safe to suspend.\n");
1900 rc = EBUSY;
1901 goto done;
1902 }
1903
1904 /* No more DMA or interrupts. */
1905 stop_adapter(sc);
1906
1907 /* Quiesce all activity. */
1908 for_each_port(sc, i) {
1909 pi = sc->port[i];
1910 pi->vxlan_tcam_entry = false;
1911
1912 PORT_LOCK(pi);
1913 if (pi->up_vis > 0) {
1914 /*
1915 * t4_shutdown_adapter has already shut down all the
1916 * PHYs but it also disables interrupts and DMA so there
1917 * won't be a link interrupt. So we update the state
1918 * manually and inform the kernel.
1919 */
1920 pi->link_cfg.link_ok = false;
1921 t4_os_link_changed(pi);
1922 }
1923 PORT_UNLOCK(pi);
1924
1925 for_each_vi(pi, j, vi) {
1926 vi->xact_addr_filt = -1;
1927 mtx_lock(&vi->tick_mtx);
1928 vi->flags |= VI_SKIP_STATS;
1929 mtx_unlock(&vi->tick_mtx);
1930 if (!(vi->flags & VI_INIT_DONE))
1931 continue;
1932
1933 ifp = vi->ifp;
1934 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1935 mtx_lock(&vi->tick_mtx);
1936 callout_stop(&vi->tick);
1937 mtx_unlock(&vi->tick_mtx);
1938 callout_drain(&vi->tick);
1939 }
1940
1941 /*
1942 * Note that the HW is not available.
1943 */
1944 for_each_txq(vi, k, txq) {
1945 TXQ_LOCK(txq);
1946 txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED);
1947 TXQ_UNLOCK(txq);
1948 }
1949#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1950 for_each_ofld_txq(vi, k, ofld_txq) {
1951 ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
1952 }
1953#endif
1954 for_each_rxq(vi, k, rxq) {
1955 rxq->iq.flags &= ~IQ_HW_ALLOCATED;
1956 }
1957#if defined(TCP_OFFLOAD)
1958 for_each_ofld_rxq(vi, k, ofld_rxq) {
1959 ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED;
1960 }
1961#endif
1962
1963 quiesce_vi(vi);
1964 }
1965
1966 if (sc->flags & FULL_INIT_DONE) {
1967 /* Control queue */
1968 wrq = &sc->sge.ctrlq[i];
1969 wrq->eq.flags &= ~EQ_HW_ALLOCATED;
1971 }
1972 }
1973 if (sc->flags & FULL_INIT_DONE) {
1974 /* Firmware event queue */
1975 sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED;
1976 quiesce_iq_fl(sc, &sc->sge.fwq, NULL);
1977 }
1978
1979 /* Mark the adapter totally off limits. */
1980 mtx_lock(&sc->reg_lock);
1981 atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
1982 sc->flags &= ~(FW_OK | MASTER_PF);
1983 sc->reset_thread = NULL;
1984 mtx_unlock(&sc->reg_lock);
1985
1986 CH_ALERT(sc, "suspend completed.\n");
1987done:
1988 end_synchronized_op(sc, 0);
1989 return (rc);
1990}
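/*
 * After a successful suspend the adapter is fully fenced off: every queue has
 * had its *_HW_ALLOCATED flag cleared, HW_OFF_LIMITS is set, and the FW_OK
 * and MASTER_PF flags are gone, so no thread will touch the registers or the
 * mailbox until t4_resume re-establishes contact with the firmware.
 */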
1991
1992struct adapter_pre_reset_state {
1993 u_int flags;
1994 uint16_t nbmcaps;
1995 uint16_t linkcaps;
1996 uint16_t switchcaps;
1997 uint16_t niccaps;
1998 uint16_t toecaps;
1999 uint16_t rdmacaps;
2000 uint16_t cryptocaps;
2001 uint16_t iscsicaps;
2002 uint16_t fcoecaps;
2003
2004 u_int cfcsum;
2005 char cfg_file[32];
2006
2007 struct adapter_params params;
2008 struct t4_virt_res vres;
2009 struct tid_info tids;
2010 struct sge sge;
2011
2012 int rawf_base;
2013 int nrawf;
2014
2015};
2016
2017static void
2018save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2019{
2020
2021 ASSERT_SYNCHRONIZED_OP(sc);
2022
2023 o->flags = sc->flags;
2024
2025 o->nbmcaps = sc->nbmcaps;
2026 o->linkcaps = sc->linkcaps;
2027 o->switchcaps = sc->switchcaps;
2028 o->niccaps = sc->niccaps;
2029 o->toecaps = sc->toecaps;
2030 o->rdmacaps = sc->rdmacaps;
2031 o->cryptocaps = sc->cryptocaps;
2032 o->iscsicaps = sc->iscsicaps;
2033 o->fcoecaps = sc->fcoecaps;
2034
2035 o->cfcsum = sc->cfcsum;
2036 MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file));
2037 memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file));
2038
2039 o->params = sc->params;
2040 o->vres = sc->vres;
2041 o->tids = sc->tids;
2042 o->sge = sc->sge;
2043
2044 o->rawf_base = sc->rawf_base;
2045 o->nrawf = sc->nrawf;
2046}
2047
2048static int
2049compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2050{
2051 int rc = 0;
2052
2053 ASSERT_SYNCHRONIZED_OP(sc);
2054
2055 /* Capabilities */
2056#define COMPARE_CAPS(c) do { \
2057 if (o->c##caps != sc->c##caps) { \
2058 CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
2059 sc->c##caps); \
2060 rc = EINVAL; \
2061 } \
2062} while (0)
2063 COMPARE_CAPS(nbm);
2064 COMPARE_CAPS(link);
2065 COMPARE_CAPS(switch);
2066 COMPARE_CAPS(nic);
2067 COMPARE_CAPS(toe);
2068 COMPARE_CAPS(rdma);
2069 COMPARE_CAPS(crypto);
2070 COMPARE_CAPS(iscsi);
2071 COMPARE_CAPS(fcoe);
2072#undef COMPARE_CAPS
2073
2074 /* Firmware config file */
2075 if (o->cfcsum != sc->cfcsum) {
2076 CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file,
2077 o->cfcsum, sc->cfg_file, sc->cfcsum);
2078 rc = EINVAL;
2079 }
2080
2081#define COMPARE_PARAM(p, name) do { \
2082 if (o->p != sc->p) { \
2083 CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \
2084 rc = EINVAL; \
2085 } \
2086} while (0)
2089 COMPARE_PARAM(tids.ftid_base, ftid_base);
2090 COMPARE_PARAM(tids.ftid_end, ftid_end);
2091 COMPARE_PARAM(tids.nftids, nftids);
2092 COMPARE_PARAM(vres.l2t.start, l2t_start);
2093 COMPARE_PARAM(vres.l2t.size, l2t_size);
2096 COMPARE_PARAM(tids.tid_base, tid_base);
2097 COMPARE_PARAM(tids.hpftid_base, hpftid_base);
2098 COMPARE_PARAM(tids.hpftid_end, hpftid_end);
2099 COMPARE_PARAM(tids.nhpftids, nhpftids);
2100 COMPARE_PARAM(rawf_base, rawf_base);
2101 COMPARE_PARAM(nrawf, nrawf);
2102 COMPARE_PARAM(params.mps_bg_map, mps_bg_map);
2103 COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support);
2104 COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl);
2105 COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support);
2106 COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr);
2107 COMPARE_PARAM(tids.ntids, ntids);
2108 COMPARE_PARAM(tids.etid_base, etid_base);
2109 COMPARE_PARAM(tids.etid_end, etid_end);
2110 COMPARE_PARAM(tids.netids, netids);
2111 COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred);
2112 COMPARE_PARAM(params.ethoffload, ethoffload);
2113 COMPARE_PARAM(tids.natids, natids);
2114 COMPARE_PARAM(tids.stid_base, stid_base);
2115 COMPARE_PARAM(vres.ddp.start, ddp_start);
2116 COMPARE_PARAM(vres.ddp.size, ddp_size);
2117 COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred);
2118 COMPARE_PARAM(vres.stag.start, stag_start);
2119 COMPARE_PARAM(vres.stag.size, stag_size);
2120 COMPARE_PARAM(vres.rq.start, rq_start);
2121 COMPARE_PARAM(vres.rq.size, rq_size);
2122 COMPARE_PARAM(vres.pbl.start, pbl_start);
2123 COMPARE_PARAM(vres.pbl.size, pbl_size);
2124 COMPARE_PARAM(vres.qp.start, qp_start);
2125 COMPARE_PARAM(vres.qp.size, qp_size);
2126 COMPARE_PARAM(vres.cq.start, cq_start);
2127 COMPARE_PARAM(vres.cq.size, cq_size);
2128 COMPARE_PARAM(vres.ocq.start, ocq_start);
2129 COMPARE_PARAM(vres.ocq.size, ocq_size);
2130 COMPARE_PARAM(vres.srq.start, srq_start);
2131 COMPARE_PARAM(vres.srq.size, srq_size);
2132 COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp);
2133 COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter);
2134 COMPARE_PARAM(vres.iscsi.start, iscsi_start);
2135 COMPARE_PARAM(vres.iscsi.size, iscsi_size);
2136 COMPARE_PARAM(vres.key.start, key_start);
2137 COMPARE_PARAM(vres.key.size, key_size);
2138#undef COMPARE_PARAM
2139
2140 return (rc);
2141}
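/*
 * COMPARE_CAPS pastes the capability name (COMPARE_CAPS(nic) checks
 * o->niccaps against sc->niccaps) while COMPARE_PARAM compares any saved
 * field; a single mismatch makes the whole post-reset validation fail with
 * EINVAL, so a changed firmware or configuration file cannot go unnoticed.
 */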
2142
2143static int
2144t4_resume(device_t dev)
2145{
2146 struct adapter *sc = device_get_softc(dev);
2147 struct adapter_pre_reset_state *old_state = NULL;
2148 struct port_info *pi;
2149 struct vi_info *vi;
2150 struct ifnet *ifp;
2151 struct sge_txq *txq;
2152 int rc, i, j, k;
2153
2154 CH_ALERT(sc, "resume requested.\n");
2155
2156 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
2157 if (rc != 0)
2158 return (ENXIO);
2159 MPASS(hw_off_limits(sc));
2160 MPASS((sc->flags & FW_OK) == 0);
2161 MPASS((sc->flags & MASTER_PF) == 0);
2162 MPASS(sc->reset_thread == NULL);
2163 sc->reset_thread = curthread;
2164
2165 /* Register access is expected to work by the time we're here. */
2166 if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
2167 CH_ERR(sc, "%s: can't read device registers\n", __func__);
2168 rc = ENXIO;
2169 goto done;
2170 }
2171
2172 /* Note that HW_OFF_LIMITS is cleared a bit later. */
2173 atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR | ADAP_STOPPED);
2174
2175 /* Restore memory window. */
2176 setup_memwin(sc);
2177
2178 /* Go no further if recovery mode has been requested. */
2179 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
2180 CH_ALERT(sc, "recovery mode on resume.\n");
2181 rc = 0;
2182 mtx_lock(&sc->reg_lock);
2183 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2184 mtx_unlock(&sc->reg_lock);
2185 goto done;
2186 }
2187
2188 old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK);
2189 save_caps_and_params(sc, old_state);
2190
2191 /* Reestablish contact with firmware and become the primary PF. */
2192 rc = contact_firmware(sc);
2193 if (rc != 0)
2194 goto done; /* error message displayed already */
2195 MPASS(sc->flags & FW_OK);
2196
2197 if (sc->flags & MASTER_PF) {
2198 rc = partition_resources(sc);
2199 if (rc != 0)
2200 goto done; /* error message displayed already */
2201 t4_intr_clear(sc);
2202 }
2203
2204 rc = get_params__post_init(sc);
2205 if (rc != 0)
2206 goto done; /* error message displayed already */
2207
2208 rc = set_params__post_init(sc);
2209 if (rc != 0)
2210 goto done; /* error message displayed already */
2211
2212 rc = compare_caps_and_params(sc, old_state);
2213 if (rc != 0)
2214 goto done; /* error message displayed already */
2215
2216 for_each_port(sc, i) {
2217 pi = sc->port[i];
2218 MPASS(pi != NULL);
2219 MPASS(pi->vi != NULL);
2220 MPASS(pi->vi[0].dev == pi->dev);
2221
2222 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
2223 if (rc != 0) {
2224 CH_ERR(sc,
2225 "failed to re-initialize port %d: %d\n", i, rc);
2226 goto done;
2227 }
2228 MPASS(sc->chan_map[pi->tx_chan] == i);
2229
2230 PORT_LOCK(pi);
2231 fixup_link_config(pi);
2232 build_medialist(pi);
2233 PORT_UNLOCK(pi);
2234 for_each_vi(pi, j, vi) {
2235 if (IS_MAIN_VI(vi))
2236 continue;
2237 rc = alloc_extra_vi(sc, pi, vi);
2238 if (rc != 0) {
2239 CH_ERR(vi,
2240 "failed to re-allocate extra VI: %d\n", rc);
2241 goto done;
2242 }
2243 }
2244 }
2245
2246 /*
2247 * Interrupts and queues are about to be enabled and other threads will
2248 * want to access the hardware too. It is safe to do so. Note that
2249 * this thread is still in the middle of a synchronized_op.
2250 */
2251 mtx_lock(&sc->reg_lock);
2252 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2253 mtx_unlock(&sc->reg_lock);
2254
2255 if (sc->flags & FULL_INIT_DONE) {
2256 rc = adapter_full_init(sc);
2257 if (rc != 0) {
2258 CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc);
2259 goto done;
2260 }
2261
2262 if (sc->vxlan_refcount > 0)
2263 enable_vxlan_rx(sc);
2264
2265 for_each_port(sc, i) {
2266 pi = sc->port[i];
2267 for_each_vi(pi, j, vi) {
2268 mtx_lock(&vi->tick_mtx);
2269 vi->flags &= ~VI_SKIP_STATS;
2270 mtx_unlock(&vi->tick_mtx);
2271 if (!(vi->flags & VI_INIT_DONE))
2272 continue;
2273 rc = vi_full_init(vi);
2274 if (rc != 0) {
2275 CH_ERR(vi, "failed to re-initialize "
2276 "interface: %d\n", rc);
2277 goto done;
2278 }
2279
2280 ifp = vi->ifp;
2281 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2282 continue;
2283 /*
2284 * Note that we do not setup multicast addresses
2285 * in the first pass. This ensures that the
2286 * unicast DMACs for all VIs on all ports get an
2287 * MPS TCAM entry.
2288 */
2289 rc = update_mac_settings(ifp, XGMAC_ALL &
2290 ~XGMAC_MCADDRS);
2291 if (rc != 0) {
2292 CH_ERR(vi, "failed to re-configure MAC: %d\n", rc);
2293 goto done;
2294 }
2295 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true,
2296 true);
2297 if (rc != 0) {
2298 CH_ERR(vi, "failed to re-enable VI: %d\n", rc);
2299 goto done;
2300 }
2301 for_each_txq(vi, k, txq) {
2302 TXQ_LOCK(txq);
2303 txq->eq.flags |= EQ_ENABLED;
2304 TXQ_UNLOCK(txq);
2305 }
2306 mtx_lock(&vi->tick_mtx);
2307 callout_schedule(&vi->tick, hz);
2308 mtx_unlock(&vi->tick_mtx);
2309 }
2310 PORT_LOCK(pi);
2311 if (pi->up_vis > 0) {
2312 t4_update_port_info(pi);
2313 fixup_link_config(pi);
2314 build_medialist(pi);
2315 apply_link_config(pi);
2316 if (pi->link_cfg.link_ok)
2317 t4_os_link_changed(pi);
2318 }
2319 PORT_UNLOCK(pi);
2320 }
2321
2322 /* Now reprogram the L2 multicast addresses. */
2323 for_each_port(sc, i) {
2324 pi = sc->port[i];
2325 for_each_vi(pi, j, vi) {
2326 if (!(vi->flags & VI_INIT_DONE))
2327 continue;
2328 ifp = vi->ifp;
2329 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2330 continue;
2331 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2332 if (rc != 0) {
2333 CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc);
2334 rc = 0; /* carry on */
2335 }
2336 }
2337 }
2338 }
2339done:
2340 if (rc == 0) {
2341 sc->incarnation++;
2342 CH_ALERT(sc, "resume completed.\n");
2343 }
2344 end_synchronized_op(sc, 0);
2345 free(old_state, M_CXGBE);
2346 return (rc);
2347}
2348
2349static int
2350t4_reset_prepare(device_t dev, device_t child)
2351{
2352 struct adapter *sc = device_get_softc(dev);
2353
2354 CH_ALERT(sc, "reset_prepare.\n");
2355 return (0);
2356}
2357
2358static int
2359t4_reset_post(device_t dev, device_t child)
2360{
2361 struct adapter *sc = device_get_softc(dev);
2362
2363 CH_ALERT(sc, "reset_post.\n");
2364 return (0);
2365}
2366
2367static int
2368reset_adapter(struct adapter *sc)
2369{
2370 int rc, oldinc, error_flags;
2371
2372 CH_ALERT(sc, "reset requested.\n");
2373
2374 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
2375 if (rc != 0)
2376 return (EBUSY);
2377
2378 if (hw_off_limits(sc)) {
2379 CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
2380 rc = ENXIO;
2381 goto done;
2382 }
2383
2384 if (!ok_to_reset(sc)) {
2385 /* XXX: should list what resource is preventing reset. */
2386 CH_ERR(sc, "not safe to reset.\n");
2387 rc = EBUSY;
2388 goto done;
2389 }
2390
2391done:
2392 oldinc = sc->incarnation;
2393 end_synchronized_op(sc, 0);
2394 if (rc != 0)
2395 return (rc); /* Error logged already. */
2396
2397 atomic_add_int(&sc->num_resets, 1);
2398 mtx_lock(&Giant);
2399 rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
2400 mtx_unlock(&Giant);
2401 if (rc != 0)
2402 CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
2403 else {
2404 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst2");
2405 if (rc != 0)
2406 return (EBUSY);
2407 error_flags = atomic_load_int(&sc->error_flags);
2408 if (sc->incarnation > oldinc && error_flags == 0) {
2409 CH_ALERT(sc, "bus_reset_child succeeded.\n");
2410 } else {
2411 CH_ERR(sc, "adapter did not reset properly, flags "
2412 "0x%08x, error_flags 0x%08x.\n", sc->flags,
2413 error_flags);
2414 rc = ENXIO;
2415 }
2416 end_synchronized_op(sc, 0);
2417 }
2418
2419 return (rc);
2420}
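/*
 * The incarnation counter is the success signal here: t4_resume bumps it
 * only when it completes without error, so a reset is declared good only if
 * the counter moved past its pre-reset value and no error flags are set
 * afterwards; otherwise the adapter is left dead and ENXIO is returned.
 */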
2421
2422static void
2423reset_adapter_task(void *arg, int pending)
2424{
2425 /* XXX: t4_async_event here? */
2426 reset_adapter(arg);
2427}
2428
2429static int
2430cxgbe_probe(device_t dev)
2431{
2432 char buf[128];
2433 struct port_info *pi = device_get_softc(dev);
2434
2435 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
2436 device_set_desc_copy(dev, buf);
2437
2438 return (BUS_PROBE_DEFAULT);
2439}
2440
2441#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
2442 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
2443 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
2444 IFCAP_HWRXTSTMP | IFCAP_MEXTPG)
2445#define T4_CAP_ENABLE (T4_CAP)
2446
2447static int
2448cxgbe_vi_attach(device_t dev, struct vi_info *vi)
2449{
2450 struct ifnet *ifp;
2451 struct sbuf *sb;
2452 struct sysctl_ctx_list *ctx = &vi->ctx;
2453 struct sysctl_oid_list *children;
2454 struct pfil_head_args pa;
2455 struct adapter *sc = vi->adapter;
2456
2457 sysctl_ctx_init(ctx);
2458 children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev));
2459 vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq",
2460 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues");
2461 vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq",
2462 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues");
2463#ifdef DEV_NETMAP
2464 vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq",
2465 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues");
2466 vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq",
2467 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues");
2468#endif
2469#ifdef TCP_OFFLOAD
2470 vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq",
2471 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues");
2472#endif
2473#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2474 vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq",
2475 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues");
2476#endif
2477
2478 vi->xact_addr_filt = -1;
2479 mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF);
2480 callout_init_mtx(&vi->tick, &vi->tick_mtx, 0);
2481 if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
2482 vi->flags |= TX_USES_VM_WR;
2483
2484 /* Allocate an ifnet and set it up */
2485 ifp = if_alloc_dev(IFT_ETHER, dev);
2486 if (ifp == NULL) {
2487 device_printf(dev, "Cannot allocate ifnet\n");
2488 return (ENOMEM);
2489 }
2490 vi->ifp = ifp;
2491 ifp->if_softc = vi;
2492
2493 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2494 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2495
2496 ifp->if_init = cxgbe_init;
2497 ifp->if_ioctl = cxgbe_ioctl;
2498 ifp->if_transmit = cxgbe_transmit;
2499 ifp->if_qflush = cxgbe_qflush;
2500 if (vi->pi->nvi > 1 || sc->flags & IS_VF)
2501 ifp->if_get_counter = vi_get_counter;
2502 else
2503 ifp->if_get_counter = cxgbe_get_counter;
2504#if defined(KERN_TLS) || defined(RATELIMIT)
2505 ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
2506#endif
2507#ifdef RATELIMIT
2508 ifp->if_ratelimit_query = cxgbe_ratelimit_query;
2509#endif
2510
2511 ifp->if_capabilities = T4_CAP;
2512 ifp->if_capenable = T4_CAP_ENABLE;
2513 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
2514 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
2515 if (chip_id(sc) >= CHELSIO_T6) {
2516 ifp->if_capabilities |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO;
2517 ifp->if_capenable |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO;
2518 ifp->if_hwassist |= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
2519 CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
2520 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN;
2521 }
2522
2523#ifdef TCP_OFFLOAD
2524 if (vi->nofldrxq != 0)
2525 ifp->if_capabilities |= IFCAP_TOE;
2526#endif
2527#ifdef RATELIMIT
2528 if (is_ethoffload(sc) && vi->nofldtxq != 0) {
2529 ifp->if_capabilities |= IFCAP_TXRTLMT;
2530 ifp->if_capenable |= IFCAP_TXRTLMT;
2531 }
2532#endif
2533
2534 ifp->if_hw_tsomax = IP_MAXPACKET;
2535 if (vi->flags & TX_USES_VM_WR)
2536 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO;
2537 else
2538 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
2539#ifdef RATELIMIT
2540 if (is_ethoffload(sc) && vi->nofldtxq != 0)
2541 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO;
2542#endif
2543 ifp->if_hw_tsomaxsegsize = 65536;
2544#ifdef KERN_TLS
2545 if (is_ktls(sc)) {
2546 ifp->if_capabilities |= IFCAP_TXTLS;
2547 if (sc->flags & KERN_TLS_ON)
2548 ifp->if_capenable |= IFCAP_TXTLS;
2549 }
2550#endif
2551
2552 ether_ifattach(ifp, vi->hw_addr);
2553#ifdef DEV_NETMAP
2554 if (vi->nnmrxq != 0)
2555 cxgbe_nm_attach(vi);
2556#endif
2557 sb = sbuf_new_auto();
2558 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
2559#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2560 switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
2561 case IFCAP_TOE:
2562 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
2563 break;
2564 case IFCAP_TOE | IFCAP_TXRTLMT:
2565 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
2566 break;
2567 case IFCAP_TXRTLMT:
2568 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
2569 break;
2570 }
2571#endif
2572#ifdef TCP_OFFLOAD
2573 if (ifp->if_capabilities & IFCAP_TOE)
2574 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
2575#endif
2576#ifdef DEV_NETMAP
2577 if (ifp->if_capabilities & IFCAP_NETMAP)
2578 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
2579 vi->nnmtxq, vi->nnmrxq);
2580#endif
2581 sbuf_finish(sb);
2582 device_printf(dev, "%s\n", sbuf_data(sb));
2583 sbuf_delete(sb);
2584
2585 vi_sysctls(vi);
2586
2587 pa.pa_version = PFIL_VERSION;
2588 pa.pa_flags = PFIL_IN;
2589 pa.pa_type = PFIL_TYPE_ETHERNET;
2590 pa.pa_headname = ifp->if_xname;
2591 vi->pfil = pfil_head_register(&pa);
2592
2593 return (0);
2594}
2595
2596static int
2597cxgbe_attach(device_t dev)
2598{
2599 struct port_info *pi = device_get_softc(dev);
2600 struct adapter *sc = pi->adapter;
2601 struct vi_info *vi;
2602 int i, rc;
2603
2604 sysctl_ctx_init(&pi->ctx);
2605
2606 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
2607 if (rc)
2608 return (rc);
2609
2610 for_each_vi(pi, i, vi) {
2611 if (i == 0)
2612 continue;
2613 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
2614 if (vi->dev == NULL) {
2615 device_printf(dev, "failed to add VI %d\n", i);
2616 continue;
2617 }
2618 device_set_softc(vi->dev, vi);
2619 }
2620
2621 cxgbe_sysctls(pi);
2622
2623 bus_generic_attach(dev);
2624
2625 return (0);
2626}
2627
2628static void
2629cxgbe_vi_detach(struct vi_info *vi)
2630{
2631 struct ifnet *ifp = vi->ifp;
2632
2633 if (vi->pfil != NULL) {
2634 pfil_head_unregister(vi->pfil);
2635 vi->pfil = NULL;
2636 }
2637
2638 ether_ifdetach(ifp);
2639
2640 /* Let detach proceed even if these fail. */
2641#ifdef DEV_NETMAP
2642 if (ifp->if_capabilities & IFCAP_NETMAP)
2643 cxgbe_nm_detach(vi);
2644#endif
2645 cxgbe_uninit_synchronized(vi);
2646 callout_drain(&vi->tick);
2647 sysctl_ctx_free(&vi->ctx);
2648 vi_full_uninit(vi);
2649
2650 if_free(vi->ifp);
2651 vi->ifp = NULL;
2652}
2653
2654static int
2655cxgbe_detach(device_t dev)
2656{
2657 struct port_info *pi = device_get_softc(dev);
2658 struct adapter *sc = pi->adapter;
2659 int rc;
2660
2661 /* Detach the extra VIs first. */
2662 rc = bus_generic_detach(dev);
2663 if (rc)
2664 return (rc);
2665 device_delete_children(dev);
2666
2667 sysctl_ctx_free(&pi->ctx);
2668 doom_vi(sc, &pi->vi[0]);
2669
2670 if (pi->flags & HAS_TRACEQ) {
2671 sc->traceq = -1; /* cloner should not create ifnet */
2672 t4_tracer_port_detach(sc);
2673 }
2674
2675 cxgbe_vi_detach(&pi->vi[0]);
2676 ifmedia_removeall(&pi->media);
2677
2678 end_synchronized_op(sc, 0);
2679
2680 return (0);
2681}
2682
2683static void
2684cxgbe_init(void *arg)
2685{
2686 struct vi_info *vi = arg;
2687 struct adapter *sc = vi->adapter;
2688
2689 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
2690 return;
2691 cxgbe_init_synchronized(vi);
2692 end_synchronized_op(sc, 0);
2693}
2694
2695static int
2696cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
2697{
2698 int rc = 0, mtu, flags;
2699 struct vi_info *vi = ifp->if_softc;
2700 struct port_info *pi = vi->pi;
2701 struct adapter *sc = pi->adapter;
2702 struct ifreq *ifr = (struct ifreq *)data;
2703 uint32_t mask;
2704
2705 switch (cmd) {
2706 case SIOCSIFMTU:
2707 mtu = ifr->ifr_mtu;
2708 if (mtu < ETHERMIN || mtu > MAX_MTU)
2709 return (EINVAL);
2710
2711 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
2712 if (rc)
2713 return (rc);
2714 ifp->if_mtu = mtu;
2715 if (vi->flags & VI_INIT_DONE) {
2716 t4_update_fl_bufsize(ifp);
2717 if (!hw_off_limits(sc) &&
2718 ifp->if_drv_flags & IFF_DRV_RUNNING)
2719 rc = update_mac_settings(ifp, XGMAC_MTU);
2720 }
2721 end_synchronized_op(sc, 0);
2722 break;
2723
2724 case SIOCSIFFLAGS:
2725 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
2726 if (rc)
2727 return (rc);
2728
2729 if (hw_off_limits(sc)) {
2730 rc = ENXIO;
2731 goto fail;
2732 }
2733
2734 if (ifp->if_flags & IFF_UP) {
2735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2736 flags = vi->if_flags;
2737 if ((ifp->if_flags ^ flags) &
2738 (IFF_PROMISC | IFF_ALLMULTI)) {
2739 rc = update_mac_settings(ifp,
2740 XGMAC_PROMISC | XGMAC_ALLMULTI);
2741 }
2742 } else {
2743 rc = cxgbe_init_synchronized(vi);
2744 }
2745 vi->if_flags = ifp->if_flags;
2746 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2747 rc = cxgbe_uninit_synchronized(vi);
2748 }
2749 end_synchronized_op(sc, 0);
2750 break;
2751
2752 case SIOCADDMULTI:
2753 case SIOCDELMULTI:
2754 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
2755 if (rc)
2756 return (rc);
2757 if (!hw_off_limits(sc) && ifp->if_drv_flags & IFF_DRV_RUNNING)
2758 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2759 end_synchronized_op(sc, 0);
2760 break;
2761
2762 case SIOCSIFCAP:
2763 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
2764 if (rc)
2765 return (rc);
2766
2767 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2768 if (mask & IFCAP_TXCSUM) {
2769 ifp->if_capenable ^= IFCAP_TXCSUM;
2770 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2771
2772 if (IFCAP_TSO4 & ifp->if_capenable &&
2773 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2774 mask &= ~IFCAP_TSO4;
2775 ifp->if_capenable &= ~IFCAP_TSO4;
2776 if_printf(ifp,
2777 "tso4 disabled due to -txcsum.\n");
2778 }
2779 }
2780 if (mask & IFCAP_TXCSUM_IPV6) {
2781 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2782 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2783
2784 if (IFCAP_TSO6 & ifp->if_capenable &&
2785 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2786 mask &= ~IFCAP_TSO6;
2787 ifp->if_capenable &= ~IFCAP_TSO6;
2788 if_printf(ifp,
2789 "tso6 disabled due to -txcsum6.\n");
2790 }
2791 }
2792 if (mask & IFCAP_RXCSUM)
2793 ifp->if_capenable ^= IFCAP_RXCSUM;
2794 if (mask & IFCAP_RXCSUM_IPV6)
2795 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2796
2797 /*
2798 * Note that we leave CSUM_TSO alone (it is always set). The
2799 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
2800 * sending a TSO request our way, so it's sufficient to toggle
2801 * IFCAP_TSOx only.
2802 */
2803 if (mask & IFCAP_TSO4) {
2804 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2805 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2806 if_printf(ifp, "enable txcsum first.\n");
2807 rc = EAGAIN;
2808 goto fail;
2809 }
2810 ifp->if_capenable ^= IFCAP_TSO4;
2811 }
2812 if (mask & IFCAP_TSO6) {
2813 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2814 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2815 if_printf(ifp, "enable txcsum6 first.\n");
2816 rc = EAGAIN;
2817 goto fail;
2818 }
2819 ifp->if_capenable ^= IFCAP_TSO6;
2820 }
2821 if (mask & IFCAP_LRO) {
2822#if defined(INET) || defined(INET6)
2823 int i;
2824 struct sge_rxq *rxq;
2825
2826 ifp->if_capenable ^= IFCAP_LRO;
2827 for_each_rxq(vi, i, rxq) {
2828 if (ifp->if_capenable & IFCAP_LRO)
2829 rxq->iq.flags |= IQ_LRO_ENABLED;
2830 else
2831 rxq->iq.flags &= ~IQ_LRO_ENABLED;
2832 }
2833#endif
2834 }
2835#ifdef TCP_OFFLOAD
2836 if (mask & IFCAP_TOE) {
2837 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
2838
2839 rc = toe_capability(vi, enable);
2840 if (rc != 0)
2841 goto fail;
2842
2843 ifp->if_capenable ^= mask;
2844 }
2845#endif
2846 if (mask & IFCAP_VLAN_HWTAGGING) {
2847 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2848 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2849 rc = update_mac_settings(ifp, XGMAC_VLANEX);
2850 }
2851 if (mask & IFCAP_VLAN_MTU) {
2852 ifp->if_capenable ^= IFCAP_VLAN_MTU;
2853
2854 /* Need to find out how to disable auto-mtu-inflation */
2855 }
2856 if (mask & IFCAP_VLAN_HWTSO)
2857 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2858 if (mask & IFCAP_VLAN_HWCSUM)
2859 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2860#ifdef RATELIMIT
2861 if (mask & IFCAP_TXRTLMT)
2862 ifp->if_capenable ^= IFCAP_TXRTLMT;
2863#endif
2864 if (mask & IFCAP_HWRXTSTMP) {
2865 int i;
2866 struct sge_rxq *rxq;
2867
2868 ifp->if_capenable ^= IFCAP_HWRXTSTMP;
2869 for_each_rxq(vi, i, rxq) {
2870 if (ifp->if_capenable & IFCAP_HWRXTSTMP)
2871 rxq->iq.flags |= IQ_RX_TIMESTAMP;
2872 else
2873 rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
2874 }
2875 }
2876 if (mask & IFCAP_MEXTPG)
2877 ifp->if_capenable ^= IFCAP_MEXTPG;
2878
2879#ifdef KERN_TLS
2880 if (mask & IFCAP_TXTLS) {
2881 int enable = (ifp->if_capenable ^ mask) & IFCAP_TXTLS;
2882
2883 rc = ktls_capability(sc, enable);
2884 if (rc != 0)
2885 goto fail;
2886
2887 ifp->if_capenable ^= (mask & IFCAP_TXTLS);
2888 }
2889#endif
2890 if (mask & IFCAP_VXLAN_HWCSUM) {
2891 ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM;
2892 ifp->if_hwassist ^= CSUM_INNER_IP6_UDP |
2893 CSUM_INNER_IP6_TCP | CSUM_INNER_IP |
2894 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP;
2895 }
2896 if (mask & IFCAP_VXLAN_HWTSO) {
2897 ifp->if_capenable ^= IFCAP_VXLAN_HWTSO;
2898 ifp->if_hwassist ^= CSUM_INNER_IP6_TSO |
2899 CSUM_INNER_IP_TSO;
2900 }
2901
2902#ifdef VLAN_CAPABILITIES
2903 VLAN_CAPABILITIES(ifp);
2904#endif
2905fail:
2906 end_synchronized_op(sc, 0);
2907 break;
2908
2909 case SIOCSIFMEDIA:
2910 case SIOCGIFMEDIA:
2911 case SIOCGIFXMEDIA:
2912 rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
2913 break;
2914
2915 case SIOCGI2C: {
2916 struct ifi2creq i2c;
2917
2918 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2919 if (rc != 0)
2920 break;
2921 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
2922 rc = EPERM;
2923 break;
2924 }
2925 if (i2c.len > sizeof(i2c.data)) {
2926 rc = EINVAL;
2927 break;
2928 }
2929 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
2930 if (rc)
2931 return (rc);
2932 if (hw_off_limits(sc))
2933 rc = ENXIO;
2934 else
2935 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
2936 i2c.offset, i2c.len, &i2c.data[0]);
2937 end_synchronized_op(sc, 0);
2938 if (rc == 0)
2939 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2940 break;
2941 }
2942
2943 default:
2944 rc = ether_ioctl(ifp, cmd, data);
2945 }
2946
2947 return (rc);
2948}
2949
2950static int
2951cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
2952{
2953 struct vi_info *vi = ifp->if_softc;
2954 struct port_info *pi = vi->pi;
2955 struct adapter *sc;
2956 struct sge_txq *txq;
2957 void *items[1];
2958 int rc;
2959
2960 M_ASSERTPKTHDR(m);
2961 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
2962#if defined(KERN_TLS) || defined(RATELIMIT)
2963 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
2964 MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
2965#endif
2966
2967 if (__predict_false(pi->link_cfg.link_ok == false)) {
2968 m_freem(m);
2969 return (ENETDOWN);
2970 }
2971
2972 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR);
2973 if (__predict_false(rc != 0)) {
2974 MPASS(m == NULL); /* was freed already */
2975 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
2976 return (rc);
2977 }
2978#ifdef RATELIMIT
2979 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
2980 if (m->m_pkthdr.snd_tag->sw->type == IF_SND_TAG_TYPE_RATE_LIMIT)
2981 return (ethofld_transmit(ifp, m));
2982 }
2983#endif
2984
2985 /* Select a txq. */
2986 sc = vi->adapter;
2987 txq = &sc->sge.txq[vi->first_txq];
2988 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2989 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
2990 vi->rsrv_noflowq);
2991
2992 items[0] = m;
2993 rc = mp_ring_enqueue(txq->r, items, 1, 256);
2994 if (__predict_false(rc != 0))
2995 m_freem(m);
2996
2997 return (rc);
2998}
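/*
 * The txq selection above, written out: a packet without a flow id always
 * uses the VI's first txq, while hashed flows skip the first rsrv_noflowq
 * queues and always land on the same queue.  E.g. with ntxq = 8 and
 * rsrv_noflowq = 1:
 *
 *	idx = (m->m_pkthdr.flowid % (8 - 1)) + 1;	(idx in 1..7)
 */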
2999
3000static void
3001cxgbe_qflush(struct ifnet *ifp)
3002{
3003 struct vi_info *vi = ifp->if_softc;
3004 struct sge_txq *txq;
3005 int i;
3006
3007 /* queues do not exist if !VI_INIT_DONE. */
3008 if (vi->flags & VI_INIT_DONE) {
3009 for_each_txq(vi, i, txq) {
3010 TXQ_LOCK(txq);
3011 txq->eq.flags |= EQ_QFLUSH;
3012 TXQ_UNLOCK(txq);
3013 while (!mp_ring_is_idle(txq->r)) {
3014 mp_ring_check_drainage(txq->r, 4096);
3015 pause("qflush", 1);
3016 }
3017 TXQ_LOCK(txq);
3018 txq->eq.flags &= ~EQ_QFLUSH;
3019 TXQ_UNLOCK(txq);
3020 }
3021 }
3022 if_qflush(ifp);
3023}
3024
3025static uint64_t
3026vi_get_counter(struct ifnet *ifp, ift_counter c)
3027{
3028 struct vi_info *vi = ifp->if_softc;
3029 struct fw_vi_stats_vf *s = &vi->stats;
3030
3031 mtx_lock(&vi->tick_mtx);
3032 vi_refresh_stats(vi);
3033 mtx_unlock(&vi->tick_mtx);
3034
3035 switch (c) {
3036 case IFCOUNTER_IPACKETS:
3037 return (s->rx_bcast_frames + s->rx_mcast_frames +
3038 s->rx_ucast_frames);
3039 case IFCOUNTER_IERRORS:
3040 return (s->rx_err_frames);
3041 case IFCOUNTER_OPACKETS:
3042 return (s->tx_bcast_frames + s->tx_mcast_frames +
3043 s->tx_ucast_frames + s->tx_offload_frames);
3044 case IFCOUNTER_OERRORS:
3045 return (s->tx_drop_frames);
3046 case IFCOUNTER_IBYTES:
3047 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
3048 s->rx_ucast_bytes);
3049 case IFCOUNTER_OBYTES:
3050 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
3051 s->tx_ucast_bytes + s->tx_offload_bytes);
3052 case IFCOUNTER_IMCASTS:
3053 return (s->rx_mcast_frames);
3054 case IFCOUNTER_OMCASTS:
3055 return (s->tx_mcast_frames);
3056 case IFCOUNTER_OQDROPS: {
3057 uint64_t drops;
3058
3059 drops = 0;
3060 if (vi->flags & VI_INIT_DONE) {
3061 int i;
3062 struct sge_txq *txq;
3063
3064 for_each_txq(vi, i, txq)
3065 drops += counter_u64_fetch(txq->r->dropped);
3066 }
3067
3068 return (drops);
3069
3070 }
3071
3072 default:
3073 return (if_get_counter_default(ifp, c));
3074 }
3075}
3076
3077static uint64_t
3078cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
3079{
3080 struct vi_info *vi = ifp->if_softc;
3081 struct port_info *pi = vi->pi;
3082 struct port_stats *s = &pi->stats;
3083
3084 mtx_lock(&vi->tick_mtx);
3085 cxgbe_refresh_stats(vi);
3086 mtx_unlock(&vi->tick_mtx);
3087
3088 switch (c) {
3089 case IFCOUNTER_IPACKETS:
3090 return (s->rx_frames);
3091
3092 case IFCOUNTER_IERRORS:
3093 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
3094 s->rx_fcs_err + s->rx_len_err);
3095
3096 case IFCOUNTER_OPACKETS:
3097 return (s->tx_frames);
3098
3099 case IFCOUNTER_OERRORS:
3100 return (s->tx_error_frames);
3101
3102 case IFCOUNTER_IBYTES:
3103 return (s->rx_octets);
3104
3105 case IFCOUNTER_OBYTES:
3106 return (s->tx_octets);
3107
3108 case IFCOUNTER_IMCASTS:
3109 return (s->rx_mcast_frames);
3110
3111 case IFCOUNTER_OMCASTS:
3112 return (s->tx_mcast_frames);
3113
3114 case IFCOUNTER_IQDROPS:
3115 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3116 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3117 s->rx_trunc3 + pi->tnl_cong_drops);
3118
3119 case IFCOUNTER_OQDROPS: {
3120 uint64_t drops;
3121
3122 drops = s->tx_drop;
3123 if (vi->flags & VI_INIT_DONE) {
3124 int i;
3125 struct sge_txq *txq;
3126
3127 for_each_txq(vi, i, txq)
3128 drops += counter_u64_fetch(txq->r->dropped);
3129 }
3130
3131 return (drops);
3132
3133 }
3134
3135 default:
3136 return (if_get_counter_default(ifp, c));
3137 }
3138}
3139
3140#if defined(KERN_TLS) || defined(RATELIMIT)
3141static int
3142cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
3143 struct m_snd_tag **pt)
3144{
3145 int error;
3146
3147 switch (params->hdr.type) {
3148#ifdef RATELIMIT
3149 case IF_SND_TAG_TYPE_RATE_LIMIT:
3150 error = cxgbe_rate_tag_alloc(ifp, params, pt);
3151 break;
3152#endif
3153#ifdef KERN_TLS
3154 case IF_SND_TAG_TYPE_TLS:
3155 error = cxgbe_tls_tag_alloc(ifp, params, pt);
3156 break;
3157#endif
3158 default:
3159 error = EOPNOTSUPP;
3160 }
3161 return (error);
3162}
3163#endif
3164
3165/*
3166 * The kernel picks a media from the list we provided but we still validate
3167 * the request.
3168 */
3169int
3170cxgbe_media_change(struct ifnet *ifp)
3171{
3172 struct vi_info *vi = ifp->if_softc;
3173 struct port_info *pi = vi->pi;
3174 struct ifmedia *ifm = &pi->media;
3175 struct link_config *lc = &pi->link_cfg;
3176 struct adapter *sc = pi->adapter;
3177 int rc;
3178
3179 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
3180 if (rc != 0)
3181 return (rc);
3182 PORT_LOCK(pi);
3183 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
3184 /* ifconfig .. media autoselect */
3185 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
3186 rc = ENOTSUP; /* AN not supported by transceiver */
3187 goto done;
3188 }
3189 lc->requested_aneg = AUTONEG_ENABLE;
3190 lc->requested_speed = 0;
3191 lc->requested_fc |= PAUSE_AUTONEG;
3192 } else {
3193 lc->requested_aneg = AUTONEG_DISABLE;
3194 lc->requested_speed =
3195 ifmedia_baudrate(ifm->ifm_media) / 1000000;
3196 lc->requested_fc = 0;
3197 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
3198 lc->requested_fc |= PAUSE_RX;
3199 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
3200 lc->requested_fc |= PAUSE_TX;
3201 }
3202 if (pi->up_vis > 0 && !hw_off_limits(sc)) {
3203 fixup_link_config(pi);
3204 rc = apply_link_config(pi);
3205 }
3206done:
3207 PORT_UNLOCK(pi);
3208 end_synchronized_op(sc, 0);
3209 return (rc);
3210}
3211
3212/*
3213 * Base media word (without ETHER, pause, link active, etc.) for the port at the
3214 * given speed.
3215 */
3216static int
3217port_mword(struct port_info *pi, uint32_t speed)
3218{
3219
3220 MPASS(speed & M_FW_PORT_CAP32_SPEED);
3221 MPASS(powerof2(speed));
3222
3223 switch(pi->port_type) {
3224 case FW_PORT_TYPE_BT_SGMII:
3225 case FW_PORT_TYPE_BT_XFI:
3226 case FW_PORT_TYPE_BT_XAUI:
3227 /* BaseT */
3228 switch (speed) {
3229 case FW_PORT_CAP32_SPEED_100M:
3230 return (IFM_100_T);
3231 case FW_PORT_CAP32_SPEED_1G:
3232 return (IFM_1000_T);
3233 case FW_PORT_CAP32_SPEED_10G:
3234 return (IFM_10G_T);
3235 }
3236 break;
3237 case FW_PORT_TYPE_KX4:
3238 if (speed == FW_PORT_CAP32_SPEED_10G)
3239 return (IFM_10G_KX4);
3240 break;
3241 case FW_PORT_TYPE_CX4:
3242 if (speed == FW_PORT_CAP32_SPEED_10G)
3243 return (IFM_10G_CX4);
3244 break;
3245 case FW_PORT_TYPE_KX:
3246 if (speed == FW_PORT_CAP32_SPEED_1G)
3247 return (IFM_1000_KX);
3248 break;
3249 case FW_PORT_TYPE_KR:
3250 case FW_PORT_TYPE_BP_AP:
3251 case FW_PORT_TYPE_BP4_AP:
3252 case FW_PORT_TYPE_BP40_BA:
3253 case FW_PORT_TYPE_KR4_100G:
3254 case FW_PORT_TYPE_KR_SFP28:
3255 case FW_PORT_TYPE_KR_XLAUI:
3256 switch (speed) {
3257 case FW_PORT_CAP32_SPEED_1G:
3258 return (IFM_1000_KX);
3259 case FW_PORT_CAP32_SPEED_10G:
3260 return (IFM_10G_KR);
3261 case FW_PORT_CAP32_SPEED_25G:
3262 return (IFM_25G_KR);
3263 case FW_PORT_CAP32_SPEED_40G:
3264 return (IFM_40G_KR4);
3265 case FW_PORT_CAP32_SPEED_50G:
3266 return (IFM_50G_KR2);
3267 case FW_PORT_CAP32_SPEED_100G:
3268 return (IFM_100G_KR4);
3269 }
3270 break;
3271 case FW_PORT_TYPE_FIBER_XFI:
3272 case FW_PORT_TYPE_FIBER_XAUI:
3273 case FW_PORT_TYPE_SFP:
3274 case FW_PORT_TYPE_QSFP_10G:
3275 case FW_PORT_TYPE_QSA:
3276 case FW_PORT_TYPE_QSFP:
3277 case FW_PORT_TYPE_CR4_QSFP:
3278 case FW_PORT_TYPE_CR_QSFP:
3279 case FW_PORT_TYPE_CR2_QSFP:
3280 case FW_PORT_TYPE_SFP28:
3281 /* Pluggable transceiver */
3282 switch (pi->mod_type) {
3283 case FW_PORT_MOD_TYPE_LR:
3284 switch (speed) {
3285 case FW_PORT_CAP32_SPEED_1G:
3286 return (IFM_1000_LX);
3287 case FW_PORT_CAP32_SPEED_10G:
3288 return (IFM_10G_LR);
3289 case FW_PORT_CAP32_SPEED_25G:
3290 return (IFM_25G_LR);
3291 case FW_PORT_CAP32_SPEED_40G:
3292 return (IFM_40G_LR4);
3293 case FW_PORT_CAP32_SPEED_50G:
3294 return (IFM_50G_LR2);
3295 case FW_PORT_CAP32_SPEED_100G:
3296 return (IFM_100G_LR4);
3297 }
3298 break;
3299 case FW_PORT_MOD_TYPE_SR:
3300 switch (speed) {
3301 case FW_PORT_CAP32_SPEED_1G:
3302 return (IFM_1000_SX);
3303 case FW_PORT_CAP32_SPEED_10G:
3304 return (IFM_10G_SR);
3305 case FW_PORT_CAP32_SPEED_25G:
3306 return (IFM_25G_SR);
3307 case FW_PORT_CAP32_SPEED_40G:
3308 return (IFM_40G_SR4);
3309 case FW_PORT_CAP32_SPEED_50G:
3310 return (IFM_50G_SR2);
3311 case FW_PORT_CAP32_SPEED_100G:
3312 return (IFM_100G_SR4);
3313 }
3314 break;
3315 case FW_PORT_MOD_TYPE_ER:
3316 if (speed == FW_PORT_CAP32_SPEED_10G)
3317 return (IFM_10G_ER);
3318 break;
3319 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3320 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3321 switch (speed) {
3322 case FW_PORT_CAP32_SPEED_1G:
3323 return (IFM_1000_CX);
3324 case FW_PORT_CAP32_SPEED_10G:
3325 return (IFM_10G_TWINAX);
3326 case FW_PORT_CAP32_SPEED_25G:
3327 return (IFM_25G_CR);
3328 case FW_PORT_CAP32_SPEED_40G:
3329 return (IFM_40G_CR4);
3330 case FW_PORT_CAP32_SPEED_50G:
3331 return (IFM_50G_CR2);
3332 case FW_PORT_CAP32_SPEED_100G:
3333 return (IFM_100G_CR4);
3334 }
3335 break;
3336 case FW_PORT_MOD_TYPE_LRM:
3337 if (speed == FW_PORT_CAP32_SPEED_10G)
3338 return (IFM_10G_LRM);
3339 break;
3340 case FW_PORT_MOD_TYPE_NA:
3341 MPASS(0); /* Not pluggable? */
3342 /* fall through */
3343 case FW_PORT_MOD_TYPE_ERROR:
3344 case FW_PORT_MOD_TYPE_UNKNOWN:
3345 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
3346 break;
3347 case FW_PORT_MOD_TYPE_NONE:
3348 return (IFM_NONE);
3349 }
3350 break;
3351 case FW_PORT_TYPE_NONE:
3352 return (IFM_NONE);
3353 }
3354
3355 return (IFM_UNKNOWN);
3356}
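/*
 * port_mword maps exactly one FW_PORT_CAP32_SPEED_* bit (the MPASSes at the
 * top require a single power-of-two bit) plus the port and module type to an
 * ifmedia subtype; anything the stack has no word for comes back as
 * IFM_UNKNOWN, and an absent or empty port as IFM_NONE.
 */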
3357
3358void
3359cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3360{
3361 struct vi_info *vi = ifp->if_softc;
3362 struct port_info *pi = vi->pi;
3363 struct adapter *sc = pi->adapter;
3364 struct link_config *lc = &pi->link_cfg;
3365
3366 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
3367 return;
3368 PORT_LOCK(pi);
3369
3370 if (pi->up_vis == 0 && !hw_off_limits(sc)) {
3371 /*
3372 * If all the interfaces are administratively down the firmware
3373 * does not report transceiver changes. Refresh port info here
3374 * so that ifconfig displays accurate ifmedia at all times.
3375 * This is the only reason we have a synchronized op in this
3376 * function. Just PORT_LOCK would have been enough otherwise.
3377 */
3378 t4_update_port_info(pi);
3379 build_medialist(pi);
3380 }
3381
3382 /* ifm_status */
3383 ifmr->ifm_status = IFM_AVALID;
3384 if (lc->link_ok == false)
3385 goto done;
3386 ifmr->ifm_status |= IFM_ACTIVE;
3387
3388 /* ifm_active */
3389 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3390 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
3391 if (lc->fc & PAUSE_RX)
3392 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3393 if (lc->fc & PAUSE_TX)
3394 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3395 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
3396done:
3397 PORT_UNLOCK(pi);
3398 end_synchronized_op(sc, 0);
3399}
3400
3401static int
3402vcxgbe_probe(device_t dev)
3403{
3404 char buf[128];
3405 struct vi_info *vi = device_get_softc(dev);
3406
3407 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
3408 vi - vi->pi->vi);
3409 device_set_desc_copy(dev, buf);
3410
3411 return (BUS_PROBE_DEFAULT);
3412}
3413
3414static int
3415alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
3416{
3417 int func, index, rc;
3418 uint32_t param, val;
3419
3420 ASSERT_SYNCHRONIZED_OP(sc);
3421
3422 index = vi - pi->vi;
3423 MPASS(index > 0); /* This function deals with _extra_ VIs only */
3424 KASSERT(index < nitems(vi_mac_funcs),
3425 ("%s: VI %s doesn't have a MAC func", __func__,
3426 device_get_nameunit(vi->dev)));
3427 func = vi_mac_funcs[index];
3428 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
3429 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
3430 if (rc < 0) {
3431 CH_ERR(vi, "failed to allocate virtual interface %d"
3432 "for port %d: %d\n", index, pi->port_id, -rc);
3433 return (-rc);
3434 }
3435 vi->viid = rc;
3436
3437 if (vi->rss_size == 1) {
3438 /*
3439 * This VI didn't get a slice of the RSS table. Reduce the
3440 * number of VIs being created (hw.cxgbe.num_vis) or modify the
3441 * configuration file (nvi, rssnvi for this PF) if this is a
3442 * problem.
3443 */
3444 device_printf(vi->dev, "RSS table not available.\n");
3445 vi->rss_base = 0xffff;
3446
3447 return (0);
3448 }
3449
3450 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3451 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
3452 V_FW_PARAMS_PARAM_YZ(vi->viid);
3453 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3454 if (rc)
3455 vi->rss_base = 0xffff;
3456 else {
3457 MPASS((val >> 16) == vi->rss_size);
3458 vi->rss_base = val & 0xffff;
3459 }
3460
3461 return (0);
3462}
3463
3464static int
3465vcxgbe_attach(device_t dev)
3466{
3467 struct vi_info *vi;
3468 struct port_info *pi;
3469 struct adapter *sc;
3470 int rc;
3471
3472 vi = device_get_softc(dev);
3473 pi = vi->pi;
3474 sc = pi->adapter;
3475
3476 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
3477 if (rc)
3478 return (rc);
3479 rc = alloc_extra_vi(sc, pi, vi);
3480 end_synchronized_op(sc, 0);
3481 if (rc)
3482 return (rc);
3483
3484 rc = cxgbe_vi_attach(dev, vi);
3485 if (rc) {
3486 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3487 return (rc);
3488 }
3489 return (0);
3490}
3491
3492static int
3493vcxgbe_detach(device_t dev)
3494{
3495 struct vi_info *vi;
3496 struct adapter *sc;
3497
3498 vi = device_get_softc(dev);
3499 sc = vi->adapter;
3500
3501 doom_vi(sc, vi);
3502
3503 cxgbe_vi_detach(vi);
3504 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3505
3506 end_synchronized_op(sc, 0);
3507
3508 return (0);
3509}
3510
3511static struct callout fatal_callout;
3512static struct taskqueue *reset_tq;
3513
3514static void
3515delayed_panic(void *arg)
3516{
3517 struct adapter *sc = arg;
3518
3519 panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
3520}
3521
3522static void
3523fatal_error_task(void *arg, int pending)
3524{
3525 struct adapter *sc = arg;
3526 int rc;
3527
3528#ifdef TCP_OFFLOAD
3529 t4_async_event(sc);
3530#endif
3531 if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
3532 dump_cim_regs(sc);
3533 dump_cimla(sc);
3534 dump_devlog(sc);
3535 }
3536
3537 if (t4_reset_on_fatal_err) {
3538 CH_ALERT(sc, "resetting on fatal error.\n");
3539 rc = reset_adapter(sc);
3540 if (rc == 0 && t4_panic_on_fatal_err) {
3541 CH_ALERT(sc, "reset was successful, "
3542 "system will NOT panic.\n");
3543 return;
3544 }
3545 }
3546
3547 if (t4_panic_on_fatal_err) {
3548 CH_ALERT(sc, "panicking on fatal error (after 30s).\n");
3549 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
3550 }
3551}
3552
3553void
3554t4_fatal_err(struct adapter *sc, bool fw_error)
3555{
3556 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
3557
3558 stop_adapter(sc);
3559 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
3560 return;
3561 if (fw_error) {
3562 /*
3563 * We are here because of a firmware error/timeout and not
3564 * because of a hardware interrupt. It is possible (although
3565 * not very likely) that an error interrupt was also raised but
3566 * this thread ran first and inhibited t4_intr_err. We walk the
3567 * main INT_CAUSE registers here to make sure we haven't missed
3568 * anything interesting.
3569 */
3570 t4_slow_intr_handler(sc, verbose);
3571 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
3572 }
3574 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n",
3575 device_get_nameunit(sc->dev), fw_error);
3576 taskqueue_enqueue(reset_tq, &sc->fatal_error_task);
3577}
3578
3579void
3580t4_add_adapter(struct adapter *sc)
3581{
3582 sx_xlock(&t4_list_lock);
3583 SLIST_INSERT_HEAD(&t4_list, sc, link);
3584 sx_xunlock(&t4_list_lock);
3585}
3586
3587int
3588t4_map_bars_0_and_4(struct adapter *sc)
3589{
3590 sc->regs_rid = PCIR_BAR(0);
3591 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3592 &sc->regs_rid, RF_ACTIVE);
3593 if (sc->regs_res == NULL) {
3594 device_printf(sc->dev, "cannot map registers.\n");
3595 return (ENXIO);
3596 }
3597 sc->bt = rman_get_bustag(sc->regs_res);
3598 sc->bh = rman_get_bushandle(sc->regs_res);
3599 sc->mmio_len = rman_get_size(sc->regs_res);
3600 setbit(&sc->doorbells, DOORBELL_KDB);
3601
3602 sc->msix_rid = PCIR_BAR(4);
3603 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3604 &sc->msix_rid, RF_ACTIVE);
3605 if (sc->msix_res == NULL) {
3606 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
3607 return (ENXIO);
3608 }
3609
3610 return (0);
3611}
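/*
 * BAR0 carries the register file (including the kernel doorbell region) and
 * BAR4 the MSI-X tables; BAR2, the user-space doorbell area, is mapped
 * separately below because a T4 with RDMA disabled never needs it.
 */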
3612
3613int
3614t4_map_bar_2(struct adapter *sc)
3615{
3616
3617 /*
3618 * T4: only iWARP driver uses the userspace doorbells. There is no need
3619 * to map it if RDMA is disabled.
3620 */
3621 if (is_t4(sc) && sc->rdmacaps == 0)
3622 return (0);
3623
3624 sc->udbs_rid = PCIR_BAR(2);
3625 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3626 &sc->udbs_rid, RF_ACTIVE);
3627 if (sc->udbs_res == NULL) {
3628 device_printf(sc->dev, "cannot map doorbell BAR.\n");
3629 return (ENXIO);
3630 }
3631 sc->udbs_base = rman_get_virtual(sc->udbs_res);
3632
3633 if (chip_id(sc) >= CHELSIO_T5) {
3634 setbit(&sc->doorbells, DOORBELL_UDB);
3635#if defined(__i386__) || defined(__amd64__)
3636 if (t5_write_combine) {
3637 int rc, mode;
3638
3639 /*
3640 * Enable write combining on BAR2. This is the
3641 * userspace doorbell BAR and is split into 128B
3642 * (UDBS_SEG_SIZE) doorbell regions, each associated
3643 * with an egress queue. The first 64B has the doorbell
3644 * and the second 64B can be used to submit a tx work
3645 * request with an implicit doorbell.
3646 */
3647
3648 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
3649 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
3650 if (rc == 0) {
3651 clrbit(&sc->doorbells, DOORBELL_UDB);
3652 setbit(&sc->doorbells, DOORBELL_WCWR);
3653 setbit(&sc->doorbells, DOORBELL_UDBWC);
3654 } else {
3655 device_printf(sc->dev,
3656 "couldn't enable write combining: %d\n",
3657 rc);
3658 }
3659
3660 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
3661 t4_write_reg(sc, A_SGE_STAT_CFG,
3662 V_STATSOURCE_T5(7) | mode);
3663 }
3664#endif
3665 }
3666 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
3667
3668 return (0);
3669}
3670
3671struct memwin_init {
3672 uint32_t base;
3673 uint32_t aperture;
3674};
3675
3676static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
3677 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3678 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3679 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
3680};
3681
3682static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
3683 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3684 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3685 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }
3686};
3687
3688static void
3689setup_memwin(struct adapter *sc)
3690{
3691 const struct memwin_init *mw_init;
3692 struct memwin *mw;
3693 int i;
3694 uint32_t bar0;
3695
3696 if (is_t4(sc)) {
3697 /*
3698 * Read low 32b of bar0 indirectly via the hardware backdoor
3699 * mechanism. Works from within PCI passthrough environments
3700 * too, where rman_get_start() can return a different value. We
3701 * need to program the T4 memory window decoders with the actual
3702 * addresses that will be coming across the PCIe link.
3703 */
3704 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
3705 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
3706
3707 mw_init = &t4_memwin[0];
3708 } else {
3709 /* T5+ use the relative offset inside the PCIe BAR */
3710 bar0 = 0;
3711
3712 mw_init = &t5_memwin[0];
3713 }
3714
3715 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
3716 if (!rw_initialized(&mw->mw_lock)) {
3717 rw_init(&mw->mw_lock, "memory window access");
3718 mw->mw_base = mw_init->base;
3719 mw->mw_aperture = mw_init->aperture;
3720 mw->mw_curpos = 0;
3721 }
3722 t4_write_reg(sc,
3723 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
3724 (mw->mw_base + bar0) | V_BIR(0) |
3725 V_WINDOW(ilog2(mw->mw_aperture) - 10));
3726 rw_wlock(&mw->mw_lock);
3727 position_memwin(sc, i, mw->mw_curpos);
3728 rw_wunlock(&mw->mw_lock);
3729 }
3730
3731 /* flush */
3732 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
3733}
3734
3735/*
3736 * Positions the memory window at the given address in the card's address space.
3737 * There are some alignment requirements and the actual position may be at an
3738 * address prior to the requested address. mw->mw_curpos always has the actual
3739 * position of the window.
3740 */
3741static void
3742position_memwin(struct adapter *sc, int idx, uint32_t addr)
3743{
3744 struct memwin *mw;
3745 uint32_t pf;
3746 uint32_t reg;
3747
3748 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3749 mw = &sc->memwin[idx];
3750 rw_assert(&mw->mw_lock, RA_WLOCKED);
3751
3752 if (is_t4(sc)) {
3753 pf = 0;
3754 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
3755 } else {
3756 pf = V_PFNUM(sc->pf);
3757 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
3758 }
3758 }
3759 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
3760 t4_write_reg(sc, reg, mw->mw_curpos | pf);
3761 t4_read_reg(sc, reg); /* flush */
3762}
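/*
 * Callers must hold mw_lock write-locked across position_memwin.  The window
 * start is rounded down to the hardware alignment (16B on T4, 128B on T5+),
 * which is why readers below always offset from mw_curpos rather than from
 * the address they originally asked for.
 */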
3763
3764int
3765rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
3766 int len, int rw)
3767{
3768 struct memwin *mw;
3769 uint32_t mw_end, v;
3770
3771 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3772
3773 /* Memory can only be accessed in naturally aligned 4 byte units */
3774 if (addr & 3 || len & 3 || len <= 0)
3775 return (EINVAL);
3776
3777 mw = &sc->memwin[idx];
3778 while (len > 0) {
3779 rw_rlock(&mw->mw_lock);
3780 mw_end = mw->mw_curpos + mw->mw_aperture;
3781 if (addr >= mw_end || addr < mw->mw_curpos) {
3782 /* Will need to reposition the window */
3783 if (!rw_try_upgrade(&mw->mw_lock)) {
3784 rw_runlock(&mw->mw_lock);
3785 rw_wlock(&mw->mw_lock);
3786 }
3787 rw_assert(&mw->mw_lock, RA_WLOCKED);
3788 position_memwin(sc, idx, addr);
3789 rw_downgrade(&mw->mw_lock);
3790 mw_end = mw->mw_curpos + mw->mw_aperture;
3791 }
3792 rw_assert(&mw->mw_lock, RA_RLOCKED);
3793 while (addr < mw_end && len > 0) {
3794 if (rw == 0) {
3795 v = t4_read_reg(sc, mw->mw_base + addr -
3796 mw->mw_curpos);
3797 *val++ = le32toh(v);
3798 } else {
3799 v = *val++;
3800 t4_write_reg(sc, mw->mw_base + addr -
3801 mw->mw_curpos, htole32(v));
3802 }
3803 addr += 4;
3804 len -= 4;
3805 }
3806 rw_runlock(&mw->mw_lock);
3807 }
3808
3809 return (0);
3810}
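/*
 * A minimal sketch of a read through window 0 (the address and buffer here
 * are illustrative only, not from this file):
 *
 *	uint32_t buf[16];
 *
 *	if (rw_via_memwin(sc, 0, 0x10000, buf, sizeof(buf), 0) == 0) {
 *		(buf now holds 64 bytes of card memory, converted to host
 *		order by the le32toh in the read path above)
 *	}
 *
 * Writes go the other way (rw != 0) and are byte-swapped with htole32.
 */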
3811
3812static void
3813t4_init_atid_table(struct adapter *sc)
3814{
3815 struct tid_info *t;
3816 int i;
3817
3818 t = &sc->tids;
3819 if (t->natids == 0)
3820 return;
3821
3822 MPASS(t->atid_tab == NULL);
3823
3824 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
3825 M_ZERO | M_WAITOK);
3826 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
3827 t->afree = t->atid_tab;
3828 t->atids_in_use = 0;
3829 for (i = 1; i < t->natids; i++)
3830 t->atid_tab[i - 1].next = &t->atid_tab[i];
3831 t->atid_tab[t->natids - 1].next = NULL;
3832}
3833
3834static void
3835t4_free_atid_table(struct adapter *sc)
3836{
3837 struct tid_info *t;
3838
3839 t = &sc->tids;
3840
3841 KASSERT(t->atids_in_use == 0,
3842 ("%s: %d atids still in use.", __func__, t->atids_in_use));
3843
3844 if (mtx_initialized(&t->atid_lock))
3845 mtx_destroy(&t->atid_lock);
3846 free(t->atid_tab, M_CXGBE);
3847 t->atid_tab = NULL;
3848}
3849
3850int
3851alloc_atid(struct adapter *sc, void *ctx)
3852{
3853 struct tid_info *t = &sc->tids;
3854 int atid = -1;
3855
3856 mtx_lock(&t->atid_lock);
3857 if (t->afree) {
3858 union aopen_entry *p = t->afree;
3859
3860 atid = p - t->atid_tab;
3861 MPASS(atid <= M_TID_TID);
3862 t->afree = p->next;
3863 p->data = ctx;
3864 t->atids_in_use++;
3865 }
3866 mtx_unlock(&t->atid_lock);
3867 return (atid);
3868}
3869
3870void *
3871lookup_atid(struct adapter *sc, int atid)
3872{
3873 struct tid_info *t = &sc->tids;
3874
3875 return (t->atid_tab[atid].data);
3876}
3877
3878void
3879free_atid(struct adapter *sc, int atid)
3880{
3881 struct tid_info *t = &sc->tids;
3882 union aopen_entry *p = &t->atid_tab[atid];
3883
3884 mtx_lock(&t->atid_lock);
3885 p->next = t->afree;
3886 t->afree = p;
3887 t->atids_in_use--;
3888 mtx_unlock(&t->atid_lock);
3889}
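/*
 * Typical atid lifecycle, as a sketch (my_ctx stands in for whatever state
 * the caller wants back from lookup_atid; it is not a name from this file):
 *
 *	int atid = alloc_atid(sc, my_ctx);	(-1 if the table is full)
 *	...
 *	void *ctx = lookup_atid(sc, atid);	(returns my_ctx)
 *	free_atid(sc, atid);			(entry back on the free list)
 */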
3890
3891static void
3892queue_tid_release(struct adapter *sc, int tid)
3893{
3894
3895 CXGBE_UNIMPLEMENTED("deferred tid release");
3896}
3897
3898void
3899release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
3900{
3901 struct wrqe *wr;
3902 struct cpl_tid_release *req;
3903
3904 wr = alloc_wrqe(sizeof(*req), ctrlq);
3905 if (wr == NULL) {
3906 queue_tid_release(sc, tid); /* defer */
3907 return;
3908 }
3909 req = wrtod(wr);
3910
3911 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
3912
3913 t4_wrq_tx(sc, wr);
3914}
3915
3916static int
3917t4_range_cmp(const void *a, const void *b)
3918{
3919 return ((const struct t4_range *)a)->start -
3920 ((const struct t4_range *)b)->start;
3921}
3922
3923/*
3924 * Verify that the memory range specified by the addr/len pair is valid within
3925 * the card's address space.
3926 */
3927static int
3928validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
3929{
3930 struct t4_range mem_ranges[4], *r, *next;
3931 uint32_t em, addr_len;
3932 int i, n, remaining;
3933
3934 /* Memory can only be accessed in naturally aligned 4 byte units */
3935 if (addr & 3 || len & 3 || len == 0)
3936 return (EINVAL);
3937
3938 /* Enabled memories */
3939 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3940 
3941 r = &mem_ranges[0];
3942 n = 0;
3943 bzero(r, sizeof(mem_ranges));
3944 if (em & F_EDRAM0_ENABLE) {
3945 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3946 r->size = G_EDRAM0_SIZE(addr_len) << 20;
3947 if (r->size > 0) {
3948 r->start = G_EDRAM0_BASE(addr_len) << 20;
3949 if (addr >= r->start &&
3950 addr + len <= r->start + r->size)
3951 return (0);
3952 r++;
3953 n++;
3954 }
3955 }
3956 if (em & F_EDRAM1_ENABLE) {
3957 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3958 r->size = G_EDRAM1_SIZE(addr_len) << 20;
3959 if (r->size > 0) {
3960 r->start = G_EDRAM1_BASE(addr_len) << 20;
3961 if (addr >= r->start &&
3962 addr + len <= r->start + r->size)
3963 return (0);
3964 r++;
3965 n++;
3966 }
3967 }
3968 if (em & F_EXT_MEM_ENABLE) {
3969 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3970 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
3971 if (r->size > 0) {
3972 r->start = G_EXT_MEM_BASE(addr_len) << 20;
3973 if (addr >= r->start &&
3974 addr + len <= r->start + r->size)
3975 return (0);
3976 r++;
3977 n++;
3978 }
3979 }
3980 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
3981 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3982 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
3983 if (r->size > 0) {
3984 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
3985 if (addr >= r->start &&
3986 addr + len <= r->start + r->size)
3987 return (0);
3988 r++;
3989 n++;
3990 }
3991 }
3992 MPASS(n <= nitems(mem_ranges));
3993
3994 if (n > 1) {
3995 /* Sort and merge the ranges. */
3996 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
3997
3998 /* Start from index 0 and examine the next n - 1 entries. */
3999 r = &mem_ranges[0];
4000 for (remaining = n - 1; remaining > 0; remaining--, r++) {
4001
4002 MPASS(r->size > 0); /* r is a valid entry. */
4003 next = r + 1;
4004 MPASS(next->size > 0); /* and so is the next one. */
4005
4006 while (r->start + r->size >= next->start) {
4007 /* Merge the next one into the current entry. */
4008 r->size = max(r->start + r->size,
4009 next->start + next->size) - r->start;
4010 n--; /* One fewer entry in total. */
4011 if (--remaining == 0)
4012 goto done; /* short circuit */
4013 next++;
4014 }
4015 if (next != r + 1) {
4016 /*
4017 * Some entries were merged into r and next
4018 * points to the first valid entry that couldn't
4019 * be merged.
4020 */
4021 MPASS(next->size > 0); /* must be valid */
4022 memcpy(r + 1, next, remaining * sizeof(*r));
4023#ifdef INVARIANTS
4024 /*
4025 * This is so that the foo->size assertions in the
4026 * next iteration of the loop do the right
4027 * thing for entries that were pulled up and are
4028 * no longer valid.
4029 */
4030 MPASS(n < nitems(mem_ranges));
4031 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
4032 sizeof(struct t4_range));
4033#endif
4034 }
4035 }
4036done:
4037 /* Done merging the ranges. */
4038 MPASS(n > 0);
4039 r = &mem_ranges[0];
4040 for (i = 0; i < n; i++, r++) {
4041 if (addr >= r->start &&
4042 addr + len <= r->start + r->size)
4043 return (0);
4044 }
4045 }
4046
4047 return (EFAULT);
4048}
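/*
 * [Editor's note] Worked example of the merge above, with made-up sizes:
 * if EDC0 is [0, 256MB) and EDC1 is [256MB, 512MB), then after qsort the
 * first entry satisfies r->start + r->size >= next->start, so the two merge
 * into the single range [0, 512MB), and an access that straddles the 256MB
 * boundary validates even though it lies entirely within neither bank.
 */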
4049
4050 static int
4051 fwmtype_to_hwmtype(int mtype)
4052 {
4053
4054 switch (mtype) {
4055 case FW_MEMTYPE_EDC0:
4056 return (MEM_EDC0);
4057 case FW_MEMTYPE_EDC1:
4058 return (MEM_EDC1);
4059 case FW_MEMTYPE_EXTMEM:
4060 return (MEM_MC0);
4061 case FW_MEMTYPE_EXTMEM1:
4062 return (MEM_MC1);
4063 default:
4064 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
4065 }
4066}
4067
4068/*
4069 * Verify that the memory range specified by the memtype/offset/len pair is
4070 * valid and lies entirely within the memtype specified. The global address of
4071 * the start of the range is returned in addr.
4072 */
4073static int
4074validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
4075 uint32_t *addr)
4076{
4077 uint32_t em, addr_len, maddr;
4078
4079 /* Memory can only be accessed in naturally aligned 4 byte units */
4080 if (off & 3 || len & 3 || len == 0)
4081 return (EINVAL);
4082
4083 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4084 switch (fwmtype_to_hwmtype(mtype)) {
4085 case MEM_EDC0:
4086 if (!(em & F_EDRAM0_ENABLE))
4087 return (EINVAL);
4088 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4089 maddr = G_EDRAM0_BASE(addr_len) << 20;
4090 break;
4091 case MEM_EDC1:
4092 if (!(em & F_EDRAM1_ENABLE))
4093 return (EINVAL);
4094 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4095 maddr = G_EDRAM1_BASE(addr_len) << 20;
4096 break;
4097 case MEM_MC:
4098 if (!(em & F_EXT_MEM_ENABLE))
4099 return (EINVAL);
4100 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4101 maddr = G_EXT_MEM_BASE(addr_len) << 20;
4102 break;
4103 case MEM_MC1:
4104 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
4105 return (EINVAL);
4106 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
4107 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
4108 break;
4109 default:
4110 return (EINVAL);
4111 }
4112
4113 *addr = maddr + off; /* global address */
4114 return (validate_mem_range(sc, *addr, len));
4115}
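/*
 * [Editor's note] Illustrative sketch, not part of the driver: turning a
 * (memtype, offset) pair into a global address and then reading it through
 * a memory window.  The offset and length are made up.
 */
#if 0
	uint32_t buf[256], addr;
	int rc;

	rc = validate_mt_off_len(sc, FW_MEMTYPE_EDC0, 0x1000, sizeof(buf),
	    &addr);
	if (rc == 0)
		rc = rw_via_memwin(sc, 2, addr, buf, sizeof(buf), 0);
#endif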
4116
4117 static int
4118 fixup_devlog_params(struct adapter *sc)
4119 {
4120 struct devlog_params *dparams = &sc->params.devlog;
4121 int rc;
4122
4123 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
4124 dparams->size, &dparams->addr);
4125
4126 return (rc);
4127}
4128
4129static void
4130update_nirq(struct intrs_and_queues *iaq, int nports)
4131{
4132
4133 iaq->nirq = T4_EXTRA_INTR;
4134 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
4135 iaq->nirq += nports * iaq->nofldrxq;
4136 iaq->nirq += nports * (iaq->num_vis - 1) *
4137 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
4138 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
4139}
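/*
 * [Editor's note] Worked example for update_nirq() with made-up counts:
 * nports = 2, num_vis = 2, nrxq = 4, nnmrxq = 0, nofldrxq = 2,
 * nrxq_vi = 1, nnmrxq_vi = 0, nofldrxq_vi = 1 gives
 *   nirq = T4_EXTRA_INTR + 2 * max(4, 0) + 2 * 2 +
 *          2 * (2 - 1) * max(1, 0) + 2 * (2 - 1) * 1
 *        = T4_EXTRA_INTR + 16 interrupt vectors.
 */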
4140
4141/*
4142 * Adjust requirements to fit the number of interrupts available.
4143 */
4144static void
4145calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
4146 int navail)
4147{
4148 int old_nirq;
4149 const int nports = sc->params.nports;
4150
4151 MPASS(nports > 0);
4152 MPASS(navail > 0);
4153
4154 bzero(iaq, sizeof(*iaq));
4155 iaq->intr_type = itype;
4156 iaq->num_vis = t4_num_vis;
4157 iaq->ntxq = t4_ntxq;
4158 iaq->ntxq_vi = t4_ntxq_vi;
4159 iaq->nrxq = t4_nrxq;
4160 iaq->nrxq_vi = t4_nrxq_vi;
4161#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4162 if (is_offload(sc) || is_ethoffload(sc)) {
4163 iaq->nofldtxq = t4_nofldtxq;
4164 iaq->nofldtxq_vi = t4_nofldtxq_vi;
4165 }
4166#endif
4167#ifdef TCP_OFFLOAD
4168 if (is_offload(sc)) {
4169 iaq->nofldrxq = t4_nofldrxq;
4170 iaq->nofldrxq_vi = t4_nofldrxq_vi;
4171 }
4172#endif
4173#ifdef DEV_NETMAP
4174 if (t4_native_netmap & NN_MAIN_VI) {
4175 iaq->nnmtxq = t4_nnmtxq;
4176 iaq->nnmrxq = t4_nnmrxq;
4177 }
4178 if (t4_native_netmap & NN_EXTRA_VI) {
4179 iaq->nnmtxq_vi = t4_nnmtxq_vi;
4180 iaq->nnmrxq_vi = t4_nnmrxq_vi;
4181 }
4182#endif
4183
4184 update_nirq(iaq, nports);
4185 if (iaq->nirq <= navail &&
4186 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4187 /*
4188 * This is the normal case -- there are enough interrupts for
4189 * everything.
4190 */
4191 goto done;
4192 }
4193
4194 /*
4195 * If extra VIs have been configured, try reducing their count and see
4196 * if that works.
4197 */
4198 while (iaq->num_vis > 1) {
4199 iaq->num_vis--;
4200 update_nirq(iaq, nports);
4201 if (iaq->nirq <= navail &&
4202 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4203 device_printf(sc->dev, "virtual interfaces per port "
4204 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
4205 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
4206 "itype %d, navail %u, nirq %d.\n",
4207 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
4208 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
4209 itype, navail, iaq->nirq);
4210 goto done;
4211 }
4212 }
4213
4214 /*
4215 * Extra VIs will not be created. Log a message if they were requested.
4216 */
4217 MPASS(iaq->num_vis == 1);
4218 iaq->ntxq_vi = iaq->nrxq_vi = 0;
4219 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
4220 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
4221 if (iaq->num_vis != t4_num_vis) {
4222 device_printf(sc->dev, "extra virtual interfaces disabled. "
4223 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
4224 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
4225 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
4226 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
4227 }
4228
4229 /*
4230 * Keep reducing the number of NIC rx queues to the next lower power of
4231 * 2 (for even RSS distribution) and halving the TOE rx queues and see
4232 * if that works.
4233 */
4234 do {
4235 if (iaq->nrxq > 1) {
4236 do {
4237 iaq->nrxq--;
4238 } while (!powerof2(iaq->nrxq));
4239 if (iaq->nnmrxq > iaq->nrxq)
4240 iaq->nnmrxq = iaq->nrxq;
4241 }
4242 if (iaq->nofldrxq > 1)
4243 iaq->nofldrxq >>= 1;
4244
4245 old_nirq = iaq->nirq;
4246 update_nirq(iaq, nports);
4247 if (iaq->nirq <= navail &&
4248 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4249 device_printf(sc->dev, "running with reduced number of "
4250 "rx queues because of shortage of interrupts. "
4251 "nrxq=%u, nofldrxq=%u. "
4252 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
4253 iaq->nofldrxq, itype, navail, iaq->nirq);
4254 goto done;
4255 }
4256 } while (old_nirq != iaq->nirq);
4257
4258 /* One interrupt for everything. Ugh. */
4259 device_printf(sc->dev, "running with minimal number of queues. "
4260 "itype %d, navail %u.\n", itype, navail);
4261 iaq->nirq = 1;
4262 iaq->nrxq = 1;
4263 iaq->ntxq = 1;
4264 if (iaq->nofldrxq > 0) {
4265 iaq->nofldrxq = 1;
4266 iaq->nofldtxq = 1;
4267 }
4268 iaq->nnmtxq = 0;
4269 iaq->nnmrxq = 0;
4270done:
4271 MPASS(iaq->num_vis > 0);
4272 if (iaq->num_vis > 1) {
4273 MPASS(iaq->nrxq_vi > 0);
4274 MPASS(iaq->ntxq_vi > 0);
4275 }
4276 MPASS(iaq->nirq > 0);
4277 MPASS(iaq->nrxq > 0);
4278 MPASS(iaq->ntxq > 0);
4279 if (itype == INTR_MSI) {
4280 MPASS(powerof2(iaq->nirq));
4281 }
4282}
4283
4284 static int
4285 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
4286 {
4287 int rc, itype, navail, nalloc;
4288
4289 for (itype = INTR_MSIX; itype; itype >>= 1) {
4290
4291 if ((itype & t4_intr_types) == 0)
4292 continue; /* not allowed */
4293
4294 if (itype == INTR_MSIX)
4295 navail = pci_msix_count(sc->dev);
4296 else if (itype == INTR_MSI)
4297 navail = pci_msi_count(sc->dev);
4298 else
4299 navail = 1;
4300restart:
4301 if (navail == 0)
4302 continue;
4303
4304 calculate_iaq(sc, iaq, itype, navail);
4305 nalloc = iaq->nirq;
4306 rc = 0;
4307 if (itype == INTR_MSIX)
4308 rc = pci_alloc_msix(sc->dev, &nalloc);
4309 else if (itype == INTR_MSI)
4310 rc = pci_alloc_msi(sc->dev, &nalloc);
4311
4312 if (rc == 0 && nalloc > 0) {
4313 if (nalloc == iaq->nirq)
4314 return (0);
4315
4316 /*
4317 * Didn't get the number requested. Use whatever number
4318 * the kernel is willing to allocate.
4319 */
4320 device_printf(sc->dev, "fewer vectors than requested, "
4321 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
4322 itype, iaq->nirq, nalloc);
4323 pci_release_msi(sc->dev);
4324 navail = nalloc;
4325 goto restart;
4326 }
4327
4328 device_printf(sc->dev,
4329 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
4330 rc, itype, iaq->nirq, nalloc);
4331 }
4332
4333 device_printf(sc->dev,
4334 "failed to find a usable interrupt type. "
4335 "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
4336 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
4337
4338 return (ENXIO);
4339}
4340
4341#define FW_VERSION(chip) ( \
4342 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
4343 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
4344 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
4345 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
4346#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
4347
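/*
 * [Editor's note] For example, FW_VERSION(T5) token-pastes the per-chip
 * constants and expands to
 *   V_FW_HDR_FW_VER_MAJOR(T5FW_VERSION_MAJOR) |
 *   V_FW_HDR_FW_VER_MINOR(T5FW_VERSION_MINOR) |
 *   V_FW_HDR_FW_VER_MICRO(T5FW_VERSION_MICRO) |
 *   V_FW_HDR_FW_VER_BUILD(T5FW_VERSION_BUILD)
 * i.e. one 32-bit word holding the bundled firmware version, which is what
 * the fw_info[] initializers below store with htobe32(FW_VERSION(T5)).
 */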
4348/* Just enough of fw_hdr to cover all version info. */
4349 struct fw_h {
4350 __u8 ver;
4351 __u8 chip;
4352 __be16 len512;
4353 __be32 fw_ver;
4354 __be32 tp_microcode_ver;
4355 __u8 intfver_nic;
4356 __u8 intfver_vnic;
4357 __u8 intfver_ofld;
4358 __u8 intfver_ri;
4359 __u8 intfver_iscsipdu;
4360 __u8 intfver_iscsi;
4361 __u8 intfver_fcoepdu;
4362 __u8 intfver_fcoe;
4363 };
4364/* Spot check a couple of fields. */
4365CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
4366CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
4367CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
4368
4369struct fw_info {
4370 uint8_t chip;
4371 char *kld_name;
4372 char *fw_mod_name;
4373 struct fw_h fw_h;
4374} fw_info[] = {
4375 {
4376 .chip = CHELSIO_T4,
4377 .kld_name = "t4fw_cfg",
4378 .fw_mod_name = "t4fw",
4379 .fw_h = {
4380 .chip = FW_HDR_CHIP_T4,
4381 .fw_ver = htobe32(FW_VERSION(T4)),
4382 .intfver_nic = FW_INTFVER(T4, NIC),
4383 .intfver_vnic = FW_INTFVER(T4, VNIC),
4384 .intfver_ofld = FW_INTFVER(T4, OFLD),
4385 .intfver_ri = FW_INTFVER(T4, RI),
4386 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
4387 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4388 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
4389 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4390 },
4391 }, {
4392 .chip = CHELSIO_T5,
4393 .kld_name = "t5fw_cfg",
4394 .fw_mod_name = "t5fw",
4395 .fw_h = {
4396 .chip = FW_HDR_CHIP_T5,
4397 .fw_ver = htobe32(FW_VERSION(T5)),
4398 .intfver_nic = FW_INTFVER(T5, NIC),
4399 .intfver_vnic = FW_INTFVER(T5, VNIC),
4400 .intfver_ofld = FW_INTFVER(T5, OFLD),
4401 .intfver_ri = FW_INTFVER(T5, RI),
4402 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
4403 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4404 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
4405 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4406 },
4407 }, {
4408 .chip = CHELSIO_T6,
4409 .kld_name = "t6fw_cfg",
4410 .fw_mod_name = "t6fw",
4411 .fw_h = {
4412 .chip = FW_HDR_CHIP_T6,
4413 .fw_ver = htobe32(FW_VERSION(T6)),
4414 .intfver_nic = FW_INTFVER(T6, NIC),
4415 .intfver_vnic = FW_INTFVER(T6, VNIC),
4416 .intfver_ofld = FW_INTFVER(T6, OFLD),
4417 .intfver_ri = FW_INTFVER(T6, RI),
4418 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4419 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4420 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4421 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4422 },
4423 }
4424 };
4425 
4426 static struct fw_info *
4427 find_fw_info(int chip)
4428 {
4429 int i;
4430
4431 for (i = 0; i < nitems(fw_info); i++) {
4432 if (fw_info[i].chip == chip)
4433 return (&fw_info[i]);
4434 }
4435 return (NULL);
4436}
4437
4438/*
4439 * Is the given firmware API compatible with the one the driver was compiled
4440 * with?
4441 */
4442static int
4443fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
4444{
4445
4446 /* short circuit if it's the exact same firmware version */
4447 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
4448 return (1);
4449
4450 /*
4451 * XXX: Is this too conservative? Perhaps I should limit this to the
4452 * features that are supported in the driver.
4453 */
4454#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
4455 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
4456 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
4457 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
4458 return (1);
4459#undef SAME_INTF
4460
4461 return (0);
4462}
4463
4464static int
4465load_fw_module(struct adapter *sc, const struct firmware **dcfg,
4466 const struct firmware **fw)
4467{
4468 struct fw_info *fw_info;
4469
4470 *dcfg = NULL;
4471 if (fw != NULL)
4472 *fw = NULL;
4473 
4474 fw_info = find_fw_info(chip_id(sc));
4475 if (fw_info == NULL) {
4476 device_printf(sc->dev,
4477 "unable to look up firmware information for chip %d.\n",
4478 chip_id(sc));
4479 return (EINVAL);
4480 }
4481
4482 *dcfg = firmware_get(fw_info->kld_name);
4483 if (*dcfg != NULL) {
4484 if (fw != NULL)
4485 *fw = firmware_get(fw_info->fw_mod_name);
4486 return (0);
4487 }
4488
4489 return (ENOENT);
4490}
4491
4492static void
4493unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
4494 const struct firmware *fw)
4495{
4496
4497 if (fw != NULL)
4498 firmware_put(fw, FIRMWARE_UNLOAD);
4499 if (dcfg != NULL)
4500 firmware_put(dcfg, FIRMWARE_UNLOAD);
4501}
4502
4503/*
4504 * Return values:
4505 * 0 means no firmware install attempted.
4506 * ERESTART means a firmware install was attempted and was successful.
4507 * +ve errno means a firmware install was attempted but failed.
4508 */
4509static int
4510install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
4511 const struct fw_h *drv_fw, const char *reason, int *already)
4512{
4513 const struct firmware *cfg, *fw;
4514 const uint32_t c = be32toh(card_fw->fw_ver);
4515 uint32_t d, k;
4516 int rc, fw_install;
4517 struct fw_h bundled_fw;
4518 bool load_attempted;
4519
4520 cfg = fw = NULL;
4521 load_attempted = false;
4522 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
4523
4524 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
4525 if (t4_fw_install < 0) {
4526 rc = load_fw_module(sc, &cfg, &fw);
4527 if (rc != 0 || fw == NULL) {
4528 device_printf(sc->dev,
4529 "failed to load firmware module: %d. cfg %p, fw %p;"
4530 " will use compiled-in firmware version for "
4531 "hw.cxgbe.fw_install checks.\n",
4532 rc, cfg, fw);
4533 } else {
4534 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
4535 }
4536 load_attempted = true;
4537 }
4538 d = be32toh(bundled_fw.fw_ver);
4539
4540 if (reason != NULL)
4541 goto install;
4542
4543 if ((sc->flags & FW_OK) == 0) {
4544
4545 if (c == 0xffffffff) {
4546 reason = "missing";
4547 goto install;
4548 }
4549
4550 rc = 0;
4551 goto done;
4552 }
4553
4554 if (!fw_compatible(card_fw, &bundled_fw)) {
4555 reason = "incompatible or unusable";
4556 goto install;
4557 }
4558
4559 if (d > c) {
4560 reason = "older than the version bundled with this driver";
4561 goto install;
4562 }
4563
4564 if (fw_install == 2 && d != c) {
4565 reason = "different than the version bundled with this driver";
4566 goto install;
4567 }
4568
4569 /* No reason to do anything to the firmware already on the card. */
4570 rc = 0;
4571 goto done;
4572
4573install:
4574 rc = 0;
4575 if ((*already)++)
4576 goto done;
4577
4578 if (fw_install == 0) {
4579 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4580 "but the driver is prohibited from installing a firmware "
4581 "on the card.\n",
4582 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4583 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4584 
4585 goto done;
4586 }
4587
4588 /*
4589 * We'll attempt to install a firmware. Load the module first (if it
4590 * hasn't been loaded already).
4591 */
4592 if (!load_attempted) {
4593 rc = load_fw_module(sc, &cfg, &fw);
4594 if (rc != 0 || fw == NULL) {
4595 device_printf(sc->dev,
4596 "failed to load firmware module: %d. cfg %p, fw %p\n",
4597 rc, cfg, fw);
4598 /* carry on */
4599 }
4600 }
4601 if (fw == NULL) {
4602 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4603 "but the driver cannot take corrective action because it "
4604 "is unable to load the firmware module.\n",
4605 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4606 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4607 rc = sc->flags & FW_OK ? 0 : ENOENT;
4608 goto done;
4609 }
4610 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
4611 if (k != d) {
4612 MPASS(t4_fw_install > 0);
4613 device_printf(sc->dev,
4614 "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
4615 "expecting (%u.%u.%u.%u) and will not be used.\n",
4616 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
4617 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
4618 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4619 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4620 rc = sc->flags & FW_OK ? 0 : EINVAL;
4621 goto done;
4622 }
4623
4624 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4625 "installing firmware %u.%u.%u.%u on card.\n",
4626 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4627 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
4628 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4629 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4630 
4631 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
4632 if (rc != 0) {
4633 device_printf(sc->dev, "failed to install firmware: %d\n", rc);
4634 } else {
4635 /* Installed successfully, update the cached header too. */
4636 rc = ERESTART;
4637 memcpy(card_fw, fw->data, sizeof(*card_fw));
4638 }
4639done:
4640 unload_fw_module(sc, cfg, fw);
4641
4642 return (rc);
4643}
4644
4645/*
4646 * Establish contact with the firmware and attempt to become the master driver.
4647 *
4648 * A firmware will be installed to the card if needed (if the driver is allowed
4649 * to do so).
4650 */
4651 static int
4652 contact_firmware(struct adapter *sc)
4653 {
4654 int rc, already = 0;
4655 enum dev_state state;
4656 struct fw_info *fw_info;
4657 struct fw_hdr *card_fw; /* fw on the card */
4658 const struct fw_h *drv_fw;
4659 
4660 fw_info = find_fw_info(chip_id(sc));
4661 if (fw_info == NULL) {
4662 device_printf(sc->dev,
4663 "unable to look up firmware information for chip %d.\n",
4664 chip_id(sc));
4665 return (EINVAL);
4666 }
4667 drv_fw = &fw_info->fw_h;
4668
4669 /* Read the header of the firmware on the card */
4670 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
4671restart:
4672 rc = -t4_get_fw_hdr(sc, card_fw);
4673 if (rc != 0) {
4674 device_printf(sc->dev,
4675 "unable to read firmware header from card's flash: %d\n",
4676 rc);
4677 goto done;
4678 }
4679
4680 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
4681 &already);
4682 if (rc == ERESTART)
4683 goto restart;
4684 if (rc != 0)
4685 goto done;
4686
4687 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
4688 if (rc < 0 || state == DEV_STATE_ERR) {
4689 rc = -rc;
4690 device_printf(sc->dev,
4691 "failed to connect to the firmware: %d, %d. "
4692 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4693#if 0
4694 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4695 "not responding properly to HELLO", &already) == ERESTART)
4696 goto restart;
4697#endif
4698 goto done;
4699 }
4700 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
4701 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */
4702
4703 if (rc == sc->pf) {
4704 sc->flags |= MASTER_PF;
4705 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4706 NULL, &already);
4707 if (rc == ERESTART)
4708 rc = 0;
4709 else if (rc != 0)
4710 goto done;
4711 } else if (state == DEV_STATE_UNINIT) {
4712 /*
4713 * We didn't get to be the master so we definitely won't be
4714 * configuring the chip. It's a bug if someone else hasn't
4715 * configured it already.
4716 */
4717 device_printf(sc->dev, "couldn't be master(%d), "
4718 "device not already initialized either(%d). "
4719 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4720 rc = EPROTO;
4721 goto done;
4722 } else {
4723 /*
4724 * Some other PF is the master and has configured the chip.
4725 * This is allowed but untested.
4726 */
4727 device_printf(sc->dev, "PF%d is master, device state %d. "
4728 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4729 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
4730 sc->cfcsum = 0;
4731 rc = 0;
4732 }
4733done:
4734 if (rc != 0 && sc->flags & FW_OK) {
4735 t4_fw_bye(sc, sc->mbox);
4736 sc->flags &= ~FW_OK;
4737 }
4738 free(card_fw, M_CXGBE);
4739 return (rc);
4740}
4741
4742static int
4743copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
4744 uint32_t mtype, uint32_t moff)
4745{
4746 struct fw_info *fw_info;
4747 const struct firmware *dcfg, *rcfg = NULL;
4748 const uint32_t *cfdata;
4749 uint32_t cflen, addr;
4750 int rc;
4751
4752 load_fw_module(sc, &dcfg, NULL);
4753
4754 /* Card specific interpretation of "default". */
4755 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4756 if (pci_get_device(sc->dev) == 0x440a)
4757 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
4758 if (is_fpga(sc))
4759 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
4760 }
4761
4762 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4763 if (dcfg == NULL) {
4764 device_printf(sc->dev,
4765 "KLD with default config is not available.\n");
4766 rc = ENOENT;
4767 goto done;
4768 }
4769 cfdata = dcfg->data;
4770 cflen = dcfg->datasize & ~3;
4771 } else {
4772 char s[32];
4773 
4774 fw_info = find_fw_info(chip_id(sc));
4775 if (fw_info == NULL) {
4776 device_printf(sc->dev,
4777 "unable to look up firmware information for chip %d.\n",
4778 chip_id(sc));
4779 rc = EINVAL;
4780 goto done;
4781 }
4782 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
4783
4784 rcfg = firmware_get(s);
4785 if (rcfg == NULL) {
4786 device_printf(sc->dev,
4787 "unable to load module \"%s\" for configuration "
4788 "profile \"%s\".\n", s, cfg_file);
4789 rc = ENOENT;
4790 goto done;
4791 }
4792 cfdata = rcfg->data;
4793 cflen = rcfg->datasize & ~3;
4794 }
4795
4796 if (cflen > FLASH_CFG_MAX_SIZE) {
4797 device_printf(sc->dev,
4798 "config file too long (%d, max allowed is %d).\n",
4799 cflen, FLASH_CFG_MAX_SIZE);
4800 rc = EINVAL;
4801 goto done;
4802 }
4803
4804 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
4805 if (rc != 0) {
4806 device_printf(sc->dev,
4807 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
4808 __func__, mtype, moff, cflen, rc);
4809 rc = EINVAL;
4810 goto done;
4811 }
4812 write_via_memwin(sc, 2, addr, cfdata, cflen);
4813done:
4814 if (rcfg != NULL)
4815 firmware_put(rcfg, FIRMWARE_UNLOAD);
4816 unload_fw_module(sc, dcfg, NULL);
4817 return (rc);
4818}
4819 
4820 struct caps_allowed {
4821 uint16_t nbmcaps;
4822 uint16_t linkcaps;
4823 uint16_t switchcaps;
4824 uint16_t niccaps;
4825 uint16_t toecaps;
4826 uint16_t rdmacaps;
4827 uint16_t cryptocaps;
4828 uint16_t iscsicaps;
4829 uint16_t fcoecaps;
4830};
4831
4832#define FW_PARAM_DEV(param) \
4833 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4834 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4835#define FW_PARAM_PFVF(param) \
4836 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4837 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
4838
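/*
 * [Editor's note] Illustrative sketch: the macros above build the 32-bit
 * parameter IDs passed to t4_query_params()/t4_set_params().  For example,
 * the port vector query that get_params__pre_init() performs later in this
 * file boils down to:
 */
#if 0
	uint32_t param, val;
	int rc;

	param = FW_PARAM_DEV(PORTVEC);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0)
		sc->params.portvec = val;
#endif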
4839/*
4840 * Provide a configuration profile to the firmware and have it initialize the
4841 * chip accordingly. This may involve uploading a configuration file to the
4842 * card.
4843 */
4844static int
4845apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
4846 const struct caps_allowed *caps_allowed)
4847{
4848 int rc;
4849 struct fw_caps_config_cmd caps;
4850 uint32_t mtype, moff, finicsum, cfcsum, param, val;
4851
4852 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
4853 if (rc != 0) {
4854 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
4855 return (rc);
4856 }
4857
4858 bzero(&caps, sizeof(caps));
4859 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4860     F_FW_CMD_REQUEST | F_FW_CMD_READ);
4861 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
4862 mtype = 0;
4863 moff = 0;
4864 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4865 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
4866 mtype = FW_MEMTYPE_FLASH;
4867 moff = t4_flash_cfg_addr(sc);
4868 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4869     V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4870     V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4871     FW_LEN16(caps));
4872 } else {
4873 /*
4874 * Ask the firmware where it wants us to upload the config file.
4875 */
4876 param = FW_PARAM_DEV(CF);
4877 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4878 if (rc != 0) {
4879 /* No support for config file? Shouldn't happen. */
4880 device_printf(sc->dev,
4881 "failed to query config file location: %d.\n", rc);
4882 goto done;
4883 }
4884 mtype = G_FW_PARAMS_PARAM_Y(val);
4885 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
4886 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4887     V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4888     V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4889     FW_LEN16(caps));
4890
4891 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
4892 if (rc != 0) {
4893 device_printf(sc->dev,
4894 "failed to upload config file to card: %d.\n", rc);
4895 goto done;
4896 }
4897 }
4898 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4899 if (rc != 0) {
4900 device_printf(sc->dev, "failed to pre-process config file: %d "
4901 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
4902 goto done;
4903 }
4904
4905 finicsum = be32toh(caps.finicsum);
4906 cfcsum = be32toh(caps.cfcsum); /* actual */
4907 if (finicsum != cfcsum) {
4908 device_printf(sc->dev,
4909 "WARNING: config file checksum mismatch: %08x %08x\n",
4910 finicsum, cfcsum);
4911 }
4912 sc->cfcsum = cfcsum;
4913 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
4914
4915 /*
4916 * Let the firmware know what features will (not) be used so it can tune
4917 * things accordingly.
4918 */
4919#define LIMIT_CAPS(x) do { \
4920 caps.x##caps &= htobe16(caps_allowed->x##caps); \
4921} while (0)
4922 LIMIT_CAPS(nbm);
4923 LIMIT_CAPS(link);
4924 LIMIT_CAPS(switch);
4925 LIMIT_CAPS(nic);
4926 LIMIT_CAPS(toe);
4927 LIMIT_CAPS(rdma);
4928 LIMIT_CAPS(crypto);
4929 LIMIT_CAPS(iscsi);
4930 LIMIT_CAPS(fcoe);
4931#undef LIMIT_CAPS
4932 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4933 /*
4934 * TOE and hashfilters are mutually exclusive. It is a config
4935 * file or firmware bug if both are reported as available. Try
4936 * to cope with the situation in non-debug builds by disabling
4937 * TOE.
4938 */
4939 MPASS(caps.toecaps == 0);
4940
4941 caps.toecaps = 0;
4942 caps.rdmacaps = 0;
4943 caps.iscsicaps = 0;
4944 }
4945 
4946 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4947     F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4948 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4949 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
4950 if (rc != 0) {
4951 device_printf(sc->dev,
4952 "failed to process config file: %d.\n", rc);
4953 goto done;
4954 }
4955 
4956 t4_tweak_chip_settings(sc);
4957 set_params__pre_init(sc);
4958 
4959 /* get basic stuff going */
4960 rc = -t4_fw_initialize(sc, sc->mbox);
4961 if (rc != 0) {
4962 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
4963 goto done;
4964 }
4965done:
4966 return (rc);
4967}
4968
4969/*
4970 * Partition chip resources for use between various PFs, VFs, etc.
4971 */
4972 static int
4973 partition_resources(struct adapter *sc)
4974 {
4975 char cfg_file[sizeof(t4_cfg_file)];
4976 struct caps_allowed caps_allowed;
4977 int rc;
4978 bool fallback;
4979
4980 /* Only the master driver gets to configure the chip resources. */
4981 MPASS(sc->flags & MASTER_PF);
4982
4983#define COPY_CAPS(x) do { \
4984 caps_allowed.x##caps = t4_##x##caps_allowed; \
4985} while (0)
4986 bzero(&caps_allowed, sizeof(caps_allowed));
4987 COPY_CAPS(nbm);
4988 COPY_CAPS(link);
4989 COPY_CAPS(switch);
4990 COPY_CAPS(nic);
4991 COPY_CAPS(toe);
4992 COPY_CAPS(rdma);
4993 COPY_CAPS(crypto);
4994 COPY_CAPS(iscsi);
4995 COPY_CAPS(fcoe);
4996 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
4997 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
4998retry:
4999 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
5000 if (rc != 0 && fallback) {
5001 device_printf(sc->dev,
5002 "failed (%d) to configure card with \"%s\" profile, "
5003 "will fall back to a basic configuration and retry.\n",
5004 rc, cfg_file);
5005 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
5006 bzero(&caps_allowed, sizeof(caps_allowed));
5007 COPY_CAPS(switch);
5008 caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
5009 fallback = false;
5010 goto retry;
5011 }
5012#undef COPY_CAPS
5013 return (rc);
5014}
5015
5016/*
5017 * Retrieve parameters that are needed (or nice to have) very early.
5018 */
5019 static int
5020 get_params__pre_init(struct adapter *sc)
5021 {
5022 int rc;
5023 uint32_t param[2], val[2];
5024 
5025 t4_get_version_info(sc);
5026
5027 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
5028     G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
5029     G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
5030     G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
5031     G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
5032 
5033 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
5034     G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
5035     G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
5036     G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
5037     G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
5038 
5039 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
5040     G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
5041     G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
5042     G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
5043     G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
5044 
5045 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
5046     G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
5047     G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
5048     G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
5049     G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
5050 
5051 param[0] = FW_PARAM_DEV(PORTVEC);
5052 param[1] = FW_PARAM_DEV(CCLK);
5053 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5054 if (rc != 0) {
5055 device_printf(sc->dev,
5056 "failed to query parameters (pre_init): %d.\n", rc);
5057 return (rc);
5058 }
5059
5060 sc->params.portvec = val[0];
5061 sc->params.nports = bitcount32(val[0]);
5062 sc->params.vpd.cclk = val[1];
5063
5064 /* Read device log parameters. */
5065 rc = -t4_init_devlog_params(sc, 1);
5066 if (rc == 0)
5067 fixup_devlog_params(sc);
5068 else {
5069 device_printf(sc->dev,
5070 "failed to get devlog parameters: %d.\n", rc);
5071 rc = 0; /* devlog isn't critical for device operation */
5072 }
5073
5074 return (rc);
5075}
5076
5077/*
5078 * Any params that need to be set before FW_INITIALIZE.
5079 */
5080 static int
5081 set_params__pre_init(struct adapter *sc)
5082 {
5083 int rc = 0;
5084 uint32_t param, val;
5085
5086 if (chip_id(sc) >= CHELSIO_T6) {
5087 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
5088 val = 1;
5089 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5090 /* firmwares < 1.20.1.0 do not have this param. */
5091 if (rc == FW_EINVAL &&
5092 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
5093 rc = 0;
5094 }
5095 if (rc != 0) {
5096 device_printf(sc->dev,
5097 "failed to enable high priority filters: %d.\n",
5098 rc);
5099 }
5100 }
5101
5102 /* Enable opaque VIIDs with firmwares that support it. */
5103 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5104 val = 1;
5105 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5106 if (rc == 0 && val == 1)
5107 sc->params.viid_smt_extn_support = true;
5108 else
5109 sc->params.viid_smt_extn_support = false;
5110
5111 return (rc);
5112}
5113
5114/*
5115 * Retrieve various parameters that are of interest to the driver. The device
5116 * has been initialized by the firmware at this point.
5117 */
5118 static int
5119 get_params__post_init(struct adapter *sc)
5120 {
5121 int rc;
5122 uint32_t param[7], val[7];
5123 struct fw_caps_config_cmd caps;
5124
5125 param[0] = FW_PARAM_PFVF(IQFLINT_START);
5126 param[1] = FW_PARAM_PFVF(EQ_START);
5127 param[2] = FW_PARAM_PFVF(FILTER_START);
5128 param[3] = FW_PARAM_PFVF(FILTER_END);
5129 param[4] = FW_PARAM_PFVF(L2T_START);
5130 param[5] = FW_PARAM_PFVF(L2T_END);
5131 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5132     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5133     V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
5134 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
5135 if (rc != 0) {
5136 device_printf(sc->dev,
5137 "failed to query parameters (post_init): %d.\n", rc);
5138 return (rc);
5139 }
5140
5141 sc->sge.iq_start = val[0];
5142 sc->sge.eq_start = val[1];
5143 if ((int)val[3] > (int)val[2]) {
5144 sc->tids.ftid_base = val[2];
5145 sc->tids.ftid_end = val[3];
5146 sc->tids.nftids = val[3] - val[2] + 1;
5147 }
5148 sc->vres.l2t.start = val[4];
5149 sc->vres.l2t.size = val[5] - val[4] + 1;
5150 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
5151 ("%s: L2 table size (%u) larger than expected (%u)",
5152 __func__, sc->vres.l2t.size, L2T_SIZE));
5153 sc->params.core_vdd = val[6];
5154
5155 param[0] = FW_PARAM_PFVF(IQFLINT_END);
5156 param[1] = FW_PARAM_PFVF(EQ_END);
5157 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5158 if (rc != 0) {
5159 device_printf(sc->dev,
5160 "failed to query parameters (post_init2): %d.\n", rc);
5161 return (rc);
5162 }
5163 MPASS((int)val[0] >= sc->sge.iq_start);
5164 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
5165 MPASS((int)val[1] >= sc->sge.eq_start);
5166 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
5167
5168 if (chip_id(sc) >= CHELSIO_T6) {
5169
5170 sc->tids.tid_base = t4_read_reg(sc,
5172
5173 param[0] = FW_PARAM_PFVF(HPFILTER_START);
5174 param[1] = FW_PARAM_PFVF(HPFILTER_END);
5175 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5176 if (rc != 0) {
5177 device_printf(sc->dev,
5178 "failed to query hpfilter parameters: %d.\n", rc);
5179 return (rc);
5180 }
5181 if ((int)val[1] > (int)val[0]) {
5182 sc->tids.hpftid_base = val[0];
5183 sc->tids.hpftid_end = val[1];
5184 sc->tids.nhpftids = val[1] - val[0] + 1;
5185
5186 /*
5187 * These should go off if the layout changes and the
5188 * driver needs to catch up.
5189 */
5190 MPASS(sc->tids.hpftid_base == 0);
5191 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
5192 }
5193
5194 param[0] = FW_PARAM_PFVF(RAWF_START);
5195 param[1] = FW_PARAM_PFVF(RAWF_END);
5196 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5197 if (rc != 0) {
5198 device_printf(sc->dev,
5199 "failed to query rawf parameters: %d.\n", rc);
5200 return (rc);
5201 }
5202 if ((int)val[1] > (int)val[0]) {
5203 sc->rawf_base = val[0];
5204 sc->nrawf = val[1] - val[0] + 1;
5205 }
5206 }
5207
5208 /*
5209 * MPSBGMAP is queried separately because only recent firmwares support
5210 * it as a parameter and we don't want the compound query above to fail
5211 * on older firmwares.
5212 */
5213 param[0] = FW_PARAM_DEV(MPSBGMAP);
5214 val[0] = 0;
5215 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5216 if (rc == 0)
5217 sc->params.mps_bg_map = val[0];
5218 else
5219 sc->params.mps_bg_map = 0;
5220
5221 /*
5222 * Determine whether the firmware supports the filter2 work request.
5223 * This is queried separately for the same reason as MPSBGMAP above.
5224 */
5225 param[0] = FW_PARAM_DEV(FILTER2_WR);
5226 val[0] = 0;
5227 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5228 if (rc == 0)
5229 sc->params.filter2_wr_support = val[0] != 0;
5230 else
5231 sc->params.filter2_wr_support = 0;
5232
5233 /*
5234 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
5235 * This is queried separately for the same reason as other params above.
5236 */
5237 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5238 val[0] = 0;
5239 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5240 if (rc == 0)
5241 sc->params.ulptx_memwrite_dsgl = val[0] != 0;
5242 else
5243 sc->params.ulptx_memwrite_dsgl = false;
5244
5245 /* FW_RI_FR_NSMR_TPTE_WR support */
5246 param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5247 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5248 if (rc == 0)
5249 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
5250 else
5251 sc->params.fr_nsmr_tpte_wr_support = false;
5252
5253 /* Support for 512 SGL entries per FR MR. */
5254 param[0] = FW_PARAM_DEV(DEV_512SGL_MR);
5255 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5256 if (rc == 0)
5257 sc->params.dev_512sgl_mr = val[0] != 0;
5258 else
5259 sc->params.dev_512sgl_mr = false;
5260
5261 param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
5262 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5263 if (rc == 0)
5264 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
5265 else
5266 sc->params.max_pkts_per_eth_tx_pkts_wr = 15;
5267 
5268 param[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5269 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5270 if (rc == 0) {
5271 MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */
5272 sc->params.nsched_cls = val[0];
5273 } else
5274 sc->params.nsched_cls = sc->chip_params->nsched_cls;
5275 
5276 /* get capabilities */
5277 bzero(&caps, sizeof(caps));
5278 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5279     F_FW_CMD_REQUEST | F_FW_CMD_READ);
5280 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
5281 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
5282 if (rc != 0) {
5283 device_printf(sc->dev,
5284 "failed to get card capabilities: %d.\n", rc);
5285 return (rc);
5286 }
5287
5288#define READ_CAPS(x) do { \
5289 sc->x = htobe16(caps.x); \
5290 } while (0)
5291 READ_CAPS(nbmcaps);
5292 READ_CAPS(linkcaps);
5293 READ_CAPS(switchcaps);
5294 READ_CAPS(niccaps);
5295 READ_CAPS(toecaps);
5296 READ_CAPS(rdmacaps);
5297 READ_CAPS(cryptocaps);
5298 READ_CAPS(iscsicaps);
5299 READ_CAPS(fcoecaps);
5300 
5301 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
5302 MPASS(chip_id(sc) > CHELSIO_T4);
5303 MPASS(sc->toecaps == 0);
5304 sc->toecaps = 0;
5305
5306 param[0] = FW_PARAM_DEV(NTID);
5307 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5308 if (rc != 0) {
5309 device_printf(sc->dev,
5310 "failed to query HASHFILTER parameters: %d.\n", rc);
5311 return (rc);
5312 }
5313 sc->tids.ntids = val[0];
5314 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5315 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5316 sc->tids.ntids -= sc->tids.nhpftids;
5317 }
5318 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5319 sc->params.hash_filter = 1;
5320 }
5321 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
5322 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
5323 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
5324 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5325 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
5326 if (rc != 0) {
5327 device_printf(sc->dev,
5328 "failed to query NIC parameters: %d.\n", rc);
5329 return (rc);
5330 }
5331 if ((int)val[1] > (int)val[0]) {
5332 sc->tids.etid_base = val[0];
5333 sc->tids.etid_end = val[1];
5334 sc->tids.netids = val[1] - val[0] + 1;
5335 sc->params.eo_wr_cred = val[2];
5336 sc->params.ethoffload = 1;
5337 }
5338 }
5339 if (sc->toecaps) {
5340 /* query offload-related parameters */
5341 param[0] = FW_PARAM_DEV(NTID);
5342 param[1] = FW_PARAM_PFVF(SERVER_START);
5343 param[2] = FW_PARAM_PFVF(SERVER_END);
5344 param[3] = FW_PARAM_PFVF(TDDP_START);
5345 param[4] = FW_PARAM_PFVF(TDDP_END);
5346 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5347 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5348 if (rc != 0) {
5349 device_printf(sc->dev,
5350 "failed to query TOE parameters: %d.\n", rc);
5351 return (rc);
5352 }
5353 sc->tids.ntids = val[0];
5354 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5355 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5356 sc->tids.ntids -= sc->tids.nhpftids;
5357 }
5358 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5359 if ((int)val[2] > (int)val[1]) {
5360 sc->tids.stid_base = val[1];
5361 sc->tids.nstids = val[2] - val[1] + 1;
5362 }
5363 sc->vres.ddp.start = val[3];
5364 sc->vres.ddp.size = val[4] - val[3] + 1;
5365 sc->params.ofldq_wr_cred = val[5];
5366 sc->params.offload = 1;
5367 } else {
5368 /*
5369 * The firmware attempts memfree TOE configuration for -SO cards
5370 * and will report toecaps=0 if it runs out of resources (this
5371 * depends on the config file). It may not report 0 for other
5372 * capabilities dependent on the TOE in this case. Set them to
5373 * 0 here so that the driver doesn't bother tracking resources
5374 * that will never be used.
5375 */
5376 sc->iscsicaps = 0;
5377 sc->rdmacaps = 0;
5378 }
5379 if (sc->rdmacaps) {
5380 param[0] = FW_PARAM_PFVF(STAG_START);
5381 param[1] = FW_PARAM_PFVF(STAG_END);
5382 param[2] = FW_PARAM_PFVF(RQ_START);
5383 param[3] = FW_PARAM_PFVF(RQ_END);
5384 param[4] = FW_PARAM_PFVF(PBL_START);
5385 param[5] = FW_PARAM_PFVF(PBL_END);
5386 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5387 if (rc != 0) {
5388 device_printf(sc->dev,
5389 "failed to query RDMA parameters(1): %d.\n", rc);
5390 return (rc);
5391 }
5392 sc->vres.stag.start = val[0];
5393 sc->vres.stag.size = val[1] - val[0] + 1;
5394 sc->vres.rq.start = val[2];
5395 sc->vres.rq.size = val[3] - val[2] + 1;
5396 sc->vres.pbl.start = val[4];
5397 sc->vres.pbl.size = val[5] - val[4] + 1;
5398
5399 param[0] = FW_PARAM_PFVF(SQRQ_START);
5400 param[1] = FW_PARAM_PFVF(SQRQ_END);
5401 param[2] = FW_PARAM_PFVF(CQ_START);
5402 param[3] = FW_PARAM_PFVF(CQ_END);
5403 param[4] = FW_PARAM_PFVF(OCQ_START);
5404 param[5] = FW_PARAM_PFVF(OCQ_END);
5405 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5406 if (rc != 0) {
5407 device_printf(sc->dev,
5408 "failed to query RDMA parameters(2): %d.\n", rc);
5409 return (rc);
5410 }
5411 sc->vres.qp.start = val[0];
5412 sc->vres.qp.size = val[1] - val[0] + 1;
5413 sc->vres.cq.start = val[2];
5414 sc->vres.cq.size = val[3] - val[2] + 1;
5415 sc->vres.ocq.start = val[4];
5416 sc->vres.ocq.size = val[5] - val[4] + 1;
5417
5418 param[0] = FW_PARAM_PFVF(SRQ_START);
5419 param[1] = FW_PARAM_PFVF(SRQ_END);
5420 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
5421 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5422 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
5423 if (rc != 0) {
5424 device_printf(sc->dev,
5425 "failed to query RDMA parameters(3): %d.\n", rc);
5426 return (rc);
5427 }
5428 sc->vres.srq.start = val[0];
5429 sc->vres.srq.size = val[1] - val[0] + 1;
5430 sc->params.max_ordird_qp = val[2];
5431 sc->params.max_ird_adapter = val[3];
5432 }
5433 if (sc->iscsicaps) {
5434 param[0] = FW_PARAM_PFVF(ISCSI_START);
5435 param[1] = FW_PARAM_PFVF(ISCSI_END);
5436 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5437 if (rc != 0) {
5438 device_printf(sc->dev,
5439 "failed to query iSCSI parameters: %d.\n", rc);
5440 return (rc);
5441 }
5442 sc->vres.iscsi.start = val[0];
5443 sc->vres.iscsi.size = val[1] - val[0] + 1;
5444 }
5445 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
5446 param[0] = FW_PARAM_PFVF(TLS_START);
5447 param[1] = FW_PARAM_PFVF(TLS_END);
5448 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5449 if (rc != 0) {
5450 device_printf(sc->dev,
5451 "failed to query TLS parameters: %d.\n", rc);
5452 return (rc);
5453 }
5454 sc->vres.key.start = val[0];
5455 sc->vres.key.size = val[1] - val[0] + 1;
5456 }
5457
5458 /*
5459 * We've got the params we wanted to query directly from the firmware.
5460 * Grab some others via other means.
5461 */
5464 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
5465 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
5466
5467 rc = t4_verify_chip_settings(sc);
5468 if (rc != 0)
5469 return (rc);
5471
5472 return (rc);
5473}
5474
5475#ifdef KERN_TLS
5476static void
5477ktls_tick(void *arg)
5478{
5479 struct adapter *sc;
5480 uint32_t tstamp;
5481
5482 sc = arg;
5483 tstamp = tcp_ts_getticks();
5484 t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
5485 t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
5486 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
5487}
5488
5489static int
5490t4_config_kern_tls(struct adapter *sc, bool enable)
5491{
5492 int rc;
5493 uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5494     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) |
5495     V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) |
5496     V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
5497 
5498 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param);
5499 if (rc != 0) {
5500 CH_ERR(sc, "failed to %s NIC TLS: %d\n",
5501 enable ? "enable" : "disable", rc);
5502 return (rc);
5503 }
5504
5505 if (enable) {
5506 sc->flags |= KERN_TLS_ON;
5507 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
5508 C_HARDCLOCK);
5509 } else {
5510 sc->flags &= ~KERN_TLS_ON;
5511 callout_stop(&sc->ktls_tick);
5512 }
5513
5514 return (rc);
5515}
5516#endif
5517
5518 static int
5519 set_params__post_init(struct adapter *sc)
5520 {
5521 uint32_t mask, param, val;
5522#ifdef TCP_OFFLOAD
5523 int i, v, shift;
5524#endif
5525
5526 /* ask for encapsulated CPLs */
5527 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5528 val = 1;
5529 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5530
5531 /* Enable 32b port caps if the firmware supports it. */
5532 param = FW_PARAM_PFVF(PORT_CAPS32);
5533 val = 1;
5534 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
5535 sc->params.port_caps32 = 1;
5536
5537 /* Let filter + maskhash steer to a part of the VI's RSS region. */
5538 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
5539 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
5540     V_MASKFILTER(val - 1));
5541 
5542 mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
5543     F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
5544     F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5545     F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
5546 val = 0;
5547 if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
5548 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
5549     F_ATTACKFILTERENABLE, F_ATTACKFILTERENABLE);
5550 val |= F_DROPERRORATTACK;
5551 }
5552 if (t4_drop_ip_fragments != 0) {
5553 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
5554     F_FRAGMENTDROP);
5555 val |= F_DROPERRORFRAG;
5556 }
5557 if (t4_drop_pkts_with_l2_errors != 0)
5558 val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
5559 if (t4_drop_pkts_with_l3_errors != 0) {
5560 val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
5561     F_DROPERRORCSUMIP;
5562 }
5563 if (t4_drop_pkts_with_l4_errors != 0) {
5564 val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5565     F_DROPERRORTCPOPT | F_DROPERRORCSUM;
5566 }
5567 t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);
5568
5569#ifdef TCP_OFFLOAD
5570 /*
5571 * Override the TOE timers with user provided tunables. This is not the
5572 * recommended way to change the timers (the firmware config file is) so
5573 * these tunables are not documented.
5574 *
5575 * All the timer tunables are in microseconds.
5576 */
5577 if (t4_toe_keepalive_idle != 0) {
5578 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
5579 v &= M_KEEPALIVEIDLE;
5580 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
5581     V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
5582 }
5583 if (t4_toe_keepalive_interval != 0) {
5584 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
5585 v &= M_KEEPALIVEINTVL;
5586 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
5587     V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
5588 }
5589 if (t4_toe_keepalive_count != 0) {
5590 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
5591 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5592     V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
5593     V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
5594     V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
5595 }
5596 if (t4_toe_rexmt_min != 0) {
5597 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
5598 v &= M_RXTMIN;
5599 t4_set_reg_field(sc, A_TP_RXT_MIN,
5600     V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
5601 }
5602 if (t4_toe_rexmt_max != 0) {
5603 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
5604 v &= M_RXTMAX;
5605 t4_set_reg_field(sc, A_TP_RXT_MAX,
5606     V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
5607 }
5608 if (t4_toe_rexmt_count != 0) {
5609 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
5610 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5611     V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
5612     V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
5613     V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
5614 }
5615 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
5616 if (t4_toe_rexmt_backoff[i] != -1) {
5617 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
5618 shift = (i & 3) << 3;
5619 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
5620     M_TIMERBACKOFFINDEX0 << shift, v << shift);
5621 }
5622 }
5623#endif
5624
5625 #ifdef KERN_TLS
5626 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
5627     sc->toecaps & FW_CAPS_CONFIG_TOE) {
5628 /*
5629 * Limit TOE connections to 2 reassembly "islands". This is
5630 * required for TOE TLS connections to downgrade to plain TOE
5631 * connections if an unsupported TLS version or ciphersuite is
5632 * used.
5633 */
5634 t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG,
5635     V_PASSMODE(M_PASSMODE), V_PASSMODE(2));
5636 if (is_ktls(sc)) {
5637 sc->tlst.inline_keys = t4_tls_inline_keys;
5638 sc->tlst.combo_wrs = t4_tls_combo_wrs;
5639 if (t4_kern_tls != 0)
5640 t4_config_kern_tls(sc, true);
5641 }
5642 }
5643#endif
5644 return (0);
5645}
5646
5647#undef FW_PARAM_PFVF
5648#undef FW_PARAM_DEV
5649
5650 static void
5651 t4_set_desc(struct adapter *sc)
5652 {
5653 char buf[128];
5654 struct adapter_params *p = &sc->params;
5655
5656 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
5657
5658 device_set_desc_copy(sc->dev, buf);
5659}
5660
5661static inline void
5662ifmedia_add4(struct ifmedia *ifm, int m)
5663{
5664
5665 ifmedia_add(ifm, m, 0, NULL);
5666 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
5667 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
5668 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
5669}
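/*
 * [Editor's note] A single call such as
 *   ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_100G_CR4);
 * therefore registers the media word four times -- plain, TX pause only,
 * RX pause only, and TX+RX pause -- so that ifconfig can select any
 * flow-control combination for that speed.
 */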
5670
5671/*
5672 * This is the selected media, which is not quite the same as the active media.
5673 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
5674 * and active are not the same, and "media: Ethernet selected" otherwise.
5675 */
5676 static void
5677 set_current_media(struct port_info *pi)
5678 {
5679 struct link_config *lc;
5680 struct ifmedia *ifm;
5681 int mword;
5682 u_int speed;
5683
5684 PORT_LOCK_ASSERT_OWNED(pi);
5685 
5686 /* Leave current media alone if it's already set to IFM_NONE. */
5687 ifm = &pi->media;
5688 if (ifm->ifm_cur != NULL &&
5689 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
5690 return;
5691
5692 lc = &pi->link_cfg;
5693 if (lc->requested_aneg != AUTONEG_DISABLE &&
5694 lc->pcaps & FW_PORT_CAP32_ANEG) {
5695 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
5696 return;
5697 }
5698 mword = IFM_ETHER | IFM_FDX;
5699 if (lc->requested_fc & PAUSE_TX)
5700 mword |= IFM_ETH_TXPAUSE;
5701 if (lc->requested_fc & PAUSE_RX)
5702 mword |= IFM_ETH_RXPAUSE;
5703 if (lc->requested_speed == 0)
5704 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
5705 else
5706 speed = lc->requested_speed;
5707 mword |= port_mword(pi, speed_to_fwcap(speed));
5708 ifmedia_set(ifm, mword);
5709}
5710
5711/*
5712 * Returns true if the ifmedia list for the port cannot change.
5713 */
5714 static bool
5715 fixed_ifmedia(struct port_info *pi)
5716 {
5717 
5718 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
5719     pi->port_type == FW_PORT_TYPE_BT_XFI ||
5720     pi->port_type == FW_PORT_TYPE_BT_XAUI ||
5721     pi->port_type == FW_PORT_TYPE_KX4 ||
5722     pi->port_type == FW_PORT_TYPE_KX ||
5723     pi->port_type == FW_PORT_TYPE_KR ||
5724     pi->port_type == FW_PORT_TYPE_BP_AP ||
5725     pi->port_type == FW_PORT_TYPE_BP4_AP ||
5726     pi->port_type == FW_PORT_TYPE_BP40_BA ||
5727     pi->port_type == FW_PORT_TYPE_KR4_100G ||
5728     pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
5729     pi->port_type == FW_PORT_TYPE_KR_XLAUI);
5730 }
5731
5732 static void
5733 build_medialist(struct port_info *pi)
5734 {
5735 uint32_t ss, speed;
5736 int unknown, mword, bit;
5737 struct link_config *lc;
5738 struct ifmedia *ifm;
5739
5740 PORT_LOCK_ASSERT_OWNED(pi);
5741 
5742 if (pi->flags & FIXED_IFMEDIA)
5743 return;
5744
5745 /*
5746 * Rebuild the ifmedia list.
5747 */
5748 ifm = &pi->media;
5749 ifmedia_removeall(ifm);
5750 lc = &pi->link_cfg;
5751 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
5752 if (__predict_false(ss == 0)) { /* not supposed to happen. */
5753 MPASS(ss != 0);
5754no_media:
5755 MPASS(LIST_EMPTY(&ifm->ifm_list));
5756 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
5757 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
5758 return;
5759 }
5760
5761 unknown = 0;
5762 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
5763 speed = 1 << bit;
5764 MPASS(speed & M_FW_PORT_CAP32_SPEED);
5765 if (ss & speed) {
5766 mword = port_mword(pi, speed);
5767 if (mword == IFM_NONE) {
5768 goto no_media;
5769 } else if (mword == IFM_UNKNOWN)
5770 unknown++;
5771 else
5772 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
5773 }
5774 }
5775 if (unknown > 0) /* Add one unknown for all unknown media types. */
5776 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
5777 if (lc->pcaps & FW_PORT_CAP32_ANEG)
5778 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
5779 
5780 set_current_media(pi);
5781}
5782
5783/*
5784 * Initialize the requested fields in the link config based on driver tunables.
5785 */
5786 static void
5787 init_link_config(struct port_info *pi)
5788 {
5789 struct link_config *lc = &pi->link_cfg;
5790 
5791 PORT_LOCK_ASSERT_OWNED(pi);
5792 MPASS(lc->pcaps != 0);
5793
5794 lc->requested_caps = 0;
5795 lc->requested_speed = 0;
5796
5797 if (t4_autoneg == 0)
5798 lc->requested_aneg = AUTONEG_DISABLE;
5799 else if (t4_autoneg == 1)
5800 lc->requested_aneg = AUTONEG_ENABLE;
5801 else
5802 lc->requested_aneg = AUTONEG_AUTO;
5803 
5804 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
5805     PAUSE_AUTONEG);
5806 
5807 if (t4_fec & FEC_AUTO)
5808 lc->requested_fec = FEC_AUTO;
5809 else if (t4_fec == 0)
5810 lc->requested_fec = FEC_NONE;
5811 else {
5812 /* -1 is handled by the FEC_AUTO block above and not here. */
5813 lc->requested_fec = t4_fec &
5814     (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
5815 if (lc->requested_fec == 0)
5816 lc->requested_fec = FEC_AUTO;
5817 }
5818 lc->force_fec = 0;
5819 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
5820 if (t4_force_fec < 0)
5821 lc->force_fec = -1;
5822 else if (t4_force_fec > 0)
5823 lc->force_fec = 1;
5824 }
5825}
5826
5827/*
5828 * Makes sure that all requested settings comply with what's supported by the
5829 * port. Returns the number of settings that were invalid and had to be fixed.
5830 */
5831 static int
5832 fixup_link_config(struct port_info *pi)
5833 {
5834 int n = 0;
5835 struct link_config *lc = &pi->link_cfg;
5836 uint32_t fwspeed;
5837
5838 
5839 PORT_LOCK_ASSERT_OWNED(pi);
5840 /* Speed (when not autonegotiating) */
5841 if (lc->requested_speed != 0) {
5842 fwspeed = speed_to_fwcap(lc->requested_speed);
5843 if ((fwspeed & lc->pcaps) == 0) {
5844 n++;
5845 lc->requested_speed = 0;
5846 }
5847 }
5848
5849 /* Link autonegotiation */
5850 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
5851     lc->requested_aneg == AUTONEG_DISABLE ||
5852     lc->requested_aneg == AUTONEG_AUTO);
5853 if (lc->requested_aneg == AUTONEG_ENABLE &&
5854 !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
5855 n++;
5856 lc->requested_aneg = AUTONEG_AUTO;
5857 }
5858
5859 /* Flow control */
5860 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
5861 if (lc->requested_fc & PAUSE_TX &&
5862 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
5863 n++;
5864 lc->requested_fc &= ~PAUSE_TX;
5865 }
5866 if (lc->requested_fc & PAUSE_RX &&
5867 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
5868 n++;
5869 lc->requested_fc &= ~PAUSE_RX;
5870 }
5871 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
5872     !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
5873 n++;
5874 lc->requested_fc |= PAUSE_AUTONEG;
5875 }
5876
5877 /* FEC */
5878 if ((lc->requested_fec & FEC_RS &&
5879 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
5880 (lc->requested_fec & FEC_BASER_RS &&
5881     !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
5882 n++;
5883 lc->requested_fec = FEC_AUTO;
5884 }
5885
5886 return (n);
5887}
5888
5889/*
5890 * Apply the requested L1 settings, which are expected to be valid, to the
5891 * hardware.
5892 */
5893 static int
5894 apply_link_config(struct port_info *pi)
5895 {
5896 struct adapter *sc = pi->adapter;
5897 struct link_config *lc = &pi->link_cfg;
5898 int rc;
5899
5900#ifdef INVARIANTS
5901 ASSERT_SYNCHRONIZED_OP(sc);
5902 PORT_LOCK_ASSERT_OWNED(pi);
5903 
5904 if (lc->requested_aneg == AUTONEG_ENABLE)
5905 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
5906 if (!(lc->requested_fc & PAUSE_AUTONEG))
5907 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
5908 if (lc->requested_fc & PAUSE_TX)
5909 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
5910 if (lc->requested_fc & PAUSE_RX)
5911 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
5912 if (lc->requested_fec & FEC_RS)
5913 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
5914 if (lc->requested_fec & FEC_BASER_RS)
5915 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
5916#endif
5917 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5918 if (rc != 0) {
5919 /* Don't complain if the VF driver gets back an EPERM. */
5920 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
5921 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
5922 } else {
5923 /*
5924 * An L1_CFG will almost always result in a link-change event if
5925 * the link is up, and the driver will refresh the actual
5926 * fec/fc/etc. when the notification is processed. If the link
5927 * is down then the actual settings are meaningless.
5928 *
5929 * This takes care of the case where a change in the L1 settings
5930 * may not result in a notification.
5931 */
5932 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
5933 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
5934 }
5935 return (rc);
5936}
5937
5938 #define FW_MAC_EXACT_CHUNK 7
5939 struct mcaddr_ctx {
5940 struct ifnet *ifp;
5941 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
5942 uint64_t hash;
5943 int i;
5944 int del;
5945 int rc;
5946};
5947
5948static u_int
5949add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
5950{
5951 struct mcaddr_ctx *ctx = arg;
5952 struct vi_info *vi = ctx->ifp->if_softc;
5953 struct port_info *pi = vi->pi;
5954 struct adapter *sc = pi->adapter;
5955
5956 if (ctx->rc < 0)
5957 return (0);
5958
5959 ctx->mcaddr[ctx->i] = LLADDR(sdl);
5960 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
5961 ctx->i++;
5962
5963 if (ctx->i == FW_MAC_EXACT_CHUNK) {
5964 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
5965 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
5966 if (ctx->rc < 0) {
5967 int j;
5968
5969 for (j = 0; j < ctx->i; j++) {
5970 if_printf(ctx->ifp,
5971 "failed to add mc address"
5972 " %02x:%02x:%02x:"
5973 "%02x:%02x:%02x rc=%d\n",
5974 ctx->mcaddr[j][0], ctx->mcaddr[j][1],
5975 ctx->mcaddr[j][2], ctx->mcaddr[j][3],
5976 ctx->mcaddr[j][4], ctx->mcaddr[j][5],
5977 -ctx->rc);
5978 }
5979 return (0);
5980 }
5981 ctx->del = 0;
5982 ctx->i = 0;
5983 }
5984
5985 return (1);
5986}
5987
5988/*
5989 * Program the port's XGMAC based on parameters in ifnet. The caller also
5990 * indicates which parameters should be programmed (the rest are left alone).
5991 */
5992int
5993update_mac_settings(struct ifnet *ifp, int flags)
5994{
5995 int rc = 0;
5996 struct vi_info *vi = ifp->if_softc;
5997 struct port_info *pi = vi->pi;
5998 struct adapter *sc = pi->adapter;
5999 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
6000 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
6001 
6002 ASSERT_SYNCHRONIZED_OP(sc);
6003 KASSERT(flags, ("%s: not told what to update.", __func__));
6004
6005 if (flags & XGMAC_MTU)
6006 mtu = ifp->if_mtu;
6007
6008 if (flags & XGMAC_PROMISC)
6009 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
6010
6011 if (flags & XGMAC_ALLMULTI)
6012 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
6013
6014 if (flags & XGMAC_VLANEX)
6015 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
6016 
6017 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
6018 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
6019 allmulti, 1, vlanex, false);
6020 if (rc) {
6021 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
6022 rc);
6023 return (rc);
6024 }
6025 }
6026
6027 if (flags & XGMAC_UCADDR) {
6028 uint8_t ucaddr[ETHER_ADDR_LEN];
6029
6030 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
6031 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
6032 ucaddr, true, &vi->smt_idx);
6033 if (rc < 0) {
6034 rc = -rc;
6035 if_printf(ifp, "change_mac failed: %d\n", rc);
6036 return (rc);
6037 } else {
6038 vi->xact_addr_filt = rc;
6039 rc = 0;
6040 }
6041 }
6042
6043 if (flags & XGMAC_MCADDRS) {
6044 struct epoch_tracker et;
6045 struct mcaddr_ctx ctx;
6046 int j;
6047
6048 ctx.ifp = ifp;
6049 ctx.hash = 0;
6050 ctx.i = 0;
6051 ctx.del = 1;
6052 ctx.rc = 0;
6053 /*
6054 * Unlike other drivers, we accumulate a list of pointers into
6055 * interface address lists and we need to keep it safe even
6056 * after if_foreach_llmaddr() returns, thus we must enter the
6057 * network epoch.
6058 */
6059 NET_EPOCH_ENTER(et);
6060 if_foreach_llmaddr(ifp, add_maddr, &ctx);
6061 if (ctx.rc < 0) {
6062 NET_EPOCH_EXIT(et);
6063 rc = -ctx.rc;
6064 return (rc);
6065 }
6066 if (ctx.i > 0) {
6067 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
6068 ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
6069 NET_EPOCH_EXIT(et);
6070 if (rc < 0) {
6071 rc = -rc;
6072 for (j = 0; j < ctx.i; j++) {
6073 if_printf(ifp,
6074 "failed to add mcast address"
6075 " %02x:%02x:%02x:"
6076 "%02x:%02x:%02x rc=%d\n",
6077 ctx.mcaddr[j][0], ctx.mcaddr[j][1],
6078 ctx.mcaddr[j][2], ctx.mcaddr[j][3],
6079 ctx.mcaddr[j][4], ctx.mcaddr[j][5],
6080 rc);
6081 }
6082 return (rc);
6083 }
6084 ctx.del = 0;
6085 } else
6086 NET_EPOCH_EXIT(et);
6087
6088 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
6089 if (rc != 0)
6090 if_printf(ifp, "failed to set mcast address hash: %d\n",
6091 rc);
6092 if (ctx.del == 0) {
6093 /* We clobbered the VXLAN entry if there was one. */
6094 pi->vxlan_tcam_entry = false;
6095 }
6096 }
6097
6098 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
6099 pi->vxlan_tcam_entry == false) {
6100 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
6101 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
6102 true);
6103 if (rc < 0) {
6104 rc = -rc;
6105 if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
6106 rc);
6107 } else {
6108 MPASS(rc == sc->rawf_base + pi->port_id);
6109 rc = 0;
6110 pi->vxlan_tcam_entry = true;
6111 }
6112 }
6113
6114 return (rc);
6115}
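/*
 * Callers pass one or more XGMAC_* bits to select what is reprogrammed.
 * A hypothetical ioctl-path caller reacting to an IFF_PROMISC toggle would
 * look roughly like this (wmesg chosen for illustration):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flag");
 *	if (rc == 0) {
 *		rc = update_mac_settings(ifp, XGMAC_PROMISC);
 *		end_synchronized_op(sc, 0);
 *	}
 *
 * XGMAC_ALL is the union of all the bits and is what the init path uses.
 */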
6116
6117/*
6118 * {begin|end}_synchronized_op must be called from the same thread.
6119 */
6120int
6121begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
6122 char *wmesg)
6123{
6124 int rc, pri;
6125
6126#ifdef WITNESS
6127 /* the caller thinks it's ok to sleep, but is it really? */
6128 if (flags & SLEEP_OK)
6129 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
6130 "begin_synchronized_op");
6131#endif
6132
6133 if (flags & INTR_OK)
6134 pri = PCATCH;
6135 else
6136 pri = 0;
6137
6138 ADAPTER_LOCK(sc);
6139 for (;;) {
6140
6141 if (vi && IS_DOOMED(vi)) {
6142 rc = ENXIO;
6143 goto done;
6144 }
6145
6146 if (!IS_BUSY(sc)) {
6147 rc = 0;
6148 break;
6149 }
6150
6151 if (!(flags & SLEEP_OK)) {
6152 rc = EBUSY;
6153 goto done;
6154 }
6155
6156 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
6157 rc = EINTR;
6158 goto done;
6159 }
6160 }
6161
6162 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
6163 SET_BUSY(sc);
6164#ifdef INVARIANTS
6165 sc->last_op = wmesg;
6166 sc->last_op_thr = curthread;
6167 sc->last_op_flags = flags;
6168#endif
6169
6170done:
6171 if (!(flags & HOLD_LOCK) || rc)
6172 ADAPTER_UNLOCK(sc);
6173
6174 return (rc);
6175}
6176
6177/*
6178 * Tell if_ioctl and if_init that the VI is going away. This is
6179 * special variant of begin_synchronized_op and must be paired with a
6180 * call to end_synchronized_op.
6181 */
6182void
6183doom_vi(struct adapter *sc, struct vi_info *vi)
6184{
6185
6186 ADAPTER_LOCK(sc);
6187 SET_DOOMED(vi);
6188 wakeup(&sc->flags);
6189 while (IS_BUSY(sc))
6190 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
6191 SET_BUSY(sc);
6192#ifdef INVARIANTS
6193 sc->last_op = "t4detach";
6194 sc->last_op_thr = curthread;
6195 sc->last_op_flags = 0;
6196#endif
6197 ADAPTER_UNLOCK(sc);
6198}
6199
6200/*
6201 * {begin|end}_synchronized_op must be called from the same thread.
6202 */
6203void
6204end_synchronized_op(struct adapter *sc, int flags)
6205{
6206
6207 if (flags & LOCK_HELD)
6208 ADAPTER_LOCK_ASSERT_OWNED(sc);
6209 else
6210 ADAPTER_LOCK(sc);
6211
6212 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
6213 CLR_BUSY(sc);
6214 wakeup(&sc->flags);
6215 ADAPTER_UNLOCK(sc);
6216}
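/*
 * begin/end_synchronized_op implement a sleepable, optionally interruptible
 * exclusion region keyed on the adapter's BUSY flag; the wakeup(&sc->flags)
 * above is the counterpart of the mtx_sleep() in begin_synchronized_op()
 * and doom_vi().  A caller that wants to keep the adapter lock across the
 * operation pairs HOLD_LOCK with LOCK_HELD, e.g. (illustrative wmesg):
 *
 *	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4hold") == 0) {
 *		// sc is marked busy and ADAPTER_LOCK is still held here
 *		end_synchronized_op(sc, LOCK_HELD);
 *	}
 */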
6217
6218static int
6219cxgbe_init_synchronized(struct vi_info *vi)
6220{
6221 struct port_info *pi = vi->pi;
6222 struct adapter *sc = pi->adapter;
6223 struct ifnet *ifp = vi->ifp;
6224 int rc = 0, i;
6225 struct sge_txq *txq;
6226
6227 ASSERT_SYNCHRONIZED_OP(sc);
6228
6229 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6230 return (0); /* already running */
6231
6232 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
6233 return (rc); /* error message displayed already */
6234
6235 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
6236 return (rc); /* error message displayed already */
6237
6238 rc = update_mac_settings(ifp, XGMAC_ALL);
6239 if (rc)
6240 goto done; /* error message displayed already */
6241
6242 PORT_LOCK(pi);
6243 if (pi->up_vis == 0) {
6244 t4_update_port_info(pi);
6245 fixup_link_config(pi);
6246 build_medialist(pi);
6247 apply_link_config(pi);
6248 }
6249
6250 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
6251 if (rc != 0) {
6252 if_printf(ifp, "enable_vi failed: %d\n", rc);
6253 PORT_UNLOCK(pi);
6254 goto done;
6255 }
6256
6257 /*
6258 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
6259 * if this changes.
6260 */
6261
6262 for_each_txq(vi, i, txq) {
6263 TXQ_LOCK(txq);
6264 txq->eq.flags |= EQ_ENABLED;
6265 TXQ_UNLOCK(txq);
6266 }
6267
6268 /*
6269 * The first iq of the first port to come up is used for tracing.
6270 */
6271 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
6272 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
6273 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
6274 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
6275 V_QUEUENUMBER(sc->traceq));
6276 pi->flags |= HAS_TRACEQ;
6277 }
6278
6279 /* all ok */
6280 pi->up_vis++;
6281 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6282 if (pi->link_cfg.link_ok)
6283 t4_os_link_changed(pi);
6284 PORT_UNLOCK(pi);
6285
6286 mtx_lock(&vi->tick_mtx);
6287 if (ifp->if_get_counter == vi_get_counter)
6288 callout_reset(&vi->tick, hz, vi_tick, vi);
6289 else
6290 callout_reset(&vi->tick, hz, cxgbe_tick, vi);
6291 mtx_unlock(&vi->tick_mtx);
6292done:
6293 if (rc != 0)
6294 cxgbe_uninit_synchronized(vi);
6295
6296 return (rc);
6297}
6298
6299/*
6300 * Idempotent.
6301 */
6302static int
6303cxgbe_uninit_synchronized(struct vi_info *vi)
6304{
6305 struct port_info *pi = vi->pi;
6306 struct adapter *sc = pi->adapter;
6307 struct ifnet *ifp = vi->ifp;
6308 int rc, i;
6309 struct sge_txq *txq;
6310
6311 ASSERT_SYNCHRONIZED_OP(sc);
6312
6313 if (!(vi->flags & VI_INIT_DONE)) {
6314 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
6315 KASSERT(0, ("uninited VI is running"));
6316 if_printf(ifp, "uninited VI with running ifnet. "
6317 "vi->flags 0x%016lx, if_flags 0x%08x, "
6318 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
6319 ifp->if_drv_flags);
6320 }
6321 return (0);
6322 }
6323
6324 /*
6325 * Disable the VI so that all its data in either direction is discarded
6326 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
6327 * tick) intact as the TP can deliver negative advice or data that it's
6328 * holding in its RAM (for an offloaded connection) even after the VI is
6329 * disabled.
6330 */
6331 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
6332 if (rc) {
6333 if_printf(ifp, "disable_vi failed: %d\n", rc);
6334 return (rc);
6335 }
6336
6337 for_each_txq(vi, i, txq) {
6338 TXQ_LOCK(txq);
6339 txq->eq.flags &= ~EQ_ENABLED;
6340 TXQ_UNLOCK(txq);
6341 }
6342
6343 mtx_lock(&vi->tick_mtx);
6344 callout_stop(&vi->tick);
6345 mtx_unlock(&vi->tick_mtx);
6346
6347 PORT_LOCK(pi);
6348 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
6349 PORT_UNLOCK(pi);
6350 return (0);
6351 }
6352 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6353 pi->up_vis--;
6354 if (pi->up_vis > 0) {
6355 PORT_UNLOCK(pi);
6356 return (0);
6357 }
6358
6359 pi->link_cfg.link_ok = false;
6360 pi->link_cfg.speed = 0;
6361 pi->link_cfg.link_down_rc = 255;
6362 t4_os_link_changed(pi);
6363 PORT_UNLOCK(pi);
6364
6365 return (0);
6366}
6367
6368/*
6369 * It is ok for this function to fail midway and return right away. t4_detach
6370 * will walk the entire sc->irq list and clean up whatever is valid.
6371 */
6372int
6373t4_setup_intr_handlers(struct adapter *sc)
6374{
6375 int rc, rid, p, q, v;
6376 char s[8];
6377 struct irq *irq;
6378 struct port_info *pi;
6379 struct vi_info *vi;
6380 struct sge *sge = &sc->sge;
6381 struct sge_rxq *rxq;
6382#ifdef TCP_OFFLOAD
6383 struct sge_ofld_rxq *ofld_rxq;
6384#endif
6385#ifdef DEV_NETMAP
6386 struct sge_nm_rxq *nm_rxq;
6387#endif
6388#ifdef RSS
6389 int nbuckets = rss_getnumbuckets();
6390#endif
6391
6392 /*
6393 * Setup interrupts.
6394 */
6395 irq = &sc->irq[0];
6396 rid = sc->intr_type == INTR_INTX ? 0 : 1;
6397 if (forwarding_intr_to_fwq(sc))
6398 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
6399
6400 /* Multiple interrupts. */
6401 if (sc->flags & IS_VF)
6402 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
6403 ("%s: too few intr.", __func__));
6404 else
6405 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
6406 ("%s: too few intr.", __func__));
6407
6408 /* The first one is always error intr on PFs */
6409 if (!(sc->flags & IS_VF)) {
6410 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
6411 if (rc != 0)
6412 return (rc);
6413 irq++;
6414 rid++;
6415 }
6416
6417 /* The second one is always the firmware event queue (first on VFs) */
6418 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
6419 if (rc != 0)
6420 return (rc);
6421 irq++;
6422 rid++;
6423
6424 for_each_port(sc, p) {
6425 pi = sc->port[p];
6426 for_each_vi(pi, v, vi) {
6427 vi->first_intr = rid - 1;
6428
6429 if (vi->nnmrxq > 0) {
6430 int n = max(vi->nrxq, vi->nnmrxq);
6431
6432 rxq = &sge->rxq[vi->first_rxq];
6433#ifdef DEV_NETMAP
6434 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
6435#endif
6436 for (q = 0; q < n; q++) {
6437 snprintf(s, sizeof(s), "%x%c%x", p,
6438 'a' + v, q);
6439 if (q < vi->nrxq)
6440 irq->rxq = rxq++;
6441#ifdef DEV_NETMAP
6442 if (q < vi->nnmrxq)
6443 irq->nm_rxq = nm_rxq++;
6444
6445 if (irq->nm_rxq != NULL &&
6446 irq->rxq == NULL) {
6447 /* Netmap rx only */
6448 rc = t4_alloc_irq(sc, irq, rid,
6449 t4_nm_intr, irq->nm_rxq, s);
6450 }
6451 if (irq->nm_rxq != NULL &&
6452 irq->rxq != NULL) {
6453 /* NIC and Netmap rx */
6454 rc = t4_alloc_irq(sc, irq, rid,
6455 t4_vi_intr, irq, s);
6456 }
6457#endif
6458 if (irq->rxq != NULL &&
6459 irq->nm_rxq == NULL) {
6460 /* NIC rx only */
6461 rc = t4_alloc_irq(sc, irq, rid,
6462 t4_intr, irq->rxq, s);
6463 }
6464 if (rc != 0)
6465 return (rc);
6466#ifdef RSS
6467 if (q < vi->nrxq) {
6468 bus_bind_intr(sc->dev, irq->res,
6469 rss_getcpu(q % nbuckets));
6470 }
6471#endif
6472 irq++;
6473 rid++;
6474 vi->nintr++;
6475 }
6476 } else {
6477 for_each_rxq(vi, q, rxq) {
6478 snprintf(s, sizeof(s), "%x%c%x", p,
6479 'a' + v, q);
6480 rc = t4_alloc_irq(sc, irq, rid,
6481 t4_intr, rxq, s);
6482 if (rc != 0)
6483 return (rc);
6484#ifdef RSS
6485 bus_bind_intr(sc->dev, irq->res,
6486 rss_getcpu(q % nbuckets));
6487#endif
6488 irq++;
6489 rid++;
6490 vi->nintr++;
6491 }
6492 }
6493#ifdef TCP_OFFLOAD
6494 for_each_ofld_rxq(vi, q, ofld_rxq) {
6495 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
6496 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
6497 ofld_rxq, s);
6498 if (rc != 0)
6499 return (rc);
6500 irq++;
6501 rid++;
6502 vi->nintr++;
6503 }
6504#endif
6505 }
6506 }
6507 MPASS(irq == &sc->irq[sc->intr_count]);
6508
6509 return (0);
6510}
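/*
 * For reference, the vector layout produced above when not forwarding all
 * interrupts to the firmware event queue is:
 *
 *	PF:	[err] [fwq/evt] [one vector per rx/ofld/netmap queue]
 *	VF:	      [fwq/evt] [one vector per queue]
 *
 * Queue vectors are described as "%x%c%x": port, VI letter ('a' + vi for
 * NIC/netmap, 'A' + vi for TOE), queue index, so "0a1" is port 0, first
 * VI, rx queue 1.
 */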
6511
6512static void
6513write_global_rss_key(struct adapter *sc)
6514{
6515#ifdef RSS
6516 int i;
6517 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6518 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6519
6520 CTASSERT(RSS_KEYSIZE == 40);
6521
6522 rss_getkey((void *)&raw_rss_key[0]);
6523 for (i = 0; i < nitems(rss_key); i++) {
6524 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
6525 }
6526 t4_write_rss_key(sc, &rss_key[0], -1, 1);
6527#endif
6528}
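/*
 * The loop above loads the 40-byte rss(9) key into the chip in reverse
 * word order and big-endian form, i.e. with RSS_KEYSIZE / 4 == 10:
 *
 *	rss_key[i] = htobe32(raw_rss_key[9 - i]);
 *
 * which is the layout the hardware expects, so that it and the software
 * RSS code compute the same Toeplitz hash for a given flow.
 */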
6529
6530/*
6531 * Idempotent.
6532 */
6533static int
6534adapter_full_init(struct adapter *sc)
6535{
6536 int rc, i;
6537
6538 ASSERT_SYNCHRONIZED_OP(sc);
6539
6540 /*
6541 * queues that belong to the adapter (not any particular port).
6542 */
6543 rc = t4_setup_adapter_queues(sc);
6544 if (rc != 0)
6545 return (rc);
6546
6547 for (i = 0; i < nitems(sc->tq); i++) {
6548 if (sc->tq[i] != NULL)
6549 continue;
6550 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
6551 taskqueue_thread_enqueue, &sc->tq[i]);
6552 if (sc->tq[i] == NULL) {
6553 CH_ERR(sc, "failed to allocate task queue %d\n", i);
6554 return (ENOMEM);
6555 }
6556 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
6557 device_get_nameunit(sc->dev), i);
6558 }
6559
6560 if (!(sc->flags & IS_VF)) {
6561 write_global_rss_key(sc);
6562 t4_intr_enable(sc);
6563 }
6564 return (0);
6565}
6566
6567int
6568adapter_init(struct adapter *sc)
6569{
6570 int rc;
6571
6572 ASSERT_SYNCHRONIZED_OP(sc);
6573 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
6574 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
6575 ("%s: FULL_INIT_DONE already", __func__));
6576
6577 rc = adapter_full_init(sc);
6578 if (rc != 0)
6579 adapter_full_uninit(sc);
6580 else
6581 sc->flags |= FULL_INIT_DONE;
6582
6583 return (rc);
6584}
6585
6586/*
6587 * Idempotent.
6588 */
6589static void
6590adapter_full_uninit(struct adapter *sc)
6591{
6592 int i;
6593
6594 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
6595
6596 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
6597 taskqueue_free(sc->tq[i]);
6598 sc->tq[i] = NULL;
6599 }
6600
6601 sc->flags &= ~FULL_INIT_DONE;
6602}
6603
6604#ifdef RSS
6605#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
6606 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
6607 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
6608 RSS_HASHTYPE_RSS_UDP_IPV6)
6609
6610/* Translates kernel hash types to hardware. */
6611static int
6612hashconfig_to_hashen(int hashconfig)
6613{
6614 int hashen = 0;
6615
6616 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
6617 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
6618 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
6619 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
6620 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
6621 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6622 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6623 }
6624 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
6625 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6626 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6627 }
6628 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
6629 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6630 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
6631 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6632
6633 return (hashen);
6634}
6635
6636/* Translates hardware hash types to kernel. */
6637static int
6638hashen_to_hashconfig(int hashen)
6639{
6640 int hashconfig = 0;
6641
6642 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
6643 /*
6644 * If UDP hashing was enabled it must have been enabled for
6645 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
6646 * enabling any 4-tuple hash is a nonsense configuration.
6647 */
6648 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6649 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
6650
6651 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6652 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
6653 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6654 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
6655 }
6656 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6657 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
6658 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6659 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
6660 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
6661 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
6662 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
6663 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
6664
6665 return (hashconfig);
6666}
6667#endif
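/*
 * The two translators above are intentionally asymmetric: enabling a UDP
 * hash forces the matching 4-tuple enable on, so a round trip can gain,
 * but never lose, supported hash types.  Worked example:
 *
 *	int in = RSS_HASHTYPE_RSS_UDP_IPV4;
 *	int out = hashen_to_hashconfig(hashconfig_to_hashen(in));
 *	// out == RSS_HASHTYPE_RSS_UDP_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4
 *
 * vi_full_init() below detects and reports such forced-on extras.
 */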
6668
6669/*
6670 * Idempotent.
6671 */
6672static int
6673vi_full_init(struct vi_info *vi)
6674{
6675 struct adapter *sc = vi->adapter;
6676 struct sge_rxq *rxq;
6677 int rc, i, j;
6678#ifdef RSS
6679 int nbuckets = rss_getnumbuckets();
6680 int hashconfig = rss_gethashconfig();
6681 int extra;
6682#endif
6683
6684 ASSERT_SYNCHRONIZED_OP(sc);
6685
6686 /*
6687 * Allocate tx/rx/fl queues for this VI.
6688 */
6689 rc = t4_setup_vi_queues(vi);
6690 if (rc != 0)
6691 return (rc);
6692
6693 /*
6694 * Setup RSS for this VI. Save a copy of the RSS table for later use.
6695 */
6696 if (vi->nrxq > vi->rss_size) {
6697 CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); "
6698 "some queues will never receive traffic.\n", vi->nrxq,
6699 vi->rss_size);
6700 } else if (vi->rss_size % vi->nrxq) {
6701 CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); "
6702 "expect uneven traffic distribution.\n", vi->nrxq,
6703 vi->rss_size);
6704 }
6705#ifdef RSS
6706 if (vi->nrxq != nbuckets) {
6707 CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d); "
6708 "performance will be impacted.\n", vi->nrxq, nbuckets);
6709 }
6710#endif
6711 if (vi->rss == NULL)
6712 vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE,
6713 M_ZERO | M_WAITOK);
6714 for (i = 0; i < vi->rss_size;) {
6715#ifdef RSS
6716 j = rss_get_indirection_to_bucket(i);
6717 j %= vi->nrxq;
6718 rxq = &sc->sge.rxq[vi->first_rxq + j];
6719 vi->rss[i++] = rxq->iq.abs_id;
6720#else
6721 for_each_rxq(vi, j, rxq) {
6722 vi->rss[i++] = rxq->iq.abs_id;
6723 if (i == vi->rss_size)
6724 break;
6725 }
6726#endif
6727 }
6728
6729 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
6730 vi->rss, vi->rss_size);
6731 if (rc != 0) {
6732 CH_ERR(vi, "rss_config failed: %d\n", rc);
6733 return (rc);
6734 }
6735
6736#ifdef RSS
6737 vi->hashen = hashconfig_to_hashen(hashconfig);
6738
6739 /*
6740 * We may have had to enable some hashes even though the global config
6741 * wants them disabled. This is a potential problem that must be
6742 * reported to the user.
6743 */
6744 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
6745
6746 /*
6747 * If we consider only the supported hash types, then the enabled hashes
6748 * are a superset of the requested hashes. In other words, there cannot
6749 * be any supported hash that was requested but not enabled, but there
6750 * can be hashes that were not requested but had to be enabled.
6751 */
6752 extra &= SUPPORTED_RSS_HASHTYPES;
6753 MPASS((extra & hashconfig) == 0);
6754
6755 if (extra) {
6756 CH_ALERT(vi,
6757 "global RSS config (0x%x) cannot be accommodated.\n",
6758 hashconfig);
6759 }
6760 if (extra & RSS_HASHTYPE_RSS_IPV4)
6761 CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n");
6762 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
6763 CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n");
6764 if (extra & RSS_HASHTYPE_RSS_IPV6)
6765 CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n");
6766 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
6767 CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n");
6768 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
6769 CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n");
6770 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
6771 CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n");
6772#else
6773 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
6774 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
6775 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6776 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
6777#endif
6778 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0],
6779 0, 0);
6780 if (rc != 0) {
6781 CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc);
6782 return (rc);
6783 }
6784
6785 return (0);
6786}
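/*
 * A worked example of the indirection fill above (values illustrative):
 * without kernel RSS, rss_size = 64 and nrxq = 4 tile the queues as
 *
 *	vi->rss[] = { q0, q1, q2, q3, q0, q1, q2, q3, ... }  // 16 repeats
 *
 * If rss_size is not a multiple of nrxq, the first (rss_size % nrxq)
 * queues appear one extra time, hence the "uneven traffic distribution"
 * warning.
 */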
6787
6788int
6789vi_init(struct vi_info *vi)
6790{
6791 int rc;
6792
6793 ASSERT_SYNCHRONIZED_OP(vi->adapter);
6794 KASSERT((vi->flags & VI_INIT_DONE) == 0,
6795 ("%s: VI_INIT_DONE already", __func__));
6796
6797 rc = vi_full_init(vi);
6798 if (rc != 0)
6799 vi_full_uninit(vi);
6800 else
6801 vi->flags |= VI_INIT_DONE;
6802
6803 return (rc);
6804}
6805
6806/*
6807 * Idempotent.
6808 */
6809static void
6810vi_full_uninit(struct vi_info *vi)
6811{
6812
6813 if (vi->flags & VI_INIT_DONE) {
6814 quiesce_vi(vi);
6815 free(vi->rss, M_CXGBE);
6816 free(vi->nm_rss, M_CXGBE);
6817 }
6818
6819 t4_teardown_vi_queues(vi);
6820 vi->flags &= ~VI_INIT_DONE;
6821}
6822
6823static void
6824quiesce_txq(struct sge_txq *txq)
6825{
6826 struct sge_eq *eq = &txq->eq;
6827 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6828
6829 MPASS(eq->flags & EQ_SW_ALLOCATED);
6830 MPASS(!(eq->flags & EQ_ENABLED));
6831
6832 /* Wait for the mp_ring to empty. */
6833 while (!mp_ring_is_idle(txq->r)) {
6834 mp_ring_check_drainage(txq->r, 4096);
6835 pause("rquiesce", 1);
6836 }
6837 MPASS(txq->txp.npkt == 0);
6838
6839 if (eq->flags & EQ_HW_ALLOCATED) {
6840 /*
6841 * Hardware is alive and working normally. Wait for it to
6842 * finish and then wait for the driver to catch up and reclaim
6843 * all descriptors.
6844 */
6845 while (spg->cidx != htobe16(eq->pidx))
6846 pause("equiesce", 1);
6847 while (eq->cidx != eq->pidx)
6848 pause("dquiesce", 1);
6849 } else {
6850 /*
6851 * Hardware is unavailable. Discard all pending tx and reclaim
6852 * descriptors directly.
6853 */
6854 TXQ_LOCK(txq);
6855 while (eq->cidx != eq->pidx) {
6856 struct mbuf *m, *nextpkt;
6857 struct tx_sdesc *txsd;
6858
6859 txsd = &txq->sdesc[eq->cidx];
6860 for (m = txsd->m; m != NULL; m = nextpkt) {
6861 nextpkt = m->m_nextpkt;
6862 m->m_nextpkt = NULL;
6863 m_freem(m);
6864 }
6865 IDXINCR(eq->cidx, txsd->desc_used, eq->sidx);
6866 }
6867 spg->pidx = spg->cidx = htobe16(eq->cidx);
6868 TXQ_UNLOCK(txq);
6869 }
6870}
6871
6872static void
6873quiesce_wrq(struct sge_wrq *wrq)
6874{
6875
6876 /* XXXTX */
6877}
6878
6879static void
6880quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
6881{
6882 /* Synchronize with the interrupt handler */
6883 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
6884 pause("iqfree", 1);
6885
6886 if (fl != NULL) {
6887 MPASS(iq->flags & IQ_HAS_FL);
6888
6889 mtx_lock(&sc->sfl_lock);
6890 FL_LOCK(fl);
6891 fl->flags |= FL_DOOMED;
6892 FL_UNLOCK(fl);
6893 callout_stop(&sc->sfl_callout);
6894 mtx_unlock(&sc->sfl_lock);
6895
6896 KASSERT((fl->flags & FL_STARVING) == 0,
6897 ("%s: still starving", __func__));
6898
6899 /* Release all buffers if hardware is no longer available. */
6900 if (!(iq->flags & IQ_HW_ALLOCATED))
6901 free_fl_buffers(sc, fl);
6902 }
6903}
6904
6905/*
6906 * Wait for all activity on all the queues of the VI to complete. It is assumed
6907 * that no new work is being enqueued by the hardware or the driver. That part
6908 * should be arranged before calling this function.
6909 */
6910static void
6911quiesce_vi(struct vi_info *vi)
6912{
6913 int i;
6914 struct adapter *sc = vi->adapter;
6915 struct sge_rxq *rxq;
6916 struct sge_txq *txq;
6917#ifdef TCP_OFFLOAD
6918 struct sge_ofld_rxq *ofld_rxq;
6919#endif
6920#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6921 struct sge_ofld_txq *ofld_txq;
6922#endif
6923
6924 if (!(vi->flags & VI_INIT_DONE))
6925 return;
6926
6927 for_each_txq(vi, i, txq) {
6928 quiesce_txq(txq);
6929 }
6930
6931#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6932 for_each_ofld_txq(vi, i, ofld_txq) {
6933 quiesce_wrq(&ofld_txq->wrq);
6934 }
6935#endif
6936
6937 for_each_rxq(vi, i, rxq) {
6938 quiesce_iq_fl(sc, &rxq->iq, &rxq->fl);
6939 }
6940
6941#ifdef TCP_OFFLOAD
6942 for_each_ofld_rxq(vi, i, ofld_rxq) {
6943 quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl);
6944 }
6945#endif
6946}
6947
6948static int
6949t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
6950 driver_intr_t *handler, void *arg, char *name)
6951{
6952 int rc;
6953
6954 irq->rid = rid;
6955 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
6956 RF_SHAREABLE | RF_ACTIVE);
6957 if (irq->res == NULL) {
6958 device_printf(sc->dev,
6959 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
6960 return (ENOMEM);
6961 }
6962
6963 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
6964 NULL, handler, arg, &irq->tag);
6965 if (rc != 0) {
6966 device_printf(sc->dev,
6967 "failed to setup interrupt for rid %d, name %s: %d\n",
6968 rid, name, rc);
6969 } else if (name)
6970 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
6971
6972 return (rc);
6973}
6974
6975static int
6976t4_free_irq(struct adapter *sc, struct irq *irq)
6977{
6978 if (irq->tag)
6979 bus_teardown_intr(sc->dev, irq->res, irq->tag);
6980 if (irq->res)
6981 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
6982
6983 bzero(irq, sizeof(*irq));
6984
6985 return (0);
6986}
6987
6988static void
6989get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
6990{
6991
6992 regs->version = chip_id(sc) | chip_rev(sc) << 10;
6993 t4_get_regs(sc, buf, regs->len);
6994}
6995
6996#define A_PL_INDIR_CMD 0x1f8
6997
6998#define S_PL_AUTOINC 31
6999#define M_PL_AUTOINC 0x1U
7000#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
7001#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
7002
7003#define S_PL_VFID 20
7004#define M_PL_VFID 0xffU
7005#define V_PL_VFID(x) ((x) << S_PL_VFID)
7006#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
7007
7008#define S_PL_ADDR 0
7009#define M_PL_ADDR 0xfffffU
7010#define V_PL_ADDR(x) ((x) << S_PL_ADDR)
7011#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
7012
7013#define A_PL_INDIR_DATA 0x1fc
7014
7015static uint64_t
7016read_vf_stat(struct adapter *sc, u_int vin, int reg)
7017{
7018 u32 stats[2];
7019
7020 if (sc->flags & IS_VF) {
7021 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
7022 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
7023 } else {
7024 mtx_assert(&sc->reg_lock, MA_OWNED);
7025 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
7026 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
7027 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
7028 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
7029 }
7030 return (((uint64_t)stats[1]) << 32 | stats[0]);
7031}
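/*
 * Each MPS VF statistic is a 64-bit counter exposed as a lo/hi register
 * pair 4 bytes apart.  A VF reads the pair directly; the PF goes through
 * the PL_INDIR window, where V_PL_AUTOINC(1) makes the second read of
 * A_PL_INDIR_DATA return reg + 4 automatically.  The halves combine as
 *
 *	val = ((uint64_t)hi << 32) | lo;
 *
 * and reg_lock must be held in the indirect case because the window is
 * shared adapter-wide.
 */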
7032
7033static void
7034t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
7035{
7036
7037#define GET_STAT(name) \
7038 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
7039
7040 if (!(sc->flags & IS_VF))
7041 mtx_lock(&sc->reg_lock);
7042 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
7043 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
7044 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
7045 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
7046 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
7047 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
7048 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
7049 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
7050 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
7051 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
7052 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
7053 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
7054 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
7055 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
7056 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
7057 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
7058 if (!(sc->flags & IS_VF))
7059 mtx_unlock(&sc->reg_lock);
7060
7061#undef GET_STAT
7062}
7063
7064static void
7065t4_clr_vi_stats(struct adapter *sc, u_int vin)
7066{
7067 int reg;
7068
7069 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
7070 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
7071 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
7072 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
7073 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
7074}
7075
7076static void
7077vi_refresh_stats(struct vi_info *vi)
7078{
7079 struct timeval tv;
7080 const struct timeval interval = {0, 250000}; /* 250ms */
7081
7082 mtx_assert(&vi->tick_mtx, MA_OWNED);
7083
7084 if (vi->flags & VI_SKIP_STATS)
7085 return;
7086
7087 getmicrotime(&tv);
7088 timevalsub(&tv, &interval);
7089 if (timevalcmp(&tv, &vi->last_refreshed, <))
7090 return;
7091
7092 t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats);
7093 getmicrotime(&vi->last_refreshed);
7094}
7095
7096static void
7097cxgbe_refresh_stats(struct vi_info *vi)
7098{
7099 u_int i, v, tnl_cong_drops, chan_map;
7100 struct timeval tv;
7101 const struct timeval interval = {0, 250000}; /* 250ms */
7102 struct port_info *pi;
7103 struct adapter *sc;
7104
7105 mtx_assert(&vi->tick_mtx, MA_OWNED);
7106
7107 if (vi->flags & VI_SKIP_STATS)
7108 return;
7109
7110 getmicrotime(&tv);
7111 timevalsub(&tv, &interval);
7112 if (timevalcmp(&tv, &vi->last_refreshed, <))
7113 return;
7114
7115 pi = vi->pi;
7116 sc = vi->adapter;
7117 tnl_cong_drops = 0;
7118 t4_get_port_stats(sc, pi->port_id, &pi->stats);
7119 chan_map = pi->rx_e_chan_map;
7120 while (chan_map) {
7121 i = ffs(chan_map) - 1;
7122 mtx_lock(&sc->reg_lock);
7123 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
7124 A_TP_MIB_TNL_CNG_DROP_0 + i);
7125 mtx_unlock(&sc->reg_lock);
7126 tnl_cong_drops += v;
7127 chan_map &= ~(1 << i);
7128 }
7129 pi->tnl_cong_drops = tnl_cong_drops;
7130 getmicrotime(&vi->last_refreshed);
7131}
7132
7133static void
7134cxgbe_tick(void *arg)
7135{
7136 struct vi_info *vi = arg;
7137
7138 MPASS(IS_MAIN_VI(vi));
7139 mtx_assert(&vi->tick_mtx, MA_OWNED);
7140
7141 cxgbe_refresh_stats(vi);
7142 callout_schedule(&vi->tick, hz);
7143}
7144
7145static void
7146vi_tick(void *arg)
7147{
7148 struct vi_info *vi = arg;
7149
7150 mtx_assert(&vi->tick_mtx, MA_OWNED);
7151
7152 vi_refresh_stats(vi);
7153 callout_schedule(&vi->tick, hz);
7154}
7155
7156/*
7157 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
7158 */
7159static char *caps_decoder[] = {
7160 "\20\001IPMI\002NCSI", /* 0: NBM */
7161 "\20\001PPP\002QFC\003DCBX", /* 1: link */
7162 "\20\001INGRESS\002EGRESS", /* 2: switch */
7163 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
7164 "\006HASHFILTER\007ETHOFLD",
7165 "\20\001TOE", /* 4: TOE */
7166 "\20\001RDDP\002RDMAC", /* 5: RDMA */
7167 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
7168 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
7169 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
7170 "\007T10DIF"
7171 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
7172 "\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
7173 "\004TLS_HW",
7174 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
7175 "\004PO_INITIATOR\005PO_TARGET",
7176};
7177
7178void
7179t4_sysctls(struct adapter *sc)
7180{
7181 struct sysctl_ctx_list *ctx = &sc->ctx;
7182 struct sysctl_oid *oid;
7183 struct sysctl_oid_list *children, *c0;
7184 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
7185
7186 /*
7187 * dev.t4nex.X.
7188 */
7189 oid = device_get_sysctl_tree(sc->dev);
7190 c0 = children = SYSCTL_CHILDREN(oid);
7191
7192 sc->sc_do_rxcopy = 1;
7193 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
7194 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
7195
7196 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
7197 sc->params.nports, "# of ports");
7198
7199 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
7200 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells,
7201 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
7202 "available doorbells");
7203
7204 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
7205 sc->params.vpd.cclk, "core clock frequency (in KHz)");
7206
7207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
7208 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7209 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
7210 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
7211
7212 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
7213 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7214 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
7215 sysctl_int_array, "A", "interrupt holdoff packet counter values");
7216
7217 t4_sge_sysctls(sc, ctx, children);
7218
7219 sc->lro_timeout = 100;
7220 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
7221 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
7222
7223 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
7224 &sc->debug_flags, 0, "flags to enable runtime debugging");
7225
7226 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
7227 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
7228
7229 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
7230 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
7231
7232 if (sc->flags & IS_VF)
7233 return;
7234
7235 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
7236 NULL, chip_rev(sc), "chip hardware revision");
7237
7238 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
7239 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
7240
7241 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
7242 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
7243
7244 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
7245 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
7246
7247 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
7248 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
7249
7250 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
7251 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
7252
7253 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
7254 sc->er_version, 0, "expansion ROM version");
7255
7256 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
7257 sc->bs_version, 0, "bootstrap firmware version");
7258
7259 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
7260 NULL, sc->params.scfg_vers, "serial config version");
7261
7262 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
7263 NULL, sc->params.vpd_vers, "VPD version");
7264
7265 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
7266 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
7267
7268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
7269 sc->cfcsum, "config file checksum");
7270
7271#define SYSCTL_CAP(name, n, text) \
7272 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
7273 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \
7274 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
7275 "available " text " capabilities")
7276
7277 SYSCTL_CAP(nbmcaps, 0, "NBM");
7278 SYSCTL_CAP(linkcaps, 1, "link");
7279 SYSCTL_CAP(switchcaps, 2, "switch");
7280 SYSCTL_CAP(niccaps, 3, "NIC");
7281 SYSCTL_CAP(toecaps, 4, "TCP offload");
7282 SYSCTL_CAP(rdmacaps, 5, "RDMA");
7283 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
7284 SYSCTL_CAP(cryptocaps, 7, "crypto");
7285 SYSCTL_CAP(fcoecaps, 8, "FCoE");
7286#undef SYSCTL_CAP
7287
7288 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
7289 NULL, sc->tids.nftids, "number of filters");
7290
7291 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7292 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7293 sysctl_temperature, "I", "chip temperature (in Celsius)");
7294 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
7295 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7296 sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
7297
7298 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
7299 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7300 sysctl_loadavg, "A",
7301 "microprocessor load averages (debug firmwares only)");
7302
7303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
7304 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
7305 "I", "core Vdd (in mV)");
7306
7307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
7308 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS,
7309 sysctl_cpus, "A", "local CPUs");
7310
7311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
7312 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS,
7313 sysctl_cpus, "A", "preferred CPUs for interrupts");
7314
7315 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
7316 &sc->swintr, 0, "software triggered interrupts");
7317
7318 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset",
7319 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I",
7320 "1 = reset adapter, 0 = zero reset counter");
7321
7322 /*
7323 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
7324 */
7325 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
7326 CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
7327 "logs and miscellaneous information");
7328 children = SYSCTL_CHILDREN(oid);
7329
7330 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
7331 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7332 sysctl_cctrl, "A", "congestion control");
7333
7334 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
7335 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7336 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
7337
7338 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
7339 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7340 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
7341
7342 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
7343 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7344 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
7345
7346 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
7347 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
7348 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
7349
7350 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
7351 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
7352 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
7353
7354 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
7355 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
7356 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
7357
7358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
7359 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7360 sysctl_cim_la, "A", "CIM logic analyzer");
7361
7362 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
7363 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7364 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
7365
7366 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
7367 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7368 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
7369
7370 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
7371 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7372 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
7373
7374 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
7375 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7376 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
7377
7378 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
7379 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7380 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
7381
7382 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
7383 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7384 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
7385
7386 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
7387 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7388 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
7389
7390 if (chip_id(sc) > CHELSIO_T4) {
7391 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
7392 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7393 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7394 "CIM OBQ 6 (SGE0-RX)");
7395
7396 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
7397 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7398 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7399 "CIM OBQ 7 (SGE1-RX)");
7400 }
7401
7402 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
7403 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7404 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
7405
7406 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
7407 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7408 sysctl_cim_qcfg, "A", "CIM queue configuration");
7409
7410 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
7411 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7412 sysctl_cpl_stats, "A", "CPL statistics");
7413
7414 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
7415 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7416 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
7417
7418 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats",
7419 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7420 sysctl_tid_stats, "A", "tid stats");
7421
7422 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
7423 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7424 sysctl_devlog, "A", "firmware's device log");
7425
7426 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
7427 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7428 sysctl_fcoe_stats, "A", "FCoE statistics");
7429
7430 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
7431 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7432 sysctl_hw_sched, "A", "hardware scheduler");
7433
7434 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
7435 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7436 sysctl_l2t, "A", "hardware L2 table");
7437
7438 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
7439 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7440 sysctl_smt, "A", "hardware source MAC table");
7441
7442#ifdef INET6
7443 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
7444 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7445 sysctl_clip, "A", "active CLIP table entries");
7446#endif
7447
7448 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
7449 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7450 sysctl_lb_stats, "A", "loopback statistics");
7451
7452 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
7453 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7454 sysctl_meminfo, "A", "memory regions");
7455
7456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
7457 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7458 chip_id(sc) > CHELSIO_T5 ? sysctl_mps_tcam_t6 : sysctl_mps_tcam,
7459 "A", "MPS TCAM entries");
7460
7461 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
7462 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7463 sysctl_path_mtus, "A", "path MTUs");
7464
7465 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
7466 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7467 sysctl_pm_stats, "A", "PM statistics");
7468
7469 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
7470 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7471 sysctl_rdma_stats, "A", "RDMA statistics");
7472
7473 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
7474 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7475 sysctl_tcp_stats, "A", "TCP statistics");
7476
7477 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
7478 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7479 sysctl_tids, "A", "TID information");
7480
7481 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
7482 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7483 sysctl_tp_err_stats, "A", "TP error statistics");
7484
7485 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats",
7486 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7487 sysctl_tnl_stats, "A", "TP tunnel statistics");
7488
7489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
7490 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7491 sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");
7492
7493 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
7494 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7495 sysctl_tp_la, "A", "TP logic analyzer");
7496
7497 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
7498 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7499 sysctl_tx_rate, "A", "Tx rate");
7500
7501 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
7502 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7503 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
7504
7505 if (chip_id(sc) >= CHELSIO_T5) {
7506 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
7507 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7508 sysctl_wcwr_stats, "A", "write combined work requests");
7509 }
7510
7511#ifdef KERN_TLS
7512 if (is_ktls(sc)) {
7513 /*
7514 * dev.t4nex.0.tls.
7515 */
7516 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
7517 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
7518 children = SYSCTL_CHILDREN(oid);
7519
7520 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
7521 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
7522 "keys in work requests (1) or attempt to store TLS keys "
7523 "in card memory.");
7524 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
7525 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to combine "
7526 "TCB field updates with TLS record work requests.");
7527 }
7528#endif
7529
7530#ifdef TCP_OFFLOAD
7531 if (is_offload(sc)) {
7532 int i;
7533 char s[4];
7534
7535 /*
7536 * dev.t4nex.X.toe.
7537 */
7538 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
7539 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
7540 children = SYSCTL_CHILDREN(oid);
7541
7542 sc->tt.cong_algorithm = -1;
7543 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
7544 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
7545 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
7546 "3 = highspeed)");
7547
7548 sc->tt.sndbuf = -1;
7549 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
7550 &sc->tt.sndbuf, 0, "hardware send buffer");
7551
7552 sc->tt.ddp = 0;
7553 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
7554 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
7555 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
7556 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
7557
7558 sc->tt.rx_coalesce = -1;
7559 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
7560 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
7561
7562 sc->tt.tls = 0;
7563 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
7564 CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
7565 "Inline TLS allowed");
7566
7567 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
7568 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7569 sysctl_tls_rx_ports, "I",
7570 "TCP ports that use inline TLS+TOE RX");
7571
7572 sc->tt.tls_rx_timeout = t4_toe_tls_rx_timeout;
7573 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_timeout",
7574 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7575 sysctl_tls_rx_timeout, "I",
7576 "Timeout in seconds to downgrade TLS sockets to plain TOE");
7577
7578 sc->tt.tx_align = -1;
7579 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
7580 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
7581
7582 sc->tt.tx_zcopy = 0;
7583 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
7584 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
7585 "Enable zero-copy aio_write(2)");
7586
7587 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
7588 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7589 "cop_managed_offloading", CTLFLAG_RW,
7590 &sc->tt.cop_managed_offloading, 0,
7591 "COP (Connection Offload Policy) controls all TOE offload");
7592
7593 sc->tt.autorcvbuf_inc = 16 * 1024;
7594 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
7595 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
7596 "autorcvbuf increment");
7597
7598 sc->tt.update_hc_on_pmtu_change = 1;
7599 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7600 "update_hc_on_pmtu_change", CTLFLAG_RW,
7601 &sc->tt.update_hc_on_pmtu_change, 0,
7602 "Update hostcache entry if the PMTU changes");
7603
7604 sc->tt.iso = 1;
7605 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW,
7606 &sc->tt.iso, 0, "Enable iSCSI segmentation offload");
7607
7608 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
7609 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7610 sysctl_tp_tick, "A", "TP timer tick (us)");
7611
7612 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
7613 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7614 sysctl_tp_tick, "A", "TCP timestamp tick (us)");
7615
7616 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
7617 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7618 sysctl_tp_tick, "A", "DACK tick (us)");
7619
7620 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
7621 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7622 sysctl_tp_dack_timer, "IU", "DACK timer (us)");
7623
7624 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
7625 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7626 A_TP_RXT_MIN, sysctl_tp_timer, "LU",
7627 "Minimum retransmit interval (us)");
7628
7629 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
7630 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7631 A_TP_RXT_MAX, sysctl_tp_timer, "LU",
7632 "Maximum retransmit interval (us)");
7633
7634 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
7635 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7636 A_TP_PERS_MIN, sysctl_tp_timer, "LU",
7637 "Persist timer min (us)");
7638
7639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
7640 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7641 A_TP_PERS_MAX, sysctl_tp_timer, "LU",
7642 "Persist timer max (us)");
7643
7644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
7645 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7646 A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
7647 "Keepalive idle timer (us)");
7648
7649 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
7650 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7651 A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
7652 "Keepalive interval timer (us)");
7653
7654 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
7655 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7656 A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");
7657
7658 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
7659 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7660 A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
7661 "FINWAIT2 timer (us)");
7662
7663 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
7664 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7665 S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
7666 "Number of SYN retransmissions before abort");
7667
7668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
7669 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7670 S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
7671 "Number of retransmissions before abort");
7672
7673 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
7674 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7675 S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
7676 "Number of keepalive probes before abort");
7677
7678 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
7679 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7680 "TOE retransmit backoffs");
7681 children = SYSCTL_CHILDREN(oid);
7682 for (i = 0; i < 16; i++) {
7683 snprintf(s, sizeof(s), "%u", i);
7684 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
7685 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7686 i, sysctl_tp_backoff, "IU",
7687 "TOE retransmit backoff");
7688 }
7689 }
7690#endif
7691}
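/*
 * The nodes created above hang off the nexus device and can be inspected
 * with sysctl(8); hypothetical examples for the first adapter:
 *
 *	sysctl dev.t4nex.0.firmware_version
 *	sysctl dev.t4nex.0.misc.devlog	# CTLFLAG_SKIP: hidden from sysctl -a
 *	sysctl dev.t4nex.0.toe.ddp=1	# only with TCP_OFFLOAD support
 *
 * Per-port and per-VI knobs added by cxgbe_sysctls() and vi_sysctls()
 * appear under the port devices (dev.cxgbe.N, dev.cxl.N, etc.) instead.
 */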
7692
7693void
7694vi_sysctls(struct vi_info *vi)
7695{
7696 struct sysctl_ctx_list *ctx = &vi->ctx;
7697 struct sysctl_oid *oid;
7698 struct sysctl_oid_list *children;
7699
7700 /*
7701 * dev.v?(cxgbe|cxl).X.
7702 */
7703 oid = device_get_sysctl_tree(vi->dev);
7704 children = SYSCTL_CHILDREN(oid);
7705
7706 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
7707 vi->viid, "VI identifier");
7708 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
7709 &vi->nrxq, 0, "# of rx queues");
7710 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
7711 &vi->ntxq, 0, "# of tx queues");
7712 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
7713 &vi->first_rxq, 0, "index of first rx queue");
7714 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
7715 &vi->first_txq, 0, "index of first tx queue");
7716 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
7717 vi->rss_base, "start of RSS indirection table");
7718 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
7719 vi->rss_size, "size of RSS indirection table");
7720
7721 if (IS_MAIN_VI(vi)) {
7722 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
7723 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7724 sysctl_noflowq, "IU",
7725 "Reserve queue 0 for non-flowid packets");
7726 }
7727
7728 if (vi->adapter->flags & IS_VF) {
7729 MPASS(vi->flags & TX_USES_VM_WR);
7730 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
7731 NULL, 1, "use VM work requests for transmit");
7732 } else {
7733 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
7734 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7735 sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
7736 }
7737
7738#ifdef TCP_OFFLOAD
7739 if (vi->nofldrxq != 0) {
7740 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
7741 &vi->nofldrxq, 0,
7742 "# of rx queues for offloaded TCP connections");
7743 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
7744 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
7745 "index of first TOE rx queue");
7746 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
7747 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7748 sysctl_holdoff_tmr_idx_ofld, "I",
7749 "holdoff timer index for TOE queues");
7750 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
7751 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7752 sysctl_holdoff_pktc_idx_ofld, "I",
7753 "holdoff packet counter index for TOE queues");
7754 }
7755#endif
7756#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7757 if (vi->nofldtxq != 0) {
7758 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
7759 &vi->nofldtxq, 0,
7760 "# of tx queues for TOE/ETHOFLD");
7761 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
7762 CTLFLAG_RD, &vi->first_ofld_txq, 0,
7763 "index of first TOE/ETHOFLD tx queue");
7764 }
7765#endif
7766#ifdef DEV_NETMAP
7767 if (vi->nnmrxq != 0) {
7768 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
7769 &vi->nnmrxq, 0, "# of netmap rx queues");
7770 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
7771 &vi->nnmtxq, 0, "# of netmap tx queues");
7772 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
7773 CTLFLAG_RD, &vi->first_nm_rxq, 0,
7774 "index of first netmap rx queue");
7775 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
7776 CTLFLAG_RD, &vi->first_nm_txq, 0,
7777 "index of first netmap tx queue");
7778 }
7779#endif
7780
7781 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
7782 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7783 sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
7784 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
7785 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7786 sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");
7787
7788 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
7789 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7790 sysctl_qsize_rxq, "I", "rx queue size");
7791 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
7792 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7793 sysctl_qsize_txq, "I", "tx queue size");
7794}
7795
7796static void
7797cxgbe_sysctls(struct port_info *pi)
7798{
7799 struct sysctl_ctx_list *ctx = &pi->ctx;
7800 struct sysctl_oid *oid;
7801 struct sysctl_oid_list *children, *children2;
7802 struct adapter *sc = pi->adapter;
7803 int i;
7804 char name[16];
7805 static char *tc_flags = {"\20\1USER"};
7806
7807 /*
7808 * dev.cxgbe.X.
7809 */
7810 oid = device_get_sysctl_tree(pi->dev);
7811 children = SYSCTL_CHILDREN(oid);
7812
7813 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
7814 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7815 sysctl_linkdnrc, "A", "reason why link is down");
7816 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
7817 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7818 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7819 sysctl_btphy, "I", "PHY temperature (in Celsius)");
7820 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
7821 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1,
7822 sysctl_btphy, "I", "PHY firmware version");
7823 }
7824
7825 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
7826 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7827 sysctl_pause_settings, "A",
7828 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
7829 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec",
7830 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A",
7831 "FEC in use on the link");
7832 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec",
7833 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7834 sysctl_requested_fec, "A",
7835 "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
7836 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
7837 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A",
7838 "FEC recommended by the cable/transceiver");
7839 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
7840 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7841 sysctl_autoneg, "I",
7842 "autonegotiation (-1 = not supported)");
7843 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec",
7844 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7845 sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config");
7846
7847 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD,
7848 &pi->link_cfg.requested_caps, 0, "L1 config requested by driver");
7849 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
7850 &pi->link_cfg.pcaps, 0, "port capabilities");
7851 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
7852 &pi->link_cfg.acaps, 0, "advertised capabilities");
7853 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
7854 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
7855
7856 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
7857 port_top_speed(pi), "max speed (in Gbps)");
7858 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
7859 pi->mps_bg_map, "MPS buffer group map");
7860 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
7861 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
7862 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_c_chan", CTLFLAG_RD, NULL,
7863 pi->rx_c_chan, "TP rx c-channel");
7864
7865 if (sc->flags & IS_VF)
7866 return;
7867
7868 /*
7869 * dev.(cxgbe|cxl).X.tc.
7870 */
7871 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
7872 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7873 "Tx scheduler traffic classes (cl_rl)");
7874 children2 = SYSCTL_CHILDREN(oid);
7875 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
7876 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
7877 "pktsize for per-flow cl-rl (0 means up to the driver)");
7878 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
7879 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
7880 "burstsize for per-flow cl-rl (0 means up to the driver)");
7881 for (i = 0; i < sc->params.nsched_cls; i++) {
7882 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
7883
7884 snprintf(name, sizeof(name), "%d", i);
7885 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
7886 SYSCTL_CHILDREN(oid), OID_AUTO, name,
7887 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
7888 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state",
7889 CTLFLAG_RD, &tc->state, 0, "current state");
7890 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
7891 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags,
7892 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
7893 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
7894 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
7895 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
7896 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7897 (pi->port_id << 16) | i, sysctl_tc_params, "A",
7898 "traffic class parameters");
7899 }
7900
7901 /*
7902 * dev.cxgbe.X.stats.
7903 */
7904 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
7905 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
7906 children = SYSCTL_CHILDREN(oid);
7907 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
7908 &pi->tx_parse_error, 0,
7909 "# of tx packets with invalid length or # of segments");
7910
7911#define T4_REGSTAT(name, stat, desc) \
7912 SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
7913 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
7914 (is_t4(sc) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L) : \
7915 T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L)), \
7916 sysctl_handle_t4_reg64, "QU", desc)
7917
7918/* We get these from port_stats and they may be stale by up to 1s */
7919#define T4_PORTSTAT(name, desc) \
7920 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
7921 &pi->stats.name, desc)
7922
7923 T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
7924 T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
7925 T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
7926 T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
7927 T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
7928 T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
7929 T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
7930 T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
7931 T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
7932 T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
7933 T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
7934 T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
7935 T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
7936 T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
7937 T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
7938 T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
7939 T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
7940 T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
7941 T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
7942 T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
7943 T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
7944 T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
7945 T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
7946
7947 T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
7948 T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
7949 T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
7950 T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
7951 T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
7952 T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
7953 T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
7954 if (is_t6(sc)) {
7955 T4_PORTSTAT(rx_fcs_err,
7956 "# of frames received with bad FCS since last link up");
7957 } else {
7958 T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
7959 "# of frames received with bad FCS");
7960 }
7961 T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
7962 T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
7963 T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
7964 T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
7965 T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
7966 T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
7967 T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
7968 T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
7969 T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
7970 T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
7971 T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
7972 T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
7973 T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
7974 T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
7975 T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
7976 T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
7977 T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
7978 T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
7979 T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
7980
7981 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
7982 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
7983 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
7984 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
7985 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
7986 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
7987 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
7988 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
7989
7990#undef T4_REGSTAT
7991#undef T4_PORTSTAT
7992}
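/*
 * Editorial sketch (not part of the driver): the nodes registered above can
 * be read from userland with sysctlbyname(3).  The instance name
 * "dev.cxgbe.0" below is an assumption for illustration.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int speed;
 *		size_t len = sizeof(speed);
 *
 *		if (sysctlbyname("dev.cxgbe.0.max_speed", &speed, &len,
 *		    NULL, 0) == 0)
 *			printf("max speed: %d Gbps\n", speed);
 *		return (0);
 *	}
 */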
7993
7994static int
7995sysctl_int_array(SYSCTL_HANDLER_ARGS)
7996{
7997 int rc, *i, space = 0;
7998 struct sbuf sb;
7999
8000 sbuf_new_for_sysctl(&sb, NULL, 64, req);
8001 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
8002 if (space)
8003 sbuf_printf(&sb, " ");
8004 sbuf_printf(&sb, "%d", *i);
8005 space = 1;
8006 }
8007 rc = sbuf_finish(&sb);
8008 sbuf_delete(&sb);
8009 return (rc);
8010}
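/*
 * Editorial note: sysctl_int_array expects arg1 to point at the first
 * element and arg2 to hold the array's size in bytes, counting down by
 * sizeof(int) per iteration.  A hypothetical registration would look like:
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "some_array",
 *	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, arr,
 *	    n * sizeof(int), sysctl_int_array, "A", "array of ints");
 */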
8011
8012static int
8013sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
8014{
8015 int rc;
8016 struct sbuf *sb;
8017
8018 rc = sysctl_wire_old_buffer(req, 0);
8019 if (rc != 0)
8020		return (rc);
8021
8022 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8023 if (sb == NULL)
8024 return (ENOMEM);
8025
8026 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
8027 rc = sbuf_finish(sb);
8028 sbuf_delete(sb);
8029
8030 return (rc);
8031}
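/*
 * Editorial note: the "%b" conversion prints the value and then names its
 * set bits.  The control string (arg1) begins with the output base
 * (\20 == decimal 16, i.e. hex) followed by <bit number><name> pairs with
 * bits numbered from 1.  For example, with arg1 = "\20\1RX\2TX\3AUTO" and
 * a value of 0x5, the handler renders "5<RX,AUTO>".
 */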
8032
8033static int
8034sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
8035{
8036 int rc;
8037 struct sbuf *sb;
8038
8039 rc = sysctl_wire_old_buffer(req, 0);
8040 if (rc != 0)
8041		return (rc);
8042
8043 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8044 if (sb == NULL)
8045 return (ENOMEM);
8046
8047 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
8048 rc = sbuf_finish(sb);
8049 sbuf_delete(sb);
8050
8051 return (rc);
8052}
8053
8054static int
8055sysctl_btphy(SYSCTL_HANDLER_ARGS)
8056{
8057 struct port_info *pi = arg1;
8058 int op = arg2;
8059 struct adapter *sc = pi->adapter;
8060 u_int v;
8061 int rc;
8062
8063 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
8064 if (rc)
8065 return (rc);
8066 if (hw_off_limits(sc))
8067 rc = ENXIO;
8068 else {
8069 /* XXX: magic numbers */
8070 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e,
8071 op ? 0x20 : 0xc820, &v);
8072 }
8073 end_synchronized_op(sc, 0);
8074 if (rc)
8075 return (rc);
8076 if (op == 0)
8077 v /= 256;
8078
8079 rc = sysctl_handle_int(oidp, &v, 0, req);
8080 return (rc);
8081}
8082
8083static int
8084sysctl_noflowq(SYSCTL_HANDLER_ARGS)
8085{
8086 struct vi_info *vi = arg1;
8087 int rc, val;
8088
8089 val = vi->rsrv_noflowq;
8090 rc = sysctl_handle_int(oidp, &val, 0, req);
8091 if (rc != 0 || req->newptr == NULL)
8092 return (rc);
8093
8094 if ((val >= 1) && (vi->ntxq > 1))
8095 vi->rsrv_noflowq = 1;
8096 else
8097 vi->rsrv_noflowq = 0;
8098
8099 return (rc);
8100}
8101
8102static int
8103sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
8104{
8105 struct vi_info *vi = arg1;
8106 struct adapter *sc = vi->adapter;
8107 int rc, val, i;
8108
8109 MPASS(!(sc->flags & IS_VF));
8110
8111 val = vi->flags & TX_USES_VM_WR ? 1 : 0;
8112 rc = sysctl_handle_int(oidp, &val, 0, req);
8113 if (rc != 0 || req->newptr == NULL)
8114 return (rc);
8115
8116 if (val != 0 && val != 1)
8117 return (EINVAL);
8118
8120 "t4txvm");
8121 if (rc)
8122 return (rc);
8123 if (hw_off_limits(sc))
8124 rc = ENXIO;
8125 else if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
8126 /*
8127 * We don't want parse_pkt to run with one setting (VF or PF)
8128 * and then eth_tx to see a different setting but still use
8129 * stale information calculated by parse_pkt.
8130 */
8131 rc = EBUSY;
8132 } else {
8133 struct port_info *pi = vi->pi;
8134 struct sge_txq *txq;
8135 uint32_t ctrl0;
8136 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr;
8137
8138 if (val) {
8139 vi->flags |= TX_USES_VM_WR;
8140 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO;
8141 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8142 V_TXPKT_INTF(pi->tx_chan));
8143 if (!(sc->flags & IS_VF))
8144 npkt--;
8145 } else {
8146 vi->flags &= ~TX_USES_VM_WR;
8147 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
8148 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8149 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
8150 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
8151 }
8152 for_each_txq(vi, i, txq) {
8153 txq->cpl_ctrl0 = ctrl0;
8154 txq->txp.max_npkt = npkt;
8155 }
8156 }
8157	end_synchronized_op(sc, LOCK_HELD);
8158	return (rc);
8159}
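/*
 * Editorial note: the two cpl_ctrl0 encodings above pick the CPL_TX_PKT_XT
 * header that the TX path prepends.  With TX_USES_VM_WR set the driver
 * emits VM work requests, which the firmware can inspect (e.g. for
 * VM-to-VM switching), so the header omits the explicit PF/VF routing
 * fields; a PF in this mode also fits one fewer packet per combined work
 * request, hence the npkt-- above.
 */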
8160
8161static int
8162sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
8163{
8164 struct vi_info *vi = arg1;
8165 struct adapter *sc = vi->adapter;
8166 int idx, rc, i;
8167 struct sge_rxq *rxq;
8168 uint8_t v;
8169
8170 idx = vi->tmr_idx;
8171
8172 rc = sysctl_handle_int(oidp, &idx, 0, req);
8173 if (rc != 0 || req->newptr == NULL)
8174 return (rc);
8175
8176 if (idx < 0 || idx >= SGE_NTIMERS)
8177 return (EINVAL);
8178
8180 "t4tmr");
8181 if (rc)
8182 return (rc);
8183
8184 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
8185 for_each_rxq(vi, i, rxq) {
8186#ifdef atomic_store_rel_8
8187 atomic_store_rel_8(&rxq->iq.intr_params, v);
8188#else
8189 rxq->iq.intr_params = v;
8190#endif
8191 }
8192 vi->tmr_idx = idx;
8193
8194	end_synchronized_op(sc, LOCK_HELD);
8195	return (0);
8196}
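/*
 * Editorial note: intr_params packs the holdoff timer index and the
 * packet-count enable bit into one byte that the interrupt path reads
 * without taking a lock, which is why the update above prefers an atomic
 * release store whenever the platform provides atomic_store_rel_8.
 */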
8197
8198static int
8199sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
8200{
8201 struct vi_info *vi = arg1;
8202 struct adapter *sc = vi->adapter;
8203 int idx, rc;
8204
8205 idx = vi->pktc_idx;
8206
8207 rc = sysctl_handle_int(oidp, &idx, 0, req);
8208 if (rc != 0 || req->newptr == NULL)
8209 return (rc);
8210
8211 if (idx < -1 || idx >= SGE_NCOUNTERS)
8212 return (EINVAL);
8213
8215 "t4pktc");
8216 if (rc)
8217 return (rc);
8218
8219 if (vi->flags & VI_INIT_DONE)
8220 rc = EBUSY; /* cannot be changed once the queues are created */
8221 else
8222 vi->pktc_idx = idx;
8223
8224	end_synchronized_op(sc, LOCK_HELD);
8225	return (rc);
8226}
8227
8228static int
8229sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
8230{
8231 struct vi_info *vi = arg1;
8232 struct adapter *sc = vi->adapter;
8233 int qsize, rc;
8234
8235 qsize = vi->qsize_rxq;
8236
8237 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8238 if (rc != 0 || req->newptr == NULL)
8239 return (rc);
8240
8241 if (qsize < 128 || (qsize & 7))
8242 return (EINVAL);
8243
8245 "t4rxqs");
8246 if (rc)
8247 return (rc);
8248
8249 if (vi->flags & VI_INIT_DONE)
8250 rc = EBUSY; /* cannot be changed once the queues are created */
8251 else
8252 vi->qsize_rxq = qsize;
8253
8254	end_synchronized_op(sc, LOCK_HELD);
8255	return (rc);
8256}
8257
8258static int
8259sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
8260{
8261 struct vi_info *vi = arg1;
8262 struct adapter *sc = vi->adapter;
8263 int qsize, rc;
8264
8265 qsize = vi->qsize_txq;
8266
8267 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8268 if (rc != 0 || req->newptr == NULL)
8269 return (rc);
8270
8271 if (qsize < 128 || qsize > 65536)
8272 return (EINVAL);
8273
8275 "t4txqs");
8276 if (rc)
8277 return (rc);
8278
8279 if (vi->flags & VI_INIT_DONE)
8280 rc = EBUSY; /* cannot be changed once the queues are created */
8281 else
8282 vi->qsize_txq = qsize;
8283
8284	end_synchronized_op(sc, LOCK_HELD);
8285	return (rc);
8286}
8287
8288static int
8289sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
8290{
8291 struct port_info *pi = arg1;
8292 struct adapter *sc = pi->adapter;
8293 struct link_config *lc = &pi->link_cfg;
8294 int rc;
8295
8296 if (req->newptr == NULL) {
8297 struct sbuf *sb;
8298 static char *bits = "\20\1RX\2TX\3AUTO";
8299
8300 rc = sysctl_wire_old_buffer(req, 0);
8301 if (rc != 0)
8302			return (rc);
8303
8304 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8305 if (sb == NULL)
8306 return (ENOMEM);
8307
8308 if (lc->link_ok) {
8309 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
8310 (lc->requested_fc & PAUSE_AUTONEG), bits);
8311 } else {
8312 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
8313 PAUSE_RX | PAUSE_AUTONEG), bits);
8314 }
8315 rc = sbuf_finish(sb);
8316 sbuf_delete(sb);
8317 } else {
8318 char s[2];
8319 int n;
8320
8321 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
8322 PAUSE_AUTONEG));
8323 s[1] = 0;
8324
8325 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8326 if (rc != 0)
8327			return (rc);
8328
8329 if (s[1] != 0)
8330 return (EINVAL);
8331 if (s[0] < '0' || s[0] > '9')
8332 return (EINVAL); /* not a number */
8333 n = s[0] - '0';
8334 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
8335 return (EINVAL); /* some other bit is set too */
8336
8337 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8338 "t4PAUSE");
8339 if (rc)
8340 return (rc);
8341 if (!hw_off_limits(sc)) {
8342 PORT_LOCK(pi);
8343			lc->requested_fc = n;
8344			fixup_link_config(pi);
8345			if (pi->up_vis > 0)
8346				rc = apply_link_config(pi);
8347			set_current_media(pi);
8348			PORT_UNLOCK(pi);
8349 }
8350 end_synchronized_op(sc, 0);
8351 }
8352
8353 return (rc);
8354}
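/*
 * Editorial example: the handler accepts a single digit encoding the
 * requested bits, so (device name assumed for illustration)
 * "sysctl dev.cxgbe.0.pause_settings=3" requests PAUSE_RX | PAUSE_TX with
 * pause autonegotiation left off.
 */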
8355
8356static int
8357sysctl_link_fec(SYSCTL_HANDLER_ARGS)
8358{
8359 struct port_info *pi = arg1;
8360 struct link_config *lc = &pi->link_cfg;
8361 int rc;
8362 struct sbuf *sb;
8363 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
8364
8365 rc = sysctl_wire_old_buffer(req, 0);
8366 if (rc != 0)
8367		return (rc);
8368
8369 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8370 if (sb == NULL)
8371 return (ENOMEM);
8372 if (lc->link_ok)
8373 sbuf_printf(sb, "%b", lc->fec, bits);
8374 else
8375 sbuf_printf(sb, "no link");
8376 rc = sbuf_finish(sb);
8377 sbuf_delete(sb);
8378
8379 return (rc);
8380}
8381
8382static int
8383sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
8384{
8385 struct port_info *pi = arg1;
8386 struct adapter *sc = pi->adapter;
8387 struct link_config *lc = &pi->link_cfg;
8388 int rc;
8389 int8_t old;
8390
8391 if (req->newptr == NULL) {
8392 struct sbuf *sb;
8393 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
8394 "\5RSVD3\6auto\7module";
8395
8396 rc = sysctl_wire_old_buffer(req, 0);
8397 if (rc != 0)
8398			return (rc);
8399
8400 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8401 if (sb == NULL)
8402 return (ENOMEM);
8403
8404 sbuf_printf(sb, "%b", lc->requested_fec, bits);
8405 rc = sbuf_finish(sb);
8406 sbuf_delete(sb);
8407 } else {
8408 char s[8];
8409 int n;
8410
8411 snprintf(s, sizeof(s), "%d",
8412		    lc->requested_fec == FEC_AUTO ? -1 :
8413		    lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
8414
8415 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8416 if (rc != 0)
8417			return (rc);
8418
8419 n = strtol(&s[0], NULL, 0);
8420 if (n < 0 || n & FEC_AUTO)
8421 n = FEC_AUTO;
8422 else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
8423 return (EINVAL);/* some other bit is set too */
8424
8425 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8426 "t4reqf");
8427 if (rc)
8428 return (rc);
8429 PORT_LOCK(pi);
8430 old = lc->requested_fec;
8431 if (n == FEC_AUTO)
8432 lc->requested_fec = FEC_AUTO;
8433 else if (n == 0 || n == FEC_NONE)
8434 lc->requested_fec = FEC_NONE;
8435 else {
8436			if ((lc->pcaps |
8437			    V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
8438			    lc->pcaps) {
8439 rc = ENOTSUP;
8440 goto done;
8441 }
8442			lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
8443			    FEC_MODULE);
8444 }
8445		if (!hw_off_limits(sc)) {
8446			fixup_link_config(pi);
8447			if (pi->up_vis > 0) {
8448 rc = apply_link_config(pi);
8449 if (rc != 0) {
8450 lc->requested_fec = old;
8451 if (rc == FW_EPROTO)
8452 rc = ENOTSUP;
8453 }
8454 }
8455 }
8456done:
8457 PORT_UNLOCK(pi);
8458 end_synchronized_op(sc, 0);
8459 }
8460
8461 return (rc);
8462}
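/*
 * Editorial example: requested_fec takes the same numeric encoding it
 * reports: writing "1" requests RS-FEC, "4" requests no FEC, and "-1" (or
 * any value with the FEC_AUTO bit) hands the choice back to the
 * driver/firmware.  Bits outside M_FW_PORT_CAP32_FEC | FEC_MODULE are
 * rejected with EINVAL.
 */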
8463
8464static int
8465sysctl_module_fec(SYSCTL_HANDLER_ARGS)
8466{
8467 struct port_info *pi = arg1;
8468 struct adapter *sc = pi->adapter;
8469 struct link_config *lc = &pi->link_cfg;
8470 int rc;
8471 int8_t fec;
8472 struct sbuf *sb;
8473 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
8474
8475 rc = sysctl_wire_old_buffer(req, 0);
8476 if (rc != 0)
8477 return (rc);
8478
8479 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8480 if (sb == NULL)
8481 return (ENOMEM);
8482
8483 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
8484 rc = EBUSY;
8485 goto done;
8486 }
8487 if (hw_off_limits(sc)) {
8488 rc = ENXIO;
8489 goto done;
8490 }
8491 PORT_LOCK(pi);
8492 if (pi->up_vis == 0) {
8493 /*
8494 * If all the interfaces are administratively down the firmware
8495 * does not report transceiver changes. Refresh port info here.
8496 * This is the only reason we have a synchronized op in this
8497 * function. Just PORT_LOCK would have been enough otherwise.
8498 */
8499		t4_update_port_info(pi);
8500	}
8501
8502 fec = lc->fec_hint;
8503 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
8504 !fec_supported(lc->pcaps)) {
8505 sbuf_printf(sb, "n/a");
8506 } else {
8507 if (fec == 0)
8508 fec = FEC_NONE;
8509 sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
8510 }
8511 rc = sbuf_finish(sb);
8512 PORT_UNLOCK(pi);
8513done:
8514 sbuf_delete(sb);
8515 end_synchronized_op(sc, 0);
8516
8517 return (rc);
8518}
8519
8520static int
8521sysctl_autoneg(SYSCTL_HANDLER_ARGS)
8522{
8523 struct port_info *pi = arg1;
8524 struct adapter *sc = pi->adapter;
8525 struct link_config *lc = &pi->link_cfg;
8526 int rc, val;
8527
8528 if (lc->pcaps & FW_PORT_CAP32_ANEG)
8529 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
8530 else
8531 val = -1;
8532 rc = sysctl_handle_int(oidp, &val, 0, req);
8533 if (rc != 0 || req->newptr == NULL)
8534 return (rc);
8535 if (val == 0)
8536 val = AUTONEG_DISABLE;
8537 else if (val == 1)
8538 val = AUTONEG_ENABLE;
8539 else
8540 val = AUTONEG_AUTO;
8541
8542 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8543 "t4aneg");
8544 if (rc)
8545 return (rc);
8546 PORT_LOCK(pi);
8547 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
8548 rc = ENOTSUP;
8549 goto done;
8550 }
8551 lc->requested_aneg = val;
8552	if (!hw_off_limits(sc)) {
8553		fixup_link_config(pi);
8554		if (pi->up_vis > 0)
8555			rc = apply_link_config(pi);
8556		set_current_media(pi);
8557	}
8558done:
8559 PORT_UNLOCK(pi);
8560 end_synchronized_op(sc, 0);
8561 return (rc);
8562}
8563
8564static int
8565sysctl_force_fec(SYSCTL_HANDLER_ARGS)
8566{
8567 struct port_info *pi = arg1;
8568 struct adapter *sc = pi->adapter;
8569 struct link_config *lc = &pi->link_cfg;
8570 int rc, val;
8571
8572 val = lc->force_fec;
8573 MPASS(val >= -1 && val <= 1);
8574 rc = sysctl_handle_int(oidp, &val, 0, req);
8575 if (rc != 0 || req->newptr == NULL)
8576 return (rc);
8577 if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC))
8578 return (ENOTSUP);
8579 if (val < -1 || val > 1)
8580 return (EINVAL);
8581
8582 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff");
8583 if (rc)
8584 return (rc);
8585 PORT_LOCK(pi);
8586 lc->force_fec = val;
8587	if (!hw_off_limits(sc)) {
8588		fixup_link_config(pi);
8589		if (pi->up_vis > 0)
8590 rc = apply_link_config(pi);
8591 }
8592 PORT_UNLOCK(pi);
8593 end_synchronized_op(sc, 0);
8594 return (rc);
8595}
8596
8597static int
8598sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
8599{
8600 struct adapter *sc = arg1;
8601 int rc, reg = arg2;
8602 uint64_t val;
8603
8604 mtx_lock(&sc->reg_lock);
8605 if (hw_off_limits(sc))
8606 rc = ENXIO;
8607 else {
8608 rc = 0;
8609 val = t4_read_reg64(sc, reg);
8610 }
8611 mtx_unlock(&sc->reg_lock);
8612 if (rc == 0)
8613 rc = sysctl_handle_64(oidp, &val, 0, req);
8614 return (rc);
8615}
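/*
 * Editorial note: this is the handler behind the T4_REGSTAT() entries
 * registered above; arg2 carries the register address computed at
 * registration time, so every read returns a fresh 64-bit MPS counter
 * value from hardware (taken under reg_lock).
 */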
8616
8617static int
8618sysctl_temperature(SYSCTL_HANDLER_ARGS)
8619{
8620 struct adapter *sc = arg1;
8621 int rc, t;
8622 uint32_t param, val;
8623
8624 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
8625 if (rc)
8626 return (rc);
8627 if (hw_off_limits(sc))
8628 rc = ENXIO;
8629	else {
8630		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8631		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8632		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
8633		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
8634 }
8635 end_synchronized_op(sc, 0);
8636 if (rc)
8637 return (rc);
8638
8639 /* unknown is returned as 0 but we display -1 in that case */
8640 t = val == 0 ? -1 : val;
8641
8642 rc = sysctl_handle_int(oidp, &t, 0, req);
8643 return (rc);
8644}
8645
8646static int
8647sysctl_vdd(SYSCTL_HANDLER_ARGS)
8648{
8649 struct adapter *sc = arg1;
8650 int rc;
8651 uint32_t param, val;
8652
8653 if (sc->params.core_vdd == 0) {
8654 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8655 "t4vdd");
8656 if (rc)
8657 return (rc);
8658 if (hw_off_limits(sc))
8659 rc = ENXIO;
8660		else {
8661			param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8662			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8663			    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
8664			rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
8665 &param, &val);
8666 }
8667 end_synchronized_op(sc, 0);
8668 if (rc)
8669 return (rc);
8670 sc->params.core_vdd = val;
8671 }
8672
8673 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
8674}
8675
8676static int
8677sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
8678{
8679 struct adapter *sc = arg1;
8680 int rc, v;
8681 uint32_t param, val;
8682
8683 v = sc->sensor_resets;
8684 rc = sysctl_handle_int(oidp, &v, 0, req);
8685 if (rc != 0 || req->newptr == NULL || v <= 0)
8686 return (rc);
8687
8688 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
8689 chip_id(sc) < CHELSIO_T5)
8690 return (ENOTSUP);
8691
8692 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
8693 if (rc)
8694 return (rc);
8695 if (hw_off_limits(sc))
8696 rc = ENXIO;
8697	else {
8698		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8699		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8700		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR);
8701		val = 1;
8702 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
8703 }
8704 end_synchronized_op(sc, 0);
8705 if (rc == 0)
8706 sc->sensor_resets++;
8707 return (rc);
8708}
8709
8710static int
8711sysctl_loadavg(SYSCTL_HANDLER_ARGS)
8712{
8713 struct adapter *sc = arg1;
8714 struct sbuf *sb;
8715 int rc;
8716 uint32_t param, val;
8717
8718 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
8719 if (rc)
8720 return (rc);
8721 if (hw_off_limits(sc))
8722 rc = ENXIO;
8723	else {
8724		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8725		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
8726		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
8727 }
8728 end_synchronized_op(sc, 0);
8729 if (rc)
8730 return (rc);
8731
8732 rc = sysctl_wire_old_buffer(req, 0);
8733 if (rc != 0)
8734 return (rc);
8735
8736 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8737 if (sb == NULL)
8738 return (ENOMEM);
8739
8740 if (val == 0xffffffff) {
8741 /* Only debug and custom firmwares report load averages. */
8742 sbuf_printf(sb, "not available");
8743 } else {
8744 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
8745 (val >> 16) & 0xff);
8746 }
8747 rc = sbuf_finish(sb);
8748 sbuf_delete(sb);
8749
8750 return (rc);
8751}
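/*
 * Editorial example: the firmware packs the three load averages into one
 * 32-bit word, one byte each, so val == 0x00030201 is reported as "1 2 3".
 * A val of 0xffffffff means the firmware does not track load averages.
 */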
8752
8753static int
8754sysctl_cctrl(SYSCTL_HANDLER_ARGS)
8755{
8756 struct adapter *sc = arg1;
8757 struct sbuf *sb;
8758 int rc, i;
8759 uint16_t incr[NMTUS][NCCTRL_WIN];
8760 static const char *dec_fac[] = {
8761 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
8762 "0.9375"
8763 };
8764
8765 rc = sysctl_wire_old_buffer(req, 0);
8766 if (rc != 0)
8767 return (rc);
8768
8769 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8770 if (sb == NULL)
8771 return (ENOMEM);
8772
8773 mtx_lock(&sc->reg_lock);
8774 if (hw_off_limits(sc))
8775 rc = ENXIO;
8776 else
8777 t4_read_cong_tbl(sc, incr);
8778 mtx_unlock(&sc->reg_lock);
8779 if (rc)
8780 goto done;
8781
8782 for (i = 0; i < NCCTRL_WIN; ++i) {
8783 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
8784 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
8785 incr[5][i], incr[6][i], incr[7][i]);
8786 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
8787 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
8788 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
8789 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
8790 }
8791
8792 rc = sbuf_finish(sb);
8793done:
8794 sbuf_delete(sb);
8795 return (rc);
8796}
8797
8798static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
8799 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
8800 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
8801 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
8802};
8803
8804static int
8805sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
8806{
8807 struct adapter *sc = arg1;
8808 struct sbuf *sb;
8809 int rc, i, n, qid = arg2;
8810 uint32_t *buf, *p;
8811 char *qtype;
8812 u_int cim_num_obq = sc->chip_params->cim_num_obq;
8813
8814 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
8815 ("%s: bad qid %d\n", __func__, qid));
8816
8817 if (qid < CIM_NUM_IBQ) {
8818 /* inbound queue */
8819 qtype = "IBQ";
8820 n = 4 * CIM_IBQ_SIZE;
8821 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8822 mtx_lock(&sc->reg_lock);
8823 if (hw_off_limits(sc))
8824 rc = -ENXIO;
8825 else
8826 rc = t4_read_cim_ibq(sc, qid, buf, n);
8827 mtx_unlock(&sc->reg_lock);
8828 } else {
8829 /* outbound queue */
8830 qtype = "OBQ";
8831 qid -= CIM_NUM_IBQ;
8832 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
8833 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8834 mtx_lock(&sc->reg_lock);
8835 if (hw_off_limits(sc))
8836 rc = -ENXIO;
8837 else
8838 rc = t4_read_cim_obq(sc, qid, buf, n);
8839 mtx_unlock(&sc->reg_lock);
8840 }
8841
8842 if (rc < 0) {
8843 rc = -rc;
8844 goto done;
8845 }
8846 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
8847
8848 rc = sysctl_wire_old_buffer(req, 0);
8849 if (rc != 0)
8850 goto done;
8851
8852 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
8853 if (sb == NULL) {
8854 rc = ENOMEM;
8855 goto done;
8856 }
8857
8858 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
8859 for (i = 0, p = buf; i < n; i += 16, p += 4)
8860 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
8861 p[2], p[3]);
8862
8863 rc = sbuf_finish(sb);
8864 sbuf_delete(sb);
8865done:
8866 free(buf, M_CXGBE);
8867 return (rc);
8868}
8869
8870static void
8871sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
8872{
8873 uint32_t *p;
8874
8875 sbuf_printf(sb, "Status Data PC%s",
8876 cfg & F_UPDBGLACAPTPCONLY ? "" :
8877 " LS0Stat LS0Addr LS0Data");
8878
8879 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
8880 if (cfg & F_UPDBGLACAPTPCONLY) {
8881 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
8882 p[6], p[7]);
8883 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
8884 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
8885 p[4] & 0xff, p[5] >> 8);
8886 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
8887 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8888 p[1] & 0xf, p[2] >> 4);
8889 } else {
8890 sbuf_printf(sb,
8891 "\n %02x %x%07x %x%07x %08x %08x "
8892 "%08x%08x%08x%08x",
8893 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8894 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
8895 p[6], p[7]);
8896 }
8897 }
8898}
8899
8900static void
8901sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
8902{
8903 uint32_t *p;
8904
8905 sbuf_printf(sb, "Status Inst Data PC%s",
8906 cfg & F_UPDBGLACAPTPCONLY ? "" :
8907 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
8908
8909 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
8910 if (cfg & F_UPDBGLACAPTPCONLY) {
8911 sbuf_printf(sb, "\n %02x %08x %08x %08x",
8912 p[3] & 0xff, p[2], p[1], p[0]);
8913 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
8914 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
8915 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
8916 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
8917 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
8918 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
8919 p[6] >> 16);
8920 } else {
8921 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
8922 "%08x %08x %08x %08x %08x %08x",
8923 (p[9] >> 16) & 0xff,
8924 p[9] & 0xffff, p[8] >> 16,
8925 p[8] & 0xffff, p[7] >> 16,
8926 p[7] & 0xffff, p[6] >> 16,
8927 p[2], p[1], p[0], p[5], p[4], p[3]);
8928 }
8929 }
8930}
8931
8932static int
8933sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
8934{
8935 uint32_t cfg, *buf;
8936 int rc;
8937
8938 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
8939 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
8940 M_ZERO | flags);
8941 if (buf == NULL)
8942 return (ENOMEM);
8943
8944 mtx_lock(&sc->reg_lock);
8945 if (hw_off_limits(sc))
8946 rc = ENXIO;
8947 else {
8948 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8949 if (rc == 0)
8950 rc = -t4_cim_read_la(sc, buf, NULL);
8951 }
8952 mtx_unlock(&sc->reg_lock);
8953 if (rc == 0) {
8954 if (chip_id(sc) < CHELSIO_T6)
8955 sbuf_cim_la4(sc, sb, buf, cfg);
8956 else
8957 sbuf_cim_la6(sc, sb, buf, cfg);
8958 }
8959 free(buf, M_CXGBE);
8960 return (rc);
8961}
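/*
 * Editorial note: the flags argument exists because sbuf_cim_la has two
 * kinds of callers: the sysctl handler may sleep and passes M_WAITOK,
 * while dump_cimla runs on error-handling paths and passes M_NOWAIT so a
 * failed allocation degrades to a missing dump rather than a sleep.
 */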
8962
8963static int
8964sysctl_cim_la(SYSCTL_HANDLER_ARGS)
8965{
8966 struct adapter *sc = arg1;
8967 struct sbuf *sb;
8968 int rc;
8969
8970 rc = sysctl_wire_old_buffer(req, 0);
8971 if (rc != 0)
8972 return (rc);
8973 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8974 if (sb == NULL)
8975 return (ENOMEM);
8976
8977 rc = sbuf_cim_la(sc, sb, M_WAITOK);
8978 if (rc == 0)
8979 rc = sbuf_finish(sb);
8980 sbuf_delete(sb);
8981 return (rc);
8982}
8983
8984static void
8985dump_cim_regs(struct adapter *sc)
8986{
8987	log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n",
8988	    device_get_nameunit(sc->dev),
8989	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
8990	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
8991	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2),
8992	    t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN),
8993	    t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA));
8994	log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n",
8995	    device_get_nameunit(sc->dev),
8996	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800),
8997	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800),
8998	    t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2 + 0x800),
8999	    t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN + 0x800),
9000	    t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA + 0x800));
9001}
9002
9003static void
9004dump_cimla(struct adapter *sc)
9005{
9006 struct sbuf sb;
9007 int rc;
9008
9009 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9010 log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n",
9011 device_get_nameunit(sc->dev));
9012 return;
9013 }
9014 rc = sbuf_cim_la(sc, &sb, M_NOWAIT);
9015 if (rc == 0) {
9016 rc = sbuf_finish(&sb);
9017 if (rc == 0) {
9018 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n",
9019 device_get_nameunit(sc->dev), sbuf_data(&sb));
9020 }
9021 }
9022 sbuf_delete(&sb);
9023}
9024
9025void
9026t4_os_cim_err(struct adapter *sc)
9027{
9028 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
9029}
9030
9031static int
9032sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
9033{
9034 struct adapter *sc = arg1;
9035 u_int i;
9036 struct sbuf *sb;
9037 uint32_t *buf, *p;
9038 int rc;
9039
9040 rc = sysctl_wire_old_buffer(req, 0);
9041 if (rc != 0)
9042 return (rc);
9043
9044 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9045 if (sb == NULL)
9046 return (ENOMEM);
9047
9048 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
9049 M_ZERO | M_WAITOK);
9050
9051 mtx_lock(&sc->reg_lock);
9052 if (hw_off_limits(sc))
9053 rc = ENXIO;
9054 else
9055 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
9056 mtx_unlock(&sc->reg_lock);
9057 if (rc)
9058 goto done;
9059
9060 p = buf;
9061 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9062 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
9063 p[1], p[0]);
9064 }
9065
9066 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
9067 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9068 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
9069 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
9070 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
9071 (p[1] >> 2) | ((p[2] & 3) << 30),
9072 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
9073 p[0] & 1);
9074 }
9075 rc = sbuf_finish(sb);
9076done:
9077 sbuf_delete(sb);
9078 free(buf, M_CXGBE);
9079 return (rc);
9080}
9081
9082static int
9083sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
9084{
9085 struct adapter *sc = arg1;
9086 u_int i;
9087 struct sbuf *sb;
9088 uint32_t *buf, *p;
9089 int rc;
9090
9091 rc = sysctl_wire_old_buffer(req, 0);
9092 if (rc != 0)
9093 return (rc);
9094
9095 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9096 if (sb == NULL)
9097 return (ENOMEM);
9098
9099 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
9100 M_ZERO | M_WAITOK);
9101
9102 mtx_lock(&sc->reg_lock);
9103 if (hw_off_limits(sc))
9104 rc = ENXIO;
9105 else
9106 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
9107 mtx_unlock(&sc->reg_lock);
9108 if (rc)
9109 goto done;
9110
9111 p = buf;
9112 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
9113 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9114 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
9115 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
9116 p[4], p[3], p[2], p[1], p[0]);
9117 }
9118
9119 sbuf_printf(sb, "\n\nCntl ID Data");
9120 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9121 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
9122 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
9123 }
9124
9125 rc = sbuf_finish(sb);
9126done:
9127 sbuf_delete(sb);
9128 free(buf, M_CXGBE);
9129 return (rc);
9130}
9131
9132static int
9133sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
9134{
9135 struct adapter *sc = arg1;
9136 struct sbuf *sb;
9137 int rc, i;
9138 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9139 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9140 uint16_t thres[CIM_NUM_IBQ];
9141 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
9142 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
9143 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
9144
9145 cim_num_obq = sc->chip_params->cim_num_obq;
9146 if (is_t4(sc)) {
9147 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
9148 obq_rdaddr = A_UP_OBQ_0_REALADDR;
9149 } else {
9150 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
9151 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
9152 }
9153 nq = CIM_NUM_IBQ + cim_num_obq;
9154
9155 mtx_lock(&sc->reg_lock);
9156 if (hw_off_limits(sc))
9157 rc = ENXIO;
9158 else {
9159 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
9160 if (rc == 0) {
9161 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq,
9162 obq_wr);
9163 if (rc == 0)
9164 t4_read_cimq_cfg(sc, base, size, thres);
9165 }
9166 }
9167 mtx_unlock(&sc->reg_lock);
9168 if (rc)
9169 return (rc);
9170
9171 rc = sysctl_wire_old_buffer(req, 0);
9172 if (rc != 0)
9173 return (rc);
9174
9175 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
9176 if (sb == NULL)
9177 return (ENOMEM);
9178
9179 sbuf_printf(sb,
9180 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
9181
9182 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
9183 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
9184 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
9185 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9186 G_QUEREMFLITS(p[2]) * 16);
9187 for ( ; i < nq; i++, p += 4, wr += 2)
9188 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
9189 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
9190 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9191 G_QUEREMFLITS(p[2]) * 16);
9192
9193 rc = sbuf_finish(sb);
9194 sbuf_delete(sb);
9195
9196 return (rc);
9197}
9198
9199static int
9200sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
9201{
9202 struct adapter *sc = arg1;
9203 struct sbuf *sb;
9204 int rc;
9205 struct tp_cpl_stats stats;
9206
9207 rc = sysctl_wire_old_buffer(req, 0);
9208 if (rc != 0)
9209 return (rc);
9210
9211 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9212 if (sb == NULL)
9213 return (ENOMEM);
9214
9215 mtx_lock(&sc->reg_lock);
9216 if (hw_off_limits(sc))
9217 rc = ENXIO;
9218 else
9219 t4_tp_get_cpl_stats(sc, &stats, 0);
9220 mtx_unlock(&sc->reg_lock);
9221 if (rc)
9222 goto done;
9223
9224 if (sc->chip_params->nchan > 2) {
9225 sbuf_printf(sb, " channel 0 channel 1"
9226 " channel 2 channel 3");
9227 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
9228 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
9229 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
9230 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
9231 } else {
9232 sbuf_printf(sb, " channel 0 channel 1");
9233 sbuf_printf(sb, "\nCPL requests: %10u %10u",
9234 stats.req[0], stats.req[1]);
9235 sbuf_printf(sb, "\nCPL responses: %10u %10u",
9236 stats.rsp[0], stats.rsp[1]);
9237 }
9238
9239 rc = sbuf_finish(sb);
9240done:
9241 sbuf_delete(sb);
9242 return (rc);
9243}
9244
9245static int
9246sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
9247{
9248 struct adapter *sc = arg1;
9249 struct sbuf *sb;
9250 int rc;
9251 struct tp_usm_stats stats;
9252
9253 rc = sysctl_wire_old_buffer(req, 0);
9254 if (rc != 0)
9255		return (rc);
9256
9257 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9258 if (sb == NULL)
9259 return (ENOMEM);
9260
9261 mtx_lock(&sc->reg_lock);
9262 if (hw_off_limits(sc))
9263 rc = ENXIO;
9264 else
9265 t4_get_usm_stats(sc, &stats, 1);
9266 mtx_unlock(&sc->reg_lock);
9267 if (rc == 0) {
9268 sbuf_printf(sb, "Frames: %u\n", stats.frames);
9269 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
9270 sbuf_printf(sb, "Drops: %u", stats.drops);
9271 rc = sbuf_finish(sb);
9272 }
9273 sbuf_delete(sb);
9274
9275 return (rc);
9276}
9277
9278static int
9279sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
9280{
9281 struct adapter *sc = arg1;
9282 struct sbuf *sb;
9283 int rc;
9284 struct tp_tid_stats stats;
9285
9286 rc = sysctl_wire_old_buffer(req, 0);
9287 if (rc != 0)
9288		return (rc);
9289
9290 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9291 if (sb == NULL)
9292 return (ENOMEM);
9293
9294 mtx_lock(&sc->reg_lock);
9295 if (hw_off_limits(sc))
9296 rc = ENXIO;
9297 else
9298 t4_tp_get_tid_stats(sc, &stats, 1);
9299 mtx_unlock(&sc->reg_lock);
9300 if (rc == 0) {
9301 sbuf_printf(sb, "Delete: %u\n", stats.del);
9302 sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
9303 sbuf_printf(sb, "Active: %u\n", stats.act);
9304 sbuf_printf(sb, "Passive: %u", stats.pas);
9305 rc = sbuf_finish(sb);
9306 }
9307 sbuf_delete(sb);
9308
9309 return (rc);
9310}
9311
9312static const char * const devlog_level_strings[] = {
9313 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
9314 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
9315 [FW_DEVLOG_LEVEL_ERR] = "ERR",
9316 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
9317 [FW_DEVLOG_LEVEL_INFO] = "INFO",
9318 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
9319};
9320
9321static const char * const devlog_facility_strings[] = {
9322 [FW_DEVLOG_FACILITY_CORE] = "CORE",
9323 [FW_DEVLOG_FACILITY_CF] = "CF",
9324 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
9325 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
9326 [FW_DEVLOG_FACILITY_RES] = "RES",
9327 [FW_DEVLOG_FACILITY_HW] = "HW",
9328 [FW_DEVLOG_FACILITY_FLR] = "FLR",
9329 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
9330 [FW_DEVLOG_FACILITY_PHY] = "PHY",
9331 [FW_DEVLOG_FACILITY_MAC] = "MAC",
9332 [FW_DEVLOG_FACILITY_PORT] = "PORT",
9333 [FW_DEVLOG_FACILITY_VI] = "VI",
9334 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
9335 [FW_DEVLOG_FACILITY_ACL] = "ACL",
9336 [FW_DEVLOG_FACILITY_TM] = "TM",
9337 [FW_DEVLOG_FACILITY_QFC] = "QFC",
9338 [FW_DEVLOG_FACILITY_DCB] = "DCB",
9339 [FW_DEVLOG_FACILITY_ETH] = "ETH",
9340 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
9341 [FW_DEVLOG_FACILITY_RI] = "RI",
9342 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
9343 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
9344 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
9345 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
9346 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
9347};
9348
9349static int
9350sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
9351{
9352 int i, j, rc, nentries, first = 0;
9353 struct devlog_params *dparams = &sc->params.devlog;
9354 struct fw_devlog_e *buf, *e;
9355 uint64_t ftstamp = UINT64_MAX;
9356
9357 if (dparams->addr == 0)
9358 return (ENXIO);
9359
9360 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
9361 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
9362 if (buf == NULL)
9363 return (ENOMEM);
9364
9365 mtx_lock(&sc->reg_lock);
9366 if (hw_off_limits(sc))
9367 rc = ENXIO;
9368 else
9369 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
9370 dparams->size);
9371 mtx_unlock(&sc->reg_lock);
9372 if (rc != 0)
9373 goto done;
9374
9375 nentries = dparams->size / sizeof(struct fw_devlog_e);
9376 for (i = 0; i < nentries; i++) {
9377 e = &buf[i];
9378
9379 if (e->timestamp == 0)
9380 break; /* end */
9381
9382 e->timestamp = be64toh(e->timestamp);
9383 e->seqno = be32toh(e->seqno);
9384 for (j = 0; j < 8; j++)
9385 e->params[j] = be32toh(e->params[j]);
9386
9387 if (e->timestamp < ftstamp) {
9388 ftstamp = e->timestamp;
9389 first = i;
9390 }
9391 }
9392
9393 if (buf[first].timestamp == 0)
9394 goto done; /* nothing in the log */
9395
9396 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
9397 "Seq#", "Tstamp", "Level", "Facility", "Message");
9398
9399 i = first;
9400 do {
9401 e = &buf[i];
9402 if (e->timestamp == 0)
9403 break; /* end */
9404
9405 sbuf_printf(sb, "%10d %15ju %8s %8s ",
9406 e->seqno, e->timestamp,
9407 (e->level < nitems(devlog_level_strings) ?
9408 devlog_level_strings[e->level] : "UNKNOWN"),
9409 (e->facility < nitems(devlog_facility_strings) ?
9410 devlog_facility_strings[e->facility] : "UNKNOWN"));
9411 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
9412 e->params[2], e->params[3], e->params[4],
9413 e->params[5], e->params[6], e->params[7]);
9414
9415 if (++i == nentries)
9416 i = 0;
9417 } while (i != first);
9418done:
9419 free(buf, M_CXGBE);
9420 return (rc);
9421}
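/*
 * Editorial note: the firmware devlog is a ring with no explicit head
 * pointer, so the scan above reconstructs one: the entry with the smallest
 * timestamp is the oldest ("first"), and printing proceeds circularly from
 * there until an all-zero timestamp (an unused slot) or a full lap.
 */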
9422
9423static int
9424sysctl_devlog(SYSCTL_HANDLER_ARGS)
9425{
9426 struct adapter *sc = arg1;
9427 int rc;
9428 struct sbuf *sb;
9429
9430 rc = sysctl_wire_old_buffer(req, 0);
9431 if (rc != 0)
9432 return (rc);
9433 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9434 if (sb == NULL)
9435 return (ENOMEM);
9436
9437 rc = sbuf_devlog(sc, sb, M_WAITOK);
9438 if (rc == 0)
9439 rc = sbuf_finish(sb);
9440 sbuf_delete(sb);
9441 return (rc);
9442}
9443
9444static void
9445dump_devlog(struct adapter *sc)
9446{
9447 int rc;
9448 struct sbuf sb;
9449
9450 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9451 log(LOG_DEBUG, "%s: failed to generate devlog dump.\n",
9452 device_get_nameunit(sc->dev));
9453 return;
9454 }
9455 rc = sbuf_devlog(sc, &sb, M_NOWAIT);
9456 if (rc == 0) {
9457 rc = sbuf_finish(&sb);
9458 if (rc == 0) {
9459 log(LOG_DEBUG, "%s: device log follows.\n%s",
9460 device_get_nameunit(sc->dev), sbuf_data(&sb));
9461 }
9462 }
9463 sbuf_delete(&sb);
9464}
9465
9466static int
9467sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
9468{
9469 struct adapter *sc = arg1;
9470 struct sbuf *sb;
9471 int rc;
9472 struct tp_fcoe_stats stats[MAX_NCHAN];
9473 int i, nchan = sc->chip_params->nchan;
9474
9475 rc = sysctl_wire_old_buffer(req, 0);
9476 if (rc != 0)
9477 return (rc);
9478
9479 mtx_lock(&sc->reg_lock);
9480 if (hw_off_limits(sc))
9481 rc = ENXIO;
9482 else {
9483 for (i = 0; i < nchan; i++)
9484 t4_get_fcoe_stats(sc, i, &stats[i], 1);
9485 }
9486 mtx_unlock(&sc->reg_lock);
9487 if (rc != 0)
9488 return (rc);
9489
9490 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9491 if (sb == NULL)
9492 return (ENOMEM);
9493
9494 if (nchan > 2) {
9495 sbuf_printf(sb, " channel 0 channel 1"
9496 " channel 2 channel 3");
9497 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
9498 stats[0].octets_ddp, stats[1].octets_ddp,
9499 stats[2].octets_ddp, stats[3].octets_ddp);
9500 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
9501 stats[0].frames_ddp, stats[1].frames_ddp,
9502 stats[2].frames_ddp, stats[3].frames_ddp);
9503 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
9504 stats[0].frames_drop, stats[1].frames_drop,
9505 stats[2].frames_drop, stats[3].frames_drop);
9506 } else {
9507 sbuf_printf(sb, " channel 0 channel 1");
9508 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
9509 stats[0].octets_ddp, stats[1].octets_ddp);
9510 sbuf_printf(sb, "\nframesDDP: %16u %16u",
9511 stats[0].frames_ddp, stats[1].frames_ddp);
9512 sbuf_printf(sb, "\nframesDrop: %16u %16u",
9513 stats[0].frames_drop, stats[1].frames_drop);
9514 }
9515
9516 rc = sbuf_finish(sb);
9517 sbuf_delete(sb);
9518
9519 return (rc);
9520}
9521
9522static int
9523sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
9524{
9525 struct adapter *sc = arg1;
9526 struct sbuf *sb;
9527 int rc, i;
9528 unsigned int map, kbps, ipg, mode;
9529 unsigned int pace_tab[NTX_SCHED];
9530
9531 rc = sysctl_wire_old_buffer(req, 0);
9532 if (rc != 0)
9533 return (rc);
9534
9535 sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
9536 if (sb == NULL)
9537 return (ENOMEM);
9538
9539 mtx_lock(&sc->reg_lock);
9540 if (hw_off_limits(sc)) {
9541 rc = ENXIO;
9542 goto done;
9543 }
9544
9545	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
9546	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
9547	t4_read_pace_tbl(sc, pace_tab);
9548
9549 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
9550 "Class IPG (0.1 ns) Flow IPG (us)");
9551
9552 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
9553 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
9554 sbuf_printf(sb, "\n %u %-5s %u ", i,
9555 (mode & (1 << i)) ? "flow" : "class", map & 3);
9556 if (kbps)
9557 sbuf_printf(sb, "%9u ", kbps);
9558 else
9559 sbuf_printf(sb, " disabled ");
9560
9561 if (ipg)
9562 sbuf_printf(sb, "%13u ", ipg);
9563 else
9564 sbuf_printf(sb, " disabled ");
9565
9566 if (pace_tab[i])
9567 sbuf_printf(sb, "%10u", pace_tab[i]);
9568 else
9569 sbuf_printf(sb, " disabled");
9570 }
9571 rc = sbuf_finish(sb);
9572done:
9573 mtx_unlock(&sc->reg_lock);
9574 sbuf_delete(sb);
9575 return (rc);
9576}
9577
9578static int
9579sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
9580{
9581 struct adapter *sc = arg1;
9582 struct sbuf *sb;
9583 int rc, i, j;
9584 uint64_t *p0, *p1;
9585 struct lb_port_stats s[2];
9586 static const char *stat_name[] = {
9587 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
9588 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
9589 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
9590 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
9591 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
9592 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
9593 "BG2FramesTrunc:", "BG3FramesTrunc:"
9594 };
9595
9596 rc = sysctl_wire_old_buffer(req, 0);
9597 if (rc != 0)
9598 return (rc);
9599
9600 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9601 if (sb == NULL)
9602 return (ENOMEM);
9603
9604 memset(s, 0, sizeof(s));
9605
9606 for (i = 0; i < sc->chip_params->nchan; i += 2) {
9607 mtx_lock(&sc->reg_lock);
9608 if (hw_off_limits(sc))
9609 rc = ENXIO;
9610 else {
9611 t4_get_lb_stats(sc, i, &s[0]);
9612 t4_get_lb_stats(sc, i + 1, &s[1]);
9613 }
9614 mtx_unlock(&sc->reg_lock);
9615 if (rc != 0)
9616 break;
9617
9618 p0 = &s[0].octets;
9619 p1 = &s[1].octets;
9620 sbuf_printf(sb, "%s Loopback %u"
9621 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
9622
9623 for (j = 0; j < nitems(stat_name); j++)
9624 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
9625 *p0++, *p1++);
9626 }
9627
9628 rc = sbuf_finish(sb);
9629 sbuf_delete(sb);
9630
9631 return (rc);
9632}
9633
9634static int
9635sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
9636{
9637 int rc = 0;
9638 struct port_info *pi = arg1;
9639 struct link_config *lc = &pi->link_cfg;
9640 struct sbuf *sb;
9641
9642 rc = sysctl_wire_old_buffer(req, 0);
9643 if (rc != 0)
9644		return (rc);
9645 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
9646 if (sb == NULL)
9647 return (ENOMEM);
9648
9649 if (lc->link_ok || lc->link_down_rc == 255)
9650 sbuf_printf(sb, "n/a");
9651 else
9652 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
9653
9654 rc = sbuf_finish(sb);
9655 sbuf_delete(sb);
9656
9657 return (rc);
9658}
9659
9660struct mem_desc {
9661 unsigned int base;
9662 unsigned int limit;
9663 unsigned int idx;
9664};
9665
9666static int
9667mem_desc_cmp(const void *a, const void *b)
9668{
9669 return ((const struct mem_desc *)a)->base -
9670 ((const struct mem_desc *)b)->base;
9671}
9672
9673static void
9674mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
9675 unsigned int to)
9676{
9677 unsigned int size;
9678
9679 if (from == to)
9680 return;
9681
9682 size = to - from + 1;
9683 if (size == 0)
9684 return;
9685
9686 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
9687 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
9688}
9689
9690static int
9691sysctl_meminfo(SYSCTL_HANDLER_ARGS)
9692{
9693 struct adapter *sc = arg1;
9694 struct sbuf *sb;
9695 int rc, i, n;
9696 uint32_t lo, hi, used, alloc;
9697 static const char *memory[] = {
9698 "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
9699 };
9700 static const char *region[] = {
9701 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
9702 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
9703 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
9704 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
9705 "RQUDP region:", "PBL region:", "TXPBL region:",
9706 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
9707 "On-chip queues:", "TLS keys:",
9708 };
9709 struct mem_desc avail[4];
9710 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
9711 struct mem_desc *md = mem;
9712
9713 rc = sysctl_wire_old_buffer(req, 0);
9714 if (rc != 0)
9715 return (rc);
9716
9717 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9718 if (sb == NULL)
9719 return (ENOMEM);
9720
9721 for (i = 0; i < nitems(mem); i++) {
9722 mem[i].limit = 0;
9723 mem[i].idx = i;
9724 }
9725
9726 mtx_lock(&sc->reg_lock);
9727 if (hw_off_limits(sc)) {
9728 rc = ENXIO;
9729 goto done;
9730 }
9731
9732 /* Find and sort the populated memory ranges */
9733 i = 0;
9734	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
9735	if (lo & F_EDRAM0_ENABLE) {
9736 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
9737 avail[i].base = G_EDRAM0_BASE(hi) << 20;
9738 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
9739 avail[i].idx = 0;
9740 i++;
9741 }
9742 if (lo & F_EDRAM1_ENABLE) {
9743 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
9744 avail[i].base = G_EDRAM1_BASE(hi) << 20;
9745 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
9746 avail[i].idx = 1;
9747 i++;
9748 }
9749 if (lo & F_EXT_MEM_ENABLE) {
9750		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
9751		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
9752 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
9753 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
9754 i++;
9755 }
9756 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
9757		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9758		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9759 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9760 avail[i].idx = 4;
9761 i++;
9762 }
9763 if (is_t6(sc) && lo & F_HMA_MUX) {
9764		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9765		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9766 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9767 avail[i].idx = 5;
9768 i++;
9769 }
9770 MPASS(i <= nitems(avail));
9771 if (!i) /* no memory available */
9772 goto done;
9773 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
9774
9775 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
9776 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
9777 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
9778 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
9779 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
9780 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
9781	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
9782	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
9783	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
9784
9785	/* the next few have explicit upper bounds */
9786	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
9787	md->limit = md->base - 1 +
9788	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
9789	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
9790	md++;
9791
9792	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
9793	md->limit = md->base - 1 +
9794	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
9795	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
9796	md++;
9797
9798 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
9799		if (chip_id(sc) <= CHELSIO_T5)
9800			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
9801		else
9802			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
9803		md->limit = 0;
9804 } else {
9805 md->base = 0;
9806 md->idx = nitems(region); /* hide it */
9807 }
9808 md++;
9809
9810#define ulp_region(reg) \
9811 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
9812 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
9813
9814 ulp_region(RX_ISCSI);
9815 ulp_region(RX_TDDP);
9816 ulp_region(TX_TPT);
9817 ulp_region(RX_STAG);
9818 ulp_region(RX_RQ);
9819 ulp_region(RX_RQUDP);
9820 ulp_region(RX_PBL);
9821 ulp_region(TX_PBL);
9822#undef ulp_region
9823
9824 md->base = 0;
9825 if (is_t4(sc))
9826 md->idx = nitems(region);
9827 else {
9828 uint32_t size = 0;
9829 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
9830 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
9831
9832 if (is_t5(sc)) {
9833 if (sge_ctrl & F_VFIFO_ENABLE)
9834 size = fifo_size << 2;
9835 } else
9836 size = G_T6_DBVFIFO_SIZE(fifo_size) << 6;
9837
9838 if (size) {
9839			md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR);
9840			md->limit = md->base + size - 1;
9841 } else
9842 md->idx = nitems(region);
9843 }
9844 md++;
9845
9846	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
9847	md->limit = 0;
9848 md++;
9849	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
9850	md->limit = 0;
9851 md++;
9852
9853 md->base = sc->vres.ocq.start;
9854 if (sc->vres.ocq.size)
9855 md->limit = md->base + sc->vres.ocq.size - 1;
9856 else
9857 md->idx = nitems(region); /* hide it */
9858 md++;
9859
9860 md->base = sc->vres.key.start;
9861 if (sc->vres.key.size)
9862 md->limit = md->base + sc->vres.key.size - 1;
9863 else
9864 md->idx = nitems(region); /* hide it */
9865 md++;
9866
9867 /* add any address-space holes, there can be up to 3 */
9868 for (n = 0; n < i - 1; n++)
9869 if (avail[n].limit < avail[n + 1].base)
9870 (md++)->base = avail[n].limit;
9871 if (avail[n].limit)
9872 (md++)->base = avail[n].limit;
9873
9874 n = md - mem;
9875 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
9876
9877 for (lo = 0; lo < i; lo++)
9878 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
9879 avail[lo].limit - 1);
9880
9881 sbuf_printf(sb, "\n");
9882 for (i = 0; i < n; i++) {
9883 if (mem[i].idx >= nitems(region))
9884 continue; /* skip holes */
9885 if (!mem[i].limit)
9886 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
9887 mem_region_show(sb, region[mem[i].idx], mem[i].base,
9888 mem[i].limit);
9889 }
9890
9891 sbuf_printf(sb, "\n");
9892	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
9893	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
9894 mem_region_show(sb, "uP RAM:", lo, hi);
9895
9896	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
9897	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
9898 mem_region_show(sb, "uP Extmem2:", lo, hi);
9899
9901 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
9902 G_PMRXMAXPAGE(lo),
9903	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
9904	    (lo & F_PMRXNUMCHN) ? 2 : 1);
9905
9908 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
9909 G_PMTXMAXPAGE(lo),
9910 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
9911 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
9912 sbuf_printf(sb, "%u p-structs\n",
9913	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
9914
9915 for (i = 0; i < 4; i++) {
9916 if (chip_id(sc) > CHELSIO_T5)
9917 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
9918 else
9919 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
9920 if (is_t5(sc)) {
9921 used = G_T5_USED(lo);
9922 alloc = G_T5_ALLOC(lo);
9923 } else {
9924 used = G_USED(lo);
9925 alloc = G_ALLOC(lo);
9926 }
9927 /* For T6 these are MAC buffer groups */
9928 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
9929 i, used, alloc);
9930 }
9931 for (i = 0; i < sc->chip_params->nchan; i++) {
9932 if (chip_id(sc) > CHELSIO_T5)
9933 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
9934 else
9935 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
9936 if (is_t5(sc)) {
9937 used = G_T5_USED(lo);
9938 alloc = G_T5_ALLOC(lo);
9939 } else {
9940 used = G_USED(lo);
9941 alloc = G_ALLOC(lo);
9942 }
9943 /* For T6 these are MAC buffer groups */
9944 sbuf_printf(sb,
9945 "\nLoopback %d using %u pages out of %u allocated",
9946 i, used, alloc);
9947 }
9948done:
9949 mtx_unlock(&sc->reg_lock);
9950 if (rc == 0)
9951 rc = sbuf_finish(sb);
9952 sbuf_delete(sb);
9953 return (rc);
9954}
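/*
 * Editorial note: sysctl_meminfo collects each region's base address into
 * mem[], sorts by base, and lets a region without an explicit limit run to
 * the next entry's base - 1.  The entries appended for address-space holes
 * are never printed (their idx is out of range for region[]); they exist
 * only to cap the limit of the region that precedes them.
 */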
9955
9956static inline void
9957tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
9958{
9959 *mask = x | y;
9960 y = htobe64(y);
9961 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
9962}
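/*
 * Editorial note (assumed TCAM encoding): each cell is an (x, y) bit pair:
 * x == y == 0 is "don't care", y == 1 matches a one, x == 1 matches a
 * zero, and x == y == 1 marks an unused entry (callers skip rows with
 * tcamx & tcamy set).  Hence mask = x | y is the set of compared bits and
 * y alone carries the match value copied into addr.
 */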
9963
9964static int
9965sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
9966{
9967 struct adapter *sc = arg1;
9968 struct sbuf *sb;
9969 int rc, i;
9970
9971 MPASS(chip_id(sc) <= CHELSIO_T5);
9972
9973 rc = sysctl_wire_old_buffer(req, 0);
9974 if (rc != 0)
9975 return (rc);
9976
9977 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9978 if (sb == NULL)
9979 return (ENOMEM);
9980
9981 sbuf_printf(sb,
9982 "Idx Ethernet address Mask Vld Ports PF"
9983 " VF Replication P0 P1 P2 P3 ML");
9984 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
9985 uint64_t tcamx, tcamy, mask;
9986 uint32_t cls_lo, cls_hi;
9987 uint8_t addr[ETHER_ADDR_LEN];
9988
9989 mtx_lock(&sc->reg_lock);
9990 if (hw_off_limits(sc))
9991 rc = ENXIO;
9992 else {
9993 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
9994 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
9995 }
9996 mtx_unlock(&sc->reg_lock);
9997 if (rc != 0)
9998 break;
9999 if (tcamx & tcamy)
10000 continue;
10001 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10002 mtx_lock(&sc->reg_lock);
10003 if (hw_off_limits(sc))
10004 rc = ENXIO;
10005 else {
10006 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10007 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10008 }
10009 mtx_unlock(&sc->reg_lock);
10010 if (rc != 0)
10011 break;
10012 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
10013 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
10014 addr[3], addr[4], addr[5], (uintmax_t)mask,
10015 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
10016 G_PORTMAP(cls_hi), G_PF(cls_lo),
10017 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
10018
10019 if (cls_lo & F_REPLICATE) {
10020 struct fw_ldst_cmd ldst_cmd;
10021
10022 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10023 ldst_cmd.op_to_addrspace =
10024 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10025 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10026 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10027 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10028 ldst_cmd.u.mps.rplc.fid_idx =
10029 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10030 V_FW_LDST_CMD_IDX(i));
10031
10032 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10033 "t4mps");
10034 if (rc)
10035 break;
10036 if (hw_off_limits(sc))
10037 rc = ENXIO;
10038 else
10039 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10040 sizeof(ldst_cmd), &ldst_cmd);
10041 end_synchronized_op(sc, 0);
10042 if (rc != 0)
10043 break;
10044 else {
10045 sbuf_printf(sb, " %08x %08x %08x %08x",
10046 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10047 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10048 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10049 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10050 }
10051 } else
10052 sbuf_printf(sb, "%36s", "");
10053
10054 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
10055 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
10056 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
10057 }
10058
10059 if (rc)
10060 (void) sbuf_finish(sb);
10061 else
10062 rc = sbuf_finish(sb);
10063 sbuf_delete(sb);
10064
10065 return (rc);
10066}
10067
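/*
 * On T6 the TCAM is read indirectly: a control word selecting the
 * entry (and the y or x vector, via CTLXYBITSEL) is written to
 * A_MPS_CLS_TCAM_DATA2_CTL and the RDATA registers then supply the
 * DMACH/VIDL bits plus the lookup-type and port metadata decoded
 * below.
 */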
10068static int
10069sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
10070{
10071 struct adapter *sc = arg1;
10072 struct sbuf *sb;
10073 int rc, i;
10074
10075 MPASS(chip_id(sc) > CHELSIO_T5);
10076
10077 rc = sysctl_wire_old_buffer(req, 0);
10078 if (rc != 0)
10079 return (rc);
10080
10081 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10082 if (sb == NULL)
10083 return (ENOMEM);
10084
10085 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
10086 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
10087 " Replication"
10088 " P0 P1 P2 P3 ML\n");
10089
10090 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
10091 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
10092 uint16_t ivlan;
10093 uint64_t tcamx, tcamy, val, mask;
10094 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
10095 uint8_t addr[ETHER_ADDR_LEN];
10096
10097 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
10098 if (i < 256)
10099 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
10100 else
10101 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
10102 mtx_lock(&sc->reg_lock);
10103 if (hw_off_limits(sc))
10104 rc = ENXIO;
10105 else {
10106 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10107 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10108 tcamy = G_DMACH(val) << 32;
10109 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10110 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10111 }
10112 mtx_unlock(&sc->reg_lock);
10113 if (rc != 0)
10114 break;
10115
10116 lookup_type = G_DATALKPTYPE(data2);
10117 port_num = G_DATAPORTNUM(data2);
10118 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10119 /* Inner header VNI */
10120 vniy = ((data2 & F_DATAVIDH2) << 23) |
10121 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10122 dip_hit = data2 & F_DATADIPHIT;
10123 vlan_vld = 0;
10124 } else {
10125 vniy = 0;
10126 dip_hit = 0;
10127 vlan_vld = data2 & F_DATAVIDH2;
10128 ivlan = G_VIDL(val);
10129 }
10130
10131 ctl |= V_CTLXYBITSEL(1);
10132 mtx_lock(&sc->reg_lock);
10133 if (hw_off_limits(sc))
10134 rc = ENXIO;
10135 else {
10136 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10137 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10138 tcamx = G_DMACH(val) << 32;
10139 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10140 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10141 }
10142 mtx_unlock(&sc->reg_lock);
10143 if (rc != 0)
10144 break;
10145
10146 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10147 /* Inner header VNI mask */
10148 vnix = ((data2 & F_DATAVIDH2) << 23) |
10149 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10150 } else
10151 vnix = 0;
10152
10153 if (tcamx & tcamy)
10154 continue;
10155 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10156
10157 mtx_lock(&sc->reg_lock);
10158 if (hw_off_limits(sc))
10159 rc = ENXIO;
10160 else {
10161 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10162 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10163 }
10164 mtx_unlock(&sc->reg_lock);
10165 if (rc != 0)
10166 break;
10167
10168 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10169 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10170 "%012jx %06x %06x - - %3c"
10171 " I %4x %3c %#x%4u%4d", i, addr[0],
10172 addr[1], addr[2], addr[3], addr[4], addr[5],
10173 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
10174 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10175 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10176 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10177 } else {
10178 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10179 "%012jx - - ", i, addr[0], addr[1],
10180 addr[2], addr[3], addr[4], addr[5],
10181 (uintmax_t)mask);
10182
10183 if (vlan_vld)
10184 sbuf_printf(sb, "%4u Y ", ivlan);
10185 else
10186 sbuf_printf(sb, " - N ");
10187
10188 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
10189 lookup_type ? 'I' : 'O', port_num,
10190 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10191 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10192 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10193 }
10194
10195
10196 if (cls_lo & F_T6_REPLICATE) {
10197 struct fw_ldst_cmd ldst_cmd;
10198
10199 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10200 ldst_cmd.op_to_addrspace =
10201 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10202 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10203 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10204 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10205 ldst_cmd.u.mps.rplc.fid_idx =
10206 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10207 V_FW_LDST_CMD_IDX(i));
10208
10209 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10210 "t6mps");
10211 if (rc)
10212 break;
10213 if (hw_off_limits(sc))
10214 rc = ENXIO;
10215 else
10216 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10217 sizeof(ldst_cmd), &ldst_cmd);
10218 end_synchronized_op(sc, 0);
10219 if (rc != 0)
10220 break;
10221 else {
10222 sbuf_printf(sb, " %08x %08x %08x %08x"
10223 " %08x %08x %08x %08x",
10224 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
10225 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
10226 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
10227 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
10228 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10229 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10230 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10231 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10232 }
10233 } else
10234 sbuf_printf(sb, "%72s", "");
10235
10236 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
10237 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
10238 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
10239 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
10240 }
10241
10242 if (rc)
10243 (void) sbuf_finish(sb);
10244 else
10245 rc = sbuf_finish(sb);
10246 sbuf_delete(sb);
10247
10248 return (rc);
10249}
10250
10251static int
10252sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
10253{
10254 struct adapter *sc = arg1;
10255 struct sbuf *sb;
10256 int rc;
10257 uint16_t mtus[NMTUS];
10258
10259 rc = sysctl_wire_old_buffer(req, 0);
10260 if (rc != 0)
10261 return (rc);
10262
10263 mtx_lock(&sc->reg_lock);
10264 if (hw_off_limits(sc))
10265 rc = ENXIO;
10266 else
10267 t4_read_mtu_tbl(sc, mtus, NULL);
10268 mtx_unlock(&sc->reg_lock);
10269 if (rc != 0)
10270 return (rc);
10271
10272 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10273 if (sb == NULL)
10274 return (ENOMEM);
10275
10276 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
10277 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
10278 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
10279 mtus[14], mtus[15]);
10280
10281 rc = sbuf_finish(sb);
10282 sbuf_delete(sb);
10283
10284 return (rc);
10285}
10286
10287static int
10288sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
10289{
10290 struct adapter *sc = arg1;
10291 struct sbuf *sb;
10292 int rc, i;
10293 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
10294 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
10295 static const char *tx_stats[MAX_PM_NSTATS] = {
10296 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
10297 "Tx FIFO wait", NULL, "Tx latency"
10298 };
10299 static const char *rx_stats[MAX_PM_NSTATS] = {
10300 "Read:", "Write bypass:", "Write mem:", "Flush:",
10301 "Rx FIFO wait", NULL, "Rx latency"
10302 };
10303
10304 rc = sysctl_wire_old_buffer(req, 0);
10305 if (rc != 0)
10306 return (rc);
10307
10308 mtx_lock(&sc->reg_lock);
10309 if (hw_off_limits(sc))
10310 rc = ENXIO;
10311 else {
10312 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
10313 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
10314 }
10315 mtx_unlock(&sc->reg_lock);
10316 if (rc != 0)
10317 return (rc);
10318
10319 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10320 if (sb == NULL)
10321 return (ENOMEM);
10322
10323 sbuf_printf(sb, " Tx pcmds Tx bytes");
10324 for (i = 0; i < 4; i++) {
10325 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10326 tx_cyc[i]);
10327 }
10328
10329 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
10330 for (i = 0; i < 4; i++) {
10331 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10332 rx_cyc[i]);
10333 }
10334
10335 if (chip_id(sc) > CHELSIO_T5) {
10336 sbuf_printf(sb,
10337 "\n Total wait Total occupancy");
10338 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10339 tx_cyc[i]);
10340 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10341 rx_cyc[i]);
10342
10343 i += 2;
10344 MPASS(i < nitems(tx_stats));
10345
10346 sbuf_printf(sb,
10347 "\n Reads Total wait");
10348 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10349 tx_cyc[i]);
10350 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10351 rx_cyc[i]);
10352 }
10353
10354 rc = sbuf_finish(sb);
10355 sbuf_delete(sb);
10356
10357 return (rc);
10358}
10359
10360static int
10361sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
10362{
10363 struct adapter *sc = arg1;
10364 struct sbuf *sb;
10365 int rc;
10366 struct tp_rdma_stats stats;
10367
10368 rc = sysctl_wire_old_buffer(req, 0);
10369 if (rc != 0)
10370 return (rc);
10371
10372 mtx_lock(&sc->reg_lock);
10373 if (hw_off_limits(sc))
10374 rc = ENXIO;
10375 else
10376 t4_tp_get_rdma_stats(sc, &stats, 0);
10377 mtx_unlock(&sc->reg_lock);
10378 if (rc != 0)
10379 return (rc);
10380
10381 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10382 if (sb == NULL)
10383 return (ENOMEM);
10384
10385 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
10386 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
10387
10388 rc = sbuf_finish(sb);
10389 sbuf_delete(sb);
10390
10391 return (rc);
10392}
10393
10394static int
10395sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
10396{
10397 struct adapter *sc = arg1;
10398 struct sbuf *sb;
10399 int rc;
10400 struct tp_tcp_stats v4, v6;
10401
10402 rc = sysctl_wire_old_buffer(req, 0);
10403 if (rc != 0)
10404 return (rc);
10405
10406 mtx_lock(&sc->reg_lock);
10407 if (hw_off_limits(sc))
10408 rc = ENXIO;
10409 else
10410 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
10411 mtx_unlock(&sc->reg_lock);
10412 if (rc != 0)
10413 return (rc);
10414
10415 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10416 if (sb == NULL)
10417 return (ENOMEM);
10418
10419 sbuf_printf(sb,
10420 " IP IPv6\n");
10421 sbuf_printf(sb, "OutRsts: %20u %20u\n",
10422 v4.tcp_out_rsts, v6.tcp_out_rsts);
10423 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
10424 v4.tcp_in_segs, v6.tcp_in_segs);
10425 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
10426 v4.tcp_out_segs, v6.tcp_out_segs);
10427 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
10428 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
10429
10430 rc = sbuf_finish(sb);
10431 sbuf_delete(sb);
10432
10433 return (rc);
10434}
10435
10436static int
10437sysctl_tids(SYSCTL_HANDLER_ARGS)
10438{
10439 struct adapter *sc = arg1;
10440 struct sbuf *sb;
10441 int rc;
10442 uint32_t x, y;
10443 struct tid_info *t = &sc->tids;
10444
10445 rc = sysctl_wire_old_buffer(req, 0);
10446 if (rc != 0)
10447 return (rc);
10448
10449 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10450 if (sb == NULL)
10451 return (ENOMEM);
10452
10453 if (t->natids) {
10454 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
10455 t->atids_in_use);
10456 }
10457
10458 if (t->nhpftids) {
10459 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
10460 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
10461 }
10462
10463 if (t->ntids) {
10464 bool hashen = false;
10465
10466 mtx_lock(&sc->reg_lock);
10467 if (hw_off_limits(sc))
10468 rc = ENXIO;
10469 else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
10470 hashen = true;
10471 if (chip_id(sc) <= CHELSIO_T5) {
10472 x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
10473 y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
10474 } else {
10475 x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
10476 y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
10477 }
10478 }
10479 mtx_unlock(&sc->reg_lock);
10480 if (rc != 0)
10481 goto done;
10482
10483 sbuf_printf(sb, "TID range: ");
10484 if (hashen) {
10485 if (x)
10486 sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1);
10487 sbuf_printf(sb, "%u-%u", y, t->ntids - 1);
10488 } else {
10489 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
10490 t->ntids - 1);
10491 }
10492 sbuf_printf(sb, ", in use: %u\n",
10493 atomic_load_acq_int(&t->tids_in_use));
10494 }
10495
10496 if (t->nstids) {
10497 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
10498 t->stid_base + t->nstids - 1, t->stids_in_use);
10499 }
10500
10501 if (t->nftids) {
10502 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
10503 t->ftid_end, t->ftids_in_use);
10504 }
10505
10506 if (t->netids) {
10507 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
10508 t->etid_base + t->netids - 1, t->etids_in_use);
10509 }
10510
10511 mtx_lock(&sc->reg_lock);
10512 if (hw_off_limits(sc))
10513 rc = ENXIO;
10514 else {
10515 x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4);
10516 y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6);
10517 }
10518 mtx_unlock(&sc->reg_lock);
10519 if (rc != 0)
10520 goto done;
10521 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y);
10522done:
10523 if (rc == 0)
10524 rc = sbuf_finish(sb);
10525 else
10526 (void)sbuf_finish(sb);
10527 sbuf_delete(sb);
10528
10529 return (rc);
10530}
10531
10532static int
10533sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
10534{
10535 struct adapter *sc = arg1;
10536 struct sbuf *sb;
10537 int rc;
10538 struct tp_err_stats stats;
10539
10540 rc = sysctl_wire_old_buffer(req, 0);
10541 if (rc != 0)
10542 return (rc);
10543
10544 mtx_lock(&sc->reg_lock);
10545 if (hw_off_limits(sc))
10546 rc = ENXIO;
10547 else
10548 t4_tp_get_err_stats(sc, &stats, 0);
10549 mtx_unlock(&sc->reg_lock);
10550 if (rc != 0)
10551 return (rc);
10552
10553 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10554 if (sb == NULL)
10555 return (ENOMEM);
10556
10557 if (sc->chip_params->nchan > 2) {
10558 sbuf_printf(sb, " channel 0 channel 1"
10559 " channel 2 channel 3\n");
10560 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
10561 stats.mac_in_errs[0], stats.mac_in_errs[1],
10562 stats.mac_in_errs[2], stats.mac_in_errs[3]);
10563 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
10564 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
10565 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
10566 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
10567 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
10568 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
10569 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
10570 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
10571 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
10572 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
10573 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
10574 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
10575 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
10576 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
10577 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
10578 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
10579 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
10580 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
10581 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
10582 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
10583 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
10584 } else {
10585 sbuf_printf(sb, " channel 0 channel 1\n");
10586 sbuf_printf(sb, "macInErrs: %10u %10u\n",
10587 stats.mac_in_errs[0], stats.mac_in_errs[1]);
10588 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
10589 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
10590 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
10591 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
10592 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
10593 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
10594 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
10595 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
10596 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
10597 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
10598 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
10599 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
10600 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
10601 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
10602 }
10603
10604 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
10605 stats.ofld_no_neigh, stats.ofld_cong_defer);
10606
10607 rc = sbuf_finish(sb);
10608 sbuf_delete(sb);
10609
10610 return (rc);
10611}
10612
10613static int
10614sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
10615{
10616 struct adapter *sc = arg1;
10617 struct sbuf *sb;
10618 int rc;
10619 struct tp_tnl_stats stats;
10620
10621 rc = sysctl_wire_old_buffer(req, 0);
10622 if (rc != 0)
10623 return (rc);
10624
10625 mtx_lock(&sc->reg_lock);
10626 if (hw_off_limits(sc))
10627 rc = ENXIO;
10628 else
10629 t4_tp_get_tnl_stats(sc, &stats, 1);
10630 mtx_unlock(&sc->reg_lock);
10631 if (rc != 0)
10632 return (rc);
10633
10634 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10635 if (sb == NULL)
10636 return (ENOMEM);
10637
10638 if (sc->chip_params->nchan > 2) {
10639 sbuf_printf(sb, " channel 0 channel 1"
10640 " channel 2 channel 3\n");
10641 sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n",
10642 stats.out_pkt[0], stats.out_pkt[1],
10643 stats.out_pkt[2], stats.out_pkt[3]);
10644 sbuf_printf(sb, "InPkts: %10u %10u %10u %10u",
10645 stats.in_pkt[0], stats.in_pkt[1],
10646 stats.in_pkt[2], stats.in_pkt[3]);
10647 } else {
10648 sbuf_printf(sb, " channel 0 channel 1\n");
10649 sbuf_printf(sb, "OutPkts: %10u %10u\n",
10650 stats.out_pkt[0], stats.out_pkt[1]);
10651 sbuf_printf(sb, "InPkts: %10u %10u",
10652 stats.in_pkt[0], stats.in_pkt[1]);
10653 }
10654
10655 rc = sbuf_finish(sb);
10656 sbuf_delete(sb);
10657
10658 return (rc);
10659}
10660
10661static int
10662sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
10663{
10664 struct adapter *sc = arg1;
10665 struct tp_params *tpp = &sc->params.tp;
10666 u_int mask;
10667 int rc;
10668
10669 mask = tpp->la_mask >> 16;
10670 rc = sysctl_handle_int(oidp, &mask, 0, req);
10671 if (rc != 0 || req->newptr == NULL)
10672 return (rc);
10673 if (mask > 0xffff)
10674 return (EINVAL);
10675 mtx_lock(&sc->reg_lock);
10676 if (hw_off_limits(sc))
10677 rc = ENXIO;
10678 else {
10679 tpp->la_mask = mask << 16;
10680 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U,
10681 tpp->la_mask);
10682 }
10683 mtx_unlock(&sc->reg_lock);
10684
10685 return (rc);
10686}
10687
10688struct field_desc {
10689 const char *name;
10690 u_int start;
10691 u_int width;
10692};
10693
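/*
 * Table-driven decoder for the TP logic analyzer captures below: each
 * field_desc names one bit-field (LSB position and width) of a 64-bit
 * capture word, and field_desc_show prints every field in the table,
 * wrapping output at roughly 79 columns.
 */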
10694static void
10695field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
10696{
10697 char buf[32];
10698 int line_size = 0;
10699
10700 while (f->name) {
10701 uint64_t mask = (1ULL << f->width) - 1;
10702 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
10703 ((uintmax_t)v >> f->start) & mask);
10704
10705 if (line_size + len >= 79) {
10706 line_size = 8;
10707 sbuf_printf(sb, "\n ");
10708 }
10709 sbuf_printf(sb, "%s ", buf);
10710 line_size += len + 1;
10711 f++;
10712 }
10713 sbuf_printf(sb, "\n");
10714}
10715
10716static const struct field_desc tp_la0[] = {
10717 { "RcfOpCodeOut", 60, 4 },
10718 { "State", 56, 4 },
10719 { "WcfState", 52, 4 },
10720 { "RcfOpcSrcOut", 50, 2 },
10721 { "CRxError", 49, 1 },
10722 { "ERxError", 48, 1 },
10723 { "SanityFailed", 47, 1 },
10724 { "SpuriousMsg", 46, 1 },
10725 { "FlushInputMsg", 45, 1 },
10726 { "FlushInputCpl", 44, 1 },
10727 { "RssUpBit", 43, 1 },
10728 { "RssFilterHit", 42, 1 },
10729 { "Tid", 32, 10 },
10730 { "InitTcb", 31, 1 },
10731 { "LineNumber", 24, 7 },
10732 { "Emsg", 23, 1 },
10733 { "EdataOut", 22, 1 },
10734 { "Cmsg", 21, 1 },
10735 { "CdataOut", 20, 1 },
10736 { "EreadPdu", 19, 1 },
10737 { "CreadPdu", 18, 1 },
10738 { "TunnelPkt", 17, 1 },
10739 { "RcfPeerFin", 16, 1 },
10740 { "RcfReasonOut", 12, 4 },
10741 { "TxCchannel", 10, 2 },
10742 { "RcfTxChannel", 8, 2 },
10743 { "RxEchannel", 6, 2 },
10744 { "RcfRxChannel", 5, 1 },
10745 { "RcfDataOutSrdy", 4, 1 },
10746 { "RxDvld", 3, 1 },
10747 { "RxOoDvld", 2, 1 },
10748 { "RxCongestion", 1, 1 },
10749 { "TxCongestion", 0, 1 },
10750 { NULL }
10751};
10752
10753static const struct field_desc tp_la1[] = {
10754 { "CplCmdIn", 56, 8 },
10755 { "CplCmdOut", 48, 8 },
10756 { "ESynOut", 47, 1 },
10757 { "EAckOut", 46, 1 },
10758 { "EFinOut", 45, 1 },
10759 { "ERstOut", 44, 1 },
10760 { "SynIn", 43, 1 },
10761 { "AckIn", 42, 1 },
10762 { "FinIn", 41, 1 },
10763 { "RstIn", 40, 1 },
10764 { "DataIn", 39, 1 },
10765 { "DataInVld", 38, 1 },
10766 { "PadIn", 37, 1 },
10767 { "RxBufEmpty", 36, 1 },
10768 { "RxDdp", 35, 1 },
10769 { "RxFbCongestion", 34, 1 },
10770 { "TxFbCongestion", 33, 1 },
10771 { "TxPktSumSrdy", 32, 1 },
10772 { "RcfUlpType", 28, 4 },
10773 { "Eread", 27, 1 },
10774 { "Ebypass", 26, 1 },
10775 { "Esave", 25, 1 },
10776 { "Static0", 24, 1 },
10777 { "Cread", 23, 1 },
10778 { "Cbypass", 22, 1 },
10779 { "Csave", 21, 1 },
10780 { "CPktOut", 20, 1 },
10781 { "RxPagePoolFull", 18, 2 },
10782 { "RxLpbkPkt", 17, 1 },
10783 { "TxLpbkPkt", 16, 1 },
10784 { "RxVfValid", 15, 1 },
10785 { "SynLearned", 14, 1 },
10786 { "SetDelEntry", 13, 1 },
10787 { "SetInvEntry", 12, 1 },
10788 { "CpcmdDvld", 11, 1 },
10789 { "CpcmdSave", 10, 1 },
10790 { "RxPstructsFull", 8, 2 },
10791 { "EpcmdDvld", 7, 1 },
10792 { "EpcmdFlush", 6, 1 },
10793 { "EpcmdTrimPrefix", 5, 1 },
10794 { "EpcmdTrimPostfix", 4, 1 },
10795 { "ERssIp4Pkt", 3, 1 },
10796 { "ERssIp6Pkt", 2, 1 },
10797 { "ERssTcpUdpPkt", 1, 1 },
10798 { "ERssFceFipPkt", 0, 1 },
10799 { NULL }
10800};
10801
10802static const struct field_desc tp_la2[] = {
10803 { "CplCmdIn", 56, 8 },
10804 { "MpsVfVld", 55, 1 },
10805 { "MpsPf", 52, 3 },
10806 { "MpsVf", 44, 8 },
10807 { "SynIn", 43, 1 },
10808 { "AckIn", 42, 1 },
10809 { "FinIn", 41, 1 },
10810 { "RstIn", 40, 1 },
10811 { "DataIn", 39, 1 },
10812 { "DataInVld", 38, 1 },
10813 { "PadIn", 37, 1 },
10814 { "RxBufEmpty", 36, 1 },
10815 { "RxDdp", 35, 1 },
10816 { "RxFbCongestion", 34, 1 },
10817 { "TxFbCongestion", 33, 1 },
10818 { "TxPktSumSrdy", 32, 1 },
10819 { "RcfUlpType", 28, 4 },
10820 { "Eread", 27, 1 },
10821 { "Ebypass", 26, 1 },
10822 { "Esave", 25, 1 },
10823 { "Static0", 24, 1 },
10824 { "Cread", 23, 1 },
10825 { "Cbypass", 22, 1 },
10826 { "Csave", 21, 1 },
10827 { "CPktOut", 20, 1 },
10828 { "RxPagePoolFull", 18, 2 },
10829 { "RxLpbkPkt", 17, 1 },
10830 { "TxLpbkPkt", 16, 1 },
10831 { "RxVfValid", 15, 1 },
10832 { "SynLearned", 14, 1 },
10833 { "SetDelEntry", 13, 1 },
10834 { "SetInvEntry", 12, 1 },
10835 { "CpcmdDvld", 11, 1 },
10836 { "CpcmdSave", 10, 1 },
10837 { "RxPstructsFull", 8, 2 },
10838 { "EpcmdDvld", 7, 1 },
10839 { "EpcmdFlush", 6, 1 },
10840 { "EpcmdTrimPrefix", 5, 1 },
10841 { "EpcmdTrimPostfix", 4, 1 },
10842 { "ERssIp4Pkt", 3, 1 },
10843 { "ERssIp6Pkt", 2, 1 },
10844 { "ERssTcpUdpPkt", 1, 1 },
10845 { "ERssFceFipPkt", 0, 1 },
10846 { NULL }
10847};
10848
10849static void
10850tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
10851{
10852
10853 field_desc_show(sb, *p, tp_la0);
10854}
10855
10856static void
10857tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
10858{
10859
10860 if (idx)
10861 sbuf_printf(sb, "\n");
10862 field_desc_show(sb, p[0], tp_la0);
10863 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10864 field_desc_show(sb, p[1], tp_la0);
10865}
10866
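/*
 * Mode 3: the second word of a capture pair is decoded with tp_la2
 * when bit 17 (TunnelPkt) is set in the first word, with tp_la1
 * otherwise.
 */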
10867static void
10868tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
10869{
10870
10871 if (idx)
10872 sbuf_printf(sb, "\n");
10873 field_desc_show(sb, p[0], tp_la0);
10874 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10875 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
10876}
10877
10878static int
10879sysctl_tp_la(SYSCTL_HANDLER_ARGS)
10880{
10881 struct adapter *sc = arg1;
10882 struct sbuf *sb;
10883 uint64_t *buf, *p;
10884 int rc;
10885 u_int i, inc;
10886 void (*show_func)(struct sbuf *, uint64_t *, int);
10887
10888 rc = sysctl_wire_old_buffer(req, 0);
10889 if (rc != 0)
10890 return (rc);
10891
10892 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10893 if (sb == NULL)
10894 return (ENOMEM);
10895
10896 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
10897
10898 mtx_lock(&sc->reg_lock);
10899 if (hw_off_limits(sc))
10900 rc = ENXIO;
10901 else {
10902 t4_tp_read_la(sc, buf, NULL);
10903 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
10904 case 2:
10905 inc = 2;
10906 show_func = tp_la_show2;
10907 break;
10908 case 3:
10909 inc = 2;
10910 show_func = tp_la_show3;
10911 break;
10912 default:
10913 inc = 1;
10914 show_func = tp_la_show;
10915 }
10916 }
10917 mtx_unlock(&sc->reg_lock);
10918 if (rc != 0)
10919 goto done;
10920
10921 p = buf;
10922 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
10923 (*show_func)(sb, p, i);
10924 rc = sbuf_finish(sb);
10925done:
10926 sbuf_delete(sb);
10927 free(buf, M_CXGBE);
10928 return (rc);
10929}
10930
10931static int
10932sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
10933{
10934 struct adapter *sc = arg1;
10935 struct sbuf *sb;
10936 int rc;
10937 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
10938
10939 rc = sysctl_wire_old_buffer(req, 0);
10940 if (rc != 0)
10941 return (rc);
10942
10943 mtx_lock(&sc->reg_lock);
10944 if (hw_off_limits(sc))
10945 rc = ENXIO;
10946 else
10947 t4_get_chan_txrate(sc, nrate, orate);
10948 mtx_unlock(&sc->reg_lock);
10949 if (rc != 0)
10950 return (rc);
10951
10952 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10953 if (sb == NULL)
10954 return (ENOMEM);
10955
10956 if (sc->chip_params->nchan > 2) {
10957 sbuf_printf(sb, " channel 0 channel 1"
10958 " channel 2 channel 3\n");
10959 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
10960 nrate[0], nrate[1], nrate[2], nrate[3]);
10961 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
10962 orate[0], orate[1], orate[2], orate[3]);
10963 } else {
10964 sbuf_printf(sb, " channel 0 channel 1\n");
10965 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
10966 nrate[0], nrate[1]);
10967 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
10968 orate[0], orate[1]);
10969 }
10970
10971 rc = sbuf_finish(sb);
10972 sbuf_delete(sb);
10973
10974 return (rc);
10975}
10976
10977static int
10978sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
10979{
10980 struct adapter *sc = arg1;
10981 struct sbuf *sb;
10982 uint32_t *buf, *p;
10983 int rc, i;
10984
10985 rc = sysctl_wire_old_buffer(req, 0);
10986 if (rc != 0)
10987 return (rc);
10988
10989 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10990 if (sb == NULL)
10991 return (ENOMEM);
10992
10993 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
10994 M_ZERO | M_WAITOK);
10995
10996 mtx_lock(&sc->reg_lock);
10997 if (hw_off_limits(sc))
10998 rc = ENXIO;
10999 else
11000 t4_ulprx_read_la(sc, buf);
11001 mtx_unlock(&sc->reg_lock);
11002 if (rc != 0)
11003 goto done;
11004
11005 p = buf;
11006 sbuf_printf(sb, " Pcmd Type Message"
11007 " Data");
11008 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
11009 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
11010 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
11011 }
11012 rc = sbuf_finish(sb);
11013done:
11014 sbuf_delete(sb);
11015 free(buf, M_CXGBE);
11016 return (rc);
11017}
11018
11019static int
11020sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
11021{
11022 struct adapter *sc = arg1;
11023 struct sbuf *sb;
11024 int rc;
11025 uint32_t cfg, s1, s2;
11026
11027 MPASS(chip_id(sc) >= CHELSIO_T5);
11028
11029 rc = sysctl_wire_old_buffer(req, 0);
11030 if (rc != 0)
11031 return (rc);
11032
11033 mtx_lock(&sc->reg_lock);
11034 if (hw_off_limits(sc))
11035 rc = ENXIO;
11036 else {
11037 cfg = t4_read_reg(sc, A_SGE_STAT_CFG);
11038 s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL);
11039 s2 = t4_read_reg(sc, A_SGE_STAT_MATCH);
11040 }
11041 mtx_unlock(&sc->reg_lock);
11042 if (rc != 0)
11043 return (rc);
11044
11045 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11046 if (sb == NULL)
11047 return (ENOMEM);
11048
11049 if (G_STATSOURCE_T5(cfg) == 7) {
11050 int mode;
11051
11052 mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg);
11053 if (mode == 0)
11054 sbuf_printf(sb, "total %d, incomplete %d", s1, s2);
11055 else if (mode == 1)
11056 sbuf_printf(sb, "total %d, data overflow %d", s1, s2);
11057 else
11058 sbuf_printf(sb, "unknown mode %d", mode);
11059 }
11060 rc = sbuf_finish(sb);
11061 sbuf_delete(sb);
11062
11063 return (rc);
11064}
11065
11066static int
11067sysctl_cpus(SYSCTL_HANDLER_ARGS)
11068{
11069 struct adapter *sc = arg1;
11070 enum cpu_sets op = arg2;
11071 cpuset_t cpuset;
11072 struct sbuf *sb;
11073 int i, rc;
11074
11075 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
11076
11077 CPU_ZERO(&cpuset);
11078 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
11079 if (rc != 0)
11080 return (rc);
11081
11082 rc = sysctl_wire_old_buffer(req, 0);
11083 if (rc != 0)
11084 return (rc);
11085
11086 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11087 if (sb == NULL)
11088 return (ENOMEM);
11089
11090 CPU_FOREACH(i)
11091 sbuf_printf(sb, "%d ", i);
11092 rc = sbuf_finish(sb);
11093 sbuf_delete(sb);
11094
11095 return (rc);
11096}
11097
11098static int
11099sysctl_reset(SYSCTL_HANDLER_ARGS)
11100{
11101 struct adapter *sc = arg1;
11102 u_int val;
11103 int rc;
11104
11105 val = atomic_load_int(&sc->num_resets);
11106 rc = sysctl_handle_int(oidp, &val, 0, req);
11107 if (rc != 0 || req->newptr == NULL)
11108 return (rc);
11109
11110 if (val == 0) {
11111 /* Zero out the counter that tracks reset. */
11112 atomic_store_int(&sc->num_resets, 0);
11113 return (0);
11114 }
11115
11116 if (val != 1)
11117 return (EINVAL); /* 0 or 1 are the only legal values */
11118
11119 if (hw_off_limits(sc)) /* harmless race */
11120 return (EALREADY);
11121
11122 taskqueue_enqueue(reset_tq, &sc->reset_task);
11123 return (0);
11124}
11125
11126#ifdef TCP_OFFLOAD
11127static int
11128sysctl_tls(SYSCTL_HANDLER_ARGS)
11129{
11130 struct adapter *sc = arg1;
11131 int i, j, v, rc;
11132 struct vi_info *vi;
11133
11134 v = sc->tt.tls;
11135 rc = sysctl_handle_int(oidp, &v, 0, req);
11136 if (rc != 0 || req->newptr == NULL)
11137 return (rc);
11138
11139 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
11140 return (ENOTSUP);
11141
11142 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
11143 if (rc)
11144 return (rc);
11145 if (hw_off_limits(sc))
11146 rc = ENXIO;
11147 else {
11148 sc->tt.tls = !!v;
11149 for_each_port(sc, i) {
11150 for_each_vi(sc->port[i], j, vi) {
11151 if (vi->flags & VI_INIT_DONE)
11152 t4_update_fl_bufsize(vi->ifp);
11153 }
11154 }
11155 }
11156 end_synchronized_op(sc, 0);
11157
11158 return (rc);
11159
11160}
11161
11162static int
11163sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
11164{
11165 struct adapter *sc = arg1;
11166 int *old_ports, *new_ports;
11167 int i, new_count, rc;
11168
11169 if (req->newptr == NULL && req->oldptr == NULL)
11170 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
11171 sizeof(sc->tt.tls_rx_ports[0])));
11172
11173 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
11174 if (rc)
11175 return (rc);
11176
11177 if (hw_off_limits(sc)) {
11178 rc = ENXIO;
11179 goto done;
11180 }
11181
11182 if (sc->tt.num_tls_rx_ports == 0) {
11183 i = -1;
11184 rc = SYSCTL_OUT(req, &i, sizeof(i));
11185 } else
11186 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
11187 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
11188 if (rc == 0 && req->newptr != NULL) {
11189 new_count = req->newlen / sizeof(new_ports[0]);
11190 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
11191 M_WAITOK);
11192 rc = SYSCTL_IN(req, new_ports, new_count *
11193 sizeof(new_ports[0]));
11194 if (rc)
11195 goto err;
11196
11197 /* Allow setting to a single '-1' to clear the list. */
11198 if (new_count == 1 && new_ports[0] == -1) {
11199 ADAPTER_LOCK(sc);
11200 old_ports = sc->tt.tls_rx_ports;
11201 sc->tt.tls_rx_ports = NULL;
11202 sc->tt.num_tls_rx_ports = 0;
11203 ADAPTER_UNLOCK(sc);
11204 free(old_ports, M_CXGBE);
11205 } else {
11206 for (i = 0; i < new_count; i++) {
11207 if (new_ports[i] < 1 ||
11208 new_ports[i] > IPPORT_MAX) {
11209 rc = EINVAL;
11210 goto err;
11211 }
11212 }
11213
11214 ADAPTER_LOCK(sc);
11215 old_ports = sc->tt.tls_rx_ports;
11216 sc->tt.tls_rx_ports = new_ports;
11217 sc->tt.num_tls_rx_ports = new_count;
11218 ADAPTER_UNLOCK(sc);
11219 free(old_ports, M_CXGBE);
11220 new_ports = NULL;
11221 }
11222 err:
11223 free(new_ports, M_CXGBE);
11224 }
11225done:
11226 end_synchronized_op(sc, 0);
11227 return (rc);
11228}
11229
11230static int
11231sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS)
11232{
11233 struct adapter *sc = arg1;
11234 int v, rc;
11235
11236 v = sc->tt.tls_rx_timeout;
11237 rc = sysctl_handle_int(oidp, &v, 0, req);
11238 if (rc != 0 || req->newptr == NULL)
11239 return (rc);
11240
11241 if (v < 0)
11242 return (EINVAL);
11243
11244 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
11245 return (ENOTSUP);
11246
11247 sc->tt.tls_rx_timeout = v;
11248
11249 return (0);
11250
11251}
11252
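/*
 * Render val / factor as a decimal string without floating point;
 * e.g. unit_conv(buf, sizeof(buf), 2500, 1000) produces "2.5".
 */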
11253static void
11254unit_conv(char *buf, size_t len, u_int val, u_int factor)
11255{
11256 u_int rem = val % factor;
11257
11258 if (rem == 0)
11259 snprintf(buf, len, "%u", val / factor);
11260 else {
11261 while (rem % 10 == 0)
11262 rem /= 10;
11263 snprintf(buf, len, "%u.%u", val / factor, rem);
11264 }
11265}
11266
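/*
 * Report a TP tick in microseconds.  The VPD core clock (cclk) is in
 * kHz, so 1000000000 / cclk is the clock period in picoseconds;
 * shifting by the applicable resolution field gives picoseconds per
 * tick, and unit_conv then scales by 10^6 to microseconds.
 */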
11267static int
11268sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
11269{
11270 struct adapter *sc = arg1;
11271 char buf[16];
11272 u_int res, re;
11273 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11274
11275 mtx_lock(&sc->reg_lock);
11276 if (hw_off_limits(sc))
11277 res = (u_int)-1;
11278 else
11279 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
11280 mtx_unlock(&sc->reg_lock);
11281 if (res == (u_int)-1)
11282 return (ENXIO);
11283
11284 switch (arg2) {
11285 case 0:
11286 /* timer_tick */
11287 re = G_TIMERRESOLUTION(res);
11288 break;
11289 case 1:
11290 /* TCP timestamp tick */
11291 re = G_TIMESTAMPRESOLUTION(res);
11292 break;
11293 case 2:
11294 /* DACK tick */
11295 re = G_DELAYEDACKRESOLUTION(res);
11296 break;
11297 default:
11298 return (EDOOFUS);
11299 }
11300
11301 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
11302
11303 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
11304}
11305
11306static int
11307sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
11308{
11309 struct adapter *sc = arg1;
11310 int rc;
11311 u_int dack_tmr, dack_re, v;
11312 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11313
11314 mtx_lock(&sc->reg_lock);
11315 if (hw_off_limits(sc))
11316 rc = ENXIO;
11317 else {
11318 rc = 0;
11319 dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc,
11320 A_TP_TIMER_RESOLUTION));
11321 dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER);
11322 }
11323 mtx_unlock(&sc->reg_lock);
11324 if (rc != 0)
11325 return (rc);
11326
11327 v = ((cclk_ps << dack_re) / 1000000) * dack_tmr;
11328
11329 return (sysctl_handle_int(oidp, &v, 0, req));
11330}
11331
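/*
 * Convert one of the TP timer registers to microseconds.  A TP tick
 * is (cclk_ps << TIMERRESOLUTION) picoseconds and the register holds
 * a tick count (for A_TP_INIT_SRTT, in its INITSRTT field).
 */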
11332static int
11333sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
11334{
11335 struct adapter *sc = arg1;
11336 int rc, reg = arg2;
11337 u_int tre;
11338 u_long tp_tick_us, v;
11339 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11340
11341 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
11342 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
11343 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
11344 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
11345
11346 mtx_lock(&sc->reg_lock);
11347 if (hw_off_limits(sc))
11348 rc = ENXIO;
11349 else {
11350 rc = 0;
11351 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
11352 tp_tick_us = (cclk_ps << tre) / 1000000;
11353 if (reg == A_TP_INIT_SRTT)
11354 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
11355 else
11356 v = tp_tick_us * t4_read_reg(sc, reg);
11357 }
11358 mtx_unlock(&sc->reg_lock);
11359 if (rc != 0)
11360 return (rc);
11361 else
11362 return (sysctl_handle_long(oidp, &v, 0, req));
11363}
11364
11365/*
11366 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
11367 * passed to this function.
11368 */
11369static int
11370sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
11371{
11372 struct adapter *sc = arg1;
11373 int rc, idx = arg2;
11374 u_int v;
11375
11376 MPASS(idx >= 0 && idx <= 24);
11377
11378 mtx_lock(&sc->reg_lock);
11379 if (hw_off_limits(sc))
11380 rc = ENXIO;
11381 else {
11382 rc = 0;
11383 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
11384 }
11385 mtx_unlock(&sc->reg_lock);
11386 if (rc != 0)
11387 return (rc);
11388 else
11389 return (sysctl_handle_int(oidp, &v, 0, req));
11390}
11391
11392static int
11393sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
11394{
11395 struct adapter *sc = arg1;
11396 int rc, idx = arg2;
11397 u_int shift, v, r;
11398
11399 MPASS(idx >= 0 && idx < 16);
11400
11401 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
11402 shift = (idx & 3) << 3;
11403 mtx_lock(&sc->reg_lock);
11404 if (hw_off_limits(sc))
11405 rc = ENXIO;
11406 else {
11407 rc = 0;
11408 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
11409 }
11410 mtx_unlock(&sc->reg_lock);
11411 if (rc != 0)
11412 return (rc);
11413 else
11414 return (sysctl_handle_int(oidp, &v, 0, req));
11415}
11416
11417static int
11418sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
11419{
11420 struct vi_info *vi = arg1;
11421 struct adapter *sc = vi->adapter;
11422 int idx, rc, i;
11423 struct sge_ofld_rxq *ofld_rxq;
11424 uint8_t v;
11425
11426 idx = vi->ofld_tmr_idx;
11427
11428 rc = sysctl_handle_int(oidp, &idx, 0, req);
11429 if (rc != 0 || req->newptr == NULL)
11430 return (rc);
11431
11432 if (idx < 0 || idx >= SGE_NTIMERS)
11433 return (EINVAL);
11434
11435 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11436 "t4otmr");
11437 if (rc)
11438 return (rc);
11439
11440 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
11441 for_each_ofld_rxq(vi, i, ofld_rxq) {
11442#ifdef atomic_store_rel_8
11443 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
11444#else
11445 ofld_rxq->iq.intr_params = v;
11446#endif
11447 }
11448 vi->ofld_tmr_idx = idx;
11449
11450 end_synchronized_op(sc, LOCK_HELD);
11451 return (0);
11452}
11453
11454static int
11455sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
11456{
11457 struct vi_info *vi = arg1;
11458 struct adapter *sc = vi->adapter;
11459 int idx, rc;
11460
11461 idx = vi->ofld_pktc_idx;
11462
11463 rc = sysctl_handle_int(oidp, &idx, 0, req);
11464 if (rc != 0 || req->newptr == NULL)
11465 return (rc);
11466
11467 if (idx < -1 || idx >= SGE_NCOUNTERS)
11468 return (EINVAL);
11469
11470 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11471 "t4opktc");
11472 if (rc)
11473 return (rc);
11474
11475 if (vi->flags & VI_INIT_DONE)
11476 rc = EBUSY; /* cannot be changed once the queues are created */
11477 else
11478 vi->ofld_pktc_idx = idx;
11479
11480 end_synchronized_op(sc, LOCK_HELD);
11481 return (rc);
11482}
11483#endif
11484
11485static int
11486get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
11487{
11488 int rc;
11489
11490 if (cntxt->cid > M_CTXTQID)
11491 return (EINVAL);
11492
11493 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
11494 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
11495 return (EINVAL);
11496
11497 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
11498 if (rc)
11499 return (rc);
11500
11501 if (hw_off_limits(sc)) {
11502 rc = ENXIO;
11503 goto done;
11504 }
11505
11506 if (sc->flags & FW_OK) {
11507 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
11508 &cntxt->data[0]);
11509 if (rc == 0)
11510 goto done;
11511 }
11512
11513 /*
11514 * Read via firmware failed or wasn't even attempted. Read directly via
11515 * the backdoor.
11516 */
11517 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
11518done:
11519 end_synchronized_op(sc, 0);
11520 return (rc);
11521}
11522
11523static int
11524load_fw(struct adapter *sc, struct t4_data *fw)
11525{
11526 int rc;
11527 uint8_t *fw_data;
11528
11529 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
11530 if (rc)
11531 return (rc);
11532
11533 if (hw_off_limits(sc)) {
11534 rc = ENXIO;
11535 goto done;
11536 }
11537
11538 /*
11539 * The firmware, with the sole exception of the memory parity error
11540 * handler, runs from memory and not flash. It is almost always safe to
11541 * install a new firmware on a running system. Just set bit 1 in
11542 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
11543 */
11544 if (sc->flags & FULL_INIT_DONE &&
11545 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
11546 rc = EBUSY;
11547 goto done;
11548 }
11549
11550 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
11551
11552 rc = copyin(fw->data, fw_data, fw->len);
11553 if (rc == 0)
11554 rc = -t4_load_fw(sc, fw_data, fw->len);
11555
11556 free(fw_data, M_CXGBE);
11557done:
11558 end_synchronized_op(sc, 0);
11559 return (rc);
11560}
11561
11562static int
11563load_cfg(struct adapter *sc, struct t4_data *cfg)
11564{
11565 int rc;
11566 uint8_t *cfg_data = NULL;
11567
11568 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11569 if (rc)
11570 return (rc);
11571
11572 if (hw_off_limits(sc)) {
11573 rc = ENXIO;
11574 goto done;
11575 }
11576
11577 if (cfg->len == 0) {
11578 /* clear */
11579 rc = -t4_load_cfg(sc, NULL, 0);
11580 goto done;
11581 }
11582
11583 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
11584
11585 rc = copyin(cfg->data, cfg_data, cfg->len);
11586 if (rc == 0)
11587 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
11588
11589 free(cfg_data, M_CXGBE);
11590done:
11591 end_synchronized_op(sc, 0);
11592 return (rc);
11593}
11594
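/*
 * Write a boot image to the card.  br->pf_offset picks how the flash
 * offset is derived: 0 reads the expansion ROM offset programmed for
 * PF br->pfidx_addr, 1 uses br->pfidx_addr directly as the offset.
 * A zero-length request clears the boot area instead.
 */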
11595static int
11596load_boot(struct adapter *sc, struct t4_bootrom *br)
11597{
11598 int rc;
11599 uint8_t *br_data = NULL;
11600 u_int offset;
11601
11602 if (br->len > 1024 * 1024)
11603 return (EFBIG);
11604
11605 if (br->pf_offset == 0) {
11606 /* pfidx */
11607 if (br->pfidx_addr > 7)
11608 return (EINVAL);
11609 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
11610 A_PCIE_PF_EXPROM_OFST)));
11611 } else if (br->pf_offset == 1) {
11612 /* offset */
11613 offset = G_OFFSET(br->pfidx_addr);
11614 } else {
11615 return (EINVAL);
11616 }
11617
11618 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
11619 if (rc)
11620 return (rc);
11621
11622 if (hw_off_limits(sc)) {
11623 rc = ENXIO;
11624 goto done;
11625 }
11626
11627 if (br->len == 0) {
11628 /* clear */
11629 rc = -t4_load_boot(sc, NULL, offset, 0);
11630 goto done;
11631 }
11632
11633 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
11634
11635 rc = copyin(br->data, br_data, br->len);
11636 if (rc == 0)
11637 rc = -t4_load_boot(sc, br_data, offset, br->len);
11638
11639 free(br_data, M_CXGBE);
11640done:
11641 end_synchronized_op(sc, 0);
11642 return (rc);
11643}
11644
11645static int
11646load_bootcfg(struct adapter *sc, struct t4_data *bc)
11647{
11648 int rc;
11649 uint8_t *bc_data = NULL;
11650
11651 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11652 if (rc)
11653 return (rc);
11654
11655 if (hw_off_limits(sc)) {
11656 rc = ENXIO;
11657 goto done;
11658 }
11659
11660 if (bc->len == 0) {
11661 /* clear */
11662 rc = -t4_load_bootcfg(sc, NULL, 0);
11663 goto done;
11664 }
11665
11666 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
11667
11668 rc = copyin(bc->data, bc_data, bc->len);
11669 if (rc == 0)
11670 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
11671
11672 free(bc_data, M_CXGBE);
11673done:
11674 end_synchronized_op(sc, 0);
11675 return (rc);
11676}
11677
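/*
 * Collect a cudbg debug dump into a temporary buffer and copy it out.
 * The request's bitmap selects the debug entities to collect and
 * dump->len is updated to the collected size on success.
 */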
11678static int
11679cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
11680{
11681 int rc;
11682 struct cudbg_init *cudbg;
11683 void *handle, *buf;
11684
11685 /* buf is large, don't block if no memory is available */
11686 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
11687 if (buf == NULL)
11688 return (ENOMEM);
11689
11690 handle = cudbg_alloc_handle();
11691 if (handle == NULL) {
11692 rc = ENOMEM;
11693 goto done;
11694 }
11695
11696 cudbg = cudbg_get_init(handle);
11697 cudbg->adap = sc;
11698 cudbg->print = (cudbg_print_cb)printf;
11699
11700#ifndef notyet
11701 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
11702 __func__, dump->wr_flash, dump->len, dump->data);
11703#endif
11704
11705 if (dump->wr_flash)
11706 cudbg->use_flash = 1;
11707 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
11708 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
11709
11710 rc = cudbg_collect(handle, buf, &dump->len);
11711 if (rc != 0)
11712 goto done;
11713
11714 rc = copyout(buf, dump->data, dump->len);
11715done:
11716 cudbg_free_handle(handle);
11717 free(buf, M_CXGBE);
11718 return (rc);
11719}
11720
11721static void
11722free_offload_policy(struct t4_offload_policy *op)
11723{
11724 struct offload_rule *r;
11725 int i;
11726
11727 if (op == NULL)
11728 return;
11729
11730 r = &op->rule[0];
11731 for (i = 0; i < op->nrules; i++, r++) {
11732 free(r->bpf_prog.bf_insns, M_CXGBE);
11733 }
11734 free(op->rule, M_CXGBE);
11735 free(op, M_CXGBE);
11736}
11737
11738static int
11739set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
11740{
11741 int i, rc, len;
11742 struct t4_offload_policy *op, *old;
11743 struct bpf_program *bf;
11744 const struct offload_settings *s;
11745 struct offload_rule *r;
11746 void *u;
11747
11748 if (!is_offload(sc))
11749 return (ENODEV);
11750
11751 if (uop->nrules == 0) {
11752 /* Delete installed policies. */
11753 op = NULL;
11754 goto set_policy;
11755 } else if (uop->nrules > 256) { /* arbitrary */
11756 return (E2BIG);
11757 }
11758
11759 /* Copy userspace offload policy to kernel */
11760 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
11761 op->nrules = uop->nrules;
11762 len = op->nrules * sizeof(struct offload_rule);
11763 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
11764 rc = copyin(uop->rule, op->rule, len);
11765 if (rc) {
11766 free(op->rule, M_CXGBE);
11767 free(op, M_CXGBE);
11768 return (rc);
11769 }
11770
11771 r = &op->rule[0];
11772 for (i = 0; i < op->nrules; i++, r++) {
11773
11774 /* Validate open_type */
11775 if (r->open_type != OPEN_TYPE_LISTEN &&
11776 r->open_type != OPEN_TYPE_ACTIVE &&
11777 r->open_type != OPEN_TYPE_PASSIVE &&
11778 r->open_type != OPEN_TYPE_DONTCARE) {
11779error:
11780 /*
11781 * Rules 0 to i have malloc'd filters that need to be
11782 * freed. Rules i+1 to nrules have userspace pointers
11783 * and should be left alone.
11784 */
11785 op->nrules = i;
11786 free_offload_policy(op);
11787 return (rc);
11788 }
11789
11790 /* Validate settings */
11791 s = &r->settings;
11792 if ((s->offload != 0 && s->offload != 1) ||
11793 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
11794 s->sched_class < -1 ||
11795 s->sched_class >= sc->params.nsched_cls) {
11796 rc = EINVAL;
11797 goto error;
11798 }
11799
11800 bf = &r->bpf_prog;
11801 u = bf->bf_insns; /* userspace ptr */
11802 bf->bf_insns = NULL;
11803 if (bf->bf_len == 0) {
11804 /* legal, matches everything */
11805 continue;
11806 }
11807 len = bf->bf_len * sizeof(*bf->bf_insns);
11808 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
11809 rc = copyin(u, bf->bf_insns, len);
11810 if (rc != 0)
11811 goto error;
11812
11813 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
11814 rc = EINVAL;
11815 goto error;
11816 }
11817 }
11818set_policy:
11819 rw_wlock(&sc->policy_lock);
11820 old = sc->policy;
11821 sc->policy = op;
11822 rw_wunlock(&sc->policy_lock);
11823 free_offload_policy(old);
11824
11825 return (0);
11826}
11827
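/*
 * Copy a range of adapter memory out to userspace in chunks of at
 * most MAX_READ_BUF_SIZE bytes via memory window 2, bounding the
 * kernel buffer regardless of the size of the request.
 */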
11828#define MAX_READ_BUF_SIZE (128 * 1024)
11829static int
11830read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
11831{
11832 uint32_t addr, remaining, n;
11833 uint32_t *buf;
11834 int rc;
11835 uint8_t *dst;
11836
11837 mtx_lock(&sc->reg_lock);
11838 if (hw_off_limits(sc))
11839 rc = ENXIO;
11840 else
11841 rc = validate_mem_range(sc, mr->addr, mr->len);
11842 mtx_unlock(&sc->reg_lock);
11843 if (rc != 0)
11844 return (rc);
11845
11846 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
11847 addr = mr->addr;
11848 remaining = mr->len;
11849 dst = (void *)mr->data;
11850
11851 while (remaining) {
11852 n = min(remaining, MAX_READ_BUF_SIZE);
11853 mtx_lock(&sc->reg_lock);
11854 if (hw_off_limits(sc))
11855 rc = ENXIO;
11856 else
11857 read_via_memwin(sc, 2, addr, buf, n);
11858 mtx_unlock(&sc->reg_lock);
11859 if (rc != 0)
11860 break;
11861
11862 rc = copyout(buf, dst, n);
11863 if (rc != 0)
11864 break;
11865
11866 dst += n;
11867 remaining -= n;
11868 addr += n;
11869 }
11870
11871 free(buf, M_CXGBE);
11872 return (rc);
11873}
11874#undef MAX_READ_BUF_SIZE
11875
11876static int
11877read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
11878{
11879 int rc;
11880
11881 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
11882 return (EINVAL);
11883
11884 if (i2cd->len > sizeof(i2cd->data))
11885 return (EFBIG);
11886
11887 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
11888 if (rc)
11889 return (rc);
11890 if (hw_off_limits(sc))
11891 rc = ENXIO;
11892 else
11893 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
11894 i2cd->offset, i2cd->len, &i2cd->data[0]);
11895 end_synchronized_op(sc, 0);
11896
11897 return (rc);
11898}
11899
11900static int
11901clear_stats(struct adapter *sc, u_int port_id)
11902{
11903 int i, v, chan_map;
11904 struct port_info *pi;
11905 struct vi_info *vi;
11906 struct sge_rxq *rxq;
11907 struct sge_txq *txq;
11908 struct sge_wrq *wrq;
11909#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
11910 struct sge_ofld_txq *ofld_txq;
11911#endif
11912#ifdef TCP_OFFLOAD
11913 struct sge_ofld_rxq *ofld_rxq;
11914#endif
11915
11916 if (port_id >= sc->params.nports)
11917 return (EINVAL);
11918 pi = sc->port[port_id];
11919 if (pi == NULL)
11920 return (EIO);
11921
11922 mtx_lock(&sc->reg_lock);
11923 if (!hw_off_limits(sc)) {
11924 /* MAC stats */
11925 t4_clr_port_stats(sc, pi->tx_chan);
11926 if (is_t6(sc)) {
11927 if (pi->fcs_reg != -1)
11928 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
11929 else
11930 pi->stats.rx_fcs_err = 0;
11931 }
11932 for_each_vi(pi, v, vi) {
11933 if (vi->flags & VI_INIT_DONE)
11934 t4_clr_vi_stats(sc, vi->vin);
11935 }
11936 chan_map = pi->rx_e_chan_map;
11937 v = 0; /* reuse */
11938 while (chan_map) {
11939 i = ffs(chan_map) - 1;
11940 t4_write_indirect(sc, A_TP_MIB_INDEX, &v, 1,
11941 A_TP_MIB_TNL_CNG_DROP_0 + i);
11942 chan_map &= ~(1 << i);
11943 }
11944 }
11945 mtx_unlock(&sc->reg_lock);
11946 pi->tx_parse_error = 0;
11947 pi->tnl_cong_drops = 0;
11948
11949 /*
11950 * Since this command accepts a port, clear stats for
11951 * all VIs on this port.
11952 */
11953 for_each_vi(pi, v, vi) {
11954 if (vi->flags & VI_INIT_DONE) {
11955
11956 for_each_rxq(vi, i, rxq) {
11957#if defined(INET) || defined(INET6)
11958 rxq->lro.lro_queued = 0;
11959 rxq->lro.lro_flushed = 0;
11960#endif
11961 rxq->rxcsum = 0;
11962 rxq->vlan_extraction = 0;
11963 rxq->vxlan_rxcsum = 0;
11964
11965 rxq->fl.cl_allocated = 0;
11966 rxq->fl.cl_recycled = 0;
11967 rxq->fl.cl_fast_recycled = 0;
11968 }
11969
11970 for_each_txq(vi, i, txq) {
11971 txq->txcsum = 0;
11972 txq->tso_wrs = 0;
11973 txq->vlan_insertion = 0;
11974 txq->imm_wrs = 0;
11975 txq->sgl_wrs = 0;
11976 txq->txpkt_wrs = 0;
11977 txq->txpkts0_wrs = 0;
11978 txq->txpkts1_wrs = 0;
11979 txq->txpkts0_pkts = 0;
11980 txq->txpkts1_pkts = 0;
11981 txq->txpkts_flush = 0;
11982 txq->raw_wrs = 0;
11983 txq->vxlan_tso_wrs = 0;
11984 txq->vxlan_txcsum = 0;
11985 txq->kern_tls_records = 0;
11986 txq->kern_tls_short = 0;
11987 txq->kern_tls_partial = 0;
11988 txq->kern_tls_full = 0;
11989 txq->kern_tls_octets = 0;
11990 txq->kern_tls_waste = 0;
11991 txq->kern_tls_options = 0;
11992 txq->kern_tls_header = 0;
11993 txq->kern_tls_fin = 0;
11994 txq->kern_tls_fin_short = 0;
11995 txq->kern_tls_cbc = 0;
11996 txq->kern_tls_gcm = 0;
11997 mp_ring_reset_stats(txq->r);
11998 }
11999
12000#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
12001 for_each_ofld_txq(vi, i, ofld_txq) {
12002 ofld_txq->wrq.tx_wrs_direct = 0;
12003 ofld_txq->wrq.tx_wrs_copied = 0;
12004 counter_u64_zero(ofld_txq->tx_iscsi_pdus);
12005 counter_u64_zero(ofld_txq->tx_iscsi_octets);
12006 counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs);
12007 counter_u64_zero(ofld_txq->tx_toe_tls_records);
12008 counter_u64_zero(ofld_txq->tx_toe_tls_octets);
12009 }
12010#endif
12011#ifdef TCP_OFFLOAD
12012 for_each_ofld_rxq(vi, i, ofld_rxq) {
12013 ofld_rxq->fl.cl_allocated = 0;
12014 ofld_rxq->fl.cl_recycled = 0;
12015 ofld_rxq->fl.cl_fast_recycled = 0;
12016 counter_u64_zero(
12017 ofld_rxq->rx_iscsi_ddp_setup_ok);
12018 counter_u64_zero(
12019 ofld_rxq->rx_iscsi_ddp_setup_error);
12020 ofld_rxq->rx_iscsi_ddp_pdus = 0;
12021 ofld_rxq->rx_iscsi_ddp_octets = 0;
12022 ofld_rxq->rx_iscsi_fl_pdus = 0;
12023 ofld_rxq->rx_iscsi_fl_octets = 0;
12024 ofld_rxq->rx_toe_tls_records = 0;
12025 ofld_rxq->rx_toe_tls_octets = 0;
12026 }
12027#endif
12028
12029 if (IS_MAIN_VI(vi)) {
12030 wrq = &sc->sge.ctrlq[pi->port_id];
12031 wrq->tx_wrs_direct = 0;
12032 wrq->tx_wrs_copied = 0;
12033 }
12034 }
12035 }
12036
12037 return (0);
12038}
12039
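/*
 * Pin (and, below, release) an IPv6 address in the adapter's CLIP
 * table on behalf of userspace; without INET6 these report ENOTSUP.
 */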
12040static int
12041hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
12042{
12043#ifdef INET6
12044 struct in6_addr in6;
12045
12046 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
12047 if (t4_get_clip_entry(sc, &in6, true) != NULL)
12048 return (0);
12049 else
12050 return (EIO);
12051#else
12052 return (ENOTSUP);
12053#endif
12054}
12055
12056static int
12057release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
12058{
12059#ifdef INET6
12060 struct in6_addr in6;
12061
12062 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
12063 return (t4_release_clip_addr(sc, &in6));
12064#else
12065 return (ENOTSUP);
12066#endif
12067}
12068
12069int
12070t4_os_find_pci_capability(struct adapter *sc, int cap)
12071{
12072 int i;
12073
12074 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
12075}
12076
12077int
12078t4_os_pci_save_state(struct adapter *sc)
12079{
12080 device_t dev;
12081 struct pci_devinfo *dinfo;
12082
12083 dev = sc->dev;
12084 dinfo = device_get_ivars(dev);
12085
12086 pci_cfg_save(dev, dinfo, 0);
12087 return (0);
12088}
12089
12090int
12091t4_os_pci_restore_state(struct adapter *sc)
12092{
12093 device_t dev;
12094 struct pci_devinfo *dinfo;
12095
12096 dev = sc->dev;
12097 dinfo = device_get_ivars(dev);
12098
12099 pci_cfg_restore(dev, dinfo);
12100 return (0);
12101}
12102
12103void
12104t4_os_portmod_changed(struct port_info *pi)
12105{
12106 struct adapter *sc = pi->adapter;
12107 struct vi_info *vi;
12108 struct ifnet *ifp;
12109 static const char *mod_str[] = {
12110 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
12111 };
12112
12113 KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
12114 ("%s: port_type %u", __func__, pi->port_type));
12115
12116 vi = &pi->vi[0];
12117 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
12118 PORT_LOCK(pi);
12119 build_medialist(pi);
12120 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
12121 fixup_link_config(pi);
12122 apply_link_config(pi);
12123 }
12124 PORT_UNLOCK(pi);
12125 end_synchronized_op(sc, LOCK_HELD);
12126 }
12127
12128 ifp = vi->ifp;
12129 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
12130 if_printf(ifp, "transceiver unplugged.\n");
12131 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
12132 if_printf(ifp, "unknown transceiver inserted.\n");
12133 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
12134 if_printf(ifp, "unsupported transceiver inserted.\n");
12135 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
12136 if_printf(ifp, "%dGbps %s transceiver inserted.\n",
12137 port_top_speed(pi), mod_str[pi->mod_type]);
12138 } else {
12139 if_printf(ifp, "transceiver (type %d) inserted.\n",
12140 pi->mod_type);
12141 }
12142}
12143
12144void
12145t4_os_link_changed(struct port_info *pi)
12146{
12147 struct vi_info *vi;
12148 struct ifnet *ifp;
12149 struct link_config *lc = &pi->link_cfg;
12150 struct adapter *sc = pi->adapter;
12151 int v;
12152
12153 PORT_LOCK_ASSERT_OWNED(pi);
12154
12155 if (is_t6(sc)) {
12156 if (lc->link_ok) {
12157 if (lc->speed > 25000 ||
12158 (lc->speed == 25000 && lc->fec == FEC_RS)) {
12159 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
12160 A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
12161 } else {
12162 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
12163 A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
12164 }
12165 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
12166 pi->stats.rx_fcs_err = 0;
12167 } else {
12168 pi->fcs_reg = -1;
12169 }
12170 } else {
12171 MPASS(pi->fcs_reg != -1);
12172 MPASS(pi->fcs_base == 0);
12173 }
12174
12175 for_each_vi(pi, v, vi) {
12176 ifp = vi->ifp;
12177 if (ifp == NULL)
12178 continue;
12179
12180 if (lc->link_ok) {
12181 ifp->if_baudrate = IF_Mbps(lc->speed);
12182 if_link_state_change(ifp, LINK_STATE_UP);
12183 } else {
12184 if_link_state_change(ifp, LINK_STATE_DOWN);
12185 }
12186 }
12187}
12188
12189void
12190t4_iterate(void (*func)(struct adapter *, void *), void *arg)
12191{
12192 struct adapter *sc;
12193
12194 sx_slock(&t4_list_lock);
12195 SLIST_FOREACH(sc, &t4_list, link) {
12196 /*
12197 * func should not make any assumptions about what state sc is
12198 * in - the only guarantee is that sc->sc_lock is a valid lock.
12199 */
12200 func(sc, arg);
12201 }
12202 sx_sunlock(&t4_list_lock);
12203}
12204
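/*
 * Userspace reaches the handlers above through ioctls on the nexus
 * device node.  A minimal sketch (error handling omitted; the node
 * name varies with the chip and unit, e.g. t5nex0/t6nex0):
 *
 *	struct t4_reg reg = { .addr = 0x7e40, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	ioctl(fd, CHELSIO_T4_GETREG, &reg);
 *	printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */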
12205static int
12206t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
12207 struct thread *td)
12208{
12209 int rc;
12210 struct adapter *sc = dev->si_drv1;
12211
12212 rc = priv_check(td, PRIV_DRIVER);
12213 if (rc != 0)
12214 return (rc);
12215
12216 switch (cmd) {
12217 case CHELSIO_T4_GETREG: {
12218 struct t4_reg *edata = (struct t4_reg *)data;
12219
12220 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
12221 return (EFAULT);
12222
12223 mtx_lock(&sc->reg_lock);
12224 if (hw_off_limits(sc))
12225 rc = ENXIO;
12226 else if (edata->size == 4)
12227 edata->val = t4_read_reg(sc, edata->addr);
12228 else if (edata->size == 8)
12229 edata->val = t4_read_reg64(sc, edata->addr);
12230 else
12231 rc = EINVAL;
12232 mtx_unlock(&sc->reg_lock);
12233
12234 break;
12235 }
12236 case CHELSIO_T4_SETREG: {
12237 struct t4_reg *edata = (struct t4_reg *)data;
12238
12239 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
12240 return (EFAULT);
12241
12242 mtx_lock(&sc->reg_lock);
12243 if (hw_off_limits(sc))
12244 rc = ENXIO;
12245 else if (edata->size == 4) {
12246 if (edata->val & 0xffffffff00000000)
12247 rc = EINVAL;
12248 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
12249 } else if (edata->size == 8)
12250 t4_write_reg64(sc, edata->addr, edata->val);
12251 else
12252 rc = EINVAL;
12253 mtx_unlock(&sc->reg_lock);
12254
12255 break;
12256 }
12257 case CHELSIO_T4_REGDUMP: {
12258 struct t4_regdump *regs = (struct t4_regdump *)data;
12259 int reglen = t4_get_regs_len(sc);
12260 uint8_t *buf;
12261
12262 if (regs->len < reglen) {
12263 regs->len = reglen; /* hint to the caller */
12264 return (ENOBUFS);
12265 }
12266
12267 regs->len = reglen;
12268 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
12269 mtx_lock(&sc->reg_lock);
12270 if (hw_off_limits(sc))
12271 rc = ENXIO;
12272 else
12273 get_regs(sc, regs, buf);
12274 mtx_unlock(&sc->reg_lock);
12275 if (rc == 0)
12276 rc = copyout(buf, regs->data, reglen);
12277 free(buf, M_CXGBE);
12278 break;
12279 }
12280 case CHELSIO_T4_GET_FILTER_MODE:
12281 rc = get_filter_mode(sc, (uint32_t *)data);
12282 break;
12283 case CHELSIO_T4_SET_FILTER_MODE:
12284 rc = set_filter_mode(sc, *(uint32_t *)data);
12285 break;
12286 case CHELSIO_T4_SET_FILTER_MASK:
12287 rc = set_filter_mask(sc, *(uint32_t *)data);
12288 break;
12289 case CHELSIO_T4_GET_FILTER:
12290 rc = get_filter(sc, (struct t4_filter *)data);
12291 break;
12292 case CHELSIO_T4_SET_FILTER:
12293 rc = set_filter(sc, (struct t4_filter *)data);
12294 break;
12295 case CHELSIO_T4_DEL_FILTER:
12296 rc = del_filter(sc, (struct t4_filter *)data);
12297 break;
12298 case CHELSIO_T4_GET_SGE_CONTEXT:
12299 rc = get_sge_context(sc, (struct t4_sge_context *)data);
12300 break;
12301 case CHELSIO_T4_LOAD_FW:
12302 rc = load_fw(sc, (struct t4_data *)data);
12303 break;
12304 case CHELSIO_T4_GET_MEM:
12305 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
12306 break;
12307 case CHELSIO_T4_GET_I2C:
12308 rc = read_i2c(sc, (struct t4_i2c_data *)data);
12309 break;
12310 case CHELSIO_T4_CLEAR_STATS:
12311 rc = clear_stats(sc, *(uint32_t *)data);
12312 break;
12313 case CHELSIO_T4_SCHED_CLASS:
12314 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
12315 break;
12316 case CHELSIO_T4_SCHED_QUEUE:
12317 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
12318 break;
12319 case CHELSIO_T4_GET_TRACER:
12320 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
12321 break;
12322 case CHELSIO_T4_SET_TRACER:
12323 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
12324 break;
12325 case CHELSIO_T4_LOAD_CFG:
12326 rc = load_cfg(sc, (struct t4_data *)data);
12327 break;
12328 case CHELSIO_T4_LOAD_BOOT:
12329 rc = load_boot(sc, (struct t4_bootrom *)data);
12330 break;
12331 case CHELSIO_T4_LOAD_BOOTCFG:
12332 rc = load_bootcfg(sc, (struct t4_data *)data);
12333 break;
12334 case CHELSIO_T4_CUDBG_DUMP:
12335 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
12336 break;
12337 case CHELSIO_T4_SET_OFLD_POLICY:
12338 rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
12339 break;
12340 case CHELSIO_T4_HOLD_CLIP_ADDR:
12341 rc = hold_clip_addr(sc, (struct t4_clip_addr *)data);
12342 break;
12343 case CHELSIO_T4_RELEASE_CLIP_ADDR:
12344 rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
12345 break;
12346 default:
12347 rc = ENOTTY;
12348 }
12349
12350 return (rc);
12351}
12352
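/*
 * Usage sketch (illustrative, not part of the driver): the ioctls above
 * are issued against the nexus character device rather than the ifnet.
 * Reading a 4-byte register from userland might look like this (the
 * device path and register offset are made up for the example;
 * cxgbetool(8) is the usual consumer of this interface):
 *
 *	struct t4_reg r = { .addr = 0x1e004, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &r) == 0)
 *		printf("0x%jx\n", (uintmax_t)r.val);
 *
 * The priv_check(td, PRIV_DRIVER) up front means only suitably
 * privileged processes ever reach the switch.
 */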
12353#ifdef TCP_OFFLOAD
12354static int
12355toe_capability(struct vi_info *vi, bool enable)
12356{
12357 int rc;
12358 struct port_info *pi = vi->pi;
12359 struct adapter *sc = pi->adapter;
12360
12361 ASSERT_SYNCHRONIZED_OP(sc);
12362
12363 if (!is_offload(sc))
12364 return (ENODEV);
12365 if (hw_off_limits(sc))
12366 return (ENXIO);
12367
12368 if (enable) {
12369#ifdef KERN_TLS
12370 if (sc->flags & KERN_TLS_ON) {
12371 int i, j, n;
12372 struct port_info *p;
12373 struct vi_info *v;
12374
12375 /*
12376 * Reconfigure hardware for TOE if TXTLS is not enabled
12377 * on any ifnet.
12378 */
12379 n = 0;
12380 for_each_port(sc, i) {
12381 p = sc->port[i];
12382 for_each_vi(p, j, v) {
12383 if (v->ifp->if_capenable & IFCAP_TXTLS) {
12384 CH_WARN(sc,
12385 "%s has NIC TLS enabled.\n",
12386 device_get_nameunit(v->dev));
12387 n++;
12388 }
12389 }
12390 }
12391 if (n > 0) {
12392 CH_WARN(sc, "Disable NIC TLS on all interfaces "
12393 "associated with this adapter before "
12394 "trying to enable TOE.\n");
12395 return (EAGAIN);
12396 }
12397 rc = t4_config_kern_tls(sc, false);
12398 if (rc)
12399 return (rc);
12400 }
12401#endif
12402 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
12403 /* TOE is already enabled. */
12404 return (0);
12405 }
12406
12407 /*
12408 * We need the port's queues around so that we're able to send
12409 * and receive CPLs to/from the TOE even if the ifnet for this
12410 * port has never been UP'd administratively.
12411 */
12412 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
12413 return (rc);
12414 if (!(pi->vi[0].flags & VI_INIT_DONE) &&
12415 ((rc = vi_init(&pi->vi[0])) != 0))
12416 return (rc);
12417
12418 if (isset(&sc->offload_map, pi->port_id)) {
12419 /* TOE is enabled on another VI of this port. */
12420 pi->uld_vis++;
12421 return (0);
12422 }
12423
12424 if (!uld_active(sc, ULD_TOM)) {
12425 rc = t4_activate_uld(sc, ULD_TOM);
12426 if (rc == EAGAIN) {
12427 log(LOG_WARNING,
12428 "You must kldload t4_tom.ko before trying "
12429 "to enable TOE on a cxgbe interface.\n");
12430 }
12431 if (rc != 0)
12432 return (rc);
12433 KASSERT(sc->tom_softc != NULL,
12434 ("%s: TOM activated but softc NULL", __func__));
12435 KASSERT(uld_active(sc, ULD_TOM),
12436 ("%s: TOM activated but flag not set", __func__));
12437 }
12438
12439 /* Activate iWARP and iSCSI too, if the modules are loaded. */
12440 if (!uld_active(sc, ULD_IWARP))
12441 (void) t4_activate_uld(sc, ULD_IWARP);
12442 if (!uld_active(sc, ULD_ISCSI))
12443 (void) t4_activate_uld(sc, ULD_ISCSI);
12444
12445 pi->uld_vis++;
12446 setbit(&sc->offload_map, pi->port_id);
12447 } else {
12448 pi->uld_vis--;
12449
12450 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
12451 return (0);
12452
12453 KASSERT(uld_active(sc, ULD_TOM),
12454 ("%s: TOM never initialized?", __func__));
12455 clrbit(&sc->offload_map, pi->port_id);
12456 }
12457
12458 return (0);
12459}
12460
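/*
 * Admin-side sketch (illustrative): toe_capability() is reached when the
 * IFCAP_TOE flag of a cxgbe/cxl/cc ifnet is toggled, typically:
 *
 *	# kldload t4_tom
 *	# ifconfig cxgbe0 toe
 *
 * TOE and NIC TLS are mutually exclusive on an adapter, which is why the
 * KERN_TLS block above refuses with EAGAIN until TXTLS is off on every
 * ifnet of this adapter.
 */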
12461/*
12462 * Add an upper layer driver to the global list.
12463 */
12464int
12465t4_register_uld(struct uld_info *ui)
12466{
12467 int rc = 0;
12468 struct uld_info *u;
12469
12470 sx_xlock(&t4_uld_list_lock);
12471 SLIST_FOREACH(u, &t4_uld_list, link) {
12472 if (u->uld_id == ui->uld_id) {
12473 rc = EEXIST;
12474 goto done;
12475 }
12476 }
12477
12478 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
12479 ui->refcount = 0;
12480done:
12481 sx_xunlock(&t4_uld_list_lock);
12482 return (rc);
12483}
12484
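/*
 * Sketch of the registration pattern (hypothetical ULD, illustrative
 * only): an upper layer driver such as t4_tom supplies a uld_info at
 * MOD_LOAD and unregisters it at MOD_UNLOAD.
 *
 *	static struct uld_info my_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = my_activate,
 *		.deactivate = my_deactivate,
 *	};
 *
 *	error = t4_register_uld(&my_uld_info);
 *
 * t4_register_uld() fails with EEXIST if the id is already registered;
 * t4_unregister_uld() fails with EBUSY while the refcount is nonzero.
 */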
12485int
12486t4_unregister_uld(struct uld_info *ui)
12487{
12488 int rc = EINVAL;
12489 struct uld_info *u;
12490
12491 sx_xlock(&t4_uld_list_lock);
12492
12493 SLIST_FOREACH(u, &t4_uld_list, link) {
12494 if (u == ui) {
12495 if (ui->refcount > 0) {
12496 rc = EBUSY;
12497 goto done;
12498 }
12499
12500 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
12501 rc = 0;
12502 goto done;
12503 }
12504 }
12505done:
12506 sx_xunlock(&t4_uld_list_lock);
12507 return (rc);
12508}
12509
12510int
12511t4_activate_uld(struct adapter *sc, int id)
12512{
12513 int rc;
12514 struct uld_info *ui;
12515
12516 ASSERT_SYNCHRONIZED_OP(sc);
12517
12518 if (id < 0 || id > ULD_MAX)
12519 return (EINVAL);
12520 rc = EAGAIN; /* kldload the module with this ULD and try again. */
12521
12522 sx_slock(&t4_uld_list_lock);
12523
12524 SLIST_FOREACH(ui, &t4_uld_list, link) {
12525 if (ui->uld_id == id) {
12526 if (!(sc->flags & FULL_INIT_DONE)) {
12527 rc = adapter_init(sc);
12528 if (rc != 0)
12529 break;
12530 }
12531
12532 rc = ui->activate(sc);
12533 if (rc == 0) {
12534 setbit(&sc->active_ulds, id);
12535 ui->refcount++;
12536 }
12537 break;
12538 }
12539 }
12540
12541 sx_sunlock(&t4_uld_list_lock);
12542
12543 return (rc);
12544}
12545
12546int
12547t4_deactivate_uld(struct adapter *sc, int id)
12548{
12549 int rc;
12550 struct uld_info *ui;
12551
12552 ASSERT_SYNCHRONIZED_OP(sc);
12553
12554 if (id < 0 || id > ULD_MAX)
12555 return (EINVAL);
12556 rc = ENXIO;
12557
12558 sx_slock(&t4_uld_list_lock);
12559
12560 SLIST_FOREACH(ui, &t4_uld_list, link) {
12561 if (ui->uld_id == id) {
12562 rc = ui->deactivate(sc);
12563 if (rc == 0) {
12564 clrbit(&sc->active_ulds, id);
12565 ui->refcount--;
12566 }
12567 break;
12568 }
12569 }
12570
12571 sx_sunlock(&t4_uld_list_lock);
12572
12573 return (rc);
12574}
12575
12576static void
12577t4_async_event(struct adapter *sc)
12578{
12579 struct uld_info *ui;
12580
12581 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
12582 return;
12583 sx_slock(&t4_uld_list_lock);
12584 SLIST_FOREACH(ui, &t4_uld_list, link) {
12585 if (ui->uld_id == ULD_IWARP) {
12586 ui->async_event(sc);
12587 break;
12588 }
12589 }
12590 sx_sunlock(&t4_uld_list_lock);
12591 end_synchronized_op(sc, 0);
12592}
12593
12594int
12595uld_active(struct adapter *sc, int uld_id)
12596{
12597
12598 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
12599
12600 return (isset(&sc->active_ulds, uld_id));
12601}
12602#endif
12603
12604#ifdef KERN_TLS
12605static int
12606ktls_capability(struct adapter *sc, bool enable)
12607{
12608 ASSERT_SYNCHRONIZED_OP(sc);
12609
12610 if (!is_ktls(sc))
12611 return (ENODEV);
12612 if (hw_off_limits(sc))
12613 return (ENXIO);
12614
12615 if (enable) {
12616 if (sc->flags & KERN_TLS_ON)
12617 return (0); /* already on */
12618 if (sc->offload_map != 0) {
12619 CH_WARN(sc,
12620 "Disable TOE on all interfaces associated with "
12621 "this adapter before trying to enable NIC TLS.\n");
12622 return (EAGAIN);
12623 }
12624 return (t4_config_kern_tls(sc, true));
12625 } else {
12626 /*
12627 * Nothing to do for disable. If TOE is enabled sometime later
12628 * then toe_capability will reconfigure the hardware.
12629 */
12630 return (0);
12631 }
12632}
12633#endif
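/*
 * Admin-side sketch (illustrative; the exact knob names may vary by
 * release): NIC TLS needs kernel TLS offload enabled globally and the
 * capability turned on per-ifnet, roughly:
 *
 *	# sysctl kern.ipc.tls.enable=1
 *	# ifconfig cc0 txtls
 *
 * Enabling it here calls t4_config_kern_tls() to reprogram the adapter,
 * so it is refused with EAGAIN while TOE is active (offload_map != 0).
 */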
12634
12635/*
12636 * t = ptr to tunable.
12637 * nc = number of CPUs.
12638 * c = compiled-in default for that tunable.
12639 */
12640static void
12641calculate_nqueues(int *t, int nc, const int c)
12642{
12643 int nq;
12644
12645 if (*t > 0)
12646 return;
12647 nq = *t < 0 ? -*t : c;
12648 *t = min(nc, nq);
12649}
12650
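/*
 * Worked example of the convention above (numbers illustrative): with
 * nc = 16 CPUs and a compiled-in default c = 8,
 *
 *	*t ==  4  -> stays 4            (positive: the user's choice wins)
 *	*t ==  0  -> min(16, 8)  == 8   (unset: default, capped at nc)
 *	*t == -32 -> min(16, 32) == 16  (negative: "up to -*t" queues)
 */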
12651/*
12652 * Come up with reasonable defaults for some of the tunables, provided they're
12653 * not set by the user (in which case we'll use the values as is).
12654 */
12655static void
12656tweak_tunables(void)
12657{
12658 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
12659
12660 if (t4_ntxq < 1) {
12661#ifdef RSS
12662 t4_ntxq = rss_getnumbuckets();
12663#else
12664 t4_ntxq = min(nc, NTXQ);
12665#endif
12666 }
12667
12668 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
12669
12670 if (t4_nrxq < 1) {
12671#ifdef RSS
12672 t4_nrxq = rss_getnumbuckets();
12673#else
12674 t4_nrxq = min(nc, NRXQ);
12675#endif
12676 }
12677
12678 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
12679
12680#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
12681 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
12682 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
12683#endif
12684#ifdef TCP_OFFLOAD
12685 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
12686 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
12687#endif
12688
12689#if defined(TCP_OFFLOAD) || defined(KERN_TLS)
12690 if (t4_toecaps_allowed == -1)
12691 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
12692#else
12693 if (t4_toecaps_allowed == -1)
12694 t4_toecaps_allowed = 0;
12695#endif
12696
12697#ifdef TCP_OFFLOAD
12698 if (t4_rdmacaps_allowed == -1) {
12699 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
12700 FW_CAPS_CONFIG_RDMA_RDMAC;
12701 }
12702
12703 if (t4_iscsicaps_allowed == -1) {
12704 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
12705 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
12706 FW_CAPS_CONFIG_ISCSI_T10DIF;
12707 }
12708
12709 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
12710 t4_tmr_idx_ofld = TMR_IDX_OFLD;
12711
12712 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
12713 t4_pktc_idx_ofld = PKTC_IDX_OFLD;
12714
12715 if (t4_toe_tls_rx_timeout < 0)
12716 t4_toe_tls_rx_timeout = 0;
12717#else
12718 if (t4_rdmacaps_allowed == -1)
12719 t4_rdmacaps_allowed = 0;
12720
12721 if (t4_iscsicaps_allowed == -1)
12722 t4_iscsicaps_allowed = 0;
12723#endif
12724
12725#ifdef DEV_NETMAP
12726 calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
12727 calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
12728 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
12729 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
12730#endif
12731
12732 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
12733 t4_tmr_idx = TMR_IDX;
12734
12735 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
12736 t4_pktc_idx = PKTC_IDX;
12737
12738 if (t4_qsize_txq < 128)
12739 t4_qsize_txq = 128;
12740
12741 if (t4_qsize_rxq < 128)
12742 t4_qsize_rxq = 128;
12743 while (t4_qsize_rxq & 7)
12744 t4_qsize_rxq++;
12745
12746 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
12747
12748 /*
12749 * Number of VIs to create per-port. The first VI is the "main" regular
12750 * VI for the port. The rest are additional virtual interfaces on the
12751 * same physical port. Note that the main VI does not have native
12752 * netmap support but the extra VIs do.
12753 *
12754 * Limit the number of VIs per port to the number of available
12755 * MAC addresses per port.
12756 */
12757 if (t4_num_vis < 1)
12758 t4_num_vis = 1;
12759 if (t4_num_vis > nitems(vi_mac_funcs)) {
12760 t4_num_vis = nitems(vi_mac_funcs);
12761 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
12762 }
12763
12764 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
12765 pcie_relaxed_ordering = 1;
12766#if defined(__i386__) || defined(__amd64__)
12767 if (cpu_vendor_id == CPU_VENDOR_INTEL)
12768 pcie_relaxed_ordering = 0;
12769#endif
12770 }
12771}
12772
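/*
 * All of the knobs adjusted above are loader tunables under hw.cxgbe
 * (see cxgbe(4)).  Illustrative /boot/loader.conf lines:
 *
 *	hw.cxgbe.ntxq="8"
 *	hw.cxgbe.nrxq="8"
 *	hw.cxgbe.num_vis="2"
 *
 * tweak_tunables() runs once at MOD_LOAD, before any adapter attaches,
 * so only values the user left unset get these computed defaults.
 */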
12773#ifdef DDB
12774static void
12775t4_dump_tcb(struct adapter *sc, int tid)
12776{
12777 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
12778
12779 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
12780 save = t4_read_reg(sc, reg);
12781 base = sc->memwin[2].mw_base;
12782
12783 /* Dump TCB for the tid */
12784 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
12785 tcb_addr += tid * TCB_SIZE;
12786
12787 if (is_t4(sc)) {
12788 pf = 0;
12789 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
12790 } else {
12791 pf = V_PFNUM(sc->pf);
12792 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
12793 }
12794 t4_write_reg(sc, reg, win_pos | pf);
12795 t4_read_reg(sc, reg);
12796
12797 off = tcb_addr - win_pos;
12798 for (i = 0; i < 4; i++) {
12799 uint32_t buf[8];
12800 for (j = 0; j < 8; j++, off += 4)
12801 buf[j] = htonl(t4_read_reg(sc, base + off));
12802
12803 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
12804 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
12805 buf[7]);
12806 }
12807
12808 t4_write_reg(sc, reg, save);
12809 t4_read_reg(sc, reg);
12810}
12811
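/*
 * Note on the technique above: the TCB lives in adapter memory, so
 * t4_dump_tcb() temporarily repoints memory window 2 at the (suitably
 * aligned) TCB address, reads the 128-byte TCB as 4 rows of 8 big-endian
 * words, and then restores the saved window position.  The extra
 * t4_read_reg() after each window write flushes the write before the
 * window is used.
 */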
12812static void
12813t4_dump_devlog(struct adapter *sc)
12814{
12815 struct devlog_params *dparams = &sc->params.devlog;
12816 struct fw_devlog_e e;
12817 int i, first, j, m, nentries, rc;
12818 uint64_t ftstamp = UINT64_MAX;
12819
12820 if (dparams->start == 0) {
12821 db_printf("devlog params not valid\n");
12822 return;
12823 }
12824
12825 nentries = dparams->size / sizeof(struct fw_devlog_e);
12826 m = fwmtype_to_hwmtype(dparams->memtype);
12827
12828 /* Find the first entry. */
12829 first = -1;
12830 for (i = 0; i < nentries && !db_pager_quit; i++) {
12831 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
12832 sizeof(e), (void *)&e);
12833 if (rc != 0)
12834 break;
12835
12836 if (e.timestamp == 0)
12837 break;
12838
12839 e.timestamp = be64toh(e.timestamp);
12840 if (e.timestamp < ftstamp) {
12841 ftstamp = e.timestamp;
12842 first = i;
12843 }
12844 }
12845
12846 if (first == -1)
12847 return;
12848
12849 i = first;
12850 do {
12851 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
12852 sizeof(e), (void *)&e);
12853 if (rc != 0)
12854 return;
12855
12856 if (e.timestamp == 0)
12857 return;
12858
12859 e.timestamp = be64toh(e.timestamp);
12860 e.seqno = be32toh(e.seqno);
12861 for (j = 0; j < 8; j++)
12862 e.params[j] = be32toh(e.params[j]);
12863
12864 db_printf("%10d %15ju %8s %8s ",
12865 e.seqno, e.timestamp,
12866 (e.level < nitems(devlog_level_strings) ?
12867 devlog_level_strings[e.level] : "UNKNOWN"),
12868 (e.facility < nitems(devlog_facility_strings) ?
12869 devlog_facility_strings[e.facility] : "UNKNOWN"));
12870 db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
12871 e.params[3], e.params[4], e.params[5], e.params[6],
12872 e.params[7]);
12873
12874 if (++i == nentries)
12875 i = 0;
12876 } while (i != first && !db_pager_quit);
12877}
12878
12879static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
12880_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
12881
12882DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
12883{
12884 device_t dev;
12885 int t;
12886 bool valid;
12887
12888 valid = false;
12889 t = db_read_token();
12890 if (t == tIDENT) {
12891 dev = device_lookup_by_name(db_tok_string);
12892 valid = true;
12893 }
12894 db_skip_to_eol();
12895 if (!valid) {
12896 db_printf("usage: show t4 devlog <nexus>\n");
12897 return;
12898 }
12899
12900 if (dev == NULL) {
12901 db_printf("device not found\n");
12902 return;
12903 }
12904
12905 t4_dump_devlog(device_get_softc(dev));
12906}
12907
12908DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
12909{
12910 device_t dev;
12911 int radix, tid, t;
12912 bool valid;
12913
12914 valid = false;
12915 radix = db_radix;
12916 db_radix = 10;
12917 t = db_read_token();
12918 if (t == tIDENT) {
12919 dev = device_lookup_by_name(db_tok_string);
12920 t = db_read_token();
12921 if (t == tNUMBER) {
12922 tid = db_tok_number;
12923 valid = true;
12924 }
12925 }
12926 db_radix = radix;
12927 db_skip_to_eol();
12928 if (!valid) {
12929 db_printf("usage: show t4 tcb <nexus> <tid>\n");
12930 return;
12931 }
12932
12933 if (dev == NULL) {
12934 db_printf("device not found\n");
12935 return;
12936 }
12937 if (tid < 0) {
12938 db_printf("invalid tid\n");
12939 return;
12940 }
12941
12942 t4_dump_tcb(device_get_softc(dev), tid);
12943}
12944#endif
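/*
 * Both commands hang off the "show t4" table created above.  Typical
 * ddb usage (nexus name illustrative):
 *
 *	db> show t4 devlog t4nex0
 *	db> show t4 tcb t4nex0 17
 *
 * The devlog walker finds the oldest entry by timestamp, then prints
 * forward around the ring, honoring db_pager_quit throughout.
 */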
12945
12946static eventhandler_tag vxlan_start_evtag;
12947static eventhandler_tag vxlan_stop_evtag;
12948
12949struct vxlan_evargs {
12950 struct ifnet *ifp;
12951 uint16_t port;
12952};
12953
12954static void
12955enable_vxlan_rx(struct adapter *sc)
12956{
12957 int i, rc;
12958 struct port_info *pi;
12959 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
12960
12961 ASSERT_SYNCHRONIZED_OP(sc);
12962
12963 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) |
12964 F_VXLAN_EN);
12965 for_each_port(sc, i) {
12966 pi = sc->port[i];
12967 if (pi->vxlan_tcam_entry == true)
12968 continue;
12969 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac,
12970 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
12971 true);
12972 if (rc < 0) {
12973 rc = -rc;
12974 CH_ERR(&pi->vi[0],
12975 "failed to add VXLAN TCAM entry: %d.\n", rc);
12976 } else {
12977 MPASS(rc == sc->rawf_base + pi->port_id);
12978 pi->vxlan_tcam_entry = true;
12979 }
12980 }
12981}
12982
12983static void
12984t4_vxlan_start(struct adapter *sc, void *arg)
12985{
12986 struct vxlan_evargs *v = arg;
12987
12988 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
12989 return;
12990 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0)
12991 return;
12992
12993 if (sc->vxlan_refcount == 0) {
12994 sc->vxlan_port = v->port;
12995 sc->vxlan_refcount = 1;
12996 if (!hw_off_limits(sc))
12997 enable_vxlan_rx(sc);
12998 } else if (sc->vxlan_port == v->port) {
12999 sc->vxlan_refcount++;
13000 } else {
13001 CH_ERR(sc, "VXLAN already configured on port %d; "
13002 "ignoring attempt to configure it on port %d\n",
13003 sc->vxlan_port, v->port);
13004 }
13005 end_synchronized_op(sc, 0);
13006}
13007
13008static void
13009t4_vxlan_stop(struct adapter *sc, void *arg)
13010{
13011 struct vxlan_evargs *v = arg;
13012
13013 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
13014 return;
13015 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0)
13016 return;
13017
13018 /*
13019 * VXLANs may have been configured before the driver was loaded so we
13020 * may see more stops than starts. This is not handled cleanly but at
13021 * least we keep the refcount sane.
13022 */
13023 if (sc->vxlan_port != v->port)
13024 goto done;
13025 if (sc->vxlan_refcount == 0) {
13026 CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; "
13027 "ignoring attempt to stop it again.\n", sc->vxlan_port);
13028 } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc))
13029 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, 0);
13030done:
13031 end_synchronized_op(sc, 0);
13032}
13033
13034static void
13035t4_vxlan_start_handler(void *arg __unused, struct ifnet *ifp,
13036 sa_family_t family, u_int port)
13037{
13038 struct vxlan_evargs v;
13039
13040 MPASS(family == AF_INET || family == AF_INET6);
13041 v.ifp = ifp;
13042 v.port = port;
13043
13044 t4_iterate(t4_vxlan_start, &v);
13045}
13046
13047static void
13048t4_vxlan_stop_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family,
13049 u_int port)
13050{
13051 struct vxlan_evargs v;
13052
13053 MPASS(family == AF_INET || family == AF_INET6);
13054 v.ifp = ifp;
13055 v.port = port;
13056
13057 t4_iterate(t4_vxlan_stop, &v);
13058}
13059
13060
13061static struct sx mlu; /* mod load unload */
13062SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
13063
13064static int
13065mod_event(module_t mod, int cmd, void *arg)
13066{
13067 int rc = 0;
13068 static int loaded = 0;
13069
13070 switch (cmd) {
13071 case MOD_LOAD:
13072 sx_xlock(&mlu);
13073 if (loaded++ == 0) {
13074 t4_sge_modload();
13075 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
13076 t4_filter_rpl, CPL_COOKIE_FILTER);
13077 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
13078 do_l2t_write_rpl, CPL_COOKIE_FILTER);
13079 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
13080 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
13081 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
13082 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
13083 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
13084 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
13085 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
13086 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
13087 t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
13088 do_smt_write_rpl);
13089 sx_init(&t4_list_lock, "T4/T5 adapters");
13090 SLIST_INIT(&t4_list);
13091 callout_init(&fatal_callout, 1);
13092#ifdef TCP_OFFLOAD
13093 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
13094 SLIST_INIT(&t4_uld_list);
13095#endif
13096#ifdef INET6
13097 t4_clip_modload();
13098#endif
13099#ifdef KERN_TLS
13100 t4_tls_modload();
13101#endif
13102 t4_tracer_modload();
13103 tweak_tunables();
13104 vxlan_start_evtag =
13105 EVENTHANDLER_REGISTER(vxlan_start,
13106 t4_vxlan_start_handler, NULL,
13107 EVENTHANDLER_PRI_ANY);
13108 vxlan_stop_evtag =
13109 EVENTHANDLER_REGISTER(vxlan_stop,
13110 t4_vxlan_stop_handler, NULL,
13111 EVENTHANDLER_PRI_ANY);
13112 reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
13113 taskqueue_thread_enqueue, &reset_tq);
13114 taskqueue_start_threads(&reset_tq, 1, PI_SOFT,
13115 "t4_rst_thr");
13116 }
13117 sx_xunlock(&mlu);
13118 break;
13119
13120 case MOD_UNLOAD:
13121 sx_xlock(&mlu);
13122 if (--loaded == 0) {
13123 int tries;
13124
13125 taskqueue_free(reset_tq);
13126 sx_slock(&t4_list_lock);
13127 if (!SLIST_EMPTY(&t4_list)) {
13128 rc = EBUSY;
13129 sx_sunlock(&t4_list_lock);
13130 goto done_unload;
13131 }
13132#ifdef TCP_OFFLOAD
13133 sx_slock(&t4_uld_list_lock);
13134 if (!SLIST_EMPTY(&t4_uld_list)) {
13135 rc = EBUSY;
13136 sx_sunlock(&t4_uld_list_lock);
13137 sx_sunlock(&t4_list_lock);
13138 goto done_unload;
13139 }
13140#endif
13141 tries = 0;
13142 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
13143 uprintf("%ju clusters with custom free routine "
13144 "still in use.\n", t4_sge_extfree_refs());
13145 pause("t4unload", 2 * hz);
13146 }
13147#ifdef TCP_OFFLOAD
13148 sx_sunlock(&t4_uld_list_lock);
13149#endif
13150 sx_sunlock(&t4_list_lock);
13151
13152 if (t4_sge_extfree_refs() == 0) {
13153 EVENTHANDLER_DEREGISTER(vxlan_start,
13154 vxlan_start_evtag);
13155 EVENTHANDLER_DEREGISTER(vxlan_stop,
13156 vxlan_stop_evtag);
13157 t4_tracer_modunload();
13158#ifdef KERN_TLS
13159 t4_tls_modunload();
13160#endif
13161#ifdef INET6
13162 t4_clip_modunload();
13163#endif
13164#ifdef TCP_OFFLOAD
13165 sx_destroy(&t4_uld_list_lock);
13166#endif
13167 sx_destroy(&t4_list_lock);
13168 t4_sge_modunload();
13169 loaded = 0;
13170 } else {
13171 rc = EBUSY;
13172 loaded++; /* undo earlier decrement */
13173 }
13174 }
13175done_unload:
13176 sx_xunlock(&mlu);
13177 break;
13178 }
13179
13180 return (rc);
13181}
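/*
 * mod_event() is shared by the t4nex/t5nex/t6nex modules declared below,
 * hence the "loaded" count under the mlu lock.  Illustrative admin
 * commands:
 *
 *	# kldload if_cxgbe
 *	# kldunload if_cxgbe
 *
 * Unload returns EBUSY while any adapter or ULD is still registered, or
 * while rx clusters with custom free routines remain outstanding
 * (t4_sge_extfree_refs() != 0).
 */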
13182
13183static devclass_t t4_devclass, t5_devclass, t6_devclass;
13184static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
13185static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
13186
13187DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
13188MODULE_VERSION(t4nex, 1);
13189MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
13190#ifdef DEV_NETMAP
13191MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
13192#endif /* DEV_NETMAP */
13193
13194DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
13195MODULE_VERSION(t5nex, 1);
13196MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
13197#ifdef DEV_NETMAP
13198MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
13199#endif /* DEV_NETMAP */
13200
13201DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
13202MODULE_VERSION(t6nex, 1);
13203MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
13204#ifdef DEV_NETMAP
13205MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
13206#endif /* DEV_NETMAP */
13207
13208DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
13209MODULE_VERSION(cxgbe, 1);
13210
13211DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
13212MODULE_VERSION(cxl, 1);
13213
13214DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
13215MODULE_VERSION(cc, 1);
13216
13217DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
13218MODULE_VERSION(vcxgbe, 1);
13219
13220DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
13221MODULE_VERSION(vcxl, 1);
13222
13223DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
13224MODULE_VERSION(vcc, 1);
#define SET_DOOMED(vi)
Definition: adapter.h:193
@ CPL_COOKIE_HASHFILTER
Definition: adapter.h:402
@ CPL_COOKIE_FILTER
Definition: adapter.h:398
@ RX_IQ_QSIZE
Definition: adapter.h:102
@ CL_METADATA_SIZE
Definition: adapter.h:116
@ TX_SGL_SEGS_TSO
Definition: adapter.h:120
@ TX_EQ_QSIZE
Definition: adapter.h:109
@ TX_SGL_SEGS_EO_TSO
Definition: adapter.h:123
@ TX_SGL_SEGS_VM_TSO
Definition: adapter.h:122
void t4_intr_evt(void *)
Definition: t4_sge.c:1325
struct sge_fl fl
Definition: adapter.h:1
unsigned int t4_qsize_txq
void t4_tracer_modunload(void)
Definition: t4_tracer.c:243
static struct wrqe * alloc_wrqe(int wr_len, struct sge_wrq *wrq)
Definition: adapter.h:1437
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *, struct sysctl_oid_list *)
Definition: t4_sge.c:994
#define T4VF_EXTRA_INTR
Definition: adapter.h:1085
int t4_nrxq
#define PORT_LOCK(pi)
Definition: adapter.h:1025
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_filter.c:1224
#define ADAPTER_UNLOCK(sc)
Definition: adapter.h:1016
int t4_hashfilter_ao_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_filter.c:1296
int parse_pkt(struct mbuf **, bool)
Definition: t4_sge.c:2682
#define IS_BUSY(sc)
Definition: adapter.h:194
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_tracer.c:420
static uint32_t t4_read_reg(struct adapter *sc, uint32_t reg)
Definition: adapter.h:1104
int t4_init_tx_sched(struct adapter *)
Definition: t4_sched.c:454
void t4_sge_modload(void)
Definition: t4_sge.c:533
struct sge_eq eq
Definition: adapter.h:0
int t4_get_tracer(struct adapter *, struct t4_tracer *)
Definition: t4_tracer.c:275
struct sge_wrq wrq
Definition: adapter.h:0
int set_filter_mask(struct adapter *, uint32_t)
Definition: t4_filter.c:552
void t4_update_fl_bufsize(struct ifnet *)
Definition: t4_sge.c:2222
int t4_intr_types
void t4_tweak_chip_settings(struct adapter *)
Definition: t4_sge.c:712
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_filter.c:1380
@ MEMWIN2_APERTURE_T4
Definition: adapter.h:511
@ MEMWIN2_APERTURE_T5
Definition: adapter.h:514
@ MEMWIN1_APERTURE
Definition: adapter.h:508
@ MEMWIN0_BASE
Definition: adapter.h:506
@ MEMWIN1_BASE
Definition: adapter.h:509
@ MEMWIN2_BASE_T5
Definition: adapter.h:515
@ NUM_MEMWIN
Definition: adapter.h:503
@ MEMWIN0_APERTURE
Definition: adapter.h:505
@ MEMWIN2_BASE_T4
Definition: adapter.h:512
uint8_t ss[SGE_MAX_WR_LEN]
Definition: adapter.h:27
int del_filter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:1146
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_tracer.c:442
int t4_setup_adapter_queues(struct adapter *)
Definition: t4_sge.c:1036
#define FL_LOCK(fl)
Definition: adapter.h:1030
struct resource * res
Definition: adapter.h:0
void t4_tracer_port_detach(struct adapter *)
Definition: t4_tracer.c:260
unsigned int t4_qsize_rxq
#define for_each_vi(_pi, _iter, _vi)
Definition: adapter.h:1071
void free_fl_buffers(struct adapter *, struct sge_fl *)
Definition: t4_sge.c:5066
struct sge_rxq * rxq
Definition: adapter.h:3
#define IS_DOOMED(vi)
Definition: adapter.h:192
int sysctl_tc_params(SYSCTL_HANDLER_ARGS)
Definition: t4_sched.c:616
static int read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len)
Definition: adapter.h:1473
#define PORT_UNLOCK(pi)
Definition: adapter.h:1026
int t4_ntxq
static void t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
Definition: adapter.h:1134
int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *)
Definition: t4_filter.c:1345
void t4_register_cpl_handler(int, cpl_handler_t)
Definition: t4_sge.c:387
struct mp_ring * r
Definition: adapter.h:3
struct ifnet * ifp
Definition: adapter.h:2
static void t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
Definition: adapter.h:1112
int rid
Definition: adapter.h:1
#define SET_BUSY(sc)
Definition: adapter.h:195
static void * wrtod(struct wrqe *wr)
Definition: adapter.h:1451
#define ADAPTER_LOCK_ASSERT_OWNED(sc)
Definition: adapter.h:1017
#define FL_UNLOCK(fl)
Definition: adapter.h:1032
int t4_create_dma_tag(struct adapter *)
Definition: t4_sge.c:977
int t4_set_tracer(struct adapter *, struct t4_tracer *)
Definition: t4_tracer.c:333
#define TXQ_LOCK(txq)
Definition: adapter.h:1047
@ IQ_HW_ALLOCATED
Definition: adapter.h:383
@ IQS_DISABLED
Definition: adapter.h:386
@ IQ_LRO_ENABLED
Definition: adapter.h:381
@ IQ_RX_TIMESTAMP
Definition: adapter.h:380
@ IQS_IDLE
Definition: adapter.h:388
@ IQ_HAS_FL
Definition: adapter.h:379
struct sge_nm_rxq * nm_rxq
Definition: adapter.h:4
int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *)
Definition: t4_sched.c:405
#define sysctl_handle_64
Definition: adapter.h:83
void t4_register_shared_cpl_handler(int, cpl_handler_t, int)
Definition: t4_sge.c:496
uint8_t doorbells
Definition: adapter.h:7
static void t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
Definition: adapter.h:1463
int t4_pktc_idx
void t4_intr_err(void *)
Definition: t4_sge.c:1301
int t4_tmr_idx
void t4_sge_modunload(void)
Definition: t4_sge.c:613
int t4_free_tx_sched(struct adapter *)
Definition: t4_sched.c:472
int t4_setup_vi_queues(struct vi_info *)
Definition: t4_sge.c:1107
int t4_teardown_adapter_queues(struct adapter *)
Definition: t4_sge.c:1075
int set_filter_mode(struct adapter *, uint32_t)
Definition: t4_filter.c:510
static bool hw_off_limits(struct adapter *sc)
Definition: adapter.h:1096
#define for_each_rxq(vi, iter, q)
Definition: adapter.h:1056
static int forwarding_intr_to_fwq(struct adapter *sc)
Definition: adapter.h:1088
int t4_destroy_dma_tag(struct adapter *)
Definition: t4_sge.c:1020
int get_filter_mode(struct adapter *, uint32_t *)
Definition: t4_filter.c:494
@ XGMAC_VLANEX
Definition: adapter.h:139
@ XGMAC_PROMISC
Definition: adapter.h:137
@ XGMAC_ALL
Definition: adapter.h:143
@ XGMAC_UCADDR
Definition: adapter.h:140
@ XGMAC_MTU
Definition: adapter.h:136
@ XGMAC_ALLMULTI
Definition: adapter.h:138
@ XGMAC_MCADDRS
Definition: adapter.h:141
#define ADAPTER_LOCK(sc)
Definition: adapter.h:1015
#define ASSERT_SYNCHRONIZED_OP(sc)
Definition: adapter.h:1020
#define CLR_BUSY(sc)
Definition: adapter.h:196
int get_filter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:626
#define for_each_ofld_rxq(vi, iter, q)
Definition: adapter.h:1062
#define IDXINCR(idx, incr, wrap)
Definition: adapter.h:1075
@ DOORBELL_KDB
Definition: adapter.h:456
@ DOORBELL_UDBWC
Definition: adapter.h:456
@ DOORBELL_UDB
Definition: adapter.h:456
@ DOORBELL_WCWR
Definition: adapter.h:456
#define T4_EXTRA_INTR
Definition: adapter.h:1082
void t4_tracer_modload(void)
Definition: t4_tracer.c:234
struct sge_iq iq
Definition: adapter.h:0
static int write_via_memwin(struct adapter *sc, int idx, uint32_t addr, const uint32_t *val, int len)
Definition: adapter.h:1481
@ FL_DOOMED
Definition: adapter.h:527
@ FL_STARVING
Definition: adapter.h:526
@ LOCK_HELD
Definition: adapter.h:153
@ SLEEP_OK
Definition: adapter.h:149
@ HOLD_LOCK
Definition: adapter.h:148
@ INTR_OK
Definition: adapter.h:150
#define IS_MAIN_VI(vi)
Definition: adapter.h:342
@ EQ_HW_ALLOCATED
Definition: adapter.h:450
@ EQ_ENABLED
Definition: adapter.h:451
@ EQ_QFLUSH
Definition: adapter.h:452
@ EQ_SW_ALLOCATED
Definition: adapter.h:449
#define TXQ_UNLOCK(txq)
Definition: adapter.h:1049
#define for_each_ofld_txq(vi, iter, q)
Definition: adapter.h:1059
uint64_t t4_sge_extfree_refs(void)
Definition: t4_sge.c:623
int t4_set_sched_class(struct adapter *, struct t4_sched_params *)
Definition: t4_sched.c:304
void free_hftid_hash(struct tid_info *)
Definition: t4_filter.c:140
#define for_each_txq(vi, iter, q)
Definition: adapter.h:1053
@ ADAP_STOPPED
Definition: adapter.h:168
@ FW_OK
Definition: adapter.h:159
@ DF_LOAD_FW_ANYTIME
Definition: adapter.h:186
@ CHK_MBOX_ACCESS
Definition: adapter.h:160
@ DF_VERBOSE_SLOWINTR
Definition: adapter.h:189
@ VI_SKIP_STATS
Definition: adapter.h:182
@ HW_OFF_LIMITS
Definition: adapter.h:170
@ KERN_TLS_ON
Definition: adapter.h:164
@ TX_USES_VM_WR
Definition: adapter.h:181
@ MASTER_PF
Definition: adapter.h:161
@ ADAP_FATAL_ERR
Definition: adapter.h:169
@ ADAP_CIM_ERR
Definition: adapter.h:171
@ HAS_TRACEQ
Definition: adapter.h:174
@ VI_INIT_DONE
Definition: adapter.h:179
@ IS_VF
Definition: adapter.h:163
@ DF_DISABLE_CFG_RETRY
Definition: adapter.h:188
@ FIXED_IFMEDIA
Definition: adapter.h:175
@ FULL_INIT_DONE
Definition: adapter.h:158
#define PORT_LOCK_ASSERT_OWNED(pi)
Definition: adapter.h:1027
@ INTR_INTX
Definition: adapter.h:130
@ INTR_MSIX
Definition: adapter.h:132
@ INTR_MSI
Definition: adapter.h:131
int set_filter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:909
void t4_init_rx_buf_info(struct adapter *)
Definition: t4_sge.c:854
int t4_verify_chip_settings(struct adapter *)
Definition: t4_sge.c:920
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc)
Definition: adapter.h:1018
#define CXGBE_UNIMPLEMENTED(s)
Definition: adapter.h:69
int t4_teardown_vi_queues(struct vi_info *)
Definition: t4_sge.c:1215
void t4_intr_all(void *)
Definition: t4_sge.c:1282
void t4_intr(void *)
Definition: t4_sge.c:1339
static uint64_t t4_read_reg64(struct adapter *sc, uint32_t reg)
Definition: adapter.h:1120
uint32_t speed_to_fwcap(unsigned int speed)
Definition: t4_hw.c:8850
int t4_shutdown_adapter(struct adapter *adapter)
Definition: t4_hw.c:9407
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, u32 *data)
Definition: t4_hw.c:11162
void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
Definition: t4_hw.c:824
void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st, bool sleep_ok)
Definition: t4_hw.c:6205
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq)
Definition: t4_hw.c:5565
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force)
Definition: t4_hw.c:7738
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid)
Definition: t4_hw.c:8049
@ MEM_MC1
Definition: common.h:52
@ MEM_MC
Definition: common.h:52
@ MEM_EDC0
Definition: common.h:52
@ MEM_MC0
Definition: common.h:52
@ MEM_EDC1
Definition: common.h:52
bool t4_slow_intr_handler(struct adapter *adapter, bool verbose)
Definition: t4_hw.c:5295
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
Definition: t4_hw.c:6355
#define CHELSIO_T5
Definition: common.h:415
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok)
Definition: t4_hw.c:8176
static u_int us_to_tcp_ticks(const struct adapter *adap, u_long us)
Definition: common.h:567
void t4_intr_enable(struct adapter *adapter)
Definition: t4_hw.c:5403
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
Definition: t4_hw.c:3716
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
Definition: t4_hw.c:6760
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size)
Definition: t4_hw.c:10470
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6, bool sleep_ok)
Definition: t4_hw.c:6126
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size, __be32 *data)
Definition: t4_hw.c:690
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val)
Definition: t4_hw.c:7825
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx)
Definition: t4_hw.c:121
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
Definition: t4_hw.c:10433
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, struct tp_fcoe_stats *st, bool sleep_ok)
Definition: t4_hw.c:6272
void t4_intr_disable(struct adapter *adapter)
Definition: t4_hw.c:5431
static int is_offload(const struct adapter *adap)
Definition: common.h:492
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
Definition: t4_hw.c:7770
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
Definition: t4_hw.c:6332
static int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl)
Definition: common.h:595
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val)
Definition: t4_hw.c:100
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, enum ctxt_type ctype, u32 *data)
Definition: t4_hw.c:11118
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
Definition: t4_hw.c:7624
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
Definition: t4_hw.c:7607
#define PCI_VENDOR_ID_CHELSIO
Definition: common.h:465
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, u16 *rss_size, uint8_t *vfvld, uint16_t *vin, unsigned int portfunc, unsigned int idstype)
Definition: t4_hw.c:7961
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, unsigned int *valp)
Definition: t4_hw.c:7233
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf)
Definition: t4_hw.c:11092
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
Definition: t4_hw.c:3737
static int is_t6(struct adapter *adap)
Definition: common.h:532
int t4_get_version_info(struct adapter *adapter)
Definition: t4_hw.c:3463
#define CHELSIO_T4
Definition: common.h:414
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
Definition: t4_hw.c:6606
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
Definition: t4_hw.c:3569
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta)
Definition: t4_hw.c:6444
int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size)
Definition: t4_hw.c:10706
unsigned int t4_get_regs_len(struct adapter *adapter)
Definition: t4_hw.c:792
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok)
Definition: t4_hw.c:8546
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
Definition: t4_hw.c:9334
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, unsigned int *valp)
Definition: t4_hw.c:10056
void t4_clr_port_stats(struct adapter *adap, int idx)
Definition: t4_hw.c:11004
int t4_update_port_info(struct port_info *pi)
Definition: t4_hw.c:9022
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq, unsigned int skeyidx, unsigned int skey)
Definition: t4_hw.c:5686
u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg)
Definition: t4_hw.c:164
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, bool sleep_ok)
Definition: t4_hw.c:6164
int t4_init_devlog_params(struct adapter *adapter, int fw_attach)
Definition: t4_hw.c:9538
#define CHELSIO_T6
Definition: common.h:416
static int chip_id(struct adapter *adap)
Definition: common.h:512
int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size)
Definition: t4_hw.c:10847
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, bool sleep_ok)
Definition: t4_hw.c:6297
static int chip_rev(struct adapter *adap)
Definition: common.h:517
#define for_each_port(adapter, iter)
Definition: common.h:468
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val)
Definition: t4_hw.c:7885
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en)
Definition: t4_hw.c:8602
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, bool sleep_ok)
Definition: t4_hw.c:5922
void t4_intr_clear(struct adapter *adapter)
Definition: t4_hw.c:5445
static int is_t4(struct adapter *adap)
Definition: common.h:522
void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st, bool sleep_ok)
Definition: t4_hw.c:6317
@ FEC_AUTO
Definition: common.h:78
@ FEC_BASER_RS
Definition: common.h:70
@ FEC_MODULE
Definition: common.h:79
@ FEC_RS
Definition: common.h:69
@ FEC_NONE
Definition: common.h:71
@ PAUSE_AUTONEG
Definition: common.h:61
@ PAUSE_TX
Definition: common.h:60
@ PAUSE_RX
Definition: common.h:59
void t4_report_fw_error(struct adapter *adap)
Definition: t4_hw.c:199
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
Definition: t4_hw.c:9931
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state)
Definition: t4_hw.c:7483
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, bool sleep_ok)
Definition: t4_hw.c:6256
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc)
Definition: t4_hw.c:3880
@ MAX_NPORTS
Definition: common.h:38
dev_state
Definition: common.h:56
@ DEV_STATE_UNINIT
Definition: common.h:56
@ DEV_STATE_ERR
Definition: common.h:56
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
Definition: t4_hw.c:8239
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
Definition: t4_hw.c:10131
static int is_ethoffload(const struct adapter *adap)
Definition: common.h:497
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
Definition: t4_hw.c:6928
static int is_ktls(const struct adapter *adap)
Definition: common.h:507
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
Definition: t4_hw.c:6787
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
Definition: t4_hw.c:8494
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
Definition: t4_hw.c:7033
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
Definition: t4_hw.c:10413
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
Definition: t4_hw.c:10200
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val)
Definition: t4_hw.c:6377
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr)
Definition: t4_hw.c:3682
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx)
Definition: t4_hw.c:144
static int port_top_speed(const struct port_info *pi)
Definition: common.h:956
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok)
Definition: t4_hw.c:8080
static int is_t5(struct adapter *adap)
Definition: common.h:527
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
Definition: t4_hw.c:9874
@ MASTER_MAY
Definition: common.h:54
int t4_init_tp_params(struct adapter *adap)
Definition: t4_hw.c:9779
int t4_init_sge_params(struct adapter *adapter)
Definition: t4_hw.c:9603
const char * t4_link_down_rc_str(unsigned char link_down_rc)
Definition: t4_hw.c:8802
static int is_fpga(struct adapter *adap)
Definition: common.h:537
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
Definition: t4_hw.c:3311
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
Definition: t4_hw.c:10008
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
Definition: t4_hw.c:9966
int t4_flash_cfg_addr(struct adapter *adapter)
Definition: t4_hw.c:3525
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, bool sleep_ok)
Definition: t4_hw.c:6239
int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
Definition: cudbg_lib.c:203
void * cudbg_alloc_handle(void)
Definition: cudbg_common.c:81
int(* cudbg_print_cb)(char *str,...)
Definition: cudbg.h:310
static struct cudbg_init * cudbg_get_init(void *handle)
Definition: cudbg.h:425
void cudbg_free_handle(IN void *handle)
static const char *const region[]
Definition: cudbg_entity.h:305
#define MAX_ATIDS
Definition: offload.h:75
@ ULD_ISCSI
Definition: offload.h:207
@ ULD_MAX
Definition: offload.h:208
@ ULD_IWARP
Definition: offload.h:206
@ ULD_TOM
Definition: offload.h:205
#define INIT_TP_WR_MIT_CPL(w, cpl, tid)
Definition: offload.h:57
uint32_t __be32
Definition: osdep.h:69
#define AUTONEG_DISABLE
Definition: osdep.h:116
#define AUTONEG_ENABLE
Definition: osdep.h:117
uint64_t u64
Definition: osdep.h:62
static int ilog2(long x)
Definition: osdep.h:136
#define false
Definition: osdep.h:82
uint8_t __u8
Definition: osdep.h:63
#define AUTONEG_AUTO
Definition: osdep.h:115
uint16_t __be16
Definition: osdep.h:68
#define CH_WARN(adap, fmt,...)
Definition: osdep.h:47
#define CH_ERR(adap, fmt,...)
Definition: osdep.h:45
#define CH_ALERT(adap, fmt,...)
Definition: osdep.h:49
uint32_t u32
Definition: osdep.h:61
unsigned int eo_wr_cred
Definition: common.h:399
unsigned int scfg_vers
Definition: common.h:375
struct devlog_params devlog
Definition: common.h:363
unsigned int hash_filter
Definition: common.h:393
unsigned short a_wnd[NCCTRL_WIN]
Definition: common.h:379
unsigned int max_ird_adapter
Definition: common.h:402
bool fr_nsmr_tpte_wr_support
Definition: common.h:407
uint32_t mps_bg_map
Definition: common.h:404
struct tp_params tp
Definition: common.h:360
bool ulptx_memwrite_dsgl
Definition: common.h:406
unsigned short mtus[NMTUS]
Definition: common.h:378
unsigned int fw_vers
Definition: common.h:371
uint8_t portvec
Definition: common.h:385
struct pci_params pci
Definition: common.h:362
unsigned int offload
Definition: common.h:389
bool viid_smt_extn_support
Definition: common.h:409
unsigned int core_vdd
Definition: common.h:366
unsigned int tp_vers
Definition: common.h:373
unsigned int port_caps32
Definition: common.h:395
unsigned int cim_la_size
Definition: common.h:382
unsigned int ofldq_wr_cred
Definition: common.h:398
struct sge_params sge
Definition: common.h:359
unsigned int bs_vers
Definition: common.h:372
unsigned int filter2_wr_support
Definition: common.h:394
uint8_t nsched_cls
Definition: common.h:411
unsigned int er_vers
Definition: common.h:374
unsigned int max_pkts_per_eth_tx_pkts_wr
Definition: common.h:410
struct vpd_params vpd
Definition: common.h:361
bool dev_512sgl_mr
Definition: common.h:408
unsigned int ethoffload
Definition: common.h:392
uint8_t nports
Definition: common.h:384
unsigned int vpd_vers
Definition: common.h:376
unsigned short b_wnd[NCCTRL_WIN]
Definition: common.h:380
unsigned int max_ordird_qp
Definition: common.h:401
struct adapter_params params
Definition: t4_main.c:2007
struct t4_virt_res vres
Definition: t4_main.c:2008
struct tid_info tids
Definition: t4_main.c:2009
int msix_rid
Definition: adapter.h:873
struct sysctl_oid * fwq_oid
Definition: adapter.h:974
unsigned int mbox
Definition: adapter.h:883
const void * reset_thread
Definition: adapter.h:1001
const void * last_op_thr
Definition: adapter.h:1006
int bt_map
Definition: adapter.h:938
uint16_t cryptocaps
Definition: adapter.h:968
int nrawf
Definition: adapter.h:909
struct mtx ifp_lock
Definition: adapter.h:945
int active_ulds
Definition: adapter.h:939
struct cdev * cdev
Definition: adapter.h:867
int lro_timeout
Definition: adapter.h:903
int sc_do_rxcopy
Definition: adapter.h:904
const struct devnames * names
Definition: adapter.h:868
void * tom_softc
Definition: adapter.h:921
int intr_type
Definition: adapter.h:888
char fw_version[16]
Definition: adapter.h:952
uint8_t chan_map[MAX_NCHAN]
Definition: adapter.h:913
char cfg_file[32]
Definition: adapter.h:956
char bs_version[16]
Definition: adapter.h:955
uint16_t nbmcaps
Definition: adapter.h:962
unsigned int pf
Definition: adapter.h:882
uint16_t switchcaps
Definition: adapter.h:964
struct mtx reg_lock
Definition: adapter.h:992
int flags
Definition: adapter.h:940
int swintr
Definition: adapter.h:1009
u_int vxlan_refcount
Definition: adapter.h:907
struct tid_info tids
Definition: adapter.h:932
int num_resets
Definition: adapter.h:1002
struct t4_offload_policy * policy
Definition: adapter.h:923
char tp_version[16]
Definition: adapter.h:953
uint16_t toecaps
Definition: adapter.h:966
volatile uint8_t * udbs_base
Definition: adapter.h:880
bus_space_handle_t bh
Definition: adapter.h:875
bus_size_t mmio_len
Definition: adapter.h:877
const struct chip_params * chip_params
Definition: adapter.h:959
struct callout ktls_tick
Definition: adapter.h:1012
struct sysctl_ctx_list ctx
Definition: adapter.h:972
struct resource * regs_res
Definition: adapter.h:872
struct resource * udbs_res
Definition: adapter.h:879
int debug_flags
Definition: adapter.h:941
int error_flags
Definition: adapter.h:942
char er_version[16]
Definition: adapter.h:954
struct tls_tunables tlst
Definition: adapter.h:934
struct mtx sc_lock
Definition: adapter.h:976
struct adapter_params params
Definition: adapter.h:958
struct tom_tunables tt
Definition: adapter.h:922
struct smt_data * smt
Definition: adapter.h:931
struct rwlock policy_lock
Definition: adapter.h:924
int traceq
Definition: adapter.h:948
struct task reset_task
Definition: adapter.h:1000
int udbs_rid
Definition: adapter.h:878
struct l2t_data * l2t
Definition: adapter.h:930
char ifp_lockname[16]
Definition: adapter.h:944
u_int cfcsum
Definition: adapter.h:957
int offload_map
Definition: adapter.h:937
int intr_count
Definition: adapter.h:889
uint16_t iscsicaps
Definition: adapter.h:969
struct resource * msix_res
Definition: adapter.h:874
int vxlan_port
Definition: adapter.h:906
int sge_gts_reg
Definition: adapter.h:897
struct iw_tunables iwt
Definition: adapter.h:927
struct t4_virt_res vres
Definition: adapter.h:960
int incarnation
Definition: adapter.h:1003
uint16_t niccaps
Definition: adapter.h:965
struct port_info * port[MAX_NPORTS]
Definition: adapter.h:912
uint16_t fcoecaps
Definition: adapter.h:970
struct task fatal_error_task
Definition: adapter.h:999
bus_space_tag_t bt
Definition: adapter.h:876
struct sysctl_oid * ctrlq_oid
Definition: adapter.h:973
uint16_t linkcaps
Definition: adapter.h:963
struct memwin memwin[NUM_MEMWIN]
Definition: adapter.h:994
struct mtx sfl_lock
Definition: adapter.h:980
struct sge sge
Definition: adapter.h:902
const char * last_op
Definition: adapter.h:1005
char lockname[16]
Definition: adapter.h:977
int sensor_resets
Definition: adapter.h:1010
int last_op_flags
Definition: adapter.h:1007
int rawf_base
Definition: adapter.h:908
vmem_t * key_map
Definition: adapter.h:933
struct callout sfl_callout
Definition: adapter.h:982
uint8_t doorbells
Definition: adapter.h:936
struct taskqueue * tq[MAX_NCHAN]
Definition: adapter.h:911
device_t dev
Definition: adapter.h:866
int regs_rid
Definition: adapter.h:871
uint16_t rdmacaps
Definition: adapter.h:967
int sge_kdoorbell_reg
Definition: adapter.h:898
uint16_t niccaps
Definition: t4_main.c:4824
uint16_t linkcaps
Definition: t4_main.c:4822
uint16_t switchcaps
Definition: t4_main.c:4823
uint16_t rdmacaps
Definition: t4_main.c:4826
uint16_t fcoecaps
Definition: t4_main.c:4829
uint16_t iscsicaps
Definition: t4_main.c:4828
uint16_t cryptocaps
Definition: t4_main.c:4827
uint16_t toecaps
Definition: t4_main.c:4825
uint16_t nbmcaps
Definition: t4_main.c:4821
u16 mps_tcam_size
Definition: common.h:316
u8 cim_num_obq
Definition: common.h:311
u8 nchan
Definition: common.h:307
u8 nsched_cls
Definition: common.h:310
struct adapter * adap
Definition: cudbg.h:373
u32 use_flash
Definition: cudbg.h:378
u8 dbg_bitmap[CUDBG_MAX_BITMAP_LEN]
Definition: cudbg.h:387
cudbg_print_cb print
Definition: cudbg.h:375
u32 memtype
Definition: common.h:299
const char * vi_ifnet_name
Definition: adapter.h:856
const char * ifnet_name
Definition: adapter.h:855
const char * nexus_name
Definition: adapter.h:854
u_int start
Definition: t4_main.c:10690
u_int width
Definition: t4_main.c:10691
const char * name
Definition: cudbg_entity.h:407
__u8 fmt[FW_DEVLOG_FMT_LEN]
__be32 params[FW_DEVLOG_FMT_PARAMS_NUM]
Definition: t4_main.c:4349
__u8 chip
Definition: t4_main.c:4351
__u8 intfver_vnic
Definition: t4_main.c:4356
__u8 intfver_nic
Definition: t4_main.c:4355
__u8 intfver_fcoepdu
Definition: t4_main.c:4361
__u8 intfver_ri
Definition: t4_main.c:4358
__u8 intfver_iscsi
Definition: t4_main.c:4360
__be16 len512
Definition: t4_main.c:4352
__u8 intfver_iscsipdu
Definition: t4_main.c:4359
__u8 intfver_ofld
Definition: t4_main.c:4357
__u8 ver
Definition: t4_main.c:4350
__u8 intfver_fcoe
Definition: t4_main.c:4362
__be32 fw_ver
Definition: t4_main.c:4353
__be32 tp_microcode_ver
Definition: t4_main.c:4354
__be32 flags
struct fw_h fw_h
Definition: t4_main.c:4373
char * fw_mod_name
Definition: t4_main.c:4372
char * kld_name
Definition: t4_main.c:4371
uint8_t chip
Definition: t4_main.c:4370
union fw_ldst_cmd::fw_ldst u
__be32 op_to_addrspace
__be32 cycles_to_len16
uint16_t intr_type
Definition: t4_main.c:717
uint16_t nofldrxq
Definition: t4_main.c:723
uint16_t nirq
Definition: t4_main.c:719
uint16_t num_vis
Definition: t4_main.c:718
uint16_t ntxq
Definition: t4_main.c:720
uint16_t nrxq_vi
Definition: t4_main.c:729
uint16_t nofldtxq
Definition: t4_main.c:722
uint16_t nnmrxq
Definition: t4_main.c:725
uint16_t nofldrxq_vi
Definition: t4_main.c:731
uint16_t nofldtxq_vi
Definition: t4_main.c:730
uint16_t ntxq_vi
Definition: t4_main.c:728
uint16_t nnmtxq
Definition: t4_main.c:724
uint16_t nnmtxq_vi
Definition: t4_main.c:732
uint16_t nrxq
Definition: t4_main.c:721
uint16_t nnmrxq_vi
Definition: t4_main.c:733
int wc_en
Definition: offload.h:241
u64 octets
Definition: common.h:152
struct ifnet * ifp
Definition: t4_main.c:5940
uint64_t hash
Definition: t4_main.c:5942
const uint8_t * mcaddr[FW_MAC_EXACT_CHUNK]
Definition: t4_main.c:5941
unsigned int limit
Definition: t4_main.c:9662
unsigned int idx
Definition: t4_main.c:9663
unsigned int base
Definition: t4_main.c:9661
uint32_t base
Definition: t4_main.c:3672
uint32_t aperture
Definition: t4_main.c:3673
uint32_t mw_base
Definition: adapter.h:520
uint32_t mw_curpos
Definition: adapter.h:522
uint32_t mw_aperture
Definition: adapter.h:521
counter_u64_t dropped
Definition: t4_mp_ring.h:51
int8_t cong_algo
Definition: t4_ioctl.h:382
int8_t offload
Definition: t4_ioctl.h:380
int8_t sched_class
Definition: t4_ioctl.h:383
unsigned short width
Definition: common.h:292
unsigned short speed
Definition: common.h:291
unsigned int mps
Definition: common.h:290
char lockname[16]
Definition: adapter.h:317
struct ifmedia media
Definition: adapter.h:331
bool vxlan_tcam_entry
Definition: adapter.h:312
struct mtx pi_lock
Definition: adapter.h:316
struct sysctl_ctx_list ctx
Definition: adapter.h:339
uint8_t rx_c_chan
Definition: adapter.h:328
uint8_t port_type
Definition: adapter.h:322
uint64_t fcs_base
Definition: adapter.h:337
int uld_vis
Definition: adapter.h:311
u_int tx_parse_error
Definition: adapter.h:335
int fcs_reg
Definition: adapter.h:336
struct link_config link_cfg
Definition: adapter.h:330
u_int tnl_cong_drops
Definition: adapter.h:334
uint8_t tx_chan
Definition: adapter.h:325
device_t dev
Definition: adapter.h:305
uint8_t mod_type
Definition: adapter.h:323
struct adapter * adapter
Definition: adapter.h:306
int nvi
Definition: adapter.h:309
struct tx_sched_params * sched_params
Definition: adapter.h:314
uint8_t mps_bg_map
Definition: adapter.h:326
struct vi_info * vi
Definition: adapter.h:308
int up_vis
Definition: adapter.h:310
struct port_stats stats
Definition: adapter.h:333
uint8_t rx_e_chan_map
Definition: adapter.h:327
uint8_t port_id
Definition: adapter.h:324
int8_t mdio_addr
Definition: adapter.h:321
unsigned long flags
Definition: adapter.h:318
u64 rx_frames
Definition: common.h:112
u64 tx_frames
Definition: common.h:86
u64 rx_ovflow1
Definition: common.h:142
u64 rx_ovflow3
Definition: common.h:144
u64 tx_mcast_frames
Definition: common.h:88
u64 rx_ovflow0
Definition: common.h:141
u64 tx_octets
Definition: common.h:85
u64 rx_octets
Definition: common.h:111
u64 rx_jabber
Definition: common.h:117
u64 rx_too_long
Definition: common.h:116
u64 rx_mcast_frames
Definition: common.h:114
u64 rx_ovflow2
Definition: common.h:143
u64 rx_trunc2
Definition: common.h:147
u64 rx_fcs_err
Definition: common.h:118
u64 rx_len_err
Definition: common.h:119
u64 rx_trunc1
Definition: common.h:146
u64 tx_drop
Definition: common.h:100
u64 rx_trunc0
Definition: common.h:145
u64 rx_runt
Definition: common.h:121
u64 tx_error_frames
Definition: common.h:90
u64 rx_trunc3
Definition: common.h:148
uint16_t sidx
Definition: adapter.h:476
unsigned int flags
Definition: adapter.h:465
uint16_t pidx
Definition: adapter.h:478
struct tx_desc * desc
Definition: adapter.h:473
uint16_t cidx
Definition: adapter.h:477
uint64_t cl_fast_recycled
Definition: adapter.h:562
int flags
Definition: adapter.h:544
uint64_t cl_recycled
Definition: adapter.h:561
uint64_t cl_allocated
Definition: adapter.h:560
uint8_t intr_params
Definition: adapter.h:426
uint32_t flags
Definition: adapter.h:420
volatile int state
Definition: adapter.h:421
uint16_t abs_id
Definition: adapter.h:432
struct vi_info * vi
Definition: adapter.h:760
counter_u64_t rx_iscsi_ddp_setup_error
Definition: adapter.h:677
uint64_t rx_iscsi_fl_pdus
Definition: adapter.h:680
uint64_t rx_iscsi_ddp_pdus
Definition: adapter.h:678
u_long rx_toe_tls_records
Definition: adapter.h:685
uint64_t rx_iscsi_fl_octets
Definition: adapter.h:681
struct sge_fl fl
Definition: adapter.h:675
uint64_t rx_iscsi_ddp_octets
Definition: adapter.h:679
struct sge_iq iq
Definition: adapter.h:674
u_long rx_toe_tls_octets
Definition: adapter.h:686
counter_u64_t rx_iscsi_ddp_setup_ok
Definition: adapter.h:676
counter_u64_t tx_iscsi_octets
Definition: adapter.h:749
counter_u64_t tx_iscsi_iso_wrs
Definition: adapter.h:750
counter_u64_t tx_toe_tls_octets
Definition: adapter.h:752
counter_u64_t tx_toe_tls_records
Definition: adapter.h:751
struct sge_wrq wrq
Definition: adapter.h:747
counter_u64_t tx_iscsi_pdus
Definition: adapter.h:748
int counter_val[SGE_NCOUNTERS]
Definition: common.h:239
int timer_val[SGE_NTIMERS]
Definition: common.h:238
volatile __be16 pidx
Definition: t4_hw.h:105
volatile __be16 cidx
Definition: t4_hw.h:104
uint64_t rxcsum
Definition: adapter.h:657
uint64_t vxlan_rxcsum
Definition: adapter.h:659
struct sge_iq iq
Definition: adapter.h:649
struct lro_ctrl lro
Definition: adapter.h:653
struct sge_fl fl
Definition: adapter.h:650
uint64_t vlan_extraction
Definition: adapter.h:658
uint64_t raw_wrs
Definition: adapter.h:624
struct mp_ring * r
Definition: adapter.h:602
uint64_t kern_tls_waste
Definition: adapter.h:633
uint64_t txpkts0_pkts
Definition: adapter.h:621
uint64_t kern_tls_fin
Definition: adapter.h:636
uint64_t kern_tls_options
Definition: adapter.h:634
uint64_t vlan_insertion
Definition: adapter.h:615
uint64_t kern_tls_octets
Definition: adapter.h:632
uint64_t imm_wrs
Definition: adapter.h:616
uint64_t txcsum
Definition: adapter.h:613
uint64_t txpkt_wrs
Definition: adapter.h:618
__be32 cpl_ctrl0
Definition: adapter.h:605
uint64_t kern_tls_header
Definition: adapter.h:635
uint64_t kern_tls_full
Definition: adapter.h:631
struct tx_sdesc * sdesc
Definition: adapter.h:603
struct txpkts txp
Definition: adapter.h:608
uint64_t txpkts0_wrs
Definition: adapter.h:619
uint64_t txpkts1_wrs
Definition: adapter.h:620
uint64_t sgl_wrs
Definition: adapter.h:617
uint64_t kern_tls_partial
Definition: adapter.h:630
uint64_t kern_tls_records
Definition: adapter.h:628
uint64_t tso_wrs
Definition: adapter.h:614
uint64_t txpkts1_pkts
Definition: adapter.h:622
uint64_t kern_tls_gcm
Definition: adapter.h:639
uint64_t kern_tls_fin_short
Definition: adapter.h:637
struct sge_eq eq
Definition: adapter.h:599
uint64_t txpkts_flush
Definition: adapter.h:623
uint64_t kern_tls_cbc
Definition: adapter.h:638
uint64_t kern_tls_short
Definition: adapter.h:629
uint64_t vxlan_txcsum
Definition: adapter.h:626
uint64_t vxlan_tso_wrs
Definition: adapter.h:625
uint64_t tx_wrs_direct
Definition: adapter.h:729
uint64_t tx_wrs_copied
Definition: adapter.h:731
struct sge_eq eq
Definition: adapter.h:714
struct sge
Definition: adapter.h:821
int neq
Definition: adapter.h:829
int ntxq
Definition: adapter.h:823
struct sge_ofld_rxq * ofld_rxq
Definition: adapter.h:836
int nofldtxq
Definition: adapter.h:825
struct sge_iq fwq
Definition: adapter.h:831
struct sge_nm_rxq * nm_rxq
Definition: adapter.h:838
struct sge_txq * txq
Definition: adapter.h:833
struct sge_nm_txq * nm_txq
Definition: adapter.h:837
int nnmtxq
Definition: adapter.h:827
struct sge_wrq * ctrlq
Definition: adapter.h:832
int eq_start
Definition: adapter.h:842
int niq
Definition: adapter.h:828
int iqmap_sz
Definition: adapter.h:844
int eqmap_sz
Definition: adapter.h:845
int nrxq
Definition: adapter.h:822
struct sge_rxq * rxq
Definition: adapter.h:834
struct sge_eq ** eqmap
Definition: adapter.h:847
int nofldrxq
Definition: adapter.h:824
int nnmrxq
Definition: adapter.h:826
uint16_t iq_start
Definition: adapter.h:840
struct sge_ofld_txq * ofld_txq
Definition: adapter.h:835
struct sge_iq ** iqmap
Definition: adapter.h:846
uint32_t len
Definition: t4_ioctl.h:93
uint32_t pf_offset
Definition: t4_ioctl.h:91
uint8_t * data
Definition: t4_ioctl.h:94
uint32_t pfidx_addr
Definition: t4_ioctl.h:92
uint8_t addr[16]
Definition: t4_ioctl.h:412
uint8_t * data
Definition: t4_ioctl.h:369
uint32_t len
Definition: t4_ioctl.h:368
uint8_t wr_flash
Definition: t4_ioctl.h:366
uint8_t bitmap[16]
Definition: t4_ioctl.h:367
uint32_t len
Definition: t4_ioctl.h:86
uint8_t * data
Definition: t4_ioctl.h:87
uint8_t data[8]
Definition: t4_ioctl.h:102
uint8_t offset
Definition: t4_ioctl.h:100
uint8_t len
Definition: t4_ioctl.h:101
uint8_t dev_addr
Definition: t4_ioctl.h:99
uint8_t port_id
Definition: t4_ioctl.h:98
uint32_t len
Definition: t4_ioctl.h:342
uint32_t * data
Definition: t4_ioctl.h:343
uint32_t addr
Definition: t4_ioctl.h:341
uint32_t nrules
Definition: t4_ioctl.h:406
struct offload_rule * rule
Definition: t4_ioctl.h:407
u_int start
Definition: offload.h:186
u_int size
Definition: offload.h:187
uint32_t size
Definition: t4_ioctl.h:73
uint64_t val
Definition: t4_ioctl.h:74
uint32_t addr
Definition: t4_ioctl.h:72
uint32_t version
Definition: t4_ioctl.h:80
uint32_t len
Definition: t4_ioctl.h:81
uint32_t * data
Definition: t4_ioctl.h:82
uint32_t cid
Definition: t4_ioctl.h:336
uint32_t data[T4_SGE_CONTEXT_SIZE/4]
Definition: t4_ioctl.h:337
uint32_t mem_id
Definition: t4_ioctl.h:335
struct t4_range l2t
Definition: offload.h:200
struct t4_range pbl
Definition: offload.h:195
struct t4_range cq
Definition: offload.h:197
struct t4_range key
Definition: offload.h:201
struct t4_range stag
Definition: offload.h:193
struct t4_range ocq
Definition: offload.h:199
struct t4_range srq
Definition: offload.h:198
struct t4_range qp
Definition: offload.h:196
struct t4_range ddp
Definition: offload.h:191
struct t4_range iscsi
Definition: offload.h:192
struct t4_range rq
Definition: offload.h:194
u_int nftids
Definition: offload.h:131
u_int etid_end
Definition: offload.h:144
u_int ftid_end
Definition: offload.h:133
u_int etids_in_use
Definition: offload.h:182
u_int stids_in_use
Definition: offload.h:148
u_int stid_base
Definition: offload.h:127
u_int nhpftids
Definition: offload.h:135
u_int etid_base
Definition: offload.h:143
struct cv ftid_cv
Definition: offload.h:159
u_int hpftid_end
Definition: offload.h:137
union aopen_entry * afree
Definition: offload.h:154
u_int hpftid_base
Definition: offload.h:136
u_int atids_in_use
Definition: offload.h:155
struct filter_entry * ftid_tab
Definition: offload.h:160
void ** tid_tab
Definition: offload.h:171
struct filter_entry * hpftid_tab
Definition: offload.h:161
u_int tids_in_use
Definition: offload.h:172
u_int hpftids_in_use
Definition: offload.h:163
u_int natids
Definition: offload.h:129
u_int ftid_base
Definition: offload.h:132
u_int ftids_in_use
Definition: offload.h:162
u_int nstids
Definition: offload.h:126
u_int netids
Definition: offload.h:142
u_int tid_base
Definition: offload.h:140
union aopen_entry * atid_tab
Definition: offload.h:153
u_int ntids
Definition: offload.h:139
int inline_keys
Definition: offload.h:245
int combo_wrs
Definition: offload.h:246
int num_tls_rx_ports
Definition: offload.h:230
int update_hc_on_pmtu_change
Definition: offload.h:235
int rx_coalesce
Definition: offload.h:226
int cop_managed_offloading
Definition: offload.h:233
int tx_zcopy
Definition: offload.h:232
int * tls_rx_ports
Definition: offload.h:229
int sndbuf
Definition: offload.h:224
int autorcvbuf_inc
Definition: offload.h:234
int cong_algorithm
Definition: offload.h:223
int tx_align
Definition: offload.h:231
int tls_rx_timeout
Definition: offload.h:228
u32 req[MAX_NCHAN]
Definition: common.h:228
u32 rsp[MAX_NCHAN]
Definition: common.h:229
u32 tcp6_in_errs[MAX_NCHAN]
Definition: common.h:213
u32 tnl_tx_drops[MAX_NCHAN]
Definition: common.h:211
u32 mac_in_errs[MAX_NCHAN]
Definition: common.h:206
u32 tnl_cong_drops[MAX_NCHAN]
Definition: common.h:209
u32 ofld_no_neigh
Definition: common.h:214
u32 ofld_cong_defer
Definition: common.h:215
u32 ofld_vlan_drops[MAX_NCHAN]
Definition: common.h:212
u32 tcp_in_errs[MAX_NCHAN]
Definition: common.h:208
u32 hdr_in_errs[MAX_NCHAN]
Definition: common.h:207
u32 ofld_chan_drops[MAX_NCHAN]
Definition: common.h:210
u32 frames_ddp
Definition: common.h:200
u64 octets_ddp
Definition: common.h:202
u32 frames_drop
Definition: common.h:201
unsigned int la_mask
Definition: common.h:256
u32 rqe_dfr_pkt
Definition: common.h:233
u32 rqe_dfr_mod
Definition: common.h:234
u64 tcp_out_segs
Definition: common.h:182
u32 tcp_out_rsts
Definition: common.h:180
u64 tcp_retrans_segs
Definition: common.h:183
u64 tcp_in_segs
Definition: common.h:181
u32 out_pkt[MAX_NCHAN]
Definition: common.h:219
u32 in_pkt[MAX_NCHAN]
Definition: common.h:220
u32 drops
Definition: common.h:188
u32 frames
Definition: common.h:187
u64 octets
Definition: common.h:189
uint8_t flags
Definition: adapter.h:281
enum clrl_state state
Definition: adapter.h:279
struct tx_cl_rl_params cl_rl[]
Definition: adapter.h:301
uint8_t desc_used
Definition: adapter.h:363
struct mbuf * m
Definition: adapter.h:362
uint8_t max_npkt
Definition: adapter.h:585
uint8_t npkt
Definition: adapter.h:582
void (*async_event)(struct adapter *)
Definition: offload.h:219
int (*deactivate)(struct adapter *)
Definition: offload.h:218
int uld_id
Definition: offload.h:216
int refcount
Definition: offload.h:215
int (*activate)(struct adapter *)
Definition: offload.h:217
uint16_t * rss
Definition: adapter.h:209
struct mtx tick_mtx
Definition: adapter.h:245
uint16_t viid
Definition: adapter.h:210
uint16_t smt_idx
Definition: adapter.h:211
device_t dev
Definition: adapter.h:199
int rsrv_noflowq
Definition: adapter.h:225
int tmr_idx
Definition: adapter.h:236
uint16_t * nm_rss
Definition: adapter.h:209
int nofldtxq
Definition: adapter.h:228
int first_nm_txq
Definition: adapter.h:233
int nofldrxq
Definition: adapter.h:230
int16_t xact_addr_filt
Definition: adapter.h:214
int first_rxq
Definition: adapter.h:227
struct adapter * adapter
Definition: adapter.h:201
int ntxq
Definition: adapter.h:223
int nnmrxq
Definition: adapter.h:234
struct sysctl_ctx_list ctx
Definition: adapter.h:248
int if_flags
Definition: adapter.h:207
struct sysctl_oid * ofld_rxq_oid
Definition: adapter.h:253
uint8_t vfvld
Definition: adapter.h:213
struct sysctl_oid * rxq_oid
Definition: adapter.h:249
int first_ofld_rxq
Definition: adapter.h:231
int qsize_rxq
Definition: adapter.h:240
int hashen
Definition: adapter.h:217
int first_intr
Definition: adapter.h:220
int ofld_tmr_idx
Definition: adapter.h:237
int first_ofld_txq
Definition: adapter.h:229
int ofld_pktc_idx
Definition: adapter.h:239
int first_nm_rxq
Definition: adapter.h:235
struct fw_vi_stats_vf stats
Definition: adapter.h:244
int nrxq
Definition: adapter.h:226
struct callout tick
Definition: adapter.h:246
struct ifnet * ifp
Definition: adapter.h:203
struct timeval last_refreshed
Definition: adapter.h:243
int nnmtxq
Definition: adapter.h:232
struct sysctl_oid * nm_rxq_oid
Definition: adapter.h:251
uint16_t rss_base
Definition: adapter.h:216
uint16_t rss_size
Definition: adapter.h:215
uint16_t vin
Definition: adapter.h:212
uint8_t hw_addr[ETHER_ADDR_LEN]
Definition: adapter.h:256
struct sysctl_oid * txq_oid
Definition: adapter.h:250
struct sysctl_oid * ofld_txq_oid
Definition: adapter.h:254
unsigned long flags
Definition: adapter.h:206
struct pfil_head * pfil
Definition: adapter.h:204
struct port_info * pi
Definition: adapter.h:200
int nintr
Definition: adapter.h:219
int pktc_idx
Definition: adapter.h:238
int first_txq
Definition: adapter.h:224
int qsize_txq
Definition: adapter.h:241
struct sysctl_oid * nm_txq_oid
Definition: adapter.h:252
u8 sn[SERNUM_LEN+1]
Definition: common.h:281
u8 id[ID_LEN+1]
Definition: common.h:282
u8 ec[EC_LEN+1]
Definition: common.h:280
u8 md[MD_LEN+1]
Definition: common.h:285
u8 na[MACADDR_LEN+1]
Definition: common.h:284
unsigned int cclk
Definition: common.h:279
u8 pn[PN_LEN+1]
Definition: common.h:283
struct ifnet * ifp
Definition: t4_main.c:12950
uint16_t port
Definition: t4_main.c:12951
Definition: adapter.h:696
int t4_release_clip_addr(struct adapter *sc, struct in6_addr *in6)
Definition: t4_clip.c:354
struct clip_entry * t4_get_clip_entry(struct adapter *sc, struct in6_addr *in6, bool add)
Definition: t4_clip.c:256
void t4_init_clip_table(struct adapter *)
void t4_clip_modunload(void)
void t4_destroy_clip_table(struct adapter *)
int sysctl_clip(SYSCTL_HANDLER_ARGS)
void t4_clip_modload(void)
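Taken together, the CLIP helpers above manage reference-counted IPv6 entries in the adapter's Compressed Local IP table. A minimal sketch of the hold/release cycle, assuming an attached adapter sc and that t4_release_clip_addr() drops the reference taken by the add:

    struct in6_addr in6;    /* filled in by the caller */
    struct clip_entry *ce;

    ce = t4_get_clip_entry(sc, &in6, true);    /* add and hold an entry */
    if (ce != NULL) {
        /* ... the address is usable by the hardware here ... */
        t4_release_clip_addr(sc, &in6);    /* drop the reference (assumed pairing) */
    }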
@ MAX_MTU
Definition: t4_hw.h:41
@ TCB_SIZE
Definition: t4_hw.h:47
@ MAX_PM_NSTATS
Definition: t4_hw.h:53
@ NCCTRL_WIN
Definition: t4_hw.h:49
@ MAX_NCHAN
Definition: t4_hw.h:40
@ NMTUS
Definition: t4_hw.h:48
@ NTX_SCHED
Definition: t4_hw.h:50
@ FLASH_CFG_MAX_SIZE
Definition: t4_hw.h:275
@ SGE_NCOUNTERS
Definition: t4_hw.h:96
@ SGE_NTIMERS
Definition: t4_hw.h:95
@ CIM_NUM_OBQ_T5
Definition: t4_hw.h:70
@ CIM_OBQ_SIZE
Definition: t4_hw.h:75
@ CIM_NUM_IBQ
Definition: t4_hw.h:68
@ CIM_PIFLA_SIZE
Definition: t4_hw.h:72
@ ULPRX_LA_SIZE
Definition: t4_hw.h:77
@ TPLA_SIZE
Definition: t4_hw.h:76
@ CIM_IBQ_SIZE
Definition: t4_hw.h:74
@ CIM_MALA_SIZE
Definition: t4_hw.h:73
@ CTXT_INGRESS
Definition: t4_hw.h:86
@ CTXT_CNM
Definition: t4_hw.h:86
@ CTXT_EGRESS
Definition: t4_hw.h:86
@ CTXT_FLM
Definition: t4_hw.h:86
#define V_QINTR_TIMER_IDX(x)
Definition: t4_hw.h:162
#define V_QINTR_CNT_EN(x)
Definition: t4_hw.h:157
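These two macros compose an ingress queue's holdoff interrupt parameters. A sketch of the usual combination, with tmr_idx and pktc_idx mirroring the per-VI fields of the same names:

    int tmr_idx = 1;        /* one of the SGE_NTIMERS holdoff timers */
    int pktc_idx = -1;      /* negative means packet-count threshold off */
    uint8_t intr_params;

    intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
        V_QINTR_CNT_EN(pktc_idx >= 0);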
int port
Definition: t4_if.m:63
INTERFACE t4
Definition: t4_if.m:32
device_t * child
Definition: t4_if.m:64
#define CHELSIO_T4_SCHED_QUEUE
Definition: t4_ioctl.h:432
#define CHELSIO_T4_REGDUMP
Definition: t4_ioctl.h:418
#define CHELSIO_T4_SET_FILTER_MODE
Definition: t4_ioctl.h:420
#define CHELSIO_T4_GET_SGE_CONTEXT
Definition: t4_ioctl.h:424
#define CHELSIO_T4_CLEAR_STATS
Definition: t4_ioctl.h:429
#define CHELSIO_T4_RELEASE_CLIP_ADDR
Definition: t4_ioctl.h:443
#define CHELSIO_T4_CUDBG_DUMP
Definition: t4_ioctl.h:439
#define CHELSIO_T4_LOAD_BOOTCFG
Definition: t4_ioctl.h:438
#define CHELSIO_T4_LOAD_BOOT
Definition: t4_ioctl.h:437
#define CHELSIO_T4_GET_TRACER
Definition: t4_ioctl.h:434
#define CHELSIO_T4_GET_FILTER_MODE
Definition: t4_ioctl.h:419
#define CHELSIO_T4_HOLD_CLIP_ADDR
Definition: t4_ioctl.h:442
#define CHELSIO_T4_SETREG
Definition: t4_ioctl.h:417
#define CHELSIO_T4_SET_FILTER_MASK
Definition: t4_ioctl.h:441
#define CHELSIO_T4_GETREG
Definition: t4_ioctl.h:416
#define CHELSIO_T4_LOAD_CFG
Definition: t4_ioctl.h:436
@ OPEN_TYPE_ACTIVE
Definition: t4_ioctl.h:374
@ OPEN_TYPE_LISTEN
Definition: t4_ioctl.h:373
@ OPEN_TYPE_PASSIVE
Definition: t4_ioctl.h:375
@ OPEN_TYPE_DONTCARE
Definition: t4_ioctl.h:376
#define CHELSIO_T4_LOAD_FW
Definition: t4_ioctl.h:426
#define CHELSIO_T4_SET_OFLD_POLICY
Definition: t4_ioctl.h:440
#define CHELSIO_T4_GET_FILTER
Definition: t4_ioctl.h:421
#define CHELSIO_T4_GET_MEM
Definition: t4_ioctl.h:427
#define CHELSIO_T4_GET_I2C
Definition: t4_ioctl.h:428
#define CHELSIO_T4_DEL_FILTER
Definition: t4_ioctl.h:423
#define CHELSIO_T4_SCHED_CLASS
Definition: t4_ioctl.h:430
#define CHELSIO_T4_SET_TRACER
Definition: t4_ioctl.h:435
#define CHELSIO_T4_SET_FILTER
Definition: t4_ioctl.h:422
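The CHELSIO_T4_* commands are issued from userland against the adapter's nexus character device; the node name /dev/t4nex0 and the header path below are assumptions (cxgbetool targets the same node). A sketch of CHELSIO_T4_GETREG, using the addr/size/val members of struct t4_reg shown in this index:

    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "t4_ioctl.h"    /* header location assumed */

    int
    main(void)
    {
        struct t4_reg reg = { .addr = 0x1e004, .size = 4 };    /* offset illustrative */
        int fd = open("/dev/t4nex0", O_RDWR);

        if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
            printf("%#x = %#jx\n", reg.addr, (uintmax_t)reg.val);
        return (0);
    }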
int cxgbe_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, struct m_snd_tag **pt)
Definition: t4_kern_tls.c:2122
void t6_ktls_modunload(void)
Definition: t4_kern_tls.c:2147
void t6_ktls_modload(void)
Definition: t4_kern_tls.c:2142
int t4_init_l2t(struct adapter *sc, int flags)
Definition: t4_l2t.c:323
int t4_free_l2t(struct l2t_data *d)
Definition: t4_l2t.c:358
int sysctl_l2t(SYSCTL_HANDLER_ARGS)
Definition: t4_l2t.c:409
int do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_l2t.c:371
@ L2T_SIZE
Definition: t4_l2t.h:40
void t4_fatal_err(struct adapter *sc, bool fw_error)
Definition: t4_main.c:3554
static void dump_cim_regs(struct adapter *)
Definition: t4_main.c:8985
static int set_params__pre_init(struct adapter *)
Definition: t4_main.c:5081
static int cxgbe_detach(device_t)
Definition: t4_main.c:2655
static int port_mword(struct port_info *pi, uint32_t speed)
Definition: t4_main.c:3217
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10978
static eventhandler_tag vxlan_stop_evtag
Definition: t4_main.c:12947
static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8103
static struct callout fatal_callout
Definition: t4_main.c:3511
static void t4_set_desc(struct adapter *)
Definition: t4_main.c:5651
int t4_setup_intr_handlers(struct adapter *sc)
Definition: t4_main.c:6373
#define ulp_region(reg)
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t)
Definition: t4_main.c:2696
static void mem_region_show(struct sbuf *sb, const char *name, unsigned int from, unsigned int to)
Definition: t4_main.c:9674
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8259
static driver_t vcxgbe_driver
Definition: t4_main.c:156
static void queue_tid_release(struct adapter *sc, int tid)
Definition: t4_main.c:3892
static struct fw_info * find_fw_info(int chip)
Definition: t4_main.c:4427
static void set_current_media(struct port_info *pi)
Definition: t4_main.c:5677
static int get_params__pre_init(struct adapter *)
Definition: t4_main.c:5020
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10069
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9523
static int notify_siblings(device_t, int)
Definition: t4_main.c:1639
#define V_PL_VFID(x)
Definition: t4_main.c:7005
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10252
static driver_t cxl_driver
Definition: t4_main.c:196
static devclass_t t6_devclass
Definition: t4_main.c:13183
static int contact_firmware(struct adapter *)
Definition: t4_main.c:4652
#define UWIRE_CF
static void ifmedia_add4(struct ifmedia *ifm, int m)
Definition: t4_main.c:5662
#define TMR_IDX
static int load_fw_module(struct adapter *sc, const struct firmware **dcfg, const struct firmware **fw)
Definition: t4_main.c:4465
static char * caps_decoder[]
Definition: t4_main.c:7159
static int mod_event(module_t, int, void *)
Definition: t4_main.c:13065
static int sysctl_reset(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:11099
static void t4_init_atid_table(struct adapter *)
Definition: t4_main.c:3813
int cxgbe_media_change(struct ifnet *ifp)
Definition: t4_main.c:3170
static int copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, uint32_t mtype, uint32_t moff)
Definition: t4_main.c:4743
static void quiesce_wrq(struct sge_wrq *)
Definition: t4_main.c:6873
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9579
static int t4_alloc_irq(struct adapter *, struct irq *, int rid, driver_intr_t *, void *, char *)
Definition: t4_main.c:6949
int t4_os_find_pci_capability(struct adapter *sc, int cap)
Definition: t4_main.c:12070
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9635
static int t4_reset_post(device_t, device_t)
Definition: t4_main.c:2359
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8677
static void cxgbe_init(void *)
Definition: t4_main.c:2684
#define SYSCTL_CAP(name, n, text)
static void t4_clr_vi_stats(struct adapter *sc, u_int vin)
Definition: t4_main.c:7065
static int vcxgbe_detach(device_t)
Definition: t4_main.c:3493
void t4_os_cim_err(struct adapter *sc)
Definition: t4_main.c:9026
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8465
static int t4_suspend(device_t)
Definition: t4_main.c:1871
void t4_add_adapter(struct adapter *sc)
Definition: t4_main.c:3580
static int load_cfg(struct adapter *, struct t4_data *)
Definition: t4_main.c:11563
void t4_os_link_changed(struct port_info *pi)
Definition: t4_main.c:12145
static int sysctl_link_fec(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8357
static void cxgbe_tick(void *)
Definition: t4_main.c:7134
int alloc_atid(struct adapter *sc, void *ctx)
Definition: t4_main.c:3851
void doom_vi(struct adapter *sc, struct vi_info *vi)
Definition: t4_main.c:6183
#define MAX_READ_BUF_SIZE
Definition: t4_main.c:11828
static driver_t cxgbe_driver
Definition: t4_main.c:140
static const char * qname[CIM_NUM_IBQ+CIM_NUM_OBQ_T5]
Definition: t4_main.c:8798
static void t4_vxlan_stop_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family, u_int port)
Definition: t4_main.c:13048
static void delayed_panic(void *arg)
Definition: t4_main.c:3515
static int t4_range_cmp(const void *a, const void *b)
Definition: t4_main.c:3917
static void unload_fw_module(struct adapter *sc, const struct firmware *dcfg, const struct firmware *fw)
Definition: t4_main.c:4493
static int vcxgbe_probe(device_t)
Definition: t4_main.c:3402
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9246
static const struct memwin_init t4_memwin[NUM_MEMWIN]
Definition: t4_main.c:3676
static struct sx t4_list_lock
Definition: t4_main.c:264
int adapter_init(struct adapter *sc)
Definition: t4_main.c:6568
static u_int add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
Definition: t4_main.c:5949
char * desc
Definition: t4_main.c:875
int update_mac_settings(struct ifnet *ifp, int flags)
Definition: t4_main.c:5993
void * lookup_atid(struct adapter *sc, int atid)
Definition: t4_main.c:3871
static void cxgbe_qflush(struct ifnet *)
Definition: t4_main.c:3001
static int cxgbe_probe(device_t)
Definition: t4_main.c:2430
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *)
Definition: t4_main.c:4285
static void dump_devlog(struct adapter *)
Definition: t4_main.c:9445
void t4_init_devnames(struct adapter *sc)
Definition: t4_main.c:1076
static driver_t cc_driver
Definition: t4_main.c:235
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9965
static void t5_attribute_workaround(device_t dev)
Definition: t4_main.c:1024
int t4_map_bar_2(struct adapter *sc)
Definition: t4_main.c:3614
#define FLASH_CF
#define NRXQ
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8162
int rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len, int rw)
Definition: t4_main.c:3765
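A sketch of a single 32-bit read of adapter memory through memory window 0; the final argument selects the direction, and 0 is assumed here to mean read, matching the read_via_memwin() wrapper in adapter.h:

    uint32_t addr = 0;    /* adapter memory address, illustrative */
    uint32_t val;
    int rc;

    rc = rw_via_memwin(sc, 0, addr, &val, sizeof(val), 0);    /* 0: read (assumed) */
    if (rc == 0)
        device_printf(sc->dev, "mem[%#x] = %#x\n", addr, val);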
#define SAME_INTF(x)
static void t4_vxlan_stop(struct adapter *sc, void *arg)
Definition: t4_main.c:13009
static bool fixed_ifmedia(struct port_info *)
Definition: t4_main.c:5715
#define NTXQ_VI
struct { uint16_t device; char *desc; } t4_pciids[]
static int t4_detach(device_t)
Definition: t4_main.c:1666
static int partition_resources(struct adapter *)
Definition: t4_main.c:4973
static void fatal_error_task(void *, int)
Definition: t4_main.c:3523
static const struct field_desc tp_la2[]
Definition: t4_main.c:10802
static int get_sge_context(struct adapter *, struct t4_sge_context *)
Definition: t4_main.c:11486
static int cxgbe_init_synchronized(struct vi_info *)
Definition: t4_main.c:6219
void vi_sysctls(struct vi_info *vi)
Definition: t4_main.c:7694
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9200
static void cxgbe_sysctls(struct port_info *)
Definition: t4_main.c:7797
static void calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, int navail)
Definition: t4_main.c:4145
static void position_memwin(struct adapter *, int, uint32_t)
Definition: t4_main.c:3742
#define BUILTIN_CF
#define T4_REGSTAT(name, stat, desc)
static int cxgbe_attach(device_t)
Definition: t4_main.c:2597
#define FW_PARAM_DEV(param)
Definition: t4_main.c:4832
static int sysctl_vdd(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8647
static int t4_free_irq(struct adapter *, struct irq *)
Definition: t4_main.c:6976
static struct taskqueue * reset_tq
Definition: t4_main.c:3512
void t4_sysctls(struct adapter *sc)
Definition: t4_main.c:7179
static int stop_adapter(struct adapter *sc)
Definition: t4_main.c:1863
static void save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
Definition: t4_main.c:2018
void t4_os_portmod_changed(struct port_info *pi)
Definition: t4_main.c:12104
static device_method_t t4_methods[]
Definition: t4_main.c:107
#define T4_CAP_ENABLE
Definition: t4_main.c:2445
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8565
device_method_t cxgbe_methods[]
Definition: t4_main.c:134
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10662
void free_atid(struct adapter *sc, int atid)
Definition: t4_main.c:3879
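free_atid() completes the active-TID lifecycle started by alloc_atid() and queried by lookup_atid(). A sketch, assuming alloc_atid() returns a negative value when the table is exhausted:

    int atid = alloc_atid(sc, ctx);    /* bind caller context to an atid */

    if (atid >= 0) {
        KASSERT(lookup_atid(sc, atid) == ctx, ("atid mapping lost"));
        /* ... the atid rides in active-open CPLs until the reply arrives ... */
        free_atid(sc, atid);
    }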
static const struct field_desc tp_la0[]
Definition: t4_main.c:10716
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8013
static int cxgbe_vi_attach(device_t dev, struct vi_info *vi)
Definition: t4_main.c:2448
static int sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
Definition: t4_main.c:8933
static void build_medialist(struct port_info *)
Definition: t4_main.c:5733
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *)
Definition: t4_main.c:6989
static driver_t t5_driver
Definition: t4_main.c:188
int t4_detach_common(device_t dev)
Definition: t4_main.c:1681
static int fixup_devlog_params(struct adapter *)
Definition: t4_main.c:4118
static void sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
Definition: t4_main.c:8901
SLIST_HEAD(, adapter) t4_list
Definition: t4_main.c:265
static int mem_desc_cmp(const void *a, const void *b)
Definition: t4_main.c:9667
static int t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
Definition: t4_main.c:1090
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload")
static devclass_t cxgbe_devclass
Definition: t4_main.c:13184
static int sysctl_int_array(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:7995
uint16_t device
Definition: t4_main.c:874
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8598
static void enable_vxlan_rx(struct adapter *)
Definition: t4_main.c:12955
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9133
int t4_map_bars_0_and_4(struct adapter *sc)
Definition: t4_main.c:3588
MODULE_VERSION(t4nex, 1)
static devclass_t t5_devclass
Definition: t4_main.c:13183
#define V_PL_ADDR(x)
Definition: t4_main.c:7010
static int t4_attach(device_t)
Definition: t4_main.c:1108
static int t6_probe(device_t)
Definition: t4_main.c:1004
static driver_t vcc_driver
Definition: t4_main.c:242
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8034
#define COMPARE_CAPS(c)
static devclass_t vcxl_devclass
Definition: t4_main.c:13185
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t, uint32_t *)
Definition: t4_main.c:4074
#define PKTC_IDX
static void setup_memwin(struct adapter *)
Definition: t4_main.c:3689
static const struct field_desc tp_la1[]
Definition: t4_main.c:10753
static int apply_cfg_and_initialize(struct adapter *sc, char *cfg_file, const struct caps_allowed *caps_allowed)
Definition: t4_main.c:4845
static int adapter_full_init(struct adapter *)
Definition: t4_main.c:6534
static int set_offload_policy(struct adapter *, struct t4_offload_policy *)
Definition: t4_main.c:11739
#define READ_CAPS(x)
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8229
struct { uint16_t device; char *desc; } t6_pciids[]
static void vi_tick(void *)
Definition: t4_main.c:7146
static int cxgbe_transmit(struct ifnet *, struct mbuf *)
Definition: t4_main.c:2951
static void vi_full_uninit(struct vi_info *)
Definition: t4_main.c:6810
static int sysctl_temperature(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8618
static void adapter_full_uninit(struct adapter *)
Definition: t4_main.c:6590
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8521
__FBSDID("$FreeBSD$")
static int t4_probe(device_t)
Definition: t4_main.c:954
#define FW_INTFVER(chip, intf)
Definition: t4_main.c:4346
static void init_link_config(struct port_info *)
Definition: t4_main.c:5787
static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *)
Definition: t4_main.c:6880
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9691
static int fwmtype_to_hwmtype(int)
Definition: t4_main.c:4051
static int read_i2c(struct adapter *, struct t4_i2c_data *)
Definition: t4_main.c:11877
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10932
static void tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
Definition: t4_main.c:10857
static int compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
Definition: t4_main.c:2049
static const char *const devlog_level_strings[]
Definition: t4_main.c:9312
static devclass_t vcc_devclass
Definition: t4_main.c:13185
static int cxgbe_uninit_synchronized(struct vi_info *)
Definition: t4_main.c:6303
#define T4_PORTSTAT(name, desc)
static void reset_adapter_task(void *, int)
Definition: t4_main.c:2423
#define COMPARE_PARAM(p, name)
static driver_t t6_driver
Definition: t4_main.c:227
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter)
Definition: t4_main.c:3078
CTASSERT(sizeof(struct cluster_metadata)<=CL_METADATA_SIZE)
int t4_os_pci_restore_state(struct adapter *sc)
Definition: t4_main.c:12091
static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10614
static void t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
Definition: t4_main.c:7034
static uint64_t read_vf_stat(struct adapter *sc, u_int vin, int reg)
Definition: t4_main.c:7016
static int t4_child_location(device_t, device_t, struct sbuf *)
Definition: t4_main.c:1594
static int load_bootcfg(struct adapter *, struct t4_data *)
Definition: t4_main.c:11646
static devclass_t cc_devclass
Definition: t4_main.c:13184
#define COPY_CAPS(x)
int vi_init(struct vi_info *vi)
Definition: t4_main.c:6789
static int apply_link_config(struct port_info *)
Definition: t4_main.c:5894
#define LIMIT_CAPS(x)
static int hold_clip_addr(struct adapter *, struct t4_clip_addr *)
Definition: t4_main.c:12041
static void quiesce_vi(struct vi_info *)
Definition: t4_main.c:6911
static d_ioctl_t t4_ioctl
Definition: t4_main.c:162
static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8383
static int t4_reset_prepare(device_t, device_t)
Definition: t4_main.c:2350
static int clear_stats(struct adapter *, u_int)
Definition: t4_main.c:11901
static driver_t vcxl_driver
Definition: t4_main.c:203
static int t4_read_port_device(device_t, int, device_t *)
Definition: t4_main.c:1623
static void field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
Definition: t4_main.c:10695
static eventhandler_tag vxlan_start_evtag
Definition: t4_main.c:12946
static int reset_adapter(struct adapter *sc)
Definition: t4_main.c:2368
static void tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
Definition: t4_main.c:10868
static int set_params__post_init(struct adapter *)
Definition: t4_main.c:5519
static int load_fw(struct adapter *, struct t4_data *)
Definition: t4_main.c:11524
static int sysctl_cpus(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:11067
static int fixup_link_config(struct port_info *)
Definition: t4_main.c:5832
static void t4_vxlan_start(struct adapter *sc, void *arg)
Definition: t4_main.c:12984
static int sysctl_devlog(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9424
void t4_iterate(void(*func)(struct adapter *, void *), void *arg)
Definition: t4_main.c:12190
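t4_iterate() walks the global adapter list under t4_list_lock and calls func once per attached adapter. A sketch that tallies ports across all adapters (the params.nports field name is an assumption):

    static void
    count_ports(struct adapter *sc, void *arg)
    {
        *(int *)arg += sc->params.nports;    /* field name assumed */
    }

and at the call site:

    int nports = 0;

    t4_iterate(count_ports, &nports);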
#define NRXQ_VI
#define A_PL_INDIR_CMD
Definition: t4_main.c:6996
static void tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
Definition: t4_main.c:10850
static devclass_t vcxgbe_devclass
Definition: t4_main.c:13185
struct { uint16_t device; char *desc; } t5_pciids[]
void cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
Definition: t4_main.c:3359
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services")
void release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
Definition: t4_main.c:3899
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9279
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *)
Definition: t4_main.c:11679
static void cxgbe_vi_detach(struct vi_info *vi)
Definition: t4_main.c:2629
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10395
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:11020
static device_method_t t6_methods[]
Definition: t4_main.c:211
static void sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
Definition: t4_main.c:8871
static void tweak_tunables(void)
Definition: t4_main.c:12656
static device_method_t vcxgbe_methods[]
Definition: t4_main.c:150
static int release_clip_addr(struct adapter *, struct t4_clip_addr *)
Definition: t4_main.c:12057
static int validate_mem_range(struct adapter *, uint32_t, uint32_t)
Definition: t4_main.c:3928
static void t4_free_atid_table(struct adapter *)
Definition: t4_main.c:3835
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10533
int t4_os_pci_save_state(struct adapter *sc)
Definition: t4_main.c:12078
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8711
#define GET_STAT(name)
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8199
static void cxgbe_refresh_stats(struct vi_info *)
Definition: t4_main.c:7097
static devclass_t t4_devclass
Definition: t4_main.c:13183
static int read_card_mem(struct adapter *, int, struct t4_mem_range *)
Definition: t4_main.c:11830
static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *)
Definition: t4_main.c:3415
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9467
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10879
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8964
static int t4_resume(device_t)
Definition: t4_main.c:2144
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10288
#define NTXQ
struct fw_info fw_info[]
int begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, char *wmesg)
Definition: t4_main.c:6121
void end_synchronized_op(struct adapter *sc, int flags)
Definition: t4_main.c:6204
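These two bracket any operation that must not race with detach, suspend, or reset; vi may be NULL for adapter-wide operations. A sketch of the pattern, where the wmesg "t4xmpl" is illustrative and the SLEEP_OK/INTR_OK flags follow their use elsewhere in the driver:

    int rc;

    rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xmpl");
    if (rc == 0) {
        /* ... adapter/VI state may be modified safely here ... */
        end_synchronized_op(sc, 0);
    }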
#define FW_MAC_EXACT_CHUNK
Definition: t4_main.c:5938
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10361
static device_method_t t5_methods[]
Definition: t4_main.c:172
static int t4_ready(device_t)
Definition: t4_main.c:1612
static int install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, const struct fw_h *drv_fw, const char *reason, int *already)
Definition: t4_main.c:4510
static int t5_probe(device_t)
Definition: t4_main.c:979
static struct sx mlu
Definition: t4_main.c:13061
static int vcxgbe_attach(device_t)
Definition: t4_main.c:3465
static void write_global_rss_key(struct adapter *sc)
Definition: t4_main.c:6513
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9083
static int sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
Definition: t4_main.c:9350
static const char *const devlog_facility_strings[]
Definition: t4_main.c:9321
#define A_PL_INDIR_DATA
Definition: t4_main.c:7013
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8084
static bool ok_to_reset(struct adapter *sc)
Definition: t4_main.c:1827
#define FW_PARAM_PFVF(param)
Definition: t4_main.c:4835
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8754
static void dump_cimla(struct adapter *)
Definition: t4_main.c:9004
static void t4_vxlan_start_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family, u_int port)
Definition: t4_main.c:13035
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0)
static void free_offload_policy(struct t4_offload_policy *)
Definition: t4_main.c:11722
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8289
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8805
static devclass_t cxl_devclass
Definition: t4_main.c:13184
static int vi_full_init(struct vi_info *)
Definition: t4_main.c:6673
static void quiesce_txq(struct sge_txq *)
Definition: t4_main.c:6824
#define FW_VERSION(chip)
Definition: t4_main.c:4341
static uint64_t vi_get_counter(struct ifnet *, ift_counter)
Definition: t4_main.c:3026
MODULE_DEPEND(t4nex, firmware, 1, 1, 1)
#define T4_CAP
Definition: t4_main.c:2441
static int get_params__post_init(struct adapter *)
Definition: t4_main.c:5119
#define DEFAULT_CF
static void vi_refresh_stats(struct vi_info *)
Definition: t4_main.c:7077
static driver_t t4_driver
Definition: t4_main.c:123
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:9032
static void tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
Definition: t4_main.c:9957
static void calculate_nqueues(int *t, int nc, const int c)
Definition: t4_main.c:12641
static int fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
Definition: t4_main.c:4443
#define V_PL_AUTOINC(x)
Definition: t4_main.c:7000
#define FPGA_CF
static const struct memwin_init t5_memwin[NUM_MEMWIN]
Definition: t4_main.c:3682
static void update_nirq(struct intrs_and_queues *iaq, int nports)
Definition: t4_main.c:4130
static int sysctl_btphy(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:8055
static int load_boot(struct adapter *, struct t4_bootrom *)
Definition: t4_main.c:11596
static int sysctl_tids(SYSCTL_HANDLER_ARGS)
Definition: t4_main.c:10437
static struct cdevsw t4_cdevsw
Definition: t4_main.c:164
void mp_ring_check_drainage(struct mp_ring *r, int budget)
Definition: t4_mp_ring.c:479
bool mp_ring_is_idle(struct mp_ring *r)
Definition: t4_mp_ring.c:522
int mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
Definition: t4_mp_ring.c:347
void mp_ring_reset_stats(struct mp_ring *r)
Definition: t4_mp_ring.c:506
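A sketch of handing one mbuf m to a tx queue's mp_ring, roughly as cxgbe_transmit() does; the budget of 256 is illustrative, and the assumption here is that the caller still owns the mbuf when enqueue fails:

    void *items[1] = { m };
    int rc;

    rc = mp_ring_enqueue(txq->r, items, 1, 256);    /* may drain the ring inline */
    if (rc != 0)
        m_freem(m);    /* ring full: caller frees (assumed) */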
@ CPL_TID_RELEASE
Definition: t4_msg.h:58
@ CPL_L2T_WRITE_RPL
Definition: t4_msg.h:67
@ CPL_TRACE_PKT
Definition: t4_msg.h:138
@ CPL_SMT_WRITE_RPL
Definition: t4_msg.h:78
@ CPL_T5_TRACE_PKT
Definition: t4_msg.h:106
@ CPL_TX_PKT_XT
Definition: t4_msg.h:154
@ CPL_ACT_OPEN_RPL
Definition: t4_msg.h:69
@ CPL_ABORT_RPL_RSS
Definition: t4_msg.h:77
@ CPL_SET_TCB_RPL
Definition: t4_msg.h:91
@ CONG_ALG_HIGHSPEED
Definition: t4_msg.h:290
#define M_TID_TID
Definition: t4_msg.h:335
#define V_TXPKT_PF(x)
Definition: t4_msg.h:1286
#define V_TXPKT_VF(x)
Definition: t4_msg.h:1281
#define V_TXPKT_VF_VLD(x)
Definition: t4_msg.h:1290
#define V_TXPKT_OPCODE(x)
Definition: t4_msg.h:1339
#define V_TXPKT_INTF(x)
Definition: t4_msg.h:1306
#define A_TP_CMM_MM_RX_FLST_BASE
Definition: t4_regs.h:23748
#define M_KEEPALIVEINTVL
Definition: t4_regs.h:22976
#define A_TP_PMM_RX_MAX_PAGE
Definition: t4_regs.h:21761
#define T5_PORT_REG(idx, reg)
Definition: t4_regs.h:288
#define A_TP_FINWAIT2_TIMER
Definition: t4_regs.h:22999
#define G_PORTMAP(x)
Definition: t4_regs.h:34577
#define F_DROPERRORFRAG
Definition: t4_regs.h:22876
#define A_MPS_CLS_TCAM_DATA2_CTL
Definition: t4_regs.h:34607
#define V_PFNUM(x)
Definition: t4_regs.h:5174
#define V_KEEPALIVEINTVL(x)
Definition: t4_regs.h:22977
#define V_PASSMODE(x)
Definition: t4_regs.h:25332
#define F_PMRXNUMCHN
Definition: t4_regs.h:21765
#define F_HMA_MUX
Definition: t4_regs.h:16281
#define A_PCIE_FW
Definition: t4_regs.h:5005
#define F_PIORST
Definition: t4_regs.h:38162
#define G_EXT_MEM1_BASE(x)
Definition: t4_regs.h:16673
#define A_TP_PERS_MIN
Definition: t4_regs.h:22952
#define A_SGE_PF_GTS
Definition: t4_regs.h:546
#define A_TP_CMM_TIMER_BASE
Definition: t4_regs.h:21744
#define A_PCIE_PF_EXPROM_OFST
Definition: t4_regs.h:4078
#define A_MPS_TRC_RSS_CONTROL
Definition: t4_regs.h:33815
#define A_EDC_H_BIST_STATUS_RDATA
Definition: t4_regs.h:61865
#define MPS_CLS_SRAM_H(idx)
Definition: t4_regs.h:193
#define G_MASKSIZE(x)
Definition: t4_regs.h:23393
#define G_OFFSET(x)
Definition: t4_regs.h:4083
#define V_RXTMAX(x)
Definition: t4_regs.h:22949
#define F_DROPERRORANY
Definition: t4_regs.h:22888
#define A_MPS_RX_MAC_BG_PG_CNT0
Definition: t4_regs.h:35595
#define G_STATMODE(x)
Definition: t4_regs.h:2019
#define A_TP_KEEP_INTVL
Definition: t4_regs.h:22973
#define F_UPDBGLACAPTPCONLY
Definition: t4_regs.h:44275
#define A_TP_PMM_TX_MAX_PAGE
Definition: t4_regs.h:21773
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L
Definition: t4_regs.h:31552
#define G_DATALKPTYPE(x)
Definition: t4_regs.h:34638
#define F_DROPERRORCSUM
Definition: t4_regs.h:22844
#define A_MPS_RX_LPBK_BG_PG_CNT0
Definition: t4_regs.h:35610
#define F_ATTACKFILTERENABLE
Definition: t4_regs.h:21635
#define A_MA_TARGET_MEM_ENABLE
Definition: t4_regs.h:16261
#define F_EDRAM0_ENABLE
Definition: t4_regs.h:16277
#define M_DATALKPTYPE
Definition: t4_regs.h:34636
#define PORT_REG(idx, reg)
Definition: t4_regs.h:101
#define A_MA_EDRAM1_BAR
Definition: t4_regs.h:16124
#define A_TP_CMM_MM_BASE
Definition: t4_regs.h:21743
#define G_QUEEOPCNT(x)
Definition: t4_regs.h:44116
#define F_VF_VALID
Definition: t4_regs.h:32020
#define A_SGE_STAT_MATCH
Definition: t4_regs.h:1999
#define G_IBQRDADDR(x)
Definition: t4_regs.h:44091
#define G_ALLOC(x)
Definition: t4_regs.h:34751
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1
Definition: t4_regs.h:34665
#define F_DROPERRORTCPHDRLEN
Definition: t4_regs.h:22860
#define A_TP_KEEP_IDLE
Definition: t4_regs.h:22966
#define PCIE_MEM_ACCESS_REG(reg_addr, idx)
Definition: t4_regs.h:118
#define A_CIM_SDRAM_BASE_ADDR
Definition: t4_regs.h:20437
#define M_TIMERBACKOFFINDEX0
Definition: t4_regs.h:22156
#define M_RXTMIN
Definition: t4_regs.h:22941
#define F_DROPERRORPKTLEN
Definition: t4_regs.h:22856
#define F_DATAVIDH2
Definition: t4_regs.h:34646
#define M_KEEPALIVEMAXR1
Definition: t4_regs.h:23041
#define A_SGE_CONTROL2
Definition: t4_regs.h:2296
#define F_PIORSTMODE
Definition: t4_regs.h:38166
#define V_STATSOURCE_T5(x)
Definition: t4_regs.h:2028
#define A_MA_EDRAM0_BAR
Definition: t4_regs.h:16112
#define G_PMTXMAXPAGE(x)
Definition: t4_regs.h:21783
#define F_DROPERRORCSUMIP
Definition: t4_regs.h:22848
#define A_ULP_TX_ERR_TABLE_BASE
Definition: t4_regs.h:29067
#define F_EXT_MEM1_ENABLE
Definition: t4_regs.h:16285
#define V_RXTSHIFTMAXR1(x)
Definition: t4_regs.h:23022
#define A_UP_IBQ_0_SHADOW_RDADDR
Definition: t4_regs.h:44437
#define G_T6_SRAM_PRIO3(x)
Definition: t4_regs.h:34520
#define A_TP_CMM_MM_MAX_PSTRUCT
Definition: t4_regs.h:23751
#define A_TP_RXT_MIN
Definition: t4_regs.h:22938
#define G_PMRXMAXPAGE(x)
Definition: t4_regs.h:21770
#define V_WINDOW(x)
Definition: t4_regs.h:4931
#define A_TP_INIT_SRTT
Definition: t4_regs.h:22980
#define V_CTLREQID(x)
Definition: t4_regs.h:34614
#define F_DROPERRORTCPOPT
Definition: t4_regs.h:22852
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1
Definition: t4_regs.h:34663
#define A_TP_GLOBAL_CONFIG
Definition: t4_regs.h:21618
#define A_LE_DB_ACT_CNT_IPV6
Definition: t4_regs.h:38782
#define A_TP_MOD_CONFIG
Definition: t4_regs.h:23626
#define A_TP_RSS_CONFIG_TNL
Definition: t4_regs.h:23388
#define G_EDRAM0_BASE(x)
Definition: t4_regs.h:16117
#define M_RXTSHIFTMAXR1
Definition: t4_regs.h:23021
#define A_TP_FRAG_CONFIG
Definition: t4_regs.h:25288
#define F_VXLAN_EN
Definition: t4_regs.h:35656
#define F_T6_REPLICATE
Definition: t4_regs.h:34543
#define S_RXTSHIFTMAXR2
Definition: t4_regs.h:23025
#define A_PCIE_MEM_ACCESS_OFFSET
Definition: t4_regs.h:4934
#define V_KEEPALIVEMAXR2(x)
Definition: t4_regs.h:23047
#define F_DROPERRORIPHDRLEN
Definition: t4_regs.h:22864
#define M_CTXTQID
Definition: t4_regs.h:2642
#define A_TP_DBG_LA_CONFIG
Definition: t4_regs.h:24282
#define G_T6_DBVFIFO_SIZE(x)
Definition: t4_regs.h:2576
#define A_TP_CMM_TCB_BASE
Definition: t4_regs.h:21742
#define A_ULP_RX_CTX_BASE
Definition: t4_regs.h:37149
#define A_MA_EXT_MEMORY_BAR
Definition: t4_regs.h:16136
#define G_EXT_MEM1_SIZE(x)
Definition: t4_regs.h:16678
#define A_SGE_FLM_CACHE_BADDR
Definition: t4_regs.h:1376
#define G_T5_ALLOC(x)
Definition: t4_regs.h:34761
#define A_TP_SYNC_TIME_HI
Definition: t4_regs.h:23746
#define G_EXT_MEM_BASE(x)
Definition: t4_regs.h:16141
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H
Definition: t4_regs.h:31583
#define A_CIM_EXTMEM2_BASE_ADDR
Definition: t4_regs.h:20451
#define G_T6_SRAM_PRIO2(x)
Definition: t4_regs.h:34525
#define V_CTLXYBITSEL(x)
Definition: t4_regs.h:34627
#define A_LE_DB_ACT_CNT_IPV4
Definition: t4_regs.h:38775
#define A_CIM_SDRAM_ADDR_SIZE
Definition: t4_regs.h:20444
#define V_VXLAN(x)
Definition: t4_regs.h:35660
#define A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS
Definition: t4_regs.h:47582
#define A_TP_TIMER_RESOLUTION
Definition: t4_regs.h:22914
#define M_KEEPALIVEIDLE
Definition: t4_regs.h:22969
#define G_SRAM_PRIO0(x)
Definition: t4_regs.h:34485
#define A_TP_ERR_CONFIG
Definition: t4_regs.h:22784
#define V_KEEPALIVEMAXR1(x)
Definition: t4_regs.h:23042
#define G_SOURCEPF(x)
Definition: t4_regs.h:37838
#define A_SGE_STAT_CFG
Definition: t4_regs.h:2000
#define M_RXTMAX
Definition: t4_regs.h:22948
#define A_LE_DB_SERVER_INDEX
Definition: t4_regs.h:38747
#define A_PL_WHOAMI
Definition: t4_regs.h:37896
#define G_T6_VF(x)
Definition: t4_regs.h:34557
#define A_EDC_H_BIST_USER_WDATA2
Definition: t4_regs.h:61862
#define V_STATMODE(x)
Definition: t4_regs.h:2018
#define G_T6_STATMODE(x)
Definition: t4_regs.h:2034
#define A_UP_IBQ_0_RDADDR
Definition: t4_regs.h:44081
#define M_PASSMODE
Definition: t4_regs.h:25331
#define V_BIR(x)
Definition: t4_regs.h:4926
#define G_USED(x)
Definition: t4_regs.h:34746
#define F_REPLICATE
Definition: t4_regs.h:32011
#define M_KEEPALIVEMAXR2
Definition: t4_regs.h:23046
#define G_TIMERMODE(x)
Definition: t4_regs.h:23641
#define F_SRAM_VLD
Definition: t4_regs.h:34489
#define G_DBGLAMODE(x)
Definition: t4_regs.h:24301
#define F_HASHEN
Definition: t4_regs.h:38499
#define M_MASKFILTER
Definition: t4_regs.h:23396
#define MYPF_REG(reg_addr)
Definition: t4_regs.h:39
#define A_TP_TCP_BACKOFF_REG0
Definition: t4_regs.h:22138
#define A_TP_MIB_INDEX
Definition: t4_regs.h:23744
#define F_DATADIPHIT
Definition: t4_regs.h:34642
#define A_CIM_EXTMEM2_ADDR_SIZE
Definition: t4_regs.h:20458
#define V_MASKFILTER(x)
Definition: t4_regs.h:23397
#define G_DELAYEDACKRESOLUTION(x)
Definition: t4_regs.h:22929
#define G_EDRAM1_BASE(x)
Definition: t4_regs.h:16129
#define A_PCIE_MEM_ACCESS_BASE_WIN
Definition: t4_regs.h:4917
#define A_EDC_H_BIST_USER_WDATA0
Definition: t4_regs.h:61860
#define G_QUEREMFLITS(x)
Definition: t4_regs.h:44109
#define A_EDC_H_BIST_CMD_LEN
Definition: t4_regs.h:61858
#define F_DROPERRORETHHDRLEN
Definition: t4_regs.h:22868
#define G_TIMESTAMPRESOLUTION(x)
Definition: t4_regs.h:22924
#define A_MPS_T5_TRC_RSS_CONTROL
Definition: t4_regs.h:33968
#define V_RXTSHIFTMAXR2(x)
Definition: t4_regs.h:23027
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1
Definition: t4_regs.h:34664
#define A_SGE_DBVFIFO_BADDR
Definition: t4_regs.h:2565
#define A_LE_DB_HASH_TID_BASE
Definition: t4_regs.h:38819
#define G_T6_SRAM_PRIO0(x)
Definition: t4_regs.h:34535
#define G_DATAPORTNUM(x)
Definition: t4_regs.h:34633
#define V_T6_STATMODE(x)
Definition: t4_regs.h:2033
#define A_T6_LE_DB_HASH_TID_BASE
Definition: t4_regs.h:39549
#define M_RXTSHIFTMAXR2
Definition: t4_regs.h:23026
#define A_UP_OBQ_0_SHADOW_REALADDR
Definition: t4_regs.h:44506
#define A_UP_UP_DBG_LA_CFG
Definition: t4_regs.h:44267
#define A_LE_DB_CONFIG
Definition: t4_regs.h:38491
#define A_TP_SHIFT_CNT
Definition: t4_regs.h:23013
#define V_RSSCONTROL(x)
Definition: t4_regs.h:33819
#define A_TP_MIB_TNL_CNG_DROP_0
Definition: t4_regs.h:28689
#define S_SYNSHIFTMAX
Definition: t4_regs.h:23015
#define S_MULTILISTEN0
Definition: t4_regs.h:34463
#define G_EDRAM0_SIZE(x)
Definition: t4_regs.h:16122
#define G_VIDL(x)
Definition: t4_regs.h:34599
#define F_T6_VF_VALID
Definition: t4_regs.h:34552
#define F_EDRAM1_ENABLE
Definition: t4_regs.h:16273
#define G_STATSOURCE_T5(x)
Definition: t4_regs.h:2029
#define A_SGE_DBQ_CTXT_BADDR
Definition: t4_regs.h:1368
#define G_QUERDADDR(x)
Definition: t4_regs.h:44153
#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L
Definition: t4_regs.h:32356
#define G_INITSRTT(x)
Definition: t4_regs.h:22990
#define G_VF(x)
Definition: t4_regs.h:32025
#define G_PMTXNUMCHN(x)
Definition: t4_regs.h:21778
#define PF_REG(idx, reg)
Definition: t4_regs.h:67
#define G_IBQWRADDR(x)
Definition: t4_regs.h:44098
#define A_EDC_H_BIST_DATA_PATTERN
Definition: t4_regs.h:61859
#define G_SRAM_PRIO3(x)
Definition: t4_regs.h:34470
#define V_RXTMIN(x)
Definition: t4_regs.h:22942
#define A_TP_PMM_TX_PAGE_SIZE
Definition: t4_regs.h:21772
#define A_MA_EXT_MEMORY1_BAR
Definition: t4_regs.h:16668
#define A_MPS_RX_VXLAN_TYPE
Definition: t4_regs.h:35652
#define A_TP_PERS_MAX
Definition: t4_regs.h:22959
#define G_PF(x)
Definition: t4_regs.h:32016
#define MPS_CLS_TCAM_Y_L(idx)
Definition: t4_regs.h:196
#define A_SGE_IMSG_CTXT_BADDR
Definition: t4_regs.h:1375
#define A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS
Definition: t4_regs.h:48171
#define A_TP_PMM_RX_BASE
Definition: t4_regs.h:21759
#define G_T6_SOURCEPF(x)
Definition: t4_regs.h:37901
#define G_T5_USED(x)
Definition: t4_regs.h:34756
#define S_T6_MULTILISTEN0
Definition: t4_regs.h:34513
#define G_T6_PF(x)
Definition: t4_regs.h:34548
#define F_DROPERRORIPVER
Definition: t4_regs.h:22880
#define A_MPS_RX_PG_RSV0
Definition: t4_regs.h:34733
#define V_KEEPALIVEIDLE(x)
Definition: t4_regs.h:22970
#define F_VFIFO_ENABLE
Definition: t4_regs.h:2337
#define MPS_CLS_SRAM_L(idx)
Definition: t4_regs.h:190
#define A_TP_PMM_RX_PAGE_SIZE
Definition: t4_regs.h:21760
#define G_EXT_MEM_SIZE(x)
Definition: t4_regs.h:16146
#define A_EDC_H_BIST_USER_WDATA1
Definition: t4_regs.h:61861
#define F_DROPERRORMAC
Definition: t4_regs.h:22884
#define V_CTLCMDTYPE(x)
Definition: t4_regs.h:34610
#define A_TP_MIB_DATA
Definition: t4_regs.h:23745
#define V_CTLTCAMSEL(x)
Definition: t4_regs.h:34618
#define F_FRAGMENTDROP
Definition: t4_regs.h:21651
#define A_UP_OBQ_0_REALADDR
Definition: t4_regs.h:44234
#define V_QUEUENUMBER(x)
Definition: t4_regs.h:33824
#define A_SGE_DBVFIFO_SIZE
Definition: t4_regs.h:2566
#define A_TP_SYNC_TIME_LO
Definition: t4_regs.h:23747
#define A_LE_DB_SRVR_START_INDEX
Definition: t4_regs.h:38754
#define A_LE_DB_HASH_TBL_BASE_ADDR
Definition: t4_regs.h:38820
#define A_TP_CMM_MM_TX_FLST_BASE
Definition: t4_regs.h:23749
#define A_TP_PMM_TX_BASE
Definition: t4_regs.h:21757
#define A_LE_DB_ACTIVE_TABLE_START_INDEX
Definition: t4_regs.h:38726
#define A_LE_DB_TID_HASHBASE
Definition: t4_regs.h:39542
#define G_EDRAM1_SIZE(x)
Definition: t4_regs.h:16134
#define G_TIMERRESOLUTION(x)
Definition: t4_regs.h:22919
#define G_DMACH(x)
Definition: t4_regs.h:34604
#define G_DATAVIDH1(x)
Definition: t4_regs.h:34651
#define A_TP_TX_MOD_QUEUE_REQ_MAP
Definition: t4_regs.h:23648
#define VF_MPS_REG(reg_addr)
Definition: t4_regs.h:73
#define F_EXT_MEM_ENABLE
Definition: t4_regs.h:16269
#define G_SRAM_PRIO1(x)
Definition: t4_regs.h:34480
#define G_T6_SRAM_PRIO1(x)
Definition: t4_regs.h:34530
#define G_QUESOPCNT(x)
Definition: t4_regs.h:44121
#define G_SRAM_PRIO2(x)
Definition: t4_regs.h:34475
#define F_T6_SRAM_VLD
Definition: t4_regs.h:34539
#define A_SGE_PF_KDOORBELL
Definition: t4_regs.h:515
#define S_KEEPALIVEMAXR2
Definition: t4_regs.h:23045
#define A_SGE_STAT_TOTAL
Definition: t4_regs.h:1998
#define A_TP_DACK_TIMER
Definition: t4_regs.h:22992
#define MPS_CLS_TCAM_X_L(idx)
Definition: t4_regs.h:202
#define A_TP_RXT_MAX
Definition: t4_regs.h:22945
#define V_CTLTCAMINDEX(x)
Definition: t4_regs.h:34623
#define A_MPS_RX_PG_RSV4
Definition: t4_regs.h:34768
#define A_TP_CMM_MM_PS_FLST_BASE
Definition: t4_regs.h:23750
#define F_DROPERRORATTACK
Definition: t4_regs.h:22872
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0, "payload DMA offset in rx buffer (bytes)")
int t4_free_smt(struct smt_data *s)
Definition: t4_smt.c:255
int t4_init_smt(struct adapter *sc, int flags)
Definition: t4_smt.c:226
int do_smt_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_smt.c:268
int sysctl_smt(SYSCTL_HANDLER_ARGS)
Definition: t4_smt.c:301
#define G_FW_HDR_FW_VER_MAJOR(x)
#define F_FW_RSS_VI_CONFIG_CMD_UDPEN
@ FW_EINVAL
@ FW_EPROTO
@ FW_EPERM
@ FW_PARAM_DEV_DIAG_VDD
@ FW_PARAM_DEV_DIAG_TMP
@ FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR
@ FW_CAPS_CONFIG_NIC
@ FW_CAPS_CONFIG_NIC_HASHFILTER
@ FW_CAPS_CONFIG_NIC_ETHOFLD
#define FW_PORT_CAP32_FC_TX
#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN
#define S_FW_PORT_CAP32_SPEED
@ FW_DEVLOG_LEVEL_INFO
@ FW_DEVLOG_LEVEL_DEBUG
@ FW_DEVLOG_LEVEL_NOTICE
@ FW_DEVLOG_LEVEL_EMERG
@ FW_DEVLOG_LEVEL_ERR
@ FW_DEVLOG_LEVEL_CRIT
#define M_FW_PORT_CAP32_FEC
#define FW_PORT_CAP32_SPEED_1G
#define FW_PORT_CAP32_FEC_RS
static bool fec_supported(uint32_t caps)
@ FW_CAPS_CONFIG_TLSKEYS
#define FW_PORT_CAP32_FORCE_PAUSE
#define G_FW_PARAMS_PARAM_Y(x)
@ FW_LDST_ADDRSPC_MPS
#define M_FW_PORT_CAP32_SPEED
#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN
#define G_FW_HDR_FW_VER_BUILD(x)
@ FW_DEVLOG_FACILITY_RES
@ FW_DEVLOG_FACILITY_ACL
@ FW_DEVLOG_FACILITY_RI
@ FW_DEVLOG_FACILITY_MAC
@ FW_DEVLOG_FACILITY_FILTER
@ FW_DEVLOG_FACILITY_CHNET
@ FW_DEVLOG_FACILITY_FLR
@ FW_DEVLOG_FACILITY_PORT
@ FW_DEVLOG_FACILITY_DCB
@ FW_DEVLOG_FACILITY_OFLD
@ FW_DEVLOG_FACILITY_SCHED
@ FW_DEVLOG_FACILITY_CF
@ FW_DEVLOG_FACILITY_CORE
@ FW_DEVLOG_FACILITY_TIMER
@ FW_DEVLOG_FACILITY_VI
@ FW_DEVLOG_FACILITY_FOISCSI
@ FW_DEVLOG_FACILITY_PHY
@ FW_DEVLOG_FACILITY_ISCSI
@ FW_DEVLOG_FACILITY_TM
@ FW_DEVLOG_FACILITY_ETH
@ FW_DEVLOG_FACILITY_HW
@ FW_DEVLOG_FACILITY_FCOE
@ FW_DEVLOG_FACILITY_FOFCOE
@ FW_DEVLOG_FACILITY_DMAQ
@ FW_DEVLOG_FACILITY_QFC
#define FW_PORT_CAP32_ANEG
@ FW_LDST_CMD
@ FW_CAPS_CONFIG_CMD
#define FW_PORT_CAP32_SPEED_100G
@ FW_VI_FUNC_IWARP
@ FW_VI_FUNC_FOFCOE
@ FW_VI_FUNC_ETH
@ FW_VI_FUNC_OPENFCOE
@ FW_VI_FUNC_OFLD
@ FW_VI_FUNC_FOISCSI
@ FW_VI_FUNC_OPENISCSI
#define V_FW_PARAMS_PARAM_Y(x)
#define FW_PORT_CAP32_FC_RX
#define FW_PORT_CAP32_FORCE_FEC
#define V_FW_PARAMS_PARAM_YZ(x)
static bool is_bt(enum fw_port_type port_type)
#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN
#define FW_PORT_CAP32_SPEED_10G
#define F_FW_CMD_WRITE
@ FW_HDR_FLAGS_RESET_HALT
#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x)
@ FW_PARAMS_PARAM_DEV_RSSINFO
@ FW_PARAMS_PARAM_DEV_LOAD
@ FW_PARAMS_PARAM_DEV_KTLS_HW
@ FW_PARAMS_PARAM_DEV_DIAG
#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN
@ FW_LDST_MPS_RPLC
#define V_FW_PORT_CAP32_FEC(x)
@ FW_PORT_MOD_TYPE_SR
@ FW_PORT_MOD_TYPE_TWINAX_PASSIVE
@ FW_PORT_MOD_TYPE_UNKNOWN
@ FW_PORT_MOD_TYPE_LR
@ FW_PORT_MOD_TYPE_NONE
@ FW_PORT_MOD_TYPE_TWINAX_ACTIVE
@ FW_PORT_MOD_TYPE_NOTSUPPORTED
@ FW_PORT_MOD_TYPE_LRM
@ FW_PORT_MOD_TYPE_ERROR
@ FW_PORT_MOD_TYPE_NA
@ FW_PORT_MOD_TYPE_ER
#define FW_PORT_CAP32_SPEED_50G
#define FW_PORT_CAP32_FEC_BASER_RS
#define G_FW_PORT_CAP32_SPEED(x)
#define V_FW_PARAMS_MNEM(x)
@ FW_HDR_CHIP_T5
@ FW_HDR_CHIP_T4
@ FW_HDR_CHIP_T6
#define V_FW_LDST_CMD_IDX(x)
#define V_FW_LDST_CMD_ADDRSPACE(x)
@ FW_CAPS_CONFIG_SWITCH_EGRESS
@ FW_CAPS_CONFIG_SWITCH_INGRESS
#define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD)
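FW_VERSION32() packs major/minor/micro/build into one 32-bit word; the byte layout assumed below (major in the most significant byte, one byte per field) matches the G_FW_HDR_FW_VER_* accessors listed in this index:

    uint32_t v = FW_VERSION32(1, 25, 0, 0);    /* version numbers illustrative */

    printf("%u.%u.%u.%u\n", G_FW_HDR_FW_VER_MAJOR(v),
        G_FW_HDR_FW_VER_MINOR(v), G_FW_HDR_FW_VER_MICRO(v),
        G_FW_HDR_FW_VER_BUILD(v));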
@ FW_PORT_TYPE_BT_XFI
@ FW_PORT_TYPE_NONE
@ FW_PORT_TYPE_KX
@ FW_PORT_TYPE_SFP28
@ FW_PORT_TYPE_QSFP
@ FW_PORT_TYPE_CR2_QSFP
@ FW_PORT_TYPE_BT_SGMII
@ FW_PORT_TYPE_KR4_100G
@ FW_PORT_TYPE_BP_AP
@ FW_PORT_TYPE_FIBER_XFI
@ FW_PORT_TYPE_QSFP_10G
@ FW_PORT_TYPE_KR_SFP28
@ FW_PORT_TYPE_FIBER_XAUI
@ FW_PORT_TYPE_BP4_AP
@ FW_PORT_TYPE_QSA
@ FW_PORT_TYPE_BT_XAUI
@ FW_PORT_TYPE_CX4
@ FW_PORT_TYPE_KR
@ FW_PORT_TYPE_SFP
@ FW_PORT_TYPE_BP40_BA
@ FW_PORT_TYPE_KR_XLAUI
@ FW_PORT_TYPE_CR_QSFP
@ FW_PORT_TYPE_CR4_QSFP
@ FW_PORT_TYPE_KX4
#define V_FW_CMD_OP(x)
#define V_FW_LDST_CMD_FID(x)
#define G_FW_HDR_FW_VER_MINOR(x)
@ FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE
#define F_FW_CMD_READ
#define G_FW_PARAMS_PARAM_Z(x)
#define FW_PORT_CAP32_SPEED_100M
@ FW_CAPS_CONFIG_TOE
#define F_FW_CAPS_CONFIG_CMD_CFVALID
#define FW_PORT_CAP32_SPEED_40G
#define F_FW_CMD_REQUEST
@ FW_CAPS_CONFIG_RDMA_RDDP
@ FW_CAPS_CONFIG_RDMA_RDMAC
@ FW_PARAMS_MNEM_DEV
@ FW_MEMTYPE_EDC1
@ FW_MEMTYPE_EXTMEM1
@ FW_MEMTYPE_FLASH
@ FW_MEMTYPE_EXTMEM
@ FW_MEMTYPE_EDC0
#define V_FW_PARAMS_PARAM_Z(x)
#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)
#define V_FW_PARAMS_PARAM_X(x)
@ FW_CAPS_CONFIG_ISCSI_T10DIF
@ FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU
@ FW_CAPS_CONFIG_ISCSI_TARGET_PDU
#define G_FW_HDR_FW_VER_MICRO(x)
#define FW_PORT_CAP32_SPEED_25G
#define FW_LEN16(fw_struct)
Definition: offload.h:77
void * data
Definition: offload.h:78
union aopen_entry * next
Definition: offload.h:79
struct fw_ldst_cmd::fw_ldst::fw_ldst_mps::fw_ldst_mps_rplc rplc
union fw_ldst_cmd::fw_ldst::fw_ldst_mps mps