FreeBSD kernel CXGBE device code
t4_filter.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2018 Chelsio Communications, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD$");
30
31#include "opt_inet.h"
32#include "opt_inet6.h"
33
34#include <sys/param.h>
35#include <sys/eventhandler.h>
36#include <sys/fnv_hash.h>
37#include <sys/systm.h>
38#include <sys/kernel.h>
39#include <sys/module.h>
40#include <sys/bus.h>
41#include <sys/lock.h>
42#include <sys/mutex.h>
43#include <sys/rwlock.h>
44#include <sys/socket.h>
45#include <sys/sbuf.h>
46#include <netinet/in.h>
47
48#include "common/common.h"
49#include "common/t4_msg.h"
50#include "common/t4_regs.h"
51#include "common/t4_regs_values.h"
52#include "common/t4_tcb.h"
53#include "t4_l2t.h"
54#include "t4_smt.h"
55
56struct filter_entry {
57 LIST_ENTRY(filter_entry) link_4t;
58 LIST_ENTRY(filter_entry) link_tid;
59
60 uint32_t valid:1; /* filter allocated and valid */
61 uint32_t locked:1; /* filter is administratively locked or busy */
62 uint32_t pending:1; /* filter action is pending firmware reply */
63 int tid; /* tid of the filter TCB */
64 struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
65 struct smt_entry *smt; /* SMT entry for SMAC rewrite */
66
67 struct t4_filter_specification fs;
68};
69
70static void free_filter_resources(struct filter_entry *);
71static int get_tcamfilter(struct adapter *, struct t4_filter *);
72static int get_hashfilter(struct adapter *, struct t4_filter *);
73static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
74 struct l2t_entry *, struct smt_entry *);
75static int del_hashfilter(struct adapter *, struct t4_filter *);
76static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
77
78static inline bool
79separate_hpfilter_region(struct adapter *sc)
80{
81
82 return (chip_id(sc) >= CHELSIO_T6);
83}
84
85static inline uint32_t
86hf_hashfn_4t(struct t4_filter_specification *fs)
87{
88 struct t4_filter_tuple *ft = &fs->val;
89 uint32_t hash;
90
91 if (fs->type) {
92 /* IPv6 */
93 hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
94 hash = fnv_32_buf(&ft->dip[0], 16, hash);
95 } else {
96 hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
97 hash = fnv_32_buf(&ft->dip[0], 4, hash);
98 }
99 hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
100 hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);
101
102 return (hash);
103}
104
105static inline uint32_t
106hf_hashfn_tid(int tid)
107{
108
109 return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
110}
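/*
 * Illustrative sketch (assumptions, not lines from this file): how the two
 * hash functions above are consumed.  Each hash is masked down to a bucket
 * index of the corresponding table, so only the low bits select the chain:
 *
 *	hash = hf_hashfn_4t(&f->fs);
 *	LIST_INSERT_HEAD(&t->hftid_hash_4t[hash & t->hftid_4t_mask], f, link_4t);
 *
 *	hash = hf_hashfn_tid(f->tid);
 *	LIST_INSERT_HEAD(&t->hftid_hash_tid[hash & t->hftid_tid_mask], f, link_tid);
 */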
111
112static int
113alloc_hftid_hash(struct tid_info *t, int flags)
114{
115 int n;
116
117 MPASS(t->ntids > 0);
118 MPASS(t->hftid_hash_4t == NULL);
119 MPASS(t->hftid_hash_tid == NULL);
120
121 n = max(t->ntids / 1024, 16);
122 t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
123 if (t->hftid_hash_4t == NULL)
124 return (ENOMEM);
125 t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
126 flags);
127 if (t->hftid_hash_tid == NULL) {
128 hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
129 t->hftid_hash_4t = NULL;
130 return (ENOMEM);
131 }
132
133 mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
134 cv_init(&t->hftid_cv, "t4hfcv");
135
136 return (0);
137}
138
139void
140free_hftid_hash(struct tid_info *t)
141{
142 struct filter_entry *f, *ftmp;
143 LIST_HEAD(, filter_entry) *head;
144 int i;
145#ifdef INVARIANTS
146 int n = 0;
147#endif
148
149 if (t->tids_in_use > 0) {
150 /* Remove everything from the tid hash. */
151 head = t->hftid_hash_tid;
152 for (i = 0; i <= t->hftid_tid_mask; i++) {
153 LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
154 LIST_REMOVE(f, link_tid);
155 }
156 }
157
158 /* Remove and then free each filter in the 4t hash. */
159 head = t->hftid_hash_4t;
160 for (i = 0; i <= t->hftid_4t_mask; i++) {
161 LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
162#ifdef INVARIANTS
163 n += f->fs.type ? 2 : 1;
164#endif
165 LIST_REMOVE(f, link_4t);
166 free(f, M_CXGBE);
167 }
168 }
169 MPASS(t->tids_in_use == n);
170 t->tids_in_use = 0;
171 }
172
173 if (t->hftid_hash_4t) {
174 hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
175 t->hftid_hash_4t = NULL;
176 }
177 if (t->hftid_hash_tid) {
178 hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
179 t->hftid_hash_tid = NULL;
180 }
181 if (mtx_initialized(&t->hftid_lock)) {
182 mtx_destroy(&t->hftid_lock);
183 cv_destroy(&t->hftid_cv);
184 }
185}
186
187static void
188insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
189{
190 struct tid_info *t = &sc->tids;
191 LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
192
193 MPASS(head != NULL);
194 if (hash == 0)
195 hash = hf_hashfn_4t(&f->fs);
196 LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
197 atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
198}
199
200static void
201insert_hftid(struct adapter *sc, struct filter_entry *f)
202{
203 struct tid_info *t = &sc->tids;
204 LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
205 uint32_t hash;
206
207 MPASS(f->tid >= t->tid_base);
208 MPASS(f->tid - t->tid_base < t->ntids);
209 mtx_assert(&t->hftid_lock, MA_OWNED);
210
211 hash = hf_hashfn_tid(f->tid);
212 LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
213}
214
215static bool
216filter_eq(struct t4_filter_specification *fs1,
217 struct t4_filter_specification *fs2)
218{
219 int n;
220
221 MPASS(fs1->hash && fs2->hash);
222
223 if (fs1->type != fs2->type)
224 return (false);
225
226 n = fs1->type ? 16 : 4;
227 if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
228 bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
229 fs1->val.sport != fs2->val.sport ||
230 fs1->val.dport != fs2->val.dport)
231 return (false);
232
233 /*
234 * We know the masks are the same because all hashfilters conform to the
235 * global tp->filter_mask and the driver has verified that already.
236 */
237
238 if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
239 fs1->val.vnic != fs2->val.vnic)
240 return (false);
241 if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
242 return (false);
243 if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
244 return (false);
245 if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
246 return (false);
247 if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
248 return (false);
249 if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
250 return (false);
251 if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
252 return (false);
253 if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
254 return (false);
255 if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
256 return (false);
257 if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
258 return (false);
259
260 return (true);
261}
262
263static struct filter_entry *
264lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
265{
266 struct tid_info *t = &sc->tids;
267 LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
268 struct filter_entry *f;
269
270 mtx_assert(&t->hftid_lock, MA_OWNED);
271 MPASS(head != NULL);
272
273 if (hash == 0)
274 hash = hf_hashfn_4t(fs);
275
276 LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
277 if (filter_eq(&f->fs, fs))
278 return (f);
279 }
280
281 return (NULL);
282}
283
284static struct filter_entry *
285lookup_hftid(struct adapter *sc, int tid)
286{
287 struct tid_info *t = &sc->tids;
288 LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
289 struct filter_entry *f;
290 uint32_t hash;
291
292 mtx_assert(&t->hftid_lock, MA_OWNED);
293 MPASS(head != NULL);
294
295 hash = hf_hashfn_tid(tid);
296 LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
297 if (f->tid == tid)
298 return (f);
299 }
300
301 return (NULL);
302}
303
304static void
305remove_hf(struct adapter *sc, struct filter_entry *f)
306{
307 struct tid_info *t = &sc->tids;
308
309 mtx_assert(&t->hftid_lock, MA_OWNED);
310
311 LIST_REMOVE(f, link_4t);
312 atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
313}
314
315static void
316remove_hftid(struct adapter *sc, struct filter_entry *f)
317{
318#ifdef INVARIANTS
319 struct tid_info *t = &sc->tids;
320
321 mtx_assert(&t->hftid_lock, MA_OWNED);
322#endif
323
324 LIST_REMOVE(f, link_tid);
325}
326
327/*
328 * Input: driver's 32b filter mode.
329 * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
330 */
331static uint16_t
332mode_to_fconf(uint32_t mode)
333{
334 uint32_t fconf = 0;
335
336 if (mode & T4_FILTER_IP_FRAGMENT)
337 fconf |= F_FRAGMENTATION;
338
339 if (mode & T4_FILTER_MPS_HIT_TYPE)
340 fconf |= F_MPSHITTYPE;
341
342 if (mode & T4_FILTER_MAC_IDX)
343 fconf |= F_MACMATCH;
344
345 if (mode & T4_FILTER_ETH_TYPE)
346 fconf |= F_ETHERTYPE;
347
348 if (mode & T4_FILTER_IP_PROTO)
349 fconf |= F_PROTOCOL;
350
351 if (mode & T4_FILTER_IP_TOS)
352 fconf |= F_TOS;
353
354 if (mode & T4_FILTER_VLAN)
355 fconf |= F_VLAN;
356
357 if (mode & T4_FILTER_VNIC)
358 fconf |= F_VNIC_ID;
359
360 if (mode & T4_FILTER_PORT)
361 fconf |= F_PORT;
362
363 if (mode & T4_FILTER_FCoE)
364 fconf |= F_FCOE;
365
366 return (fconf);
367}
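/*
 * Example (illustrative): a driver mode of T4_FILTER_PORT | T4_FILTER_VLAN |
 * T4_FILTER_IP_PROTO is converted by mode_to_fconf() to the hardware bits
 * F_PORT | F_VLAN | F_PROTOCOL in vlan_pri_map.
 */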
368
369/*
370 * Input: driver's 32b filter mode.
371 * Returns: hardware vnic mode (ingress config) matching the input.
372 */
373static int
374mode_to_iconf(uint32_t mode)
375{
376 if ((mode & T4_FILTER_VNIC) == 0)
377 return (-1); /* ingress config doesn't matter. */
378
379 if (mode & T4_FILTER_IC_VNIC)
380 return (FW_VNIC_MODE_PF_VF);
381 else if (mode & T4_FILTER_IC_ENCAP)
382 return (FW_VNIC_MODE_ENCAP_EN);
383 else
384 return (FW_VNIC_MODE_OUTER_VLAN);
385}
386
387static int
388check_fspec_against_fconf_iconf(struct adapter *sc,
389 struct t4_filter_specification *fs)
390{
391 struct tp_params *tpp = &sc->params.tp;
392 uint32_t fconf = 0;
393
394 if (fs->val.frag || fs->mask.frag)
395 fconf |= F_FRAGMENTATION;
396
397 if (fs->val.matchtype || fs->mask.matchtype)
398 fconf |= F_MPSHITTYPE;
399
400 if (fs->val.macidx || fs->mask.macidx)
401 fconf |= F_MACMATCH;
402
403 if (fs->val.ethtype || fs->mask.ethtype)
404 fconf |= F_ETHERTYPE;
405
406 if (fs->val.proto || fs->mask.proto)
407 fconf |= F_PROTOCOL;
408
409 if (fs->val.tos || fs->mask.tos)
410 fconf |= F_TOS;
411
412 if (fs->val.vlan_vld || fs->mask.vlan_vld)
413 fconf |= F_VLAN;
414
415 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
416 if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
417 return (EINVAL);
418 fconf |= F_VNIC_ID;
419 }
420
421 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
422 if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
423 return (EINVAL);
424 fconf |= F_VNIC_ID;
425 }
426
427#ifdef notyet
428 if (fs->val.encap_vld || fs->mask.encap_vld) {
429 if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
430 return (EINVAL);
431 fconf |= F_VNIC_ID;
432 }
433#endif
434
435 if (fs->val.iport || fs->mask.iport)
436 fconf |= F_PORT;
437
438 if (fs->val.fcoe || fs->mask.fcoe)
439 fconf |= F_FCOE;
440
441 if ((tpp->filter_mode | fconf) != tpp->filter_mode)
442 return (E2BIG);
443
444 return (0);
445}
446
447/*
448 * Input: hardware filter configuration (filter mode/mask, ingress config).
449 * Returns: driver's 32b filter mode matching the input.
450 */
451static uint32_t
452fconf_to_mode(uint16_t hwmode, int vnic_mode)
453{
454 uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
455 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
456
457 if (hwmode & F_FRAGMENTATION)
458 mode |= T4_FILTER_IP_FRAGMENT;
459 if (hwmode & F_MPSHITTYPE)
460 mode |= T4_FILTER_MPS_HIT_TYPE;
461 if (hwmode & F_MACMATCH)
462 mode |= T4_FILTER_MAC_IDX;
463 if (hwmode & F_ETHERTYPE)
464 mode |= T4_FILTER_ETH_TYPE;
465 if (hwmode & F_PROTOCOL)
466 mode |= T4_FILTER_IP_PROTO;
467 if (hwmode & F_TOS)
468 mode |= T4_FILTER_IP_TOS;
469 if (hwmode & F_VLAN)
470 mode |= T4_FILTER_VLAN;
471 if (hwmode & F_VNIC_ID)
472 mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
473 if (hwmode & F_PORT)
474 mode |= T4_FILTER_PORT;
475 if (hwmode & F_FCOE)
476 mode |= T4_FILTER_FCoE;
477
478 switch (vnic_mode) {
479 case FW_VNIC_MODE_PF_VF:
480 mode |= T4_FILTER_IC_VNIC;
481 break;
482 case FW_VNIC_MODE_ENCAP_EN:
483 mode |= T4_FILTER_IC_ENCAP;
484 break;
485 case FW_VNIC_MODE_OUTER_VLAN:
486 default:
487 break;
488 }
489
490 return (mode);
491}
492
493int
494get_filter_mode(struct adapter *sc, uint32_t *mode)
495{
496 struct tp_params *tp = &sc->params.tp;
497 uint16_t filter_mode;
498
499 /* Filter mask must comply with the global filter mode. */
500 MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode);
501
502 /* Non-zero incoming value in mode means "hashfilter mode". */
503 filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
504 *mode = fconf_to_mode(filter_mode, tp->vnic_mode);
505
506 return (0);
507}
508
509int
510set_filter_mode(struct adapter *sc, uint32_t mode)
511{
512 struct tp_params *tp = &sc->params.tp;
513 int rc, iconf;
514 uint16_t fconf;
515
516 iconf = mode_to_iconf(mode);
517 fconf = mode_to_fconf(mode);
518 if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
519 return (0); /* Nothing to do */
520
521 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm");
522 if (rc)
523 return (rc);
524
525 if (hw_off_limits(sc)) {
526 rc = ENXIO;
527 goto done;
528 }
529
530 if (sc->tids.ftids_in_use > 0 || /* TCAM filters active */
531 sc->tids.hpftids_in_use > 0 || /* hi-pri TCAM filters active */
532 sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
533 rc = EBUSY;
534 goto done;
535 }
536
537#ifdef TCP_OFFLOAD
538 if (uld_active(sc, ULD_TOM)) {
539 rc = EBUSY;
540 goto done;
541 }
542#endif
543
544 /* Note that filter mask will get clipped to the new filter mode. */
545 rc = -t4_set_filter_cfg(sc, fconf, -1, iconf);
546done:
547 end_synchronized_op(sc, 0);
548 return (rc);
549}
550
551int
552set_filter_mask(struct adapter *sc, uint32_t mode)
553{
554 struct tp_params *tp = &sc->params.tp;
555 int rc, iconf;
556 uint16_t fmask;
557
558 iconf = mode_to_iconf(mode);
559 fmask = mode_to_fconf(mode);
560 if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
561 return (0); /* Nothing to do */
562
563 /*
564 * We aren't going to change the global filter mode or VNIC mode here.
565 * The given filter mask must conform to them.
566 */
567 if ((fmask | tp->filter_mode) != tp->filter_mode)
568 return (EINVAL);
569 if (iconf != -1 && iconf != tp->vnic_mode)
570 return (EINVAL);
571
572 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sethfm");
573 if (rc)
574 return (rc);
575
576 if (hw_off_limits(sc)) {
577 rc = ENXIO;
578 goto done;
579 }
580
581 if (sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
582 rc = EBUSY;
583 goto done;
584 }
585
586#ifdef TCP_OFFLOAD
587 if (uld_active(sc, ULD_TOM)) {
588 rc = EBUSY;
589 goto done;
590 }
591#endif
592 rc = -t4_set_filter_cfg(sc, -1, fmask, -1);
593done:
594 end_synchronized_op(sc, 0);
595 return (rc);
596}
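/*
 * Example (illustrative) of the relationship enforced above: the filter mask
 * must be a subset of the global filter mode.  If tp->filter_mode covers
 * F_PORT | F_VLAN | F_PROTOCOL, then a mask of F_PORT | F_PROTOCOL is
 * accepted, while a mask that also asks for F_TOS fails with EINVAL.
 */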
597
598static inline uint64_t
599get_filter_hits(struct adapter *sc, uint32_t tid)
600{
601 uint32_t tcb_addr;
602 uint64_t hits;
603
604 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
605
606 mtx_lock(&sc->reg_lock);
607 if (hw_off_limits(sc))
608 hits = 0;
609 else if (is_t4(sc)) {
610 uint64_t t;
611
612 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
613 hits = be64toh(t);
614 } else {
615 uint32_t t;
616
617 read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
618 hits = be32toh(t);
619 }
620 mtx_unlock(&sc->reg_lock);
621
622 return (hits);
623}
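/*
 * Note on the reads above: T4 stores the filter hit count as a 64-bit
 * big-endian value at offset 16 of the filter's TCB, while T5 and later keep
 * a 32-bit big-endian count at offset 24, hence the be64toh/be32toh pair.
 */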
624
625int
626get_filter(struct adapter *sc, struct t4_filter *t)
627{
628 if (t->fs.hash)
629 return (get_hashfilter(sc, t));
630 else
631 return (get_tcamfilter(sc, t));
632}
633
634static int
635set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
636 struct smt_entry *smt)
637{
638 struct filter_entry *f;
639 struct fw_filter2_wr *fwr;
640 u_int vnic_vld, vnic_vld_mask;
641 struct wrq_cookie cookie;
642 int i, rc, busy, locked;
643 u_int tid;
644 const int ntids = t->fs.type ? 4 : 1;
645
646 MPASS(!t->fs.hash);
647 /* Already validated against fconf, iconf */
648 MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
649 MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
650
651 if (separate_hpfilter_region(sc) && t->fs.prio) {
652 MPASS(t->idx < sc->tids.nhpftids);
653 f = &sc->tids.hpftid_tab[t->idx];
654 tid = sc->tids.hpftid_base + t->idx;
655 } else {
656 MPASS(t->idx < sc->tids.nftids);
657 f = &sc->tids.ftid_tab[t->idx];
658 tid = sc->tids.ftid_base + t->idx;
659 }
660 rc = busy = locked = 0;
661 mtx_lock(&sc->tids.ftid_lock);
662 for (i = 0; i < ntids; i++) {
663 busy += f[i].pending + f[i].valid;
664 locked += f[i].locked;
665 }
666 if (locked > 0)
667 rc = EPERM;
668 else if (busy > 0)
669 rc = EBUSY;
670 else {
671 int len16;
672
673 if (sc->params.filter2_wr_support)
674 len16 = howmany(sizeof(struct fw_filter2_wr), 16);
675 else
676 len16 = howmany(sizeof(struct fw_filter_wr), 16);
677 fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
678 if (__predict_false(fwr == NULL))
679 rc = ENOMEM;
680 else {
681 f->pending = 1;
682 if (separate_hpfilter_region(sc) && t->fs.prio)
683 sc->tids.hpftids_in_use++;
684 else
685 sc->tids.ftids_in_use++;
686 }
687 }
688 mtx_unlock(&sc->tids.ftid_lock);
689 if (rc != 0)
690 return (rc);
691
692 /*
693 * Can't fail now. A set-filter WR will definitely be sent.
694 */
695
696 f->tid = tid;
697 f->fs = t->fs;
698 f->l2te = l2te;
699 f->smt = smt;
700
701 if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
702 vnic_vld = 1;
703 else
704 vnic_vld = 0;
705 if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
706 vnic_vld_mask = 1;
707 else
708 vnic_vld_mask = 0;
709
710 bzero(fwr, sizeof(*fwr));
711 if (sc->params.filter2_wr_support)
712 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
713 else
714 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
715 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
716 fwr->tid_to_iq =
717 htobe32(V_FW_FILTER_WR_TID(f->tid) |
718 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
719 V_FW_FILTER_WR_NOREPLY(0) |
720 V_FW_FILTER_WR_IQ(f->fs.iq));
721 fwr->del_filter_to_l2tix =
722 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
723 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
724 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
725 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
726 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
727 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
728 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
729 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
730 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
731 f->fs.newvlan == VLAN_REWRITE) |
732 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
733 f->fs.newvlan == VLAN_REWRITE) |
734 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
735 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
736 V_FW_FILTER_WR_PRIO(f->fs.prio) |
737 V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
738 fwr->ethtype = htobe16(f->fs.val.ethtype);
739 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
740 fwr->frag_to_ovlan_vldm =
741 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
742 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
743 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
744 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
745 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
746 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
747 fwr->smac_sel = 0;
748 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
749 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
750 fwr->maci_to_matchtypem =
751 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
752 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
753 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
754 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
755 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
756 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
757 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
758 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
759 fwr->ptcl = f->fs.val.proto;
760 fwr->ptclm = f->fs.mask.proto;
761 fwr->ttyp = f->fs.val.tos;
762 fwr->ttypm = f->fs.mask.tos;
763 fwr->ivlan = htobe16(f->fs.val.vlan);
764 fwr->ivlanm = htobe16(f->fs.mask.vlan);
765 fwr->ovlan = htobe16(f->fs.val.vnic);
766 fwr->ovlanm = htobe16(f->fs.mask.vnic);
767 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
768 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
769 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
770 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
771 fwr->lp = htobe16(f->fs.val.dport);
772 fwr->lpm = htobe16(f->fs.mask.dport);
773 fwr->fp = htobe16(f->fs.val.sport);
774 fwr->fpm = htobe16(f->fs.mask.sport);
775 /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
776 bzero(fwr->sma, sizeof (fwr->sma));
777 if (sc->params.filter2_wr_support) {
778 fwr->filter_type_swapmac =
779 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
780 fwr->natmode_to_ulp_type =
781 V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
782 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
783 V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
784 V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
785 memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
786 memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
787 fwr->newlport = htobe16(f->fs.nat_dport);
788 fwr->newfport = htobe16(f->fs.nat_sport);
789 fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
790 }
791 commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
792
793 /* Wait for response. */
794 mtx_lock(&sc->tids.ftid_lock);
795 for (;;) {
796 if (f->pending == 0) {
797 rc = f->valid ? 0 : EIO;
798 break;
799 }
800 if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
801 rc = EINPROGRESS;
802 break;
803 }
804 }
805 mtx_unlock(&sc->tids.ftid_lock);
806 return (rc);
807}
808
809static int
810hashfilter_ntuple(struct adapter *sc, struct t4_filter_specification *fs,
811 uint64_t *ftuple)
812{
813 struct tp_params *tp = &sc->params.tp;
814 uint16_t fmask;
815
816 *ftuple = fmask = 0;
817
818 /*
819 * Initialize each of the fields which we care about which are present
820 * in the Compressed Filter Tuple.
821 */
822 if (tp->vlan_shift >= 0 && fs->mask.vlan) {
823 *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
824 tp->vlan_shift;
825 fmask |= F_VLAN;
826 }
827
828 if (tp->port_shift >= 0 && fs->mask.iport) {
829 *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
830 fmask |= F_PORT;
831 }
832
833 if (tp->protocol_shift >= 0 && fs->mask.proto) {
834 *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
835 fmask |= F_PROTOCOL;
836 }
837
838 if (tp->tos_shift >= 0 && fs->mask.tos) {
839 *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
840 fmask |= F_TOS;
841 }
842
843 if (tp->vnic_shift >= 0 && fs->mask.vnic) {
844 /* vnic_mode was already validated. */
845 if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
846 MPASS(fs->mask.pfvf_vld);
847 else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
848 MPASS(fs->mask.ovlan_vld);
849#ifdef notyet
850 else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
851 MPASS(fs->mask.encap_vld);
852#endif
853 *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
854 fmask |= F_VNIC_ID;
855 }
856
857 if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
858 *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
859 fmask |= F_MACMATCH;
860 }
861
862 if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
863 *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
864 fmask |= F_ETHERTYPE;
865 }
866
867 if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
868 *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
869 fmask |= F_MPSHITTYPE;
870 }
871
872 if (tp->frag_shift >= 0 && fs->mask.frag) {
873 *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
874 fmask |= F_FRAGMENTATION;
875 }
876
877 if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
878 *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
879 fmask |= F_FCOE;
880 }
881
882 /* A hashfilter must conform to the hardware filter mask. */
883 if (fmask != tp->filter_mask)
884 return (EINVAL);
885
886 return (0);
887}
888
889static bool
890is_4tuple_specified(struct t4_filter_specification *fs)
891{
892 int i;
893 const int n = fs->type ? 16 : 4;
894
895 if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
896 return (false);
897
898 for (i = 0; i < n; i++) {
899 if (fs->mask.sip[i] != 0xff)
900 return (false);
901 if (fs->mask.dip[i] != 0xff)
902 return (false);
903 }
904
905 return (true);
906}
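/*
 * Example (illustrative): an IPv4 hashfilter (fs->type == 0) must be an
 * exact-match 4-tuple, i.e. mask.sport == mask.dport == 0xffff and every
 * byte of mask.sip[0..3] and mask.dip[0..3] equal to 0xff; set_filter()
 * rejects anything less specific with EINVAL.
 */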
907
908int
909set_filter(struct adapter *sc, struct t4_filter *t)
910{
911 struct tid_info *ti = &sc->tids;
912 struct l2t_entry *l2te = NULL;
913 struct smt_entry *smt = NULL;
914 uint64_t ftuple;
915 int rc;
916
917 /*
918 * Basic filter checks first.
919 */
920
921 if (t->fs.hash) {
922 if (!is_hashfilter(sc) || ti->ntids == 0)
923 return (ENOTSUP);
924 /* Hardware, not user, selects a tid for hashfilters. */
925 if (t->idx != (uint32_t)-1)
926 return (EINVAL);
927 /* T5 can't count hashfilter hits. */
928 if (is_t5(sc) && t->fs.hitcnts)
929 return (EINVAL);
930 if (!is_4tuple_specified(&t->fs))
931 return (EINVAL);
932 rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
933 if (rc != 0)
934 return (rc);
935 } else {
936 if (separate_hpfilter_region(sc) && t->fs.prio) {
937 if (ti->nhpftids == 0)
938 return (ENOTSUP);
939 if (t->idx >= ti->nhpftids)
940 return (EINVAL);
941 } else {
942 if (ti->nftids == 0)
943 return (ENOTSUP);
944 if (t->idx >= ti->nftids)
945 return (EINVAL);
946 }
947 /* IPv6 filter idx must be 4 aligned */
948 if (t->fs.type == 1 &&
949 ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
950 return (EINVAL);
951 }
952
953 /* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
954 if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
955 (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
956 t->fs.swapmac || t->fs.nat_mode))
957 return (ENOTSUP);
958
959 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
960 return (EINVAL);
961 if (t->fs.val.iport >= sc->params.nports)
962 return (EINVAL);
963
964 /* Can't specify an iqid/rss_info if not steering. */
965 if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash && t->fs.iq)
966 return (EINVAL);
967
968 /* Validate against the global filter mode and ingress config */
969 rc = check_fspec_against_fconf_iconf(sc, &t->fs);
970 if (rc != 0)
971 return (rc);
972
973 /*
974 * Basic checks passed. Make sure the queues and tid tables are setup.
975 */
976
977 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
978 if (rc)
979 return (rc);
980
981 if (hw_off_limits(sc)) {
982 rc = ENXIO;
983 goto done;
984 }
985
986 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
987 goto done;
988
989 if (t->fs.hash) {
990 if (__predict_false(ti->hftid_hash_4t == NULL)) {
991 rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
992 if (rc != 0)
993 goto done;
994 }
995 } else if (separate_hpfilter_region(sc) && t->fs.prio &&
996 __predict_false(ti->hpftid_tab == NULL)) {
997 MPASS(ti->nhpftids != 0);
998 KASSERT(ti->hpftids_in_use == 0,
999 ("%s: no memory allocated but hpftids_in_use is %u",
1000 __func__, ti->hpftids_in_use));
1001 ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
1002 ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
1003 if (ti->hpftid_tab == NULL) {
1004 rc = ENOMEM;
1005 goto done;
1006 }
1007 if (!mtx_initialized(&sc->tids.ftid_lock)) {
1008 mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
1009 cv_init(&ti->ftid_cv, "t4fcv");
1010 }
1011 } else if (__predict_false(ti->ftid_tab == NULL)) {
1012 MPASS(ti->nftids != 0);
1013 KASSERT(ti->ftids_in_use == 0,
1014 ("%s: no memory allocated but ftids_in_use is %u",
1015 __func__, ti->ftids_in_use));
1016 ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
1017 M_CXGBE, M_NOWAIT | M_ZERO);
1018 if (ti->ftid_tab == NULL) {
1019 rc = ENOMEM;
1020 goto done;
1021 }
1022 if (!mtx_initialized(&sc->tids.ftid_lock)) {
1023 mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
1024 cv_init(&ti->ftid_cv, "t4fcv");
1025 }
1026 }
1027done:
1028 end_synchronized_op(sc, 0);
1029 if (rc != 0)
1030 return (rc);
1031
1032 /*
1033 * Allocate L2T entry, SMT entry, etc.
1034 */
1035
1036 if (t->fs.newdmac || t->fs.newvlan) {
1037 /* This filter needs an L2T entry; allocate one. */
1038 l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
1039 t->fs.dmac);
1040 if (__predict_false(l2te == NULL)) {
1041 rc = EAGAIN;
1042 goto error;
1043 }
1044 }
1045
1046 if (t->fs.newsmac) {
1047 /* This filter needs an SMT entry; allocate one. */
1048 smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
1049 if (__predict_false(smt == NULL)) {
1050 rc = EAGAIN;
1051 goto error;
1052 }
1053 rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
1054 if (rc)
1055 goto error;
1056 }
1057
1058 if (t->fs.hash)
1059 rc = set_hashfilter(sc, t, ftuple, l2te, smt);
1060 else
1061 rc = set_tcamfilter(sc, t, l2te, smt);
1062
1063 if (rc != 0 && rc != EINPROGRESS) {
1064error:
1065 if (l2te)
1066 t4_l2t_release(l2te);
1067 if (smt)
1068 t4_smt_release(smt);
1069 }
1070 return (rc);
1071}
1072
1073static int
1074del_tcamfilter(struct adapter *sc, struct t4_filter *t)
1075{
1076 struct filter_entry *f;
1077 struct fw_filter_wr *fwr;
1078 struct wrq_cookie cookie;
1079 int rc, nfilters;
1080#ifdef INVARIANTS
1081 u_int tid_base;
1082#endif
1083
1084 mtx_lock(&sc->tids.ftid_lock);
1085 if (separate_hpfilter_region(sc) && t->fs.prio) {
1086 nfilters = sc->tids.nhpftids;
1087 f = sc->tids.hpftid_tab;
1088#ifdef INVARIANTS
1089 tid_base = sc->tids.hpftid_base;
1090#endif
1091 } else {
1092 nfilters = sc->tids.nftids;
1093 f = sc->tids.ftid_tab;
1094#ifdef INVARIANTS
1095 tid_base = sc->tids.ftid_base;
1096#endif
1097 }
1098 MPASS(f != NULL); /* Caller checked this. */
1099 if (t->idx >= nfilters) {
1100 rc = EINVAL;
1101 goto done;
1102 }
1103 f += t->idx;
1104
1105 if (f->locked) {
1106 rc = EPERM;
1107 goto done;
1108 }
1109 if (f->pending) {
1110 rc = EBUSY;
1111 goto done;
1112 }
1113 if (f->valid == 0) {
1114 rc = EINVAL;
1115 goto done;
1116 }
1117 MPASS(f->tid == tid_base + t->idx);
1118 fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
1119 if (fwr == NULL) {
1120 rc = ENOMEM;
1121 goto done;
1122 }
1123
1124 bzero(fwr, sizeof (*fwr));
1125 t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
1126 f->pending = 1;
1127 commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
1128 t->fs = f->fs; /* extra info for the caller */
1129
1130 for (;;) {
1131 if (f->pending == 0) {
1132 rc = f->valid ? EIO : 0;
1133 break;
1134 }
1135 if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
1136 rc = EINPROGRESS;
1137 break;
1138 }
1139 }
1140done:
1141 mtx_unlock(&sc->tids.ftid_lock);
1142 return (rc);
1143}
1144
1145int
1146del_filter(struct adapter *sc, struct t4_filter *t)
1147{
1148
1149 /* No filters possible if not initialized yet. */
1150 if (!(sc->flags & FULL_INIT_DONE))
1151 return (EINVAL);
1152
1153 /*
1154 * The checks for tid tables ensure that the locks that del_* will reach
1155 * for are initialized.
1156 */
1157 if (t->fs.hash) {
1158 if (sc->tids.hftid_hash_4t != NULL)
1159 return (del_hashfilter(sc, t));
1160 } else if (separate_hpfilter_region(sc) && t->fs.prio) {
1161 if (sc->tids.hpftid_tab != NULL)
1162 return (del_tcamfilter(sc, t));
1163 } else {
1164 if (sc->tids.ftid_tab != NULL)
1165 return (del_tcamfilter(sc, t));
1166 }
1167
1168 return (EINVAL);
1169}
1170
1171/*
1172 * Release secondary resources associated with the filter.
1173 */
1174static void
1175free_filter_resources(struct filter_entry *f)
1176{
1177
1178 if (f->l2te) {
1179 t4_l2t_release(f->l2te);
1180 f->l2te = NULL;
1181 }
1182 if (f->smt) {
1183 t4_smt_release(f->smt);
1184 f->smt = NULL;
1185 }
1186}
1187
1188static int
1189set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
1190 uint64_t val, int no_reply)
1191{
1192 struct wrq_cookie cookie;
1193 struct cpl_set_tcb_field *req;
1194
1195 req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
1196 if (req == NULL)
1197 return (ENOMEM);
1198 bzero(req, sizeof(*req));
1199 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
1200 if (no_reply == 0) {
1201 req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
1202 V_NO_REPLY(0));
1203 } else
1204 req->reply_ctrl = htobe16(V_NO_REPLY(1));
1205 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
1206 req->mask = htobe64(mask);
1207 req->val = htobe64(val);
1208 commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
1209
1210 return (0);
1211}
1212
1213/* Set one of the t_flags bits in the TCB. */
1214static inline int
1215set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
1216 u_int no_reply)
1217{
1218
1219 return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
1220 (uint64_t)val << bit_pos, no_reply));
1221}
1222
1223int
1224t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1225{
1226 struct adapter *sc = iq->adapter;
1227 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
1228 u_int tid = GET_TID(rpl);
1229 u_int rc, idx;
1230 struct filter_entry *f;
1231
1232 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
1233 rss->opcode));
1234
1235
1236 if (is_hpftid(sc, tid)) {
1237 idx = tid - sc->tids.hpftid_base;
1238 f = &sc->tids.hpftid_tab[idx];
1239 } else if (is_ftid(sc, tid)) {
1240 idx = tid - sc->tids.ftid_base;
1241 f = &sc->tids.ftid_tab[idx];
1242 } else
1243 panic("%s: FW reply for invalid TID %d.", __func__, tid);
1244
1245 MPASS(f->tid == tid);
1246 rc = G_COOKIE(rpl->cookie);
1247
1248 mtx_lock(&sc->tids.ftid_lock);
1249 KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
1250 __func__, rc, tid));
1251 switch(rc) {
1252 case FW_FILTER_WR_FLT_ADDED:
1253 /* set-filter succeeded */
1254 f->valid = 1;
1255 if (f->fs.newsmac) {
1256 MPASS(f->smt != NULL);
1257 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1258 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1259 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
1260 V_TCB_SMAC_SEL(f->smt->idx), 1);
1261 /* XXX: wait for reply to TCB update before !pending */
1262 }
1263 break;
1264 case FW_FILTER_WR_FLT_DELETED:
1265 /* del-filter succeeded */
1266 MPASS(f->valid == 1);
1267 f->valid = 0;
1268 /* Fall through */
1269 case FW_FILTER_WR_SMT_TBL_FULL:
1270 /* set-filter failed due to lack of SMT space. */
1271 MPASS(f->valid == 0);
1272 free_filter_resources(f);
1273 if (separate_hpfilter_region(sc) && f->fs.prio)
1274 sc->tids.hpftids_in_use--;
1275 else
1276 sc->tids.ftids_in_use--;
1277 break;
1278 case FW_FILTER_WR_SUCCESS:
1279 case FW_FILTER_WR_EINVAL:
1280 default:
1281 panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
1282 idx);
1283 }
1284 f->pending = 0;
1285 cv_broadcast(&sc->tids.ftid_cv);
1286 mtx_unlock(&sc->tids.ftid_lock);
1287
1288 return (0);
1289}
1290
1291/*
1292 * This is the reply to the Active Open that created the filter. Additional TCB
1293 * updates may be required to complete the filter configuration.
1294 */
1295int
1296t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
1297 struct mbuf *m)
1298{
1299 struct adapter *sc = iq->adapter;
1300 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
1301 u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
1302 u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
1303 struct filter_entry *f = lookup_atid(sc, atid);
1304
1305 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1306
1307 mtx_lock(&sc->tids.hftid_lock);
1308 KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
1309 KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
1310 __func__, f, f->tid));
1311 if (status == CPL_ERR_NONE) {
1312 f->tid = GET_TID(cpl);
1313 MPASS(lookup_hftid(sc, f->tid) == NULL);
1314 insert_hftid(sc, f);
1315 /*
1316 * Leave the filter pending until it is fully set up, which will
1317 * be indicated by the reply to the last TCB update. No need to
1318 * unblock the ioctl thread either.
1319 */
1320 if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
1321 goto done;
1322 f->valid = 1;
1323 f->pending = 0;
1324 } else {
1325 /* provide errno instead of tid to ioctl */
1326 f->tid = act_open_rpl_status_to_errno(status);
1327 f->valid = 0;
1328 f->pending = 0;
1329 if (act_open_has_tid(status))
1330 release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
1331 free_filter_resources(f);
1332 remove_hf(sc, f);
1333 if (f->locked == 0)
1334 free(f, M_CXGBE);
1335 }
1336 cv_broadcast(&sc->tids.hftid_cv);
1337done:
1338 mtx_unlock(&sc->tids.hftid_lock);
1339
1340 free_atid(sc, atid);
1341 return (0);
1342}
1343
1344int
1345t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
1346 struct mbuf *m)
1347{
1348 struct adapter *sc = iq->adapter;
1349 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
1350 u_int tid = GET_TID(rpl);
1351 struct filter_entry *f;
1352
1353 mtx_lock(&sc->tids.hftid_lock);
1354 f = lookup_hftid(sc, tid);
1355 KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1356 KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1357 f, tid));
1358 KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
1359 __func__, f, tid));
1360 f->pending = 0;
1361 if (rpl->status == 0) {
1362 f->valid = 1;
1363 } else {
1364 f->tid = EIO;
1365 f->valid = 0;
1366 free_filter_resources(f);
1367 remove_hftid(sc, f);
1368 remove_hf(sc, f);
1369 release_tid(sc, tid, &sc->sge.ctrlq[0]);
1370 if (f->locked == 0)
1371 free(f, M_CXGBE);
1372 }
1373 cv_broadcast(&sc->tids.hftid_cv);
1374 mtx_unlock(&sc->tids.hftid_lock);
1375
1376 return (0);
1377}
1378
1379int
1380t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
1381 struct mbuf *m)
1382{
1383 struct adapter *sc = iq->adapter;
1384 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
1385 unsigned int tid = GET_TID(cpl);
1386 struct filter_entry *f;
1387
1388 mtx_lock(&sc->tids.hftid_lock);
1389 f = lookup_hftid(sc, tid);
1390 KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1391 KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1392 f, tid));
1393 KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
1394 tid));
1395 f->pending = 0;
1396 if (cpl->status == 0) {
1397 f->valid = 0;
1398 free_filter_resources(f);
1399 remove_hftid(sc, f);
1400 remove_hf(sc, f);
1401 release_tid(sc, tid, &sc->sge.ctrlq[0]);
1402 if (f->locked == 0)
1403 free(f, M_CXGBE);
1404 }
1405 cv_broadcast(&sc->tids.hftid_cv);
1406 mtx_unlock(&sc->tids.hftid_lock);
1407
1408 return (0);
1409}
1410
1411static int
1412get_tcamfilter(struct adapter *sc, struct t4_filter *t)
1413{
1414 int i, nfilters;
1415 struct filter_entry *f;
1416 u_int in_use;
1417#ifdef INVARIANTS
1418 u_int tid_base;
1419#endif
1420
1421 MPASS(!t->fs.hash);
1422
1423 if (separate_hpfilter_region(sc) && t->fs.prio) {
1424 nfilters = sc->tids.nhpftids;
1425 f = sc->tids.hpftid_tab;
1426 in_use = sc->tids.hpftids_in_use;
1427#ifdef INVARIANTS
1428 tid_base = sc->tids.hpftid_base;
1429#endif
1430 } else {
1431 nfilters = sc->tids.nftids;
1432 f = sc->tids.ftid_tab;
1433 in_use = sc->tids.ftids_in_use;
1434#ifdef INVARIANTS
1435 tid_base = sc->tids.ftid_base;
1436#endif
1437 }
1438
1439 if (in_use == 0 || f == NULL || t->idx >= nfilters) {
1440 t->idx = 0xffffffff;
1441 return (0);
1442 }
1443
1444 f += t->idx;
1445 mtx_lock(&sc->tids.ftid_lock);
1446 for (i = t->idx; i < nfilters; i++, f++) {
1447 if (f->valid) {
1448 MPASS(f->tid == tid_base + i);
1449 t->idx = i;
1450 t->l2tidx = f->l2te ? f->l2te->idx : 0;
1451 t->smtidx = f->smt ? f->smt->idx : 0;
1452 if (f->fs.hitcnts)
1453 t->hits = get_filter_hits(sc, f->tid);
1454 else
1455 t->hits = UINT64_MAX;
1456 t->fs = f->fs;
1457
1458 goto done;
1459 }
1460 }
1461 t->idx = 0xffffffff;
1462done:
1463 mtx_unlock(&sc->tids.ftid_lock);
1464 return (0);
1465}
1466
1467static int
1468get_hashfilter(struct adapter *sc, struct t4_filter *t)
1469{
1470 struct tid_info *ti = &sc->tids;
1471 int tid;
1472 struct filter_entry *f;
1473 const int inv_tid = ti->ntids + ti->tid_base;
1474
1475 MPASS(t->fs.hash);
1476
1477 if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
1478 t->idx >= inv_tid) {
1479 t->idx = 0xffffffff;
1480 return (0);
1481 }
1482 if (t->idx < ti->tid_base)
1483 t->idx = ti->tid_base;
1484
1485 mtx_lock(&ti->hftid_lock);
1486 for (tid = t->idx; tid < inv_tid; tid++) {
1487 f = lookup_hftid(sc, tid);
1488 if (f != NULL && f->valid) {
1489 t->idx = tid;
1490 t->l2tidx = f->l2te ? f->l2te->idx : 0;
1491 t->smtidx = f->smt ? f->smt->idx : 0;
1492 if (f->fs.hitcnts)
1493 t->hits = get_filter_hits(sc, tid);
1494 else
1495 t->hits = UINT64_MAX;
1496 t->fs = f->fs;
1497
1498 goto done;
1499 }
1500 }
1501 t->idx = 0xffffffff;
1502done:
1503 mtx_unlock(&ti->hftid_lock);
1504 return (0);
1505}
1506
1507static void
1508mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
1509 uint64_t ftuple, struct cpl_act_open_req6 *cpl)
1510{
1511 struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
1512 struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
1513
1514 /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1515 MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1516 MPASS(atid >= 0);
1517
1518 if (chip_id(sc) == CHELSIO_T5) {
1519 INIT_TP_WR(cpl5, 0);
1520 } else {
1521 INIT_TP_WR(cpl6, 0);
1522 cpl6->rsvd2 = 0;
1523 cpl6->opt3 = 0;
1524 }
1525
1526 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
1527 V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1528 V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1529 cpl->local_port = htobe16(f->fs.val.dport);
1530 cpl->peer_port = htobe16(f->fs.val.sport);
1531 cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
1532 cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
1533 cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
1534 cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
1535 cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1536 f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1537 V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1538 V_NO_CONG(f->fs.rpttid) |
1539 V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1540 F_TCAM_BYPASS | F_NON_OFFLOAD);
1541
1542 cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1543 cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1544 V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1545 V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1546 F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1547 V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1548 V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
1549}
1550
1551static void
1552mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
1553 uint64_t ftuple, struct cpl_act_open_req *cpl)
1554{
1555 struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
1556 struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
1557
1558 /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1559 MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1560 MPASS(atid >= 0);
1561
1562 if (chip_id(sc) == CHELSIO_T5) {
1563 INIT_TP_WR(cpl5, 0);
1564 } else {
1565 INIT_TP_WR(cpl6, 0);
1566 cpl6->rsvd2 = 0;
1567 cpl6->opt3 = 0;
1568 }
1569
1570 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
1571 V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1572 V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1573 cpl->local_port = htobe16(f->fs.val.dport);
1574 cpl->peer_port = htobe16(f->fs.val.sport);
1575 cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
1576 f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
1577 cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
1578 f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
1579 cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1580 f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1581 V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1582 V_NO_CONG(f->fs.rpttid) |
1583 V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1584 F_TCAM_BYPASS | F_NON_OFFLOAD);
1585
1586 cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1587 cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1588 V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1589 V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1590 F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1591 V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1592 V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
1593}
1594
1595static int
1596act_open_cpl_len16(struct adapter *sc, int isipv6)
1597{
1598 int idx;
1599 static const int sz_table[3][2] = {
1600 {
1601 howmany(sizeof (struct cpl_act_open_req), 16),
1602 howmany(sizeof (struct cpl_act_open_req6), 16)
1603 },
1604 {
1605 howmany(sizeof (struct cpl_t5_act_open_req), 16),
1606 howmany(sizeof (struct cpl_t5_act_open_req6), 16)
1607 },
1608 {
1609 howmany(sizeof (struct cpl_t6_act_open_req), 16),
1610 howmany(sizeof (struct cpl_t6_act_open_req6), 16)
1611 },
1612 };
1613
1614 MPASS(chip_id(sc) >= CHELSIO_T4);
1615 idx = min(chip_id(sc) - CHELSIO_T4, 2);
1616
1617 return (sz_table[idx][!!isipv6]);
1618}
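/*
 * Example (illustrative): for a T6 adapter and an IPv6 hashfilter this
 * returns sz_table[2][1], i.e. howmany(sizeof(struct cpl_t6_act_open_req6),
 * 16) 16-byte slots in the control queue.
 */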
1619
1620static int
1621set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
1622 struct l2t_entry *l2te, struct smt_entry *smt)
1623{
1624 void *wr;
1625 struct wrq_cookie cookie;
1626 struct filter_entry *f;
1627 int rc, atid = -1;
1628 uint32_t hash;
1629
1630 MPASS(t->fs.hash);
1631 /* Already validated against fconf, iconf */
1632 MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
1633 MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
1634
1635 hash = hf_hashfn_4t(&t->fs);
1636
1637 mtx_lock(&sc->tids.hftid_lock);
1638 if (lookup_hf(sc, &t->fs, hash) != NULL) {
1639 rc = EEXIST;
1640 goto done;
1641 }
1642
1643 f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
1644 if (__predict_false(f == NULL)) {
1645 rc = ENOMEM;
1646 goto done;
1647 }
1648 f->fs = t->fs;
1649 f->l2te = l2te;
1650 f->smt = smt;
1651
1652 atid = alloc_atid(sc, f);
1653 if (__predict_false(atid) == -1) {
1654 free(f, M_CXGBE);
1655 rc = EAGAIN;
1656 goto done;
1657 }
1658 MPASS(atid >= 0);
1659
1660 wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
1661 &cookie);
1662 if (wr == NULL) {
1663 free_atid(sc, atid);
1664 free(f, M_CXGBE);
1665 rc = ENOMEM;
1666 goto done;
1667 }
1668 if (f->fs.type)
1669 mk_act_open_req6(sc, f, atid, ftuple, wr);
1670 else
1671 mk_act_open_req(sc, f, atid, ftuple, wr);
1672
1673 f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
1674 f->pending = 1;
1675 f->tid = -1;
1676 insert_hf(sc, f, hash);
1677 commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1678
1679 for (;;) {
1680 MPASS(f->locked);
1681 if (f->pending == 0) {
1682 if (f->valid) {
1683 rc = 0;
1684 f->locked = 0;
1685 t->idx = f->tid;
1686 } else {
1687 rc = f->tid;
1688 free(f, M_CXGBE);
1689 }
1690 break;
1691 }
1692 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1693 f->locked = 0;
1694 rc = EINPROGRESS;
1695 break;
1696 }
1697 }
1698done:
1699 mtx_unlock(&sc->tids.hftid_lock);
1700 return (rc);
1701}
1702
1703/* SET_TCB_FIELD sent as a ULP command looks like this */
1704#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
1705 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
1706
1707static void *
1708mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
1709 uint64_t val, uint32_t tid, uint32_t qid)
1710{
1711 struct ulptx_idata *ulpsc;
1712 struct cpl_set_tcb_field_core *req;
1713
1714 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1715 ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
1716
1717 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1718 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1719 ulpsc->len = htobe32(sizeof(*req));
1720
1721 req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
1722 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1723 req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
1724 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
1725 req->mask = htobe64(mask);
1726 req->val = htobe64(val);
1727
1728 ulpsc = (struct ulptx_idata *)(req + 1);
1729 if (LEN__SET_TCB_FIELD_ULP % 16) {
1730 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1731 ulpsc->len = htobe32(0);
1732 return (ulpsc + 1);
1733 }
1734 return (ulpsc);
1735}
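/*
 * Note on the trailing ULP_TX_SC_NOOP above: when LEN__SET_TCB_FIELD_ULP is
 * not a multiple of 16 the remainder of the 16-byte slot is filled with a
 * no-op sub-command so that the next ULP_TX sub-command in the same work
 * request starts on a 16-byte boundary.  mk_abort_req_ulp() and
 * mk_abort_rpl_ulp() below pad the same way.
 */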
1736
1737/* ABORT_REQ sent as a ULP command looks like this */
1738#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
1739 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))
1740
1741static void *
1742mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1743{
1744 struct ulptx_idata *ulpsc;
1745 struct cpl_abort_req_core *req;
1746
1747 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1748 ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));
1749
1750 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1751 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1752 ulpsc->len = htobe32(sizeof(*req));
1753
1754 req = (struct cpl_abort_req_core *)(ulpsc + 1);
1755 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1756 req->rsvd0 = htonl(0);
1757 req->rsvd1 = 0;
1758 req->cmd = CPL_ABORT_NO_RST;
1759
1760 ulpsc = (struct ulptx_idata *)(req + 1);
1761 if (LEN__ABORT_REQ_ULP % 16) {
1762 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1763 ulpsc->len = htobe32(0);
1764 return (ulpsc + 1);
1765 }
1766 return (ulpsc);
1767}
1768
1769/* ABORT_RPL sent as a ULP command looks like this */
1770#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
1771 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))
1772
1773static void *
1774mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1775{
1776 struct ulptx_idata *ulpsc;
1777 struct cpl_abort_rpl_core *rpl;
1778
1779 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1780 ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));
1781
1782 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1783 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1784 ulpsc->len = htobe32(sizeof(*rpl));
1785
1786 rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
1787 OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1788 rpl->rsvd0 = htonl(0);
1789 rpl->rsvd1 = 0;
1790 rpl->cmd = CPL_ABORT_NO_RST;
1791
1792 ulpsc = (struct ulptx_idata *)(rpl + 1);
1793 if (LEN__ABORT_RPL_ULP % 16) {
1794 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1795 ulpsc->len = htobe32(0);
1796 return (ulpsc + 1);
1797 }
1798 return (ulpsc);
1799}
1800
1801static inline int
1802del_hashfilter_wrlen(void)
1803{
1804
1805 return (sizeof(struct work_request_hdr) +
1806 roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
1807 roundup2(LEN__ABORT_REQ_ULP, 16) +
1808 roundup2(LEN__ABORT_RPL_ULP, 16));
1809}
1810
1811static void
1812mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
1813{
1814 struct ulp_txpkt *ulpmc;
1815
1816 INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
1817 ulpmc = (struct ulp_txpkt *)(wrh + 1);
1818 ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
1819 V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
1820 ulpmc = mk_abort_req_ulp(ulpmc, tid);
1821 ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
1822}
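/*
 * Illustrative layout of the composite work request built above (each piece
 * padded to a 16-byte multiple, which is what del_hashfilter_wrlen() sizes):
 *
 *	work_request_hdr
 *	ULP_TX_PKT: CPL_SET_TCB_FIELD	(repoint W_TCB_RSS_INFO at qid)
 *	ULP_TX_PKT: CPL_ABORT_REQ	(CPL_ABORT_NO_RST)
 *	ULP_TX_PKT: CPL_ABORT_RPL	(CPL_ABORT_NO_RST)
 */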
1823
1824static int
1825del_hashfilter(struct adapter *sc, struct t4_filter *t)
1826{
1827 struct tid_info *ti = &sc->tids;
1828 void *wr;
1829 struct filter_entry *f;
1830 struct wrq_cookie cookie;
1831 int rc;
1832 const int wrlen = del_hashfilter_wrlen();
1833 const int inv_tid = ti->ntids + ti->tid_base;
1834
1835 MPASS(sc->tids.hftid_hash_4t != NULL);
1836 MPASS(sc->tids.ntids > 0);
1837
1838 if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
1839 return (EINVAL);
1840
1841 mtx_lock(&ti->hftid_lock);
1842 f = lookup_hftid(sc, t->idx);
1843 if (f == NULL || f->valid == 0) {
1844 rc = EINVAL;
1845 goto done;
1846 }
1847 MPASS(f->tid == t->idx);
1848 if (f->locked) {
1849 rc = EPERM;
1850 goto done;
1851 }
1852 if (f->pending) {
1853 rc = EBUSY;
1854 goto done;
1855 }
1856 wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
1857 if (wr == NULL) {
1858 rc = ENOMEM;
1859 goto done;
1860 }
1861
1862 mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
1863 f->locked = 1;
1864 f->pending = 1;
1865 commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1866 t->fs = f->fs; /* extra info for the caller */
1867
1868 for (;;) {
1869 MPASS(f->locked);
1870 if (f->pending == 0) {
1871 if (f->valid) {
1872 f->locked = 0;
1873 rc = EIO;
1874 } else {
1875 rc = 0;
1876 free(f, M_CXGBE);
1877 }
1878 break;
1879 }
1880 if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
1881 f->locked = 0;
1882 rc = EINPROGRESS;
1883 break;
1884 }
1885 }
1886done:
1887 mtx_unlock(&ti->hftid_lock);
1888 return (rc);
1889}
1890
1891#define WORD_MASK 0xffffffff
1892static void
1893set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
1894 const bool sip, const bool dp, const bool sp)
1895{
1896
1897 if (dip) {
1898 if (f->fs.type) {
1899 set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
1900 f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
1901 f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
1902
1903 set_tcb_field(sc, f->tid,
1904 W_TCB_SND_UNA_RAW + 1, WORD_MASK,
1905 f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
1906 f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
1907
1908 set_tcb_field(sc, f->tid,
1909 W_TCB_SND_UNA_RAW + 2, WORD_MASK,
1910 f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
1911 f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
1912
1913 set_tcb_field(sc, f->tid,
1914 W_TCB_SND_UNA_RAW + 3, WORD_MASK,
1915 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1916 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1917 } else {
1918 set_tcb_field(sc, f->tid,
1919 W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
1920 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
1921 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
1922 }
1923 }
1924
1925 if (sip) {
1926 if (f->fs.type) {
1927 set_tcb_field(sc, f->tid,
1928 W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
1929 f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
1930 f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
1931
1932 set_tcb_field(sc, f->tid,
1933 W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
1934 f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
1935 f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
1936
1937 set_tcb_field(sc, f->tid,
1938 W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
1939 f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
1940 f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
1941
1942 set_tcb_field(sc, f->tid,
1943 W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
1944 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1945 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1946
1947 } else {
1948 set_tcb_field(sc, f->tid,
1949 W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
1950 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
1951 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
1952 }
1953 }
1954
1955 set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
1956 (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
1957}
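/*
 * Illustrative mapping (derived from the switch in configure_hashfilter_tcb()
 * below) from fs.nat_mode to the (dip, sip, dp, sp) arguments above, e.g.:
 *
 *	NAT_MODE_DIP    -> set_nat_params(sc, f, true, false, false, false)
 *	NAT_MODE_DIP_DP -> set_nat_params(sc, f, true, false, true,  false)
 *	NAT_MODE_ALL    -> set_nat_params(sc, f, true, true,  true,  true)
 */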
1958
1959/*
1960 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
1961 * last of the series of updates requested a reply. The reply informs the
1962 * driver that the filter is fully setup.
1963 */
1964static int
1965configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
1966{
1967 int updated = 0;
1968
1969 MPASS(f->tid < sc->tids.ntids);
1970 MPASS(f->fs.hash);
1971 MPASS(f->pending);
1972 MPASS(f->valid == 0);
1973
1974 if (f->fs.newdmac) {
1975 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
1976 updated++;
1977 }
1978
1979 if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
1980 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
1981 updated++;
1982 }
1983
1984 if (f->fs.newsmac) {
1985 MPASS(f->smt != NULL);
1986 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1987 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1988 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
1989 1);
1990 updated++;
1991 }
1992
1993 switch(f->fs.nat_mode) {
1994 case NAT_MODE_NONE:
1995 break;
1996 case NAT_MODE_DIP:
1997 set_nat_params(sc, f, true, false, false, false);
1998 updated++;
1999 break;
2000 case NAT_MODE_DIP_DP:
2001 set_nat_params(sc, f, true, false, true, false);
2002 updated++;
2003 break;
2004 case NAT_MODE_DIP_DP_SIP:
2005 set_nat_params(sc, f, true, true, true, false);
2006 updated++;
2007 break;
2008 case NAT_MODE_DIP_DP_SP:
2009 set_nat_params(sc, f, true, false, true, true);
2010 updated++;
2011 break;
2012 case NAT_MODE_SIP_SP:
2013 set_nat_params(sc, f, false, true, false, true);
2014 updated++;
2015 break;
2016 case NAT_MODE_DIP_SIP_SP:
2017 set_nat_params(sc, f, true, true, false, true);
2018 updated++;
2019 break;
2020 case NAT_MODE_ALL:
2021 set_nat_params(sc, f, true, true, true, true);
2022 updated++;
2023 break;
2024 default:
2025 MPASS(0); /* should have been validated earlier */
2026 break;
2027
2028 }
2029
2030 if (f->fs.nat_seq_chk) {
2031 set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
2032 V_TCB_RCV_NXT(M_TCB_RCV_NXT),
2033 V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
2034 updated++;
2035 }
2036
2037 if (is_t5(sc) && f->fs.action == FILTER_DROP) {
2038 /*
2039 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
2040 */
2041 set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
2042 V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
2043 updated++;
2044 }
2045
2046 /*
2047 * Enable switching after all secondary resources (L2T entry, SMT entry,
2048 * etc.) are setup so that any switched packet will use correct
2049 * values.
2050 */
2051 if (f->fs.action == FILTER_SWITCH) {
2052 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
2053 updated++;
2054 }
2055
2056 if (f->fs.hitcnts || updated > 0) {
2057 set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
2058 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
2059 V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
2060 V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
2061 return (EINPROGRESS);
2062 }
2063
2064 return (0);
2065}
Definition: t4_ioctl.h:202
uint32_t iport
Definition: t4_ioctl.h:197
uint8_t sip[16]
Definition: t4_ioctl.h:181
uint16_t vnic
Definition: t4_ioctl.h:191
uint8_t tos
Definition: t4_ioctl.h:194
uint32_t vlan_vld
Definition: t4_ioctl.h:201
uint32_t fcoe
Definition: t4_ioctl.h:196
uint32_t macidx
Definition: t4_ioctl.h:200
uint8_t proto
Definition: t4_ioctl.h:195
uint16_t vlan
Definition: t4_ioctl.h:192
uint32_t matchtype
Definition: t4_ioctl.h:198
uint16_t ethtype
Definition: t4_ioctl.h:193
uint32_t pfvf_vld
Definition: t4_ioctl.h:203
uint16_t l2tidx
Definition: t4_ioctl.h:250
uint16_t smtidx
Definition: t4_ioctl.h:251
struct t4_filter_specification fs
Definition: t4_ioctl.h:253
uint64_t hits
Definition: t4_ioctl.h:252
uint32_t idx
Definition: t4_ioctl.h:249
u_int nftids
Definition: offload.h:131
void * hftid_hash_4t
Definition: offload.h:174
u_int nhpftids
Definition: offload.h:135
struct cv ftid_cv
Definition: offload.h:159
u_int hpftid_base
Definition: offload.h:136
struct filter_entry * ftid_tab
Definition: offload.h:160
struct filter_entry * hpftid_tab
Definition: offload.h:161
u_long hftid_4t_mask
Definition: offload.h:175
u_int tids_in_use
Definition: offload.h:172
u_int hpftids_in_use
Definition: offload.h:163
u_int ftid_base
Definition: offload.h:132
u_int ftids_in_use
Definition: offload.h:162
u_int tid_base
Definition: offload.h:140
struct cv hftid_cv
Definition: offload.h:170
void * hftid_hash_tid
Definition: offload.h:176
u_long hftid_tid_mask
Definition: offload.h:177
u_int ntids
Definition: offload.h:139
uint16_t filter_mode
Definition: common.h:259
int8_t protocol_shift
Definition: common.h:271
int8_t vnic_shift
Definition: common.h:268
int8_t matchtype_shift
Definition: common.h:274
int8_t fcoe_shift
Definition: common.h:266
int8_t macmatch_shift
Definition: common.h:273
uint16_t filter_mask
Definition: common.h:260
int vnic_mode
Definition: common.h:261
int8_t frag_shift
Definition: common.h:275
int8_t vlan_shift
Definition: common.h:269
int8_t port_shift
Definition: common.h:267
int8_t tos_shift
Definition: common.h:270
int8_t ethertype_shift
Definition: common.h:272
__be32 cmd_dest
Definition: t4_msg.h:2946
__be32 len
Definition: t4_msg.h:2947
__be32 len
Definition: t4_msg.h:2885
__be32 cmd_more
Definition: t4_msg.h:2884
static int del_hashfilter_wrlen(void)
Definition: t4_filter.c:1802
static bool is_4tuple_specified(struct t4_filter_specification *fs)
Definition: t4_filter.c:890
static int del_hashfilter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:1825
int set_filter_mask(struct adapter *sc, uint32_t mode)
Definition: t4_filter.c:552
static int set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask, uint64_t val, int no_reply)
Definition: t4_filter.c:1189
#define LEN__SET_TCB_FIELD_ULP
Definition: t4_filter.c:1704
static void insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
Definition: t4_filter.c:188
static uint16_t mode_to_fconf(uint32_t mode)
Definition: t4_filter.c:332
static struct filter_entry * lookup_hftid(struct adapter *sc, int tid)
Definition: t4_filter.c:285
int t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_filter.c:1380
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t, struct l2t_entry *, struct smt_entry *)
Definition: t4_filter.c:1621
int set_filter(struct adapter *sc, struct t4_filter *t)
Definition: t4_filter.c:909
int get_filter_mode(struct adapter *sc, uint32_t *mode)
Definition: t4_filter.c:494
static int set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te, struct smt_entry *smt)
Definition: t4_filter.c:635
static bool filter_eq(struct t4_filter_specification *fs1, struct t4_filter_specification *fs2)
Definition: t4_filter.c:216
static int mode_to_iconf(uint32_t mode)
Definition: t4_filter.c:374
int t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_filter.c:1345
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *)
Definition: t4_filter.c:1965
static void mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req *cpl)
Definition: t4_filter.c:1552
static void mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req6 *cpl)
Definition: t4_filter.c:1508
int t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_filter.c:1224
static int act_open_cpl_len16(struct adapter *sc, int isipv6)
Definition: t4_filter.c:1596
__FBSDID("$FreeBSD$")
int t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
Definition: t4_filter.c:1296
static int get_tcamfilter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:1412
static void free_filter_resources(struct filter_entry *)
Definition: t4_filter.c:1175
static int hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs, uint64_t *ftuple)
Definition: t4_filter.c:810
int set_filter_mode(struct adapter *sc, uint32_t mode)
Definition: t4_filter.c:510
static int check_fspec_against_fconf_iconf(struct adapter *sc, struct t4_filter_specification *fs)
Definition: t4_filter.c:388
static struct filter_entry * lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
Definition: t4_filter.c:264
static void * mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
Definition: t4_filter.c:1742
static bool separate_hpfilter_region(struct adapter *sc)
Definition: t4_filter.c:79
static int get_hashfilter(struct adapter *, struct t4_filter *)
Definition: t4_filter.c:1468
void free_hftid_hash(struct tid_info *t)
Definition: t4_filter.c:140
#define WORD_MASK
Definition: t4_filter.c:1891
static int set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val, u_int no_reply)
Definition: t4_filter.c:1215
static void insert_hftid(struct adapter *sc, struct filter_entry *f)
Definition: t4_filter.c:201
static void * mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask, uint64_t val, uint32_t tid, uint32_t qid)
Definition: t4_filter.c:1708
int get_filter(struct adapter *sc, struct t4_filter *t)
Definition: t4_filter.c:626
static void * mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
Definition: t4_filter.c:1774
static void mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
Definition: t4_filter.c:1812
static void set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip, const bool sip, const bool dp, const bool sp)
Definition: t4_filter.c:1893
#define LEN__ABORT_REQ_ULP
Definition: t4_filter.c:1738
static int alloc_hftid_hash(struct tid_info *t, int flags)
Definition: t4_filter.c:113
static uint32_t hf_hashfn_tid(int tid)
Definition: t4_filter.c:106
#define LEN__ABORT_RPL_ULP
Definition: t4_filter.c:1770
static uint64_t get_filter_hits(struct adapter *sc, uint32_t tid)
Definition: t4_filter.c:599
static uint32_t fconf_to_mode(uint16_t hwmode, int vnic_mode)
Definition: t4_filter.c:452
static uint32_t hf_hashfn_4t(struct t4_filter_specification *fs)
Definition: t4_filter.c:86
int del_filter(struct adapter *sc, struct t4_filter *t)
Definition: t4_filter.c:1146
static void remove_hftid(struct adapter *sc, struct filter_entry *f)
Definition: t4_filter.c:316
static void remove_hf(struct adapter *sc, struct filter_entry *f)
Definition: t4_filter.c:305
static int del_tcamfilter(struct adapter *sc, struct t4_filter *t)
Definition: t4_filter.c:1074
@ TCB_SIZE
Definition: t4_hw.h:47
#define T4_FILTER_IPv4
Definition: t4_ioctl.h:108
#define T4_FILTER_IP_TOS
Definition: t4_ioctl.h:118
#define T4_FILTER_PORT
Definition: t4_ioctl.h:115
@ VLAN_REMOVE
Definition: t4_ioctl.h:141
@ VLAN_INSERT
Definition: t4_ioctl.h:142
@ VLAN_REWRITE
Definition: t4_ioctl.h:143
#define T4_FILTER_IC_ENCAP
Definition: t4_ioctl.h:129
#define T4_FILTER_MAC_IDX
Definition: t4_ioctl.h:121
#define T4_FILTER_IP_FRAGMENT
Definition: t4_ioctl.h:123
#define T4_FILTER_IP_DPORT
Definition: t4_ioctl.h:113
#define T4_FILTER_IP_DADDR
Definition: t4_ioctl.h:111
#define T4_FILTER_ETH_TYPE
Definition: t4_ioctl.h:120
#define T4_FILTER_IP_SPORT
Definition: t4_ioctl.h:112
#define T4_FILTER_IP_PROTO
Definition: t4_ioctl.h:119
@ FILTER_SWITCH
Definition: t4_ioctl.h:135
@ FILTER_DROP
Definition: t4_ioctl.h:134
#define T4_FILTER_VLAN
Definition: t4_ioctl.h:117
#define T4_FILTER_IP_SADDR
Definition: t4_ioctl.h:110
@ NAT_MODE_DIP_DP_SP
Definition: t4_ioctl.h:171
@ NAT_MODE_NONE
Definition: t4_ioctl.h:167
@ NAT_MODE_DIP_DP
Definition: t4_ioctl.h:169
@ NAT_MODE_ALL
Definition: t4_ioctl.h:174
@ NAT_MODE_SIP_SP
Definition: t4_ioctl.h:172
@ NAT_MODE_DIP
Definition: t4_ioctl.h:168
@ NAT_MODE_DIP_SIP_SP
Definition: t4_ioctl.h:173
@ NAT_MODE_DIP_DP_SIP
Definition: t4_ioctl.h:170
#define T4_FILTER_MPS_HIT_TYPE
Definition: t4_ioctl.h:122
#define T4_FILTER_VNIC
Definition: t4_ioctl.h:116
#define T4_FILTER_FCoE
Definition: t4_ioctl.h:114
#define T4_FILTER_IC_VNIC
Definition: t4_ioctl.h:128
#define T4_FILTER_IPv6
Definition: t4_ioctl.h:109
struct l2t_entry * t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
Definition: t4_l2t.c:288
static void t4_l2t_release(struct l2t_entry *e)
Definition: t4_l2t.h:105
#define V_QUEUENO(x)
Definition: t4_msg.h:933
@ CPL_ABORT_RPL
Definition: t4_msg.h:45
@ CPL_SET_TCB_FIELD
Definition: t4_msg.h:40
@ CPL_ACT_OPEN_REQ
Definition: t4_msg.h:38
@ CPL_ABORT_REQ
Definition: t4_msg.h:44
@ CPL_ACT_OPEN_REQ6
Definition: t4_msg.h:118
#define V_TID_QID(x)
Definition: t4_msg.h:346
#define V_TX_QUEUE(x)
Definition: t4_msg.h:613
#define F_RX_CHANNEL
Definition: t4_msg.h:618
#define F_NON_OFFLOAD
Definition: t4_msg.h:458
@ CPL_ERR_NONE
Definition: t4_msg.h:160
@ ULP_MODE_TCPDDP
Definition: t4_msg.h:235
@ ULP_MODE_NONE
Definition: t4_msg.h:232
#define V_TX_CHAN(x)
Definition: t4_msg.h:441
#define F_TCAM_BYPASS
Definition: t4_msg.h:487
#define V_CONG_CNTRL(x)
Definition: t4_msg.h:579
#define G_COOKIE(x)
Definition: t4_msg.h:985
#define V_WORD(x)
Definition: t4_msg.h:979
#define V_WND_SCALE_EN(x)
Definition: t4_msg.h:625
#define F_RSS_QUEUE_VALID
Definition: t4_msg.h:566
#define F_T5_OPT_2_VALID
Definition: t4_msg.h:638
#define V_TID_TID(x)
Definition: t4_msg.h:336
#define V_ULP_TXPKT_DEST(x)
Definition: t4_msg.h:2968
#define V_COOKIE(x)
Definition: t4_msg.h:984
#define OPCODE_TID(cmd)
Definition: t4_msg.h:327
@ ULP_TX_PKT
Definition: t4_msg.h:2831
#define V_NAGLE(x)
Definition: t4_msg.h:490
#define V_FILTER_TUPLE(x)
Definition: t4_msg.h:813
#define V_ULPTX_CMD(x)
Definition: t4_msg.h:2845
#define G_AOPEN_STATUS(x)
Definition: t4_msg.h:904
static int act_open_has_tid(int status)
Definition: t4_msg.h:191
#define V_ULP_MODE(x)
Definition: t4_msg.h:462
#define V_TID_COOKIE(x)
Definition: t4_msg.h:341
#define V_SACK_EN(x)
Definition: t4_msg.h:633
#define G_TID_TID(x)
Definition: t4_msg.h:337
@ CPL_ABORT_NO_RST
Definition: t4_msg.h:253
#define V_RX_FC_DISABLE(x)
Definition: t4_msg.h:600
#define V_PACE(x)
Definition: t4_msg.h:584
#define V_NO_CONG(x)
Definition: t4_msg.h:445
#define V_RSS_QUEUE(x)
Definition: t4_msg.h:561
#define MK_OPCODE_TID(opcode, tid)
Definition: t4_msg.h:325
static int act_open_rpl_status_to_errno(int status)
Definition: t4_msg.h:204
#define V_DELACK(x)
Definition: t4_msg.h:449
#define V_L2T_IDX(x)
Definition: t4_msg.h:482
@ ULP_TX_SC_IMM
Definition: t4_msg.h:2836
@ ULP_TX_SC_NOOP
Definition: t4_msg.h:2835
#define G_AOPEN_ATID(x)
Definition: t4_msg.h:909
#define GET_TID(cmd)
Definition: t4_msg.h:330
#define V_NO_REPLY(x)
Definition: t4_msg.h:941
#define F_PROTOCOL
Definition: t4_regs.h:26807
#define F_TOS
Definition: t4_regs.h:26811
#define F_ETHERTYPE
Definition: t4_regs.h:26803
#define F_MACMATCH
Definition: t4_regs.h:26799
#define F_VNIC_ID
Definition: t4_regs.h:26819
#define F_PORT
Definition: t4_regs.h:26823
#define A_TP_CMM_TCB_BASE
Definition: t4_regs.h:21742
#define F_FCOE
Definition: t4_regs.h:26827
#define F_FRAGMENTATION
Definition: t4_regs.h:26791
#define F_VLAN
Definition: t4_regs.h:26815
#define F_MPSHITTYPE
Definition: t4_regs.h:26795
#define F_FT_VLAN_VLD
int t4_smt_set_switching(struct adapter *sc, struct smt_entry *e, uint16_t pfvf, uint8_t *smac)
Definition: t4_smt.c:205
struct smt_entry * t4_smt_alloc_switching(struct smt_data *s, uint8_t *smac)
Definition: t4_smt.c:189
static void t4_smt_release(struct smt_entry *e)
Definition: t4_smt.h:76
#define M_TCB_SMAC_SEL
Definition: t4_tcb.h:58
#define W_TCB_RCV_NXT
Definition: t4_tcb.h:218
#define V_TCB_RCV_NXT(x)
Definition: t4_tcb.h:221
#define W_TCB_SMAC_SEL
Definition: t4_tcb.h:56
#define M_TCB_RCV_NXT
Definition: t4_tcb.h:220
#define M_TCB_T_RTT_TS_RECENT_AGE
Definition: t4_tcb.h:142
#define W_TCB_RX_FRAG2_PTR_RAW
Definition: t4_tcb.h:302
#define V_TCB_TIMESTAMP(x)
Definition: t4_tcb.h:137
#define S_TF_CCTRL_RFR
Definition: t4_tcb.h:753
#define V_TCB_RSS_INFO(x)
Definition: t4_tcb.h:71
#define W_TCB_RSS_INFO
Definition: t4_tcb.h:68
#define S_TF_CCTRL_ECN
Definition: t4_tcb.h:735
#define W_TCB_T_FLAGS
Definition: t4_tcb.h:62
#define W_TCB_RX_FRAG3_LEN_RAW
Definition: t4_tcb.h:320
#define W_TCB_SND_UNA_RAW
Definition: t4_tcb.h:170
#define W_TCB_TIMESTAMP
Definition: t4_tcb.h:134
#define S_TF_CCTRL_CWR
Definition: t4_tcb.h:747
#define S_TF_CCTRL_ECE
Definition: t4_tcb.h:741
#define M_TCB_RSS_INFO
Definition: t4_tcb.h:70
#define W_TCB_PDU_HDR_LEN
Definition: t4_tcb.h:332
#define V_TF_MIGRATING(x)
Definition: t4_tcb.h:549
#define V_TCB_T_RTT_TS_RECENT_AGE(x)
Definition: t4_tcb.h:143
#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW
Definition: t4_tcb.h:326
#define M_TCB_TIMESTAMP
Definition: t4_tcb.h:136
#define V_TCB_SMAC_SEL(x)
Definition: t4_tcb.h:59
#define V_TF_NON_OFFLOAD(x)
Definition: t4_tcb.h:552
#define V_FW_FILTER_WR_IQ(x)
@ FW_FILTER_WR_SMT_TBL_FULL
@ FW_FILTER_WR_SUCCESS
@ FW_FILTER_WR_EINVAL
@ FW_FILTER_WR_FLT_DELETED
@ FW_FILTER_WR_FLT_ADDED
#define V_FW_FILTER_WR_FRAG(x)
#define V_FW_FILTER_WR_OVLAN_VLD(x)
#define V_FW_FILTER_WR_FCOE(x)
#define V_FW_FILTER_WR_IVLAN_VLD(x)
#define V_FW_FILTER_WR_RMVLAN(x)
#define V_FW_FILTER_WR_NOREPLY(x)
@ FW_VNIC_MODE_ENCAP_EN
@ FW_VNIC_MODE_PF_VF
@ FW_VNIC_MODE_OUTER_VLAN
#define V_FW_FILTER_WR_DMAC(x)
#define V_FW_FILTER_WR_HITCNTS(x)
#define V_FW_WR_OP(x)
#define V_FW_FILTER_WR_MASKHASH(x)
#define V_FW_FILTER_WR_OVLAN_VLDM(x)
#define V_FW_FILTER_WR_FCOEM(x)
#define V_FW_FILTER_WR_RQTYPE(x)
#define V_FW_FILTER_WR_TID(x)
#define V_FW_FILTER2_WR_NATMODE(x)
#define V_FW_FILTER_WR_IVLAN_VLDM(x)
#define V_FW_FILTER2_WR_SWAPMAC(x)
#define V_FW_FILTER2_WR_ULP_TYPE(x)
#define V_FW_FILTER_WR_MACI(x)
#define V_FW_FILTER_WR_RX_CHAN(x)
#define V_FW_FILTER_WR_RX_RPL_IQ(x)
#define V_FW_FILTER_WR_TXCHAN(x)
#define V_FW_FILTER_WR_DIRSTEER(x)
#define V_FW_FILTER_WR_MACIM(x)
#define V_FW_FILTER_WR_DIRSTEERHASH(x)
#define V_FW_FILTER_WR_PRIO(x)
#define V_FW_FILTER_WR_MATCHTYPE(x)
#define V_FW_FILTER_WR_SMAC(x)
#define V_FW_FILTER_WR_RPTTID(x)
#define V_FW_FILTER_WR_FRAGM(x)
@ FW_FILTER2_WR
@ FW_FILTER_WR
#define V_FW_FILTER_WR_MATCHTYPEM(x)
#define V_FW_FILTER_WR_L2TIX(x)
#define V_FW_FILTER_WR_PORTM(x)
#define V_FW_FILTER_WR_INSVLAN(x)
#define V_FW_FILTER_WR_PORT(x)
#define V_FW_FILTER_WR_DROP(x)
#define V_FW_FILTER_WR_LPBK(x)
#define V_FW_FILTER2_WR_NATFLAGCHECK(x)
#define FW_LEN16(fw_struct)