FreeBSD kernel CXGBE device code
t4_kern_tls.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018-2019 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/ktls.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_clip.h"
#include "t4_mp_ring.h"
#include "crypto/t4_crypto.h"

#if defined(INET) || defined(INET6)

#define TLS_HEADER_LENGTH	5
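
/*
 * The 5 bytes are the TLS record header: a 1-byte content type, a
 * 2-byte protocol version, and a 2-byte length covering the payload
 * that follows.
 */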

struct tls_scmd {
	__be32 seqno_numivs;
	__be32 ivgen_hdrlen;
};

struct tlspcb {
	struct m_snd_tag com;
	struct vi_info *vi;	/* virtual interface */
	struct adapter *sc;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	int tid;		/* Connection identifier */

	int tx_key_addr;
	bool inline_key;
	bool using_timestamps;
	unsigned char enc_mode;

	struct tls_scmd scmd0;
	struct tls_scmd scmd0_short;

	unsigned int tx_key_info_size;

	uint32_t prev_seq;
	uint32_t prev_ack;
	uint32_t prev_tsecr;
	uint16_t prev_win;
	uint16_t prev_mss;

	/* Only used outside of setup and teardown when using inline keys. */
	struct tls_keyctx keyctx;

	/* Fields only used during setup and teardown. */
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	struct sge_txq *txq;
	struct sge_wrq *ctrlq;
	struct clip_entry *ce;	/* CLIP table entry used by this tid */

	bool open_pending;
};

static void cxgbe_tls_tag_free(struct m_snd_tag *mst);
static int ktls_setup_keys(struct tlspcb *tlsp,
    const struct ktls_session *tls, struct sge_txq *txq);

static const struct if_snd_tag_sw cxgbe_tls_tag_sw = {
	.snd_tag_free = cxgbe_tls_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

static inline struct tlspcb *
mst_to_tls(struct m_snd_tag *t)
{
	return (__containerof(t, struct tlspcb, com));
}

static struct tlspcb *
alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tlspcb *tlsp;

	tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
	if (tlsp == NULL)
		return (NULL);

	m_snd_tag_init(&tlsp->com, ifp, &cxgbe_tls_tag_sw);
	tlsp->vi = vi;
	tlsp->sc = sc;
	tlsp->ctrlq = &sc->sge.ctrlq[pi->port_id];
	tlsp->tid = -1;
	tlsp->tx_key_addr = -1;

	return (tlsp);
}

static int
ktls_act_open_cpl_size(bool isipv6)
{

	if (isipv6)
		return (sizeof(struct cpl_t6_act_open_req6));
	else
		return (sizeof(struct cpl_t6_act_open_req));
}

static void
mk_ktls_act_open_req(struct adapter *sc, struct vi_info *vi, struct inpcb *inp,
    struct tlspcb *tlsp, int atid, void *dst)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct cpl_t6_act_open_req *cpl6;
	struct cpl_act_open_req *cpl;
	uint64_t options;
	int qid_atid;

	cpl6 = dst;
	cpl = (struct cpl_act_open_req *)cpl6;
	INIT_TP_WR(cpl6, 0);
	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    qid_atid));
	inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
	    &cpl->peer_ip, &cpl->peer_port);

	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
	options |= F_NON_OFFLOAD;
	cpl->opt0 = htobe64(options);

	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
	if (tp->t_flags & TF_REQ_TSTMP)
		options |= F_TSTAMPS_EN;
	cpl->opt2 = htobe32(options);
}

static void
mk_ktls_act_open_req6(struct adapter *sc, struct vi_info *vi,
    struct inpcb *inp, struct tlspcb *tlsp, int atid, void *dst)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct cpl_t6_act_open_req6 *cpl6;
	struct cpl_act_open_req6 *cpl;
	uint64_t options;
	int qid_atid;

	cpl6 = dst;
	cpl = (struct cpl_act_open_req6 *)cpl6;
	INIT_TP_WR(cpl6, 0);
	qid_atid = V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_KERN_TLS);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    qid_atid));
	cpl->local_port = inp->inp_lport;
	cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
	cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
	cpl->peer_port = inp->inp_fport;
	cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
	cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];

	options = F_TCAM_BYPASS | V_ULP_MODE(ULP_MODE_NONE);
	options |= V_SMAC_SEL(vi->smt_idx) | V_TX_CHAN(vi->pi->tx_chan);
	options |= F_NON_OFFLOAD;
	cpl->opt0 = htobe64(options);

	options = V_TX_QUEUE(sc->params.tp.tx_modq[vi->pi->tx_chan]);
	if (tp->t_flags & TF_REQ_TSTMP)
		options |= F_TSTAMPS_EN;
	cpl->opt2 = htobe32(options);
}

static int
send_ktls_act_open_req(struct adapter *sc, struct vi_info *vi,
    struct inpcb *inp, struct tlspcb *tlsp, int atid)
{
	struct wrqe *wr;
	bool isipv6;

	isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
	if (isipv6) {
		tlsp->ce = t4_get_clip_entry(sc, &inp->in6p_laddr, true);
		if (tlsp->ce == NULL)
			return (ENOENT);
	}

	wr = alloc_wrqe(ktls_act_open_cpl_size(isipv6), tlsp->ctrlq);
	if (wr == NULL) {
		CTR2(KTR_CXGBE, "%s: atid %d failed to alloc WR", __func__,
		    atid);
		return (ENOMEM);
	}

	if (isipv6)
		mk_ktls_act_open_req6(sc, vi, inp, tlsp, atid, wrtod(wr));
	else
		mk_ktls_act_open_req(sc, vi, inp, tlsp, atid, wrtod(wr));

	tlsp->open_pending = true;
	t4_wrq_tx(sc, wr);
	return (0);
}

static int
ktls_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct tlspcb *tlsp = lookup_atid(sc, atid);
	struct inpcb *inp = tlsp->inp;

	CTR3(KTR_CXGBE, "%s: atid %d status %d", __func__, atid, status);
	free_atid(sc, atid);
	if (status == 0)
		tlsp->tid = GET_TID(cpl);

	INP_WLOCK(inp);
	tlsp->open_pending = false;
	wakeup(tlsp);
	INP_WUNLOCK(inp);
	return (0);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
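
/*
 * Worked size check (assuming the usual T4/T6 message layouts:
 * ulp_txpkt and ulptx_idata are 8 bytes each, cpl_set_tcb_field_core
 * is 24): LEN__SET_TCB_FIELD_ULP is 8 + 8 + 24 = 40 bytes, and the
 * trailing 8-byte ULPTX_NOOP idata pads each command to 48 bytes,
 * which is why the assertion below expects a multiple of 16.
 */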

_Static_assert((LEN__SET_TCB_FIELD_ULP + sizeof(struct ulptx_idata)) % 16 == 0,
    "CPL_SET_TCB_FIELD ULP command not 16-byte aligned");

static void
write_set_tcb_field_ulp(struct tlspcb *tlsp, void *dst, struct sge_txq *txq,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct ulp_txpkt *txpkt;
	struct ulptx_idata *idata;
	struct cpl_set_tcb_field_core *cpl;

	/* ULP_TXPKT */
	txpkt = dst;
	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
	txpkt->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	/* ULPTX_IDATA sub-command */
	idata = (struct ulptx_idata *)(txpkt + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htobe32(sizeof(*cpl));

	/* CPL_SET_TCB_FIELD */
	cpl = (struct cpl_set_tcb_field_core *)(idata + 1);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tlsp->tid));
	cpl->reply_ctrl = htobe16(F_NO_REPLY);
	cpl->word_cookie = htobe16(V_WORD(word));
	cpl->mask = htobe64(mask);
	cpl->val = htobe64(val);

	/* ULPTX_NOOP */
	idata = (struct ulptx_idata *)(cpl + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	idata->len = htobe32(0);
}

static int
ktls_set_tcb_fields(struct tlspcb *tlsp, struct tcpcb *tp, struct sge_txq *txq)
{
	struct fw_ulptx_wr *wr;
	struct mbuf *m;
	char *dst;
	void *items[1];
	int error, len;

	len = sizeof(*wr) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	if (tp->t_flags & TF_REQ_TSTMP)
		len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	m = alloc_wr_mbuf(len, M_NOWAIT);
	if (m == NULL) {
		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
		    tlsp->tid);
		return (ENOMEM);
	}
	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;

	/* FW_ULPTX_WR */
	wr = mtod(m, void *);
	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
	    V_FW_WR_LEN16(len / 16));
	wr->cookie = 0;
	dst = (char *)(wr + 1);

	/* Clear TF_NON_OFFLOAD and set TF_CORE_BYPASS */
	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_T_FLAGS,
	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1) | V_TF_NON_OFFLOAD(1)),
	    V_TCB_T_FLAGS(V_TF_CORE_BYPASS(1)));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	/* Clear the SND_UNA_RAW, SND_NXT_RAW, and SND_MAX_RAW offsets. */
	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_UNA_RAW,
	    V_TCB_SND_NXT_RAW(M_TCB_SND_NXT_RAW) |
	    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
	    V_TCB_SND_NXT_RAW(0) | V_TCB_SND_UNA_RAW(0));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_SND_MAX_RAW,
	    V_TCB_SND_MAX_RAW(M_TCB_SND_MAX_RAW), V_TCB_SND_MAX_RAW(0));
	dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	if (tp->t_flags & TF_REQ_TSTMP) {
		write_set_tcb_field_ulp(tlsp, dst, txq, W_TCB_TIMESTAMP_OFFSET,
		    V_TCB_TIMESTAMP_OFFSET(M_TCB_TIMESTAMP_OFFSET),
		    V_TCB_TIMESTAMP_OFFSET(tp->ts_offset >> 28));
		dst += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	}

	KASSERT(dst - (char *)wr == len, ("%s: length mismatch", __func__));

	items[0] = m;
	error = mp_ring_enqueue(txq->r, items, 1, 1);
	if (error)
		m_free(m);
	return (error);
}

int
cxgbe_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	const struct ktls_session *tls;
	struct tlspcb *tlsp;
	struct adapter *sc;
	struct vi_info *vi;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct sge_txq *txq;
	int atid, error, explicit_iv_size, keyid, mac_first;

	tls = params->tls.tls;

	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
	    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
		return (EPROTONOSUPPORT);

	/* Sanity check values in *tls. */
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		/* XXX: Explicitly ignore any provided IV. */
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			break;
		default:
			return (EPROTONOSUPPORT);
		}
		explicit_iv_size = AES_BLOCK_LEN;
		mac_first = 1;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (tls->params.iv_len != SALT_SIZE)
			return (EINVAL);
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 192 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}
		explicit_iv_size = 8;
		mac_first = 0;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	vi = ifp->if_softc;
	sc = vi->adapter;

	tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);

	atid = alloc_atid(sc, tlsp);
	if (atid < 0) {
		error = ENOMEM;
		goto failed;
	}

	if (sc->tlst.inline_keys)
		keyid = -1;
	else
		keyid = t4_alloc_tls_keyid(sc);
	if (keyid < 0) {
		CTR2(KTR_CXGBE, "%s: atid %d using immediate key ctx", __func__,
		    atid);
		tlsp->inline_key = true;
	} else {
		tlsp->tx_key_addr = keyid;
		CTR3(KTR_CXGBE, "%s: atid %d allocated TX key addr %#x",
		    __func__, atid, tlsp->tx_key_addr);
	}

	inp = params->tls.inp;
	INP_RLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		error = ECONNRESET;
		goto failed;
	}
	tlsp->inp = inp;

	tp = inp->inp_ppcb;
	if (tp->t_flags & TF_REQ_TSTMP) {
		tlsp->using_timestamps = true;
		if ((tp->ts_offset & 0xfffffff) != 0) {
			INP_RUNLOCK(inp);
			error = EINVAL;
			goto failed;
		}
	} else
		tlsp->using_timestamps = false;

	error = send_ktls_act_open_req(sc, vi, inp, tlsp, atid);
	if (error) {
		INP_RUNLOCK(inp);
		goto failed;
	}

	/* Wait for reply to active open. */
	CTR2(KTR_CXGBE, "%s: atid %d sent CPL_ACT_OPEN_REQ", __func__,
	    atid);
	while (tlsp->open_pending) {
		/*
		 * XXX: PCATCH?  We would then have to discard the PCB
		 * when the completion CPL arrived.
		 */
		error = rw_sleep(tlsp, &inp->inp_lock, 0, "t6tlsop", 0);
	}

	atid = -1;
	if (tlsp->tid < 0) {
		INP_RUNLOCK(inp);
		error = ENOMEM;
		goto failed;
	}

	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		error = ECONNRESET;
		goto failed;
	}

	txq = &sc->sge.txq[vi->first_txq];
	if (inp->inp_flowtype != M_HASHTYPE_NONE)
		txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
	tlsp->txq = txq;

	error = ktls_set_tcb_fields(tlsp, tp, txq);
	INP_RUNLOCK(inp);
	if (error)
		goto failed;

	error = ktls_setup_keys(tlsp, tls, txq);
	if (error)
		goto failed;

	tlsp->enc_mode = t4_tls_cipher_mode(tls);
	tlsp->tx_key_info_size = t4_tls_key_info_size(tls);

	/* The SCMD fields used when encrypting a full TLS record. */
	tlsp->scmd0.seqno_numivs = htobe32(V_SCMD_SEQ_NO_CTRL(3) |
	    V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_CIPH_MODE(tlsp->enc_mode) |
	    V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
	    V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
	    V_SCMD_IV_SIZE(explicit_iv_size / 2) | V_SCMD_NUM_IVS(1));

	tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0);
	if (tlsp->inline_key)
		tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
	tlsp->scmd0.ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen);

	/*
	 * The SCMD fields used when encrypting a partial TLS record
	 * (no trailer and possibly a truncated payload).
	 */
	tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
	else
		tlsp->scmd0_short.seqno_numivs |=
		    V_SCMD_CIPH_MODE(tlsp->enc_mode);
	tlsp->scmd0_short.seqno_numivs =
	    htobe32(tlsp->scmd0_short.seqno_numivs);

	tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_TLS_FRAG_ENABLE(0) |
	    V_SCMD_AADIVDROP(1);
	if (tlsp->inline_key)
		tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);

	TXQ_LOCK(txq);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
		txq->kern_tls_gcm++;
	else
		txq->kern_tls_cbc++;
	TXQ_UNLOCK(txq);
	*pt = &tlsp->com;
	return (0);

failed:
	if (atid >= 0)
		free_atid(sc, atid);
	m_snd_tag_rele(&tlsp->com);
	return (error);
}

static int
ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
    struct sge_txq *txq)
{
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;
	void *items[1];
	struct mbuf *m;
	int error;

	/*
	 * Store the salt and keys in the key context.  For
	 * connections with an inline key, this key context is passed
	 * as immediate data in each work request.  For connections
	 * storing the key in DDR, a work request is used to store a
	 * copy of the key context in DDR.
	 */
	t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
	if (tlsp->inline_key)
		return (0);

	/* Populate key work request. */
	m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
	if (m == NULL) {
		CTR2(KTR_CXGBE, "%s: tid %d failed to alloc WR mbuf", __func__,
		    tlsp->tid);
		return (ENOMEM);
	}
	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
	kwr = mtod(m, void *);
	memset(kwr, 0, TLS_KEY_WR_SZ);

	t4_write_tlskey_wr(tls, KTLS_TX, tlsp->tid, 0, tlsp->tx_key_addr, kwr);
	kctx = (struct tls_keyctx *)(kwr + 1);
	memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));

	/*
	 * Place the key work request in the transmit queue.  It
	 * should be sent to the NIC before any TLS packets using this
	 * session.
	 */
	items[0] = m;
	error = mp_ring_enqueue(txq->r, items, 1, 1);
	if (error)
		m_free(m);
	else
		CTR2(KTR_CXGBE, "%s: tid %d sent key WR", __func__, tlsp->tid);
	return (error);
}

static u_int
ktls_base_wr_size(struct tlspcb *tlsp)
{
	u_int wr_len;

	wr_len = sizeof(struct fw_ulptx_wr);	// 16
	wr_len += sizeof(struct ulp_txpkt);	// 8
	wr_len += sizeof(struct ulptx_idata);	// 8
	wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
	if (tlsp->inline_key)
		wr_len += tlsp->tx_key_info_size;
	else {
		wr_len += sizeof(struct ulptx_sc_memrd);// 8
		wr_len += sizeof(struct ulptx_idata);	// 8
	}
	wr_len += sizeof(struct cpl_tx_data);	// 16
	return (wr_len);
}
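
/*
 * Example (hypothetical key size): with a 64-byte inline key context
 * ktls_base_wr_size() is 16 + 8 + 8 + 32 + 64 + 16 = 144 bytes; with
 * the key read from memory it is 16 + 8 + 8 + 32 + 8 + 8 + 16 = 96
 * bytes, before the caller adds immediate data and the SGL.
 */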

/* How many bytes of TCP payload to send for a given TLS record. */
static u_int
ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
{
	struct tls_record_layer *hdr;
	u_int plen, mlen;

	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	plen = ntohs(hdr->tls_length);

	/*
	 * What range of the TLS record is the mbuf requesting to be
	 * sent.
	 */
	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;

	/* Always send complete records. */
	if (mlen == TLS_HEADER_LENGTH + plen)
		return (mlen);

	/*
	 * If the host stack has asked to send part of the trailer,
	 * trim the length to avoid sending any of the trailer.  There
	 * is no way to send a partial trailer currently.
	 */
	if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen)
		mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen;

	/*
	 * For AES-CBC adjust the ciphertext length for the block
	 * size.
	 */
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
	    mlen > TLS_HEADER_LENGTH) {
		mlen = TLS_HEADER_LENGTH + rounddown(mlen - TLS_HEADER_LENGTH,
		    AES_BLOCK_LEN);
	}

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %d short TLS record (%u vs %u)",
	    __func__, tlsp->tid, mlen, TLS_HEADER_LENGTH + plen);
#endif
	return (mlen);
}
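
/*
 * Example: for a record with hdr->tls_length of 1040 and a 16-byte
 * trailer, a request ending 6 bytes into the trailer (mlen 1035) is
 * trimmed to 5 + 1040 - 16 = 1029; for AES-CBC the ciphertext part,
 * 1029 - 5 = 1024, is already a multiple of AES_BLOCK_LEN, so
 * ktls_tcp_payload_length() returns 1029.
 */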

/*
 * For a "short" TLS record, determine the offset into the TLS record
 * payload to send.  This offset does not include the TLS header, but
 * a non-zero offset implies that a header will not be sent.
 */
static u_int
ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
{
	struct tls_record_layer *hdr;
	u_int offset, plen;
#ifdef INVARIANTS
	u_int mlen;
#endif

	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	plen = ntohs(hdr->tls_length);
#ifdef INVARIANTS
	mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
	MPASS(mlen < TLS_HEADER_LENGTH + plen);
#endif
	if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
		return (0);
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
		/*
		 * Always send something.  This function is only called
		 * if we aren't sending the tag at all, but if the
		 * request starts in the tag then we are in an odd
		 * state where we would effectively send nothing.  Cap
		 * the offset at the last byte of the record payload
		 * to send the last cipher block.
		 */
		offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
		    (plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1);
		return (rounddown(offset, AES_BLOCK_LEN));
	}
	return (0);
}
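
/*
 * Example: for a GCM record whose request starts at record byte 205
 * (13-byte header, well below the trailer cap), the raw offset is
 * 205 - 13 = 192, already a multiple of AES_BLOCK_LEN, so
 * ktls_payload_offset() returns 192.
 */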

static u_int
ktls_sgl_size(u_int nsegs)
{
	u_int wr_len;

	/* First segment is part of ulptx_sgl. */
	nsegs--;

	wr_len = sizeof(struct ulptx_sgl);
	wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (wr_len);
}
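
/*
 * Example: ktls_sgl_size(4) is the 16-byte ulptx_sgl (which embeds
 * the first segment) plus 8 * ((3 * 3) / 2 + (3 & 1)) = 40 bytes for
 * the remaining three segments, 56 bytes in total.
 */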

static int
ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
    int *nsegsp)
{
	struct tls_record_layer *hdr;
	u_int imm_len, offset, plen, wr_len, tlen;

	M_ASSERTEXTPG(m_tls);

	/*
	 * Determine the size of the TLS record payload to send
	 * excluding header and trailer.
	 */
	tlen = ktls_tcp_payload_length(tlsp, m_tls);
	if (tlen <= m_tls->m_epg_hdrlen) {
		/*
		 * For requests that only want to send the TLS header,
		 * send a tunnelled packet as immediate data.
		 */
		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) +
		    roundup2(m->m_len + m_tls->m_len, 16);
		if (wr_len > SGE_MAX_WR_LEN) {
			CTR3(KTR_CXGBE,
			    "%s: tid %d TLS header-only packet too long (len %d)",
			    __func__, tlsp->tid, m->m_len + m_tls->m_len);
		}

		/* This should always be the last TLS record in a chain. */
		MPASS(m_tls->m_next == NULL);

		/*
		 * XXX: Set a bogus 'nsegs' value to avoid tripping an
		 * assertion in mbuf_nsegs() in t4_sge.c.
		 */
		*nsegsp = 1;
		return (wr_len);
	}

	hdr = (void *)m_tls->m_epg_hdr;
	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
	if (tlen < plen) {
		plen = tlen;
		offset = ktls_payload_offset(tlsp, m_tls);
	} else
		offset = 0;

	/* Calculate the size of the work request. */
	wr_len = ktls_base_wr_size(tlsp);

	/*
	 * Full records and short records with an offset of 0 include
	 * the TLS header as immediate data.  Short records include a
	 * raw AES IV as immediate data.
	 */
	imm_len = 0;
	if (offset == 0)
		imm_len += m_tls->m_epg_hdrlen;
	if (plen == tlen)
		imm_len += AES_BLOCK_LEN;
	wr_len += roundup2(imm_len, 16);

	/* TLS record payload via DSGL. */
	*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
	    plen - (m_tls->m_epg_hdrlen + offset));
	wr_len += ktls_sgl_size(*nsegsp);

	wr_len = roundup2(wr_len, 16);
	return (wr_len);
}

/*
 * See if we have any TCP options requiring a dedicated options-only
 * packet.
 */
static int
ktls_has_tcp_options(struct tcphdr *tcp)
{
	u_char *cp;
	int cnt, opt, optlen;

	cp = (u_char *)(tcp + 1);
	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_NOP:
		case TCPOPT_TIMESTAMP:
			break;
		default:
			return (1);
		}
	}
	return (0);
}

/*
 * Find the TCP timestamp option.
 */
static void *
ktls_find_tcp_timestamps(struct tcphdr *tcp)
{
	u_char *cp;
	int cnt, opt, optlen;

	cp = (u_char *)(tcp + 1);
	cnt = tcp->th_off * 4 - sizeof(struct tcphdr);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		if (opt == TCPOPT_TIMESTAMP && optlen == TCPOLEN_TIMESTAMP)
			return (cp + 2);
	}
	return (NULL);
}

int
t6_ktls_parse_pkt(struct mbuf *m, int *nsegsp, int *len16p)
{
	struct tlspcb *tlsp;
	struct ether_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	struct mbuf *m_tls;
	int nsegs;
	u_int wr_len, tot_len;

	/*
	 * Locate headers in initial mbuf.
	 *
	 * XXX: This assumes all of the headers are in the initial mbuf.
	 * Could perhaps use m_advance() like parse_pkt() if that turns
	 * out to not be true.
	 */
	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.snd_tag != NULL);
	tlsp = mst_to_tls(m->m_pkthdr.snd_tag);

	if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short", __func__,
		    tlsp->tid);
		return (EINVAL);
	}
	eh = mtod(m, struct ether_header *);
	if (ntohs(eh->ether_type) != ETHERTYPE_IP &&
	    ntohs(eh->ether_type) != ETHERTYPE_IPV6) {
		CTR2(KTR_CXGBE, "%s: tid %d mbuf not ETHERTYPE_IP{,V6}",
		    __func__, tlsp->tid);
		return (EINVAL);
	}
	m->m_pkthdr.l2hlen = sizeof(*eh);

	/* XXX: Reject unsupported IP options? */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (struct ip *)(eh + 1);
		if (ip->ip_p != IPPROTO_TCP) {
			CTR2(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP",
			    __func__, tlsp->tid);
			return (EINVAL);
		}
		m->m_pkthdr.l3hlen = ip->ip_hl * 4;
	} else {
		ip6 = (struct ip6_hdr *)(eh + 1);
		if (ip6->ip6_nxt != IPPROTO_TCP) {
			CTR3(KTR_CXGBE, "%s: tid %d mbuf not IPPROTO_TCP (%u)",
			    __func__, tlsp->tid, ip6->ip6_nxt);
			return (EINVAL);
		}
		m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
	}
	if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
	    sizeof(*tcp)) {
		CTR2(KTR_CXGBE, "%s: tid %d header mbuf too short (2)",
		    __func__, tlsp->tid);
		return (EINVAL);
	}
	tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
	m->m_pkthdr.l4hlen = tcp->th_off * 4;

	/* Bail if there is TCP payload before the TLS record. */
	if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
	    m->m_pkthdr.l4hlen) {
		CTR6(KTR_CXGBE,
		    "%s: tid %d header mbuf bad length (%d + %d + %d != %d)",
		    __func__, tlsp->tid, m->m_pkthdr.l2hlen,
		    m->m_pkthdr.l3hlen, m->m_pkthdr.l4hlen, m->m_len);
		return (EINVAL);
	}

	/* Assume all headers are in 'm' for now. */
	MPASS(m->m_next != NULL);
	MPASS(m->m_next->m_flags & M_EXTPG);

	tot_len = 0;

	/*
	 * Each of the remaining mbufs in the chain should reference a
	 * TLS record.
	 */
	*nsegsp = 0;
	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
		MPASS(m_tls->m_flags & M_EXTPG);

		wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d wr_len %d nsegs %d", __func__,
		    tlsp->tid, wr_len, nsegs);
#endif
		if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
			return (EFBIG);
		tot_len += roundup2(wr_len, EQ_ESIZE);

		/*
		 * Store 'nsegs' for the first TLS record in the
		 * header mbuf's metadata.
		 */
		if (*nsegsp == 0)
			*nsegsp = nsegs;
	}

	MPASS(tot_len != 0);

	/*
	 * See if we have any TCP options or a FIN requiring a
	 * dedicated packet.
	 */
	if ((tcp->th_flags & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
		wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) + roundup2(m->m_len, 16);
		if (wr_len > SGE_MAX_WR_LEN) {
			CTR3(KTR_CXGBE,
			    "%s: tid %d options-only packet too long (len %d)",
			    __func__, tlsp->tid, m->m_len);
			return (EINVAL);
		}
		tot_len += roundup2(wr_len, EQ_ESIZE);
	}

	/* Include room for a TP work request to program an L2T entry. */
	tot_len += EQ_ESIZE;

	/*
	 * Include room for a ULPTX work request including up to 5
	 * CPL_SET_TCB_FIELD commands before the first TLS work
	 * request.
	 */
	wr_len = sizeof(struct fw_ulptx_wr) +
	    5 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	/*
	 * If timestamps are present, reserve 1 more command for
	 * setting the echoed timestamp.
	 */
	if (tlsp->using_timestamps)
		wr_len += roundup2(LEN__SET_TCB_FIELD_ULP, 16);

	tot_len += roundup2(wr_len, EQ_ESIZE);

	*len16p = tot_len / 16;
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %d len16 %d nsegs %d", __func__,
	    tlsp->tid, *len16p, *nsegsp);
#endif
	return (0);
}

/*
 * If the SGL ends on an address that is not 16 byte aligned, this
 * function will add a 0 filled flit at the end.
 */
static void
write_gl_to_buf(struct sglist *gl, caddr_t to)
{
	struct sglist_seg *seg;
	__be64 *flitp;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)to & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));

	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)to;
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(seg->ss_len);
	usgl->addr0 = htobe64(seg->ss_paddr);
	seg++;

	for (i = 0; i < nsegs - 1; i++, seg++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	flitp += nflits;

	if (nflits & 1) {
		MPASS(((uintptr_t)flitp) & 0xf);
		*flitp++ = 0;
	}

	MPASS((((uintptr_t)flitp) & 0xf) == 0);
}
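
/*
 * Example: a 3-segment list needs (3 * 2) / 2 + (2 & 1) + 2 = 5 flits
 * (40 bytes); since that count is odd, write_gl_to_buf() appends a
 * zero flit so the next write stays 16-byte aligned.
 */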

static inline void
copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
{

	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	if (__predict_true((uintptr_t)(*to) + len <=
	    (uintptr_t)&eq->desc[eq->sidx])) {
		bcopy(from, *to, len);
		(*to) += len;
		if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
			(*to) = (caddr_t)eq->desc;
	} else {
		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);

		bcopy(from, *to, portion);
		from += portion;
		portion = len - portion;	/* remaining */
		bcopy(from, (void *)eq->desc, portion);
		(*to) = (caddr_t)eq->desc + portion;
	}
}
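
/*
 * Example: asking copy_to_txd() for 24 bytes with only 16 left before
 * &eq->desc[eq->sidx] copies 16 bytes to the end of the ring, wraps,
 * and copies the remaining 8 starting at eq->desc.
 */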

static int
ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m,
    u_int available, u_int pidx)
{
	struct tx_sdesc *txsd;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int len16, ndesc, pktlen;
	struct ether_header *eh;
	struct ip *ip, newip;
	struct ip6_hdr *ip6, newip6;
	struct tcphdr *tcp, newtcp;
	caddr_t out;

	M_ASSERTPKTHDR(m);

	wr = dst;
	pktlen = m->m_len;
	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	cpl = (void *)(wr + 1);

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);

	out = (void *)(cpl + 1);

	/* Copy over Ethernet header. */
	eh = mtod(m, struct ether_header *);
	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);

	/* Fixup length in IP header and copy out. */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Clear PUSH and FIN in the TCP header if present. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_flags &= ~(TH_PUSH | TH_FIN);
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of packet. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, pktlen -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_options++;

	txsd = &txq->sdesc[pidx];
	txsd->m = NULL;
	txsd->desc_used = ndesc;

	return (ndesc);
}

static int
ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
    struct mbuf *m_tls, u_int available, tcp_seq tcp_seqno, u_int pidx)
{
	struct tx_sdesc *txsd;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int len16, ndesc, pktlen;
	struct ether_header *eh;
	struct ip *ip, newip;
	struct ip6_hdr *ip6, newip6;
	struct tcphdr *tcp, newtcp;
	caddr_t out;

	M_ASSERTPKTHDR(m);

	/* Locate the template TLS header. */
	M_ASSERTEXTPG(m_tls);

	/* This should always be the last TLS record in a chain. */
	MPASS(m_tls->m_next == NULL);

	wr = dst;
	pktlen = m->m_len + m_tls->m_len;
	ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
	len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	cpl = (void *)(wr + 1);

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);

	out = (void *)(cpl + 1);

	/* Copy over Ethernet header. */
	eh = mtod(m, struct ether_header *);
	copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);

	/* Fixup length in IP header and copy out. */
	if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
		ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip = *ip;
		newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
		if (m->m_pkthdr.l3hlen > sizeof(*ip))
			copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
			    m->m_pkthdr.l3hlen - sizeof(*ip));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	} else {
		ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
		newip6 = *ip6;
		newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
		copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
		MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
		ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
		    V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
		    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	}
	cpl->ctrl1 = htobe64(ctrl1);
	txq->txcsum++;

	/* Set sequence number in TCP header. */
	tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
	newtcp = *tcp;
	newtcp.th_seq = htonl(tcp_seqno + mtod(m_tls, vm_offset_t));
	copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));

	/* Copy rest of TCP header. */
	copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
	    (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));

	/* Copy the subset of the TLS header requested. */
	copy_to_txd(&txq->eq, (char *)m_tls->m_epg_hdr +
	    mtod(m_tls, vm_offset_t), &out, m_tls->m_len);
	txq->imm_wrs++;

	txq->txpkt_wrs++;

	txq->kern_tls_header++;

	txsd = &txq->sdesc[pidx];
	txsd->m = m;
	txsd->desc_used = ndesc;

	return (ndesc);
}

_Static_assert(sizeof(struct cpl_set_tcb_field) <= EQ_ESIZE,
    "CPL_SET_TCB_FIELD must be smaller than a single TX descriptor");
_Static_assert(W_TCB_SND_UNA_RAW == W_TCB_SND_NXT_RAW,
    "SND_NXT_RAW and SND_UNA_RAW are in different words");

static int
ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
    void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
    u_int nsegs, u_int available, tcp_seq tcp_seqno, uint32_t *tsopt,
    u_int pidx, bool set_l2t_idx)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct fw_ulptx_wr *wr;
	struct ulp_txpkt *txpkt;
	struct ulptx_sc_memrd *memrd;
	struct ulptx_idata *idata;
	struct cpl_tx_sec_pdu *sec_pdu;
	struct cpl_tx_data *tx_data;
	struct tls_record_layer *hdr;
	char *iv, *out;
	u_int aad_start, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop, iv_offset;
	u_int imm_len, mss, ndesc, offset, plen, tlen, twr_len, wr_len;
	u_int fields, tx_max_offset, tx_max;
	bool first_wr, last_wr, using_scratch;

	ndesc = 0;
	MPASS(tlsp->txq == txq);

	first_wr = (tlsp->prev_seq == 0 && tlsp->prev_ack == 0 &&
	    tlsp->prev_win == 0);

	/*
	 * Use the per-txq scratch pad if near the end of the ring to
	 * simplify handling of wrap-around.  This uses a simple but
	 * not quite perfect test of using the scratch buffer if we
	 * can't fit a maximal work request in without wrapping.
	 */
	using_scratch = (eq->sidx - pidx < SGE_MAX_WR_LEN / EQ_ESIZE);

	/* Locate the TLS header. */
	M_ASSERTEXTPG(m_tls);
	hdr = (void *)m_tls->m_epg_hdr;
	plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;

	/* Determine how much of the TLS record to send. */
	tlen = ktls_tcp_payload_length(tlsp, m_tls);
	if (tlen <= m_tls->m_epg_hdrlen) {
		/*
		 * For requests that only want to send the TLS header,
		 * send a tunnelled packet as immediate data.
		 */
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
#endif
		return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
		    tcp_seqno, pidx));
	}
	if (tlen < plen) {
		plen = tlen;
		offset = ktls_payload_offset(tlsp, m_tls);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
		    __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
#endif
		if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
			txq->kern_tls_fin_short++;
#ifdef INVARIANTS
			panic("%s: FIN on short TLS record", __func__);
#endif
		}
	} else
		offset = 0;

	/*
	 * This is the last work request for a given TLS mbuf chain if
	 * it is the last mbuf in the chain and FIN is not set.  If
	 * FIN is set, then ktls_write_tcp_fin() will write out the
	 * last work request.
	 */
	last_wr = m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) == 0;

	/*
	 * The host stack may ask us to not send part of the start of
	 * a TLS record.  (For example, the stack might have
	 * previously sent a "short" TLS record and might later send
	 * down an mbuf that requests to send the remainder of the TLS
	 * record.)  The crypto engine must process a TLS record from
	 * the beginning if computing a GCM tag or HMAC, so we always
	 * send the TLS record from the beginning as input to the
	 * crypto engine and via CPL_TX_DATA to TP.  However, TP will
	 * drop individual packets after they have been chopped up
	 * into MSS-sized chunks if the entire sequence range of those
	 * packets is less than SND_UNA.  SND_UNA is computed as
	 * TX_MAX - SND_UNA_RAW.  Thus, use the offset stored in
	 * m_data to set TX_MAX to the first byte in the TCP sequence
	 * space the host actually wants us to send and set
	 * SND_UNA_RAW to 0.
	 *
	 * If the host sends us back to back requests that span the
	 * trailer of a single TLS record (first request ends "in" the
	 * trailer and second request starts at the next byte but
	 * still "in" the trailer), the initial bytes of the trailer
	 * that the first request drops will not be retransmitted.  If
	 * the host uses the same requests when retransmitting the
	 * connection will hang.  To handle this, always transmit the
	 * full trailer for a request that begins "in" the trailer
	 * (the second request in the example above).  This should
	 * also help to avoid retransmits for the common case.
	 *
	 * A similar condition exists when using CBC for back to back
	 * requests that span a single AES block.  The first request
	 * will be truncated to end at the end of the previous AES
	 * block.  To handle this, always begin transmission at the
	 * start of the current AES block.
	 */
	tx_max_offset = mtod(m_tls, vm_offset_t);
	if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
	    m_tls->m_epg_trllen) {
		/* Always send the full trailer. */
		tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
		    m_tls->m_epg_trllen;
	}
	if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
	    tx_max_offset > TLS_HEADER_LENGTH) {
		/* Always send all of the first AES block. */
		tx_max_offset = TLS_HEADER_LENGTH +
		    rounddown(tx_max_offset - TLS_HEADER_LENGTH,
		    AES_BLOCK_LEN);
	}
	tx_max = tcp_seqno + tx_max_offset;
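
	/*
	 * Example: a CBC retransmit beginning at record byte 700 rounds
	 * tx_max_offset down to TLS_HEADER_LENGTH + rounddown(695, 16) =
	 * 693, so TX_MAX covers the whole AES block being resent.
	 */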

	/*
	 * Update TCB fields.  Reserve space for the FW_ULPTX_WR header
	 * but don't populate it until we know how many field updates
	 * are required.
	 */
	if (using_scratch)
		wr = (void *)txq->ss;
	else
		wr = dst;
	out = (void *)(wr + 1);
	fields = 0;
	if (set_l2t_idx) {
		KASSERT(nsegs != 0,
		    ("trying to set L2T_IX for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %d set L2T_IX to %d", __func__,
		    tlsp->tid, tlsp->l2te->idx);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_L2T_IX,
		    V_TCB_L2T_IX(M_TCB_L2T_IX), V_TCB_L2T_IX(tlsp->l2te->idx));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}
	if (tsopt != NULL && tlsp->prev_tsecr != ntohl(tsopt[1])) {
		KASSERT(nsegs != 0,
		    ("trying to set T_RTSEQ_RECENT for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d wrote updated T_RTSEQ_RECENT",
		    __func__, tlsp->tid);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_T_RTSEQ_RECENT,
		    V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT),
		    V_TCB_T_RTSEQ_RECENT(ntohl(tsopt[1])));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_tsecr = ntohl(tsopt[1]);
	}

	if (first_wr || tlsp->prev_seq != tx_max) {
		KASSERT(nsegs != 0,
		    ("trying to set TX_MAX for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE,
		    "%s: tid %d setting TX_MAX to %u (tcp_seqno %u)",
		    __func__, tlsp->tid, tx_max, tcp_seqno);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_TX_MAX,
		    V_TCB_TX_MAX(M_TCB_TX_MAX), V_TCB_TX_MAX(tx_max));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}

	/*
	 * If there is data to drop at the beginning of this TLS
	 * record or if this is a retransmit, reset SND_UNA_RAW to 0
	 * so that SND_UNA == TX_MAX.
	 */
	if (tlsp->prev_seq != tx_max || mtod(m_tls, vm_offset_t) != 0) {
		KASSERT(nsegs != 0,
		    ("trying to clear SND_UNA_RAW for subsequent TLS WR"));
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d clearing SND_UNA_RAW", __func__,
		    tlsp->tid);
#endif
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_SND_UNA_RAW,
		    V_TCB_SND_UNA_RAW(M_TCB_SND_UNA_RAW),
		    V_TCB_SND_UNA_RAW(0));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;
	}

	/*
	 * Store the expected sequence number of the next byte after
	 * this record.
	 */
	tlsp->prev_seq = tcp_seqno + tlen;

	if (first_wr || tlsp->prev_ack != ntohl(tcp->th_ack)) {
		KASSERT(nsegs != 0,
		    ("trying to set RCV_NXT for subsequent TLS WR"));
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(ntohl(tcp->th_ack)));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_ack = ntohl(tcp->th_ack);
	}

	if (first_wr || tlsp->prev_win != ntohs(tcp->th_win)) {
		KASSERT(nsegs != 0,
		    ("trying to set RCV_WND for subsequent TLS WR"));
		write_set_tcb_field_ulp(tlsp, out, txq, W_TCB_RCV_WND,
		    V_TCB_RCV_WND(M_TCB_RCV_WND),
		    V_TCB_RCV_WND(ntohs(tcp->th_win)));
		out += roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		fields++;

		tlsp->prev_win = ntohs(tcp->th_win);
	}

	/* Recalculate 'nsegs' if cached value is not available. */
	if (nsegs == 0)
		nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
		    offset, plen - (m_tls->m_epg_hdrlen + offset));

	/* Calculate the size of the TLS work request. */
	twr_len = ktls_base_wr_size(tlsp);

	imm_len = 0;
	if (offset == 0)
		imm_len += m_tls->m_epg_hdrlen;
	if (plen == tlen)
		imm_len += AES_BLOCK_LEN;
	twr_len += roundup2(imm_len, 16);
	twr_len += ktls_sgl_size(nsegs);

	/*
	 * If any field updates were required, determine if they can
	 * be included in the TLS work request.  If not, use the
	 * FW_ULPTX_WR work request header at 'wr' as a dedicated work
	 * request for the field updates and start a new work request
	 * for the TLS work request afterward.
	 */
	if (fields != 0) {
		wr_len = fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
		if (twr_len + wr_len <= SGE_MAX_WR_LEN &&
		    tlsp->sc->tlst.combo_wrs) {
			wr_len += twr_len;
			txpkt = (void *)out;
		} else {
			wr_len += sizeof(*wr);
			wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
			wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
			    V_FW_WR_LEN16(wr_len / 16));
			wr->cookie = 0;

			/*
			 * If we were using scratch space, copy the
			 * field updates work request to the ring.
			 */
			if (using_scratch) {
				out = dst;
				copy_to_txd(eq, txq->ss, &out, wr_len);
			}

			ndesc = howmany(wr_len, EQ_ESIZE);
			MPASS(ndesc <= available);

			txq->raw_wrs++;
			txsd = &txq->sdesc[pidx];
			txsd->m = NULL;
			txsd->desc_used = ndesc;
			IDXINCR(pidx, ndesc, eq->sidx);
			dst = &eq->desc[pidx];

			/*
			 * Determine if we should use scratch space
			 * for the TLS work request based on the
			 * available space after advancing pidx for
			 * the field updates work request.
			 */
			wr_len = twr_len;
			using_scratch = (eq->sidx - pidx <
			    howmany(wr_len, EQ_ESIZE));
			if (using_scratch)
				wr = (void *)txq->ss;
			else
				wr = dst;
			txpkt = (void *)(wr + 1);
		}
	} else {
		wr_len = twr_len;
		txpkt = (void *)out;
	}

	wr_len = roundup2(wr_len, 16);
	MPASS(ndesc + howmany(wr_len, EQ_ESIZE) <= available);

	/* FW_ULPTX_WR */
	wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
	wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
	    V_FW_WR_LEN16(wr_len / 16));
	wr->cookie = 0;

	/* ULP_TXPKT */
	txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
	txpkt->len = htobe32(howmany(twr_len - sizeof(*wr), 16));

	/* ULPTX_IDATA sub-command */
	idata = (void *)(txpkt + 1);
	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(1));
	idata->len = sizeof(struct cpl_tx_sec_pdu);

	/*
	 * The key context, CPL_TX_DATA, and immediate data are part
	 * of this ULPTX_IDATA when using an inline key.  When reading
	 * the key from memory, the CPL_TX_DATA and immediate data are
	 * part of a separate ULPTX_IDATA.
	 */
	if (tlsp->inline_key)
		idata->len += tlsp->tx_key_info_size +
		    sizeof(struct cpl_tx_data) + imm_len;
	idata->len = htobe32(idata->len);

	/* CPL_TX_SEC_PDU */
	sec_pdu = (void *)(idata + 1);

	/*
	 * For short records, AAD is counted as header data in SCMD0,
	 * the IV is next followed by a cipher region for the payload.
	 */
	if (plen == tlen) {
		aad_start = 0;
		aad_stop = 0;
		iv_offset = 1;
		auth_start = 0;
		auth_stop = 0;
		auth_insert = 0;
		cipher_start = AES_BLOCK_LEN + 1;
		cipher_stop = 0;

		sec_pdu->pldlen = htobe32(16 + plen -
		    (m_tls->m_epg_hdrlen + offset));

		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
		sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
		sec_pdu->ivgen_hdrlen = htobe32(
		    tlsp->scmd0_short.ivgen_hdrlen |
		    V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0));

		txq->kern_tls_short++;
	} else {
		/*
		 * AAD is TLS header.  IV is after AAD.  The cipher region
		 * starts after the IV.  See comments in ccr_authenc() and
		 * ccr_gmac() in t4_crypto.c regarding cipher and auth
		 * start/stop values.
		 */
		aad_start = 1;
		aad_stop = TLS_HEADER_LENGTH;
		iv_offset = TLS_HEADER_LENGTH + 1;
		cipher_start = m_tls->m_epg_hdrlen + 1;
		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
			cipher_stop = 0;
			auth_start = cipher_start;
			auth_stop = 0;
			auth_insert = 0;
		} else {
			cipher_stop = 0;
			auth_start = cipher_start;
			auth_stop = 0;
			auth_insert = 0;
		}

		sec_pdu->pldlen = htobe32(plen);

		/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
		sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
		sec_pdu->ivgen_hdrlen = tlsp->scmd0.ivgen_hdrlen;

		if (mtod(m_tls, vm_offset_t) == 0)
			txq->kern_tls_full++;
		else
			txq->kern_tls_partial++;
	}
	sec_pdu->op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
	sec_pdu->aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	sec_pdu->cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);

	/* Key context */
	out = (void *)(sec_pdu + 1);
	if (tlsp->inline_key) {
		memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
		out += tlsp->tx_key_info_size;
	} else {
		/* ULPTX_SC_MEMRD to read key context. */
		memrd = (void *)out;
		memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
		memrd->addr = htobe32(tlsp->tx_key_addr >> 5);

		/* ULPTX_IDATA for CPL_TX_DATA and TLS header. */
		idata = (void *)(memrd + 1);
		idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
		    V_ULP_TX_SC_MORE(1));
		idata->len = htobe32(sizeof(struct cpl_tx_data) + imm_len);

		out = (void *)(idata + 1);
	}

	/* CPL_TX_DATA */
	tx_data = (void *)out;
	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tlsp->tid));
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = m->m_pkthdr.tso_segsz;
		tlsp->prev_mss = mss;
	} else if (tlsp->prev_mss != 0)
		mss = tlsp->prev_mss;
	else
		mss = tlsp->vi->ifp->if_mtu -
		    (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
	if (offset == 0) {
		tx_data->len = htobe32(V_TX_DATA_MSS(mss) | V_TX_LENGTH(tlen));
		tx_data->rsvd = htobe32(tcp_seqno);
	} else {
		tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
		    V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
		tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
	}
	tx_data->flags = htobe32(F_TX_BYPASS);
	if (last_wr && tcp->th_flags & TH_PUSH)
		tx_data->flags |= htobe32(F_TX_PUSH | F_TX_SHOVE);

	/* Populate the TLS header */
	out = (void *)(tx_data + 1);
	if (offset == 0) {
		memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
		out += m_tls->m_epg_hdrlen;
	}

	/* AES IV for a short record. */
	if (plen == tlen) {
		iv = out;
		if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
			memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
			memcpy(iv + 4, hdr + 1, 8);
			*(uint32_t *)(iv + 12) = htobe32(2 +
			    offset / AES_BLOCK_LEN);
		} else
			memcpy(iv, hdr + 1, AES_BLOCK_LEN);
		out += AES_BLOCK_LEN;
	}

	if (imm_len % 16 != 0) {
		/* Zero pad to an 8-byte boundary. */
		memset(out, 0, 8 - (imm_len % 8));
		out += 8 - (imm_len % 8);

		/*
		 * Insert a ULP_TX_SC_NOOP if needed so the SGL is
		 * 16-byte aligned.
		 */
		if (imm_len % 16 <= 8) {
			idata = (void *)out;
			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
			idata->len = htobe32(0);
			out = (void *)(idata + 1);
		}
	}
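
	/*
	 * Example: imm_len = 21 (a CBC header) is padded by 3 bytes to
	 * 24; since 21 % 16 = 5 is <= 8, the 8-byte NOOP then brings
	 * the running total to 32, restoring the 16-byte SGL alignment.
	 */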

	/* SGL for record payload */
	sglist_reset(txq->gl);
	if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
	    plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
#ifdef INVARIANTS
		panic("%s: failed to append sglist", __func__);
#endif
	}
	write_gl_to_buf(txq->gl, out);

	if (using_scratch) {
		out = dst;
		copy_to_txd(eq, txq->ss, &out, wr_len);
	}

	ndesc += howmany(wr_len, EQ_ESIZE);
	MPASS(ndesc <= available);

	txq->kern_tls_records++;
	txq->kern_tls_octets += tlen - mtod(m_tls, vm_offset_t);
	if (mtod(m_tls, vm_offset_t) != 0) {
		if (offset == 0)
			txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
		else
			txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
			    (m_tls->m_epg_hdrlen + offset);
	}

	txsd = &txq->sdesc[pidx];
	if (last_wr)
		txsd->m = m;
	else
		txsd->m = NULL;
	txsd->desc_used = howmany(wr_len, EQ_ESIZE);

	return (ndesc);
}
1855
1856static int
1857ktls_write_tcp_fin(struct sge_txq *txq, void *dst, struct mbuf *m,
1858 u_int available, tcp_seq tcp_seqno, u_int pidx)
1859{
1860 struct tx_sdesc *txsd;
1861 struct fw_eth_tx_pkt_wr *wr;
1862 struct cpl_tx_pkt_core *cpl;
1863 uint32_t ctrl;
1864 uint64_t ctrl1;
1865 int len16, ndesc, pktlen;
1866 struct ether_header *eh;
1867 struct ip *ip, newip;
1868 struct ip6_hdr *ip6, newip6;
1869 struct tcphdr *tcp, newtcp;
1870 caddr_t out;
1871
1873 M_ASSERTPKTHDR(m);
1874
1875 wr = dst;
1876 pktlen = m->m_len;
1877 ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
1878 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
1879 ndesc = tx_len16_to_desc(len16);
1880 MPASS(ndesc <= available);
1881
1882 /* Firmware work request header */
1883 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
1885
1886 ctrl = V_FW_WR_LEN16(len16);
1887 wr->equiq_to_len16 = htobe32(ctrl);
1888 wr->r3 = 0;
1889
1890 cpl = (void *)(wr + 1);
1891
1892 /* CPL header */
1893 cpl->ctrl0 = txq->cpl_ctrl0;
1894 cpl->pack = 0;
1895 cpl->len = htobe16(pktlen);
1896
1897 out = (void *)(cpl + 1);
1898
1899 /* Copy over Ethernet header. */
1900 eh = mtod(m, struct ether_header *);
1901 copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1902
1903 /* Fixup length in IP header and copy out. */
1904 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1905 ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1906 newip = *ip;
1907 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1908 copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
1909 if (m->m_pkthdr.l3hlen > sizeof(*ip))
1910 copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
1911 m->m_pkthdr.l3hlen - sizeof(*ip));
1912 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
1913 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1914 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1915 } else {
1916 ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
1917 newip6 = *ip6;
1918 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
1919 copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
1920 MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
1921 ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
1922 V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
1923 V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
1924 }
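/* ctrl1 requests hardware insertion of the IP and TCP checksums. */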
1925 cpl->ctrl1 = htobe64(ctrl1);
1926 txq->txcsum++;
1927
1928 /* Set sequence number in TCP header. */
1929 tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1930 newtcp = *tcp;
1931 newtcp.th_seq = htonl(tcp_seqno);
1932 copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
1933
1934 /* Copy rest of packet. */
1935 copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
1936 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
1937 txq->imm_wrs++;
1938
1939 txq->txpkt_wrs++;
1940
1941 txq->kern_tls_fin++;
1942
1943 txsd = &txq->sdesc[pidx];
1944 txsd->m = m;
1945 txsd->desc_used = ndesc;
1946
1947 return (ndesc);
1948}
1949
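The FIN packet above is sized like any immediate-data work request in
this driver: round the WR up to 16-byte units, then up to whole
hardware descriptors.  A standalone sketch of that arithmetic (the
sizes are illustrative stand-ins; HOWMANY mirrors the kernel's
howmany() macro):

#define HOWMANY(x, y) (((x) + (y) - 1) / (y))

static int
fin_wr_ndesc(int pktlen)
{
	/* 16-byte fw_eth_tx_pkt_wr + 16-byte cpl_tx_pkt_core + packet. */
	int len16 = HOWMANY(16 + 16 + pktlen, 16);

	/* 64-byte EQ descriptors hold four 16-byte units each. */
	return (HOWMANY(len16, 4));
}
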
1950int
1951t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m, u_int nsegs,
1952 u_int available)
1953{
1954 struct sge_eq *eq = &txq->eq;
1955 struct tx_sdesc *txsd;
1956 struct tlspcb *tlsp;
1957 struct tcphdr *tcp;
1958 struct mbuf *m_tls;
1959 struct ether_header *eh;
1960 tcp_seq tcp_seqno;
1961 u_int ndesc, pidx, totdesc;
1962 uint16_t vlan_tag;
1963 bool has_fin, set_l2t_idx;
1964 void *tsopt;
1965
1966 M_ASSERTPKTHDR(m);
1967 MPASS(m->m_pkthdr.snd_tag != NULL);
1968 tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
1969
1970 totdesc = 0;
1971 eh = mtod(m, struct ether_header *);
1972 tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
1973 m->m_pkthdr.l3hlen);
1974 pidx = eq->pidx;
1975 has_fin = (tcp->th_flags & TH_FIN) != 0;
1976
1977 /*
1978 * If this TLS record has a FIN, then we will send any
1979 * requested options as part of the FIN packet.
1980 */
1981 if (!has_fin && ktls_has_tcp_options(tcp)) {
1982 ndesc = ktls_write_tcp_options(txq, dst, m, available, pidx);
1983 totdesc += ndesc;
1984 IDXINCR(pidx, ndesc, eq->sidx);
1985 dst = &eq->desc[pidx];
1986#ifdef VERBOSE_TRACES
1987 CTR2(KTR_CXGBE, "%s: tid %d wrote TCP options packet", __func__,
1988 tlsp->tid);
1989#endif
1990 }
1991
1992 /*
1993 * Allocate a new L2T entry if necessary. This may write out
1994 * a work request to the txq.
1995 */
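/* 0xfff matches the L2T code's VLAN_NONE sentinel for untagged frames. */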
1996 if (m->m_flags & M_VLANTAG)
1997 vlan_tag = m->m_pkthdr.ether_vtag;
1998 else
1999 vlan_tag = 0xfff;
2000 set_l2t_idx = false;
2001 if (tlsp->l2te == NULL || tlsp->l2te->vlan != vlan_tag ||
2002 memcmp(tlsp->l2te->dmac, eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
2003 set_l2t_idx = true;
2004 if (tlsp->l2te)
2005 t4_l2t_release(tlsp->l2te);
2006 tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
2007 vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
2008 if (tlsp->l2te == NULL)
2009 CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
2010 if (ndesc != 0) {
2011 MPASS(ndesc <= available - totdesc);
2012
2013 txq->raw_wrs++;
2014 txsd = &txq->sdesc[pidx];
2015 txsd->m = NULL;
2016 txsd->desc_used = ndesc;
2017 totdesc += ndesc;
2018 IDXINCR(pidx, ndesc, eq->sidx);
2019 dst = &eq->desc[pidx];
2020 }
2021 }
2022
2023 /*
2024 * Iterate over each TLS record constructing a work request
2025 * for that record.
2026 */
2027 for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
2028 MPASS(m_tls->m_flags & M_EXTPG);
2029
2030 /*
2031 * Determine the initial TCP sequence number for this
2032 * record.
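 * For the first record, the TCP header's sequence number points
 * at the first payload byte being sent, which may lie partway
 * into the record, so back up by the intra-record offset to
 * recover the record's starting sequence number.  Subsequent
 * records continue from prev_seq.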
2033 */
2034 tsopt = NULL;
2035 if (m_tls == m->m_next) {
2036 tcp_seqno = ntohl(tcp->th_seq) -
2037 mtod(m_tls, vm_offset_t);
2038 if (tlsp->using_timestamps)
2039 tsopt = ktls_find_tcp_timestamps(tcp);
2040 } else {
2041 MPASS(mtod(m_tls, vm_offset_t) == 0);
2042 tcp_seqno = tlsp->prev_seq;
2043 }
2044
2045 ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
2046 nsegs, available - totdesc, tcp_seqno, tsopt, pidx,
2047 set_l2t_idx);
2048 totdesc += ndesc;
2049 IDXINCR(pidx, ndesc, eq->sidx);
2050 dst = &eq->desc[pidx];
2051
2052 /*
2053 * The value of nsegs from the header mbuf's metadata
2054 * is only valid for the first TLS record.
2055 */
2056 nsegs = 0;
2057
2058 /* Only need to set the L2T index once. */
2059 set_l2t_idx = false;
2060 }
2061
2062 if (has_fin) {
2063 /*
2064 * If the TCP header for this chain has FIN set, then
2065 * explicitly send a packet that has FIN set. This
2066 * will also have PUSH set if requested. This assumes
2067 * we sent at least one TLS record work request and
2068 * uses the TCP sequence number after that request as
2069 * the sequence number for the FIN packet.
2070 */
2071 ndesc = ktls_write_tcp_fin(txq, dst, m, available,
2072 tlsp->prev_seq, pidx);
2073 totdesc += ndesc;
2074 }
2075
2076 MPASS(totdesc <= available);
2077 return (totdesc);
2078}
2079
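For orientation, the chain consumed by t6_ktls_write_wr() is the one
the kernel TLS transmit path hands down: a header mbuf carrying the
Ethernet/IP/TCP headers and the send tag, followed by one unmapped
(M_EXTPG) mbuf per TLS record.  A minimal, hypothetical sketch of that
traversal with all driver work elided:

#include <sys/param.h>
#include <sys/mbuf.h>

/* Count the M_EXTPG TLS record mbufs trailing the header mbuf. */
static int
count_tls_records(struct mbuf *m)
{
	struct mbuf *m_tls;
	int n;

	M_ASSERTPKTHDR(m);
	n = 0;
	for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
		MPASS(m_tls->m_flags & M_EXTPG);
		n++;
	}
	return (n);
}
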
2080static void
2081cxgbe_tls_tag_free(struct m_snd_tag *mst)
2082{
2083 struct adapter *sc;
2084 struct tlspcb *tlsp;
2085
2086 tlsp = mst_to_tls(mst);
2087 sc = tlsp->sc;
2088
2089 CTR2(KTR_CXGBE, "%s: tid %d", __func__, tlsp->tid);
2090
2091 if (tlsp->l2te)
2092 t4_l2t_release(tlsp->l2te);
2093 if (tlsp->tid >= 0)
2094 release_tid(sc, tlsp->tid, tlsp->ctrlq);
2095 if (tlsp->ce)
2096 t4_release_clip_entry(sc, tlsp->ce);
2097 if (tlsp->tx_key_addr >= 0)
2098 t4_free_tls_keyid(sc, tlsp->tx_key_addr);
2099
2100 zfree(tlsp, M_CXGBE);
2101}
2102
2103void
2104t6_ktls_modload(void)
2105{
2106
2109}
2110
2111void
2113{
2114
2117}
2118
2119#else
2120
2121int
2122cxgbe_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
2123 struct m_snd_tag **pt)
2124{
2125 return (ENXIO);
2126}
2127
2128int
2129t6_ktls_parse_pkt(struct mbuf *m, int *nsegsp, int *len16p)
2130{
2131 return (EINVAL);
2132}
2133
2134int
2135t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m, u_int nsegs,
2136 u_int available)
2137{
2138 panic("can't happen");
2139}
2140
2141void
2143{
2144}
2145
2146void
2148{
2149}
2150
2151#endif