32#include "opt_kern_tls.h"
40#include <sys/sglist.h>
41#include <sys/socket.h>
42#include <sys/socketvar.h>
43#include <sys/sockbuf.h>
44#include <netinet/in.h>
45#include <netinet/in_pcb.h>
46#include <netinet/ip.h>
47#include <netinet/ip6.h>
48#include <netinet/tcp_var.h>
49#include <opencrypto/cryptodev.h>
50#include <opencrypto/xform.h>
61#if defined(INET) || defined(INET6)
/*
 * Length in bytes of the TLS record-layer header that precedes each
 * record's payload: 1-byte content type + 2-byte version + 2-byte length.
 */
63#define TLS_HEADER_LENGTH 5
79 bool using_timestamps;
80 unsigned char enc_mode;
85 unsigned int tx_key_info_size;
100 struct clip_entry *ce;
105static void cxgbe_tls_tag_free(
struct m_snd_tag *mst);
106static int ktls_setup_keys(
struct tlspcb *tlsp,
107 const struct ktls_session *tls,
struct sge_txq *txq);
109static const struct if_snd_tag_sw cxgbe_tls_tag_sw = {
110 .snd_tag_free = cxgbe_tls_tag_free,
111 .type = IF_SND_TAG_TYPE_TLS
114static inline struct tlspcb *
115mst_to_tls(
struct m_snd_tag *t)
117 return (__containerof(t,
struct tlspcb, com));
120static struct tlspcb *
121alloc_tlspcb(
struct ifnet *
ifp,
struct vi_info *vi,
int flags)
127 tlsp = malloc(
sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
131 m_snd_tag_init(&tlsp->com,
ifp, &cxgbe_tls_tag_sw);
136 tlsp->tx_key_addr = -1;
142ktls_act_open_cpl_size(
bool isipv6)
152mk_ktls_act_open_req(
struct adapter *sc,
struct vi_info *vi,
struct inpcb *inp,
153 struct tlspcb *tlsp,
int atid,
void *dst)
155 struct tcpcb *tp = intotcpcb(inp);
174 cpl->
opt0 = htobe64(options);
177 if (tp->t_flags & TF_REQ_TSTMP)
179 cpl->
opt2 = htobe32(options);
184 struct inpcb *inp,
struct tlspcb *tlsp,
int atid,
void *dst)
186 struct tcpcb *tp = intotcpcb(inp);
200 cpl->
local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
201 cpl->
local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
203 cpl->
peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
204 cpl->
peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
209 cpl->
opt0 = htobe64(options);
212 if (tp->t_flags & TF_REQ_TSTMP)
214 cpl->
opt2 = htobe32(options);
219 struct inpcb *inp,
struct tlspcb *tlsp,
int atid)
224 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
227 if (tlsp->ce == NULL)
231 wr =
alloc_wrqe(ktls_act_open_cpl_size(isipv6), tlsp->ctrlq);
233 CTR2(
KTR_CXGBE,
"%s: atid %d failed to alloc WR", __func__,
239 mk_ktls_act_open_req6(sc, vi, inp, tlsp, atid,
wrtod(wr));
241 mk_ktls_act_open_req(sc, vi, inp, tlsp, atid,
wrtod(wr));
243 tlsp->open_pending =
true;
257 struct inpcb *inp = tlsp->inp;
259 CTR3(
KTR_CXGBE,
"%s: atid %d status %d", __func__, atid, status);
265 tlsp->open_pending =
false;
/*
 * Total length of one CPL_SET_TCB_FIELD command when encapsulated for
 * transmission on a ULP_TX work request: the ULP_TXPKT header, followed
 * by a ULPTX_IDATA sub-command header, followed by the CPL itself.
 * (A nearby static assertion requires this length to be 16-byte aligned.)
 */
272#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
 273 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
276 "CPL_SET_TCB_FIELD ULP command not 16-byte aligned");
279write_set_tcb_field_ulp(
struct tlspcb *tlsp,
void *dst,
struct sge_txq *txq,
280 uint16_t word, uint64_t mask, uint64_t val)
297 idata->
len = htobe32(
sizeof(*cpl));
310 idata->
len = htobe32(0);
314ktls_set_tcb_fields(
struct tlspcb *tlsp,
struct tcpcb *tp,
struct sge_txq *txq)
323 if (tp->t_flags & TF_REQ_TSTMP)
327 CTR2(
KTR_CXGBE,
"%s: tid %d failed to alloc WR mbuf", __func__,
331 m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
332 m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
335 wr = mtod(m,
void *);
340 dst = (
char *)(wr + 1);
359 if (tp->t_flags & TF_REQ_TSTMP) {
366 KASSERT(dst - (
char *)wr == len, (
"%s: length mismatch", __func__));
377 struct m_snd_tag **pt)
379 const struct ktls_session *tls;
386 int atid, error, explicit_iv_size, keyid, mac_first;
388 tls = params->tls.tls;
391 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
392 tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
393 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
394 return (EPROTONOSUPPORT);
397 switch (tls->params.cipher_algorithm) {
400 switch (tls->params.cipher_key_len) {
408 switch (tls->params.auth_algorithm) {
409 case CRYPTO_SHA1_HMAC:
410 case CRYPTO_SHA2_256_HMAC:
411 case CRYPTO_SHA2_384_HMAC:
414 return (EPROTONOSUPPORT);
416 explicit_iv_size = AES_BLOCK_LEN;
419 case CRYPTO_AES_NIST_GCM_16:
422 switch (tls->params.cipher_key_len) {
430 explicit_iv_size = 8;
434 return (EPROTONOSUPPORT);
440 tlsp = alloc_tlspcb(
ifp, vi, M_WAITOK);
451 keyid = t4_alloc_tls_keyid(sc);
453 CTR2(
KTR_CXGBE,
"%s: atid %d using immediate key ctx", __func__,
455 tlsp->inline_key =
true;
457 tlsp->tx_key_addr = keyid;
458 CTR3(
KTR_CXGBE,
"%s: atid %d allocated TX key addr %#x",
460 atid, tlsp->tx_key_addr);
463 inp = params->tls.inp;
465 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
473 if (tp->t_flags & TF_REQ_TSTMP) {
474 tlsp->using_timestamps =
true;
475 if ((tp->ts_offset & 0xfffffff) != 0) {
481 tlsp->using_timestamps =
false;
483 error = send_ktls_act_open_req(sc, vi, inp, tlsp, atid);
490 CTR2(
KTR_CXGBE,
"%s: atid %d sent CPL_ACT_OPEN_REQ", __func__,
492 while (tlsp->open_pending) {
497 error = rw_sleep(tlsp, &inp->inp_lock, 0,
"t6tlsop", 0);
507 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
514 if (inp->inp_flowtype != M_HASHTYPE_NONE)
519 error = ktls_set_tcb_fields(tlsp, tp, txq);
524 error = ktls_setup_keys(tlsp, tls, txq);
528 tlsp->enc_mode = t4_tls_cipher_mode(tls);
529 tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
543 if (tlsp->inline_key)
545 tlsp->scmd0.ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen);
559 tlsp->scmd0_short.seqno_numivs |=
562 tlsp->scmd0_short.seqno_numivs |=
564 tlsp->scmd0_short.seqno_numivs =
565 htobe32(tlsp->scmd0_short.seqno_numivs);
570 if (tlsp->inline_key)
585 m_snd_tag_rele(&tlsp->com);
590ktls_setup_keys(
struct tlspcb *tlsp,
const struct ktls_session *tls,
606 t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
607 if (tlsp->inline_key)
613 CTR2(
KTR_CXGBE,
"%s: tid %d failed to alloc WR mbuf", __func__,
617 m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
618 m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
619 kwr = mtod(m,
void *);
622 t4_write_tlskey_wr(tls, KTLS_TX, tlsp->tid, 0, tlsp->tx_key_addr, kwr);
624 memcpy(kctx, &tlsp->keyctx,
sizeof(*kctx));
636 CTR2(
KTR_CXGBE,
"%s: tid %d sent key WR", __func__, tlsp->tid);
641ktls_base_wr_size(
struct tlspcb *tlsp)
649 if (tlsp->inline_key)
650 wr_len += tlsp->tx_key_info_size;
661ktls_tcp_payload_length(
struct tlspcb *tlsp,
struct mbuf *m_tls)
663 struct tls_record_layer *hdr;
666 M_ASSERTEXTPG(m_tls);
667 hdr = (
void *)m_tls->m_epg_hdr;
668 plen = ntohs(hdr->tls_length);
674 mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
700 CTR4(
KTR_CXGBE,
"%s: tid %d short TLS record (%u vs %u)",
712ktls_payload_offset(
struct tlspcb *tlsp,
struct mbuf *m_tls)
714 struct tls_record_layer *hdr;
720 M_ASSERTEXTPG(m_tls);
721 hdr = (
void *)m_tls->m_epg_hdr;
722 plen = ntohs(hdr->tls_length);
724 mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
727 if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
738 offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
740 return (rounddown(offset, AES_BLOCK_LEN));
746ktls_sgl_size(u_int nsegs)
754 wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
759ktls_wr_len(
struct tlspcb *tlsp,
struct mbuf *m,
struct mbuf *m_tls,
762 struct tls_record_layer *hdr;
763 u_int imm_len, offset, plen, wr_len, tlen;
765 M_ASSERTEXTPG(m_tls);
771 tlen = ktls_tcp_payload_length(tlsp, m_tls);
772 if (tlen <= m_tls->m_epg_hdrlen) {
779 roundup2(m->m_len + m_tls->m_len, 16);
782 "%s: tid %d TLS header-only packet too long (len %d)",
783 __func__, tlsp->tid, m->m_len + m_tls->m_len);
787 MPASS(m_tls->m_next == NULL);
797 hdr = (
void *)m_tls->m_epg_hdr;
801 offset = ktls_payload_offset(tlsp, m_tls);
806 wr_len = ktls_base_wr_size(tlsp);
815 imm_len += m_tls->m_epg_hdrlen;
817 imm_len += AES_BLOCK_LEN;
818 wr_len += roundup2(imm_len, 16);
821 *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
822 plen - (m_tls->m_epg_hdrlen + offset));
823 wr_len += ktls_sgl_size(*nsegsp);
825 wr_len = roundup2(wr_len, 16);
834ktls_has_tcp_options(
struct tcphdr *tcp)
837 int cnt, opt, optlen;
839 cp = (u_char *)(tcp + 1);
840 cnt = tcp->th_off * 4 -
sizeof(
struct tcphdr);
841 for (; cnt > 0; cnt -= optlen, cp += optlen) {
843 if (opt == TCPOPT_EOL)
845 if (opt == TCPOPT_NOP)
851 if (optlen < 2 || optlen > cnt)
856 case TCPOPT_TIMESTAMP:
869ktls_find_tcp_timestamps(
struct tcphdr *tcp)
872 int cnt, opt, optlen;
874 cp = (u_char *)(tcp + 1);
875 cnt = tcp->th_off * 4 -
sizeof(
struct tcphdr);
876 for (; cnt > 0; cnt -= optlen, cp += optlen) {
878 if (opt == TCPOPT_EOL)
880 if (opt == TCPOPT_NOP)
886 if (optlen < 2 || optlen > cnt)
889 if (opt == TCPOPT_TIMESTAMP && optlen == TCPOLEN_TIMESTAMP)
899 struct ether_header *eh;
905 u_int wr_len, tot_len;
915 MPASS(m->m_pkthdr.snd_tag != NULL);
916 tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
918 if (m->m_len <=
sizeof(*eh) +
sizeof(*ip)) {
919 CTR2(
KTR_CXGBE,
"%s: tid %d header mbuf too short", __func__,
923 eh = mtod(m,
struct ether_header *);
924 if (ntohs(eh->ether_type) != ETHERTYPE_IP &&
925 ntohs(eh->ether_type) != ETHERTYPE_IPV6) {
926 CTR2(
KTR_CXGBE,
"%s: tid %d mbuf not ETHERTYPE_IP{,V6}",
927 __func__, tlsp->tid);
930 m->m_pkthdr.l2hlen =
sizeof(*eh);
933 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
934 ip = (
struct ip *)(eh + 1);
935 if (ip->ip_p != IPPROTO_TCP) {
936 CTR2(
KTR_CXGBE,
"%s: tid %d mbuf not IPPROTO_TCP",
937 __func__, tlsp->tid);
940 m->m_pkthdr.l3hlen = ip->ip_hl * 4;
942 ip6 = (
struct ip6_hdr *)(eh + 1);
943 if (ip6->ip6_nxt != IPPROTO_TCP) {
944 CTR3(
KTR_CXGBE,
"%s: tid %d mbuf not IPPROTO_TCP (%u)",
945 __func__, tlsp->tid, ip6->ip6_nxt);
948 m->m_pkthdr.l3hlen =
sizeof(
struct ip6_hdr);
950 if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
952 CTR2(
KTR_CXGBE,
"%s: tid %d header mbuf too short (2)",
953 __func__, tlsp->tid);
956 tcp = (
struct tcphdr *)((
char *)(eh + 1) + m->m_pkthdr.l3hlen);
957 m->m_pkthdr.l4hlen = tcp->th_off * 4;
960 if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
961 m->m_pkthdr.l4hlen) {
963 "%s: tid %d header mbuf bad length (%d + %d + %d != %d)",
964 __func__, tlsp->tid, m->m_pkthdr.l2hlen,
965 m->m_pkthdr.l3hlen, m->m_pkthdr.l4hlen, m->m_len);
970 MPASS(m->m_next != NULL);
971 MPASS(m->m_next->m_flags & M_EXTPG);
980 for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
981 MPASS(m_tls->m_flags & M_EXTPG);
983 wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
985 CTR4(
KTR_CXGBE,
"%s: tid %d wr_len %d nsegs %d", __func__,
986 tlsp->tid, wr_len, nsegs);
990 tot_len += roundup2(wr_len,
EQ_ESIZE);
1000 MPASS(tot_len != 0);
1006 if ((tcp->th_flags & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
1011 "%s: tid %d options-only packet too long (len %d)",
1012 __func__, tlsp->tid, m->m_len);
1015 tot_len += roundup2(wr_len,
EQ_ESIZE);
1033 if (tlsp->using_timestamps)
1036 tot_len += roundup2(wr_len,
EQ_ESIZE);
1038 *len16p = tot_len / 16;
1039#ifdef VERBOSE_TRACES
1040 CTR4(
KTR_CXGBE,
"%s: tid %d len16 %d nsegs %d", __func__,
1041 tlsp->tid, *len16p, *nsegsp);
1051write_gl_to_buf(
struct sglist *
gl, caddr_t to)
1053 struct sglist_seg *seg;
1056 int i, nflits, nsegs;
1058 KASSERT(((uintptr_t)to & 0xf) == 0,
1059 (
"%s: SGL must start at a 16 byte boundary: %p", __func__, to));
1061 nsegs =
gl->sg_nseg;
1064 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
1066 seg = &
gl->sg_segs[0];
1067 usgl = (
void *)flitp;
1071 usgl->
len0 = htobe32(seg->ss_len);
1072 usgl->
addr0 = htobe64(seg->ss_paddr);
1075 for (i = 0; i < nsegs - 1; i++, seg++) {
1076 usgl->
sge[i / 2].
len[i & 1] = htobe32(seg->ss_len);
1077 usgl->
sge[i / 2].
addr[i & 1] = htobe64(seg->ss_paddr);
1080 usgl->
sge[i / 2].
len[1] = htobe32(0);
1084 MPASS(((uintptr_t)flitp) & 0xf);
1088 MPASS((((uintptr_t)flitp) & 0xf) == 0);
1095 MPASS((uintptr_t)(*to) >= (uintptr_t)&
eq->
desc[0]);
1096 MPASS((uintptr_t)(*to) < (uintptr_t)&
eq->
desc[
eq->
sidx]);
1098 if (__predict_true((uintptr_t)(*to) + len <=
1100 bcopy(from, *to, len);
1102 if ((uintptr_t)(*to) == (uintptr_t)&
eq->
desc[
eq->
sidx])
1103 (*to) = (caddr_t)
eq->
desc;
1105 int portion = (uintptr_t)&
eq->
desc[
eq->
sidx] - (uintptr_t)(*to);
1107 bcopy(from, *to, portion);
1109 portion = len - portion;
1110 bcopy(from, (
void *)
eq->
desc, portion);
1111 (*to) = (caddr_t)
eq->
desc + portion;
1116ktls_write_tcp_options(
struct sge_txq *txq,
void *dst,
struct mbuf *m,
1117 u_int available, u_int
pidx)
1124 int len16, ndesc, pktlen;
1125 struct ether_header *eh;
1126 struct ip *ip, newip;
1127 struct ip6_hdr *ip6, newip6;
1128 struct tcphdr *tcp, newtcp;
1139 MPASS(ndesc <= available);
1149 cpl = (
void *)(wr + 1);
1154 cpl->
len = htobe16(pktlen);
1156 out = (
void *)(cpl + 1);
1159 eh = mtod(m,
struct ether_header *);
1160 copy_to_txd(&txq->
eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1163 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1164 ip = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1166 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1167 copy_to_txd(&txq->
eq, (caddr_t)&newip, &out,
sizeof(newip));
1168 if (m->m_pkthdr.l3hlen >
sizeof(*ip))
1170 m->m_pkthdr.l3hlen -
sizeof(*ip));
1175 ip6 = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1177 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
1178 copy_to_txd(&txq->
eq, (caddr_t)&newip6, &out,
sizeof(newip6));
1179 MPASS(m->m_pkthdr.l3hlen ==
sizeof(*ip6));
1188 tcp = (
void *)((
char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1190 newtcp.th_flags &= ~(TH_PUSH | TH_FIN);
1191 copy_to_txd(&txq->
eq, (caddr_t)&newtcp, &out,
sizeof(newtcp));
1195 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
sizeof(*tcp)));
1210ktls_write_tunnel_packet(
struct sge_txq *txq,
void *dst,
struct mbuf *m,
1211 struct mbuf *m_tls, u_int available, tcp_seq tcp_seqno, u_int
pidx)
1218 int len16, ndesc, pktlen;
1219 struct ether_header *eh;
1220 struct ip *ip, newip;
1221 struct ip6_hdr *ip6, newip6;
1222 struct tcphdr *tcp, newtcp;
1229 M_ASSERTEXTPG(m_tls);
1232 MPASS(m_tls->m_next == NULL);
1235 pktlen = m->m_len + m_tls->m_len;
1239 MPASS(ndesc <= available);
1249 cpl = (
void *)(wr + 1);
1254 cpl->
len = htobe16(pktlen);
1256 out = (
void *)(cpl + 1);
1259 eh = mtod(m,
struct ether_header *);
1260 copy_to_txd(&txq->
eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1263 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1264 ip = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1266 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1267 copy_to_txd(&txq->
eq, (caddr_t)&newip, &out,
sizeof(newip));
1268 if (m->m_pkthdr.l3hlen >
sizeof(*ip))
1270 m->m_pkthdr.l3hlen -
sizeof(*ip));
1275 ip6 = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1277 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
1278 copy_to_txd(&txq->
eq, (caddr_t)&newip6, &out,
sizeof(newip6));
1279 MPASS(m->m_pkthdr.l3hlen ==
sizeof(*ip6));
1288 tcp = (
void *)((
char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1290 newtcp.th_seq = htonl(tcp_seqno + mtod(m_tls, vm_offset_t));
1291 copy_to_txd(&txq->
eq, (caddr_t)&newtcp, &out,
sizeof(newtcp));
1295 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
sizeof(*tcp)));
1299 mtod(m_tls, vm_offset_t), &out, m_tls->m_len);
1314 "CPL_SET_TCB_FIELD must be smaller than a single TX descriptor");
1316 "SND_NXT_RAW and SND_UNA_RAW are in different words");
1319ktls_write_tls_wr(
struct tlspcb *tlsp,
struct sge_txq *txq,
1320 void *dst,
struct mbuf *m,
struct tcphdr *tcp,
struct mbuf *m_tls,
1321 u_int nsegs, u_int available, tcp_seq tcp_seqno, uint32_t *tsopt,
1322 u_int
pidx,
bool set_l2t_idx)
1332 struct tls_record_layer *hdr;
1334 u_int aad_start, aad_stop;
1335 u_int auth_start, auth_stop, auth_insert;
1336 u_int cipher_start, cipher_stop, iv_offset;
1337 u_int imm_len, mss, ndesc, offset, plen, tlen, twr_len, wr_len;
1338 u_int fields, tx_max_offset, tx_max;
1339 bool first_wr, last_wr, using_scratch;
1342 MPASS(tlsp->txq == txq);
1344 first_wr = (tlsp->prev_seq == 0 && tlsp->prev_ack == 0 &&
1345 tlsp->prev_win == 0);
1356 M_ASSERTEXTPG(m_tls);
1357 hdr = (
void *)m_tls->m_epg_hdr;
1361 tlen = ktls_tcp_payload_length(tlsp, m_tls);
1362 if (tlen <= m_tls->m_epg_hdrlen) {
1367#ifdef VERBOSE_TRACES
1368 CTR3(
KTR_CXGBE,
"%s: tid %d header-only TLS record %u",
1369 __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
1371 return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
1376 offset = ktls_payload_offset(tlsp, m_tls);
1377#ifdef VERBOSE_TRACES
1378 CTR4(
KTR_CXGBE,
"%s: tid %d short TLS record %u with offset %u",
1379 __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
1381 if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
1384 panic(
"%s: FIN on short TLS record", __func__);
1396 last_wr = m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) == 0;
1432 tx_max_offset = mtod(m_tls, vm_offset_t);
1434 m_tls->m_epg_trllen) {
1437 m_tls->m_epg_trllen;
1446 tx_max = tcp_seqno + tx_max_offset;
1454 wr = (
void *)txq->ss;
1457 out = (
void *)(wr + 1);
1461 (
"trying to set L2T_IX for subsequent TLS WR"));
1462#ifdef VERBOSE_TRACES
1463 CTR3(
KTR_CXGBE,
"%s: tid %d set L2T_IX to %d", __func__,
1464 tlsp->tid, tlsp->l2te->idx);
1471 if (tsopt != NULL && tlsp->prev_tsecr != ntohl(tsopt[1])) {
1473 (
"trying to set T_RTSEQ_RECENT for subsequent TLS WR"));
1474#ifdef VERBOSE_TRACES
1475 CTR2(
KTR_CXGBE,
"%s: tid %d wrote updated T_RTSEQ_RECENT",
1476 __func__, tlsp->tid);
1484 tlsp->prev_tsecr = ntohl(tsopt[1]);
1487 if (first_wr || tlsp->prev_seq != tx_max) {
1489 (
"trying to set TX_MAX for subsequent TLS WR"));
1490#ifdef VERBOSE_TRACES
1492 "%s: tid %d setting TX_MAX to %u (tcp_seqno %u)",
1493 __func__, tlsp->tid, tx_max, tcp_seqno);
1506 if (tlsp->prev_seq != tx_max || mtod(m_tls, vm_offset_t) != 0) {
1508 (
"trying to clear SND_UNA_RAW for subsequent TLS WR"));
1509#ifdef VERBOSE_TRACES
1510 CTR2(
KTR_CXGBE,
"%s: tid %d clearing SND_UNA_RAW", __func__,
1524 tlsp->prev_seq = tcp_seqno + tlen;
1526 if (first_wr || tlsp->prev_ack != ntohl(tcp->th_ack)) {
1528 (
"trying to set RCV_NXT for subsequent TLS WR"));
1535 tlsp->prev_ack = ntohl(tcp->th_ack);
1538 if (first_wr || tlsp->prev_win != ntohs(tcp->th_win)) {
1540 (
"trying to set RCV_WND for subsequent TLS WR"));
1547 tlsp->prev_win = ntohs(tcp->th_win);
1552 nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
1553 offset, plen - (m_tls->m_epg_hdrlen + offset));
1556 twr_len = ktls_base_wr_size(tlsp);
1560 imm_len += m_tls->m_epg_hdrlen;
1562 imm_len += AES_BLOCK_LEN;
1563 twr_len += roundup2(imm_len, 16);
1564 twr_len += ktls_sgl_size(nsegs);
1576 tlsp->sc->tlst.combo_wrs) {
1578 txpkt = (
void *)out;
1580 wr_len +=
sizeof(*wr);
1590 if (using_scratch) {
1596 MPASS(ndesc <= available);
1615 wr = (
void *)txq->ss;
1618 txpkt = (
void *)(wr + 1);
1622 txpkt = (
void *)out;
1625 wr_len = roundup2(wr_len, 16);
1626 MPASS(ndesc + howmany(wr_len,
EQ_ESIZE) <= available);
1639 txpkt->
len = htobe32(howmany(twr_len -
sizeof(*wr), 16));
1642 idata = (
void *)(txpkt + 1);
1653 if (tlsp->inline_key)
1654 idata->
len += tlsp->tx_key_info_size +
1656 idata->
len = htobe32(idata->
len);
1659 sec_pdu = (
void *)(idata + 1);
1672 cipher_start = AES_BLOCK_LEN + 1;
1675 sec_pdu->
pldlen = htobe32(16 + plen -
1676 (m_tls->m_epg_hdrlen + offset));
1679 sec_pdu->
seqno_numivs = tlsp->scmd0_short.seqno_numivs;
1681 tlsp->scmd0_short.ivgen_hdrlen |
1695 cipher_start = m_tls->m_epg_hdrlen + 1;
1698 auth_start = cipher_start;
1703 auth_start = cipher_start;
1708 sec_pdu->
pldlen = htobe32(plen);
1714 if (mtod(m_tls, vm_offset_t) == 0)
1734 sec_pdu->
scmd1 = htobe64(m_tls->m_epg_seqno);
1737 out = (
void *)(sec_pdu + 1);
1738 if (tlsp->inline_key) {
1739 memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
1740 out += tlsp->tx_key_info_size;
1743 memrd = (
void *)out;
1747 memrd->
addr = htobe32(tlsp->tx_key_addr >> 5);
1750 idata = (
void *)(memrd + 1);
1755 out = (
void *)(idata + 1);
1759 tx_data = (
void *)out;
1761 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1762 mss = m->m_pkthdr.tso_segsz;
1763 tlsp->prev_mss = mss;
1764 }
else if (tlsp->prev_mss != 0)
1765 mss = tlsp->prev_mss;
1767 mss = tlsp->vi->ifp->if_mtu -
1768 (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
1771 tx_data->
rsvd = htobe32(tcp_seqno);
1774 V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
1775 tx_data->
rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
1778 if (last_wr && tcp->th_flags & TH_PUSH)
1782 out = (
void *)(tx_data + 1);
1784 memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
1785 out += m_tls->m_epg_hdrlen;
1792 memcpy(iv, tlsp->keyctx.u.txhdr.txsalt,
SALT_SIZE);
1793 memcpy(iv + 4, hdr + 1, 8);
1794 *(uint32_t *)(iv + 12) = htobe32(2 +
1795 offset / AES_BLOCK_LEN);
1797 memcpy(iv, hdr + 1, AES_BLOCK_LEN);
1798 out += AES_BLOCK_LEN;
1801 if (imm_len % 16 != 0) {
1803 memset(out, 0, 8 - (imm_len % 8));
1804 out += 8 - (imm_len % 8);
1810 if (imm_len % 16 <= 8) {
1811 idata = (
void *)out;
1813 idata->
len = htobe32(0);
1814 out = (
void *)(idata + 1);
1819 sglist_reset(txq->
gl);
1820 if (sglist_append_mbuf_epg(txq->
gl, m_tls, m_tls->m_epg_hdrlen + offset,
1821 plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
1823 panic(
"%s: failed to append sglist", __func__);
1826 write_gl_to_buf(txq->
gl, out);
1828 if (using_scratch) {
1833 ndesc += howmany(wr_len,
EQ_ESIZE);
1834 MPASS(ndesc <= available);
1838 if (mtod(m_tls, vm_offset_t) != 0) {
1843 (m_tls->m_epg_hdrlen + offset);
1857ktls_write_tcp_fin(
struct sge_txq *txq,
void *dst,
struct mbuf *m,
1858 u_int available, tcp_seq tcp_seqno, u_int
pidx)
1865 int len16, ndesc, pktlen;
1866 struct ether_header *eh;
1867 struct ip *ip, newip;
1868 struct ip6_hdr *ip6, newip6;
1869 struct tcphdr *tcp, newtcp;
1880 MPASS(ndesc <= available);
1890 cpl = (
void *)(wr + 1);
1895 cpl->
len = htobe16(pktlen);
1897 out = (
void *)(cpl + 1);
1900 eh = mtod(m,
struct ether_header *);
1901 copy_to_txd(&txq->
eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
1904 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
1905 ip = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1907 newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
1908 copy_to_txd(&txq->
eq, (caddr_t)&newip, &out,
sizeof(newip));
1909 if (m->m_pkthdr.l3hlen >
sizeof(*ip))
1911 m->m_pkthdr.l3hlen -
sizeof(*ip));
1916 ip6 = (
void *)((
char *)eh + m->m_pkthdr.l2hlen);
1918 newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
1919 copy_to_txd(&txq->
eq, (caddr_t)&newip6, &out,
sizeof(newip6));
1920 MPASS(m->m_pkthdr.l3hlen ==
sizeof(*ip6));
1929 tcp = (
void *)((
char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
1931 newtcp.th_seq = htonl(tcp_seqno);
1932 copy_to_txd(&txq->
eq, (caddr_t)&newtcp, &out,
sizeof(newtcp));
1936 (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
sizeof(*tcp)));
1956 struct tlspcb *tlsp;
1959 struct ether_header *eh;
1961 u_int ndesc,
pidx, totdesc;
1963 bool has_fin, set_l2t_idx;
1967 MPASS(m->m_pkthdr.snd_tag != NULL);
1968 tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
1971 eh = mtod(m,
struct ether_header *);
1972 tcp = (
struct tcphdr *)((
char *)eh + m->m_pkthdr.l2hlen +
1973 m->m_pkthdr.l3hlen);
1975 has_fin = (tcp->th_flags & TH_FIN) != 0;
1981 if (!has_fin && ktls_has_tcp_options(tcp)) {
1982 ndesc = ktls_write_tcp_options(txq, dst, m, available,
pidx);
1986#ifdef VERBOSE_TRACES
1987 CTR2(
KTR_CXGBE,
"%s: tid %d wrote TCP options packet", __func__,
1996 if (m->m_flags & M_VLANTAG)
1997 vlan_tag = m->m_pkthdr.ether_vtag;
2000 set_l2t_idx =
false;
2001 if (tlsp->l2te == NULL || tlsp->l2te->vlan != vlan_tag ||
2002 memcmp(tlsp->l2te->dmac, eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
2007 vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
2008 if (tlsp->l2te == NULL)
2011 MPASS(ndesc <= available - totdesc);
2027 for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
2028 MPASS(m_tls->m_flags & M_EXTPG);
2035 if (m_tls == m->m_next) {
2036 tcp_seqno = ntohl(tcp->th_seq) -
2037 mtod(m_tls, vm_offset_t);
2038 if (tlsp->using_timestamps)
2039 tsopt = ktls_find_tcp_timestamps(tcp);
2041 MPASS(mtod(m_tls, vm_offset_t) == 0);
2042 tcp_seqno = tlsp->prev_seq;
2045 ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
2046 nsegs, available - totdesc, tcp_seqno, tsopt,
pidx,
2059 set_l2t_idx =
false;
2071 ndesc = ktls_write_tcp_fin(txq, dst, m, available,
2072 tlsp->prev_seq,
pidx);
2076 MPASS(totdesc <= available);
2081cxgbe_tls_tag_free(
struct m_snd_tag *mst)
2084 struct tlspcb *tlsp;
2086 tlsp = mst_to_tls(mst);
2089 CTR2(
KTR_CXGBE,
"%s: tid %d", __func__, tlsp->tid);
2097 if (tlsp->tx_key_addr >= 0)
2098 t4_free_tls_keyid(sc, tlsp->tx_key_addr);
2100 zfree(tlsp, M_CXGBE);
2123 struct m_snd_tag **pt)
2138 panic(
"can't happen");
static struct wrqe * alloc_wrqe(int wr_len, struct sge_wrq *wrq)
void free_atid(struct adapter *, int)
struct mbuf * alloc_wr_mbuf(int, int)
void * lookup_atid(struct adapter *, int)
int alloc_atid(struct adapter *, void *)
static void * wrtod(struct wrqe *wr)
#define TXQ_LOCK_ASSERT_OWNED(txq)
void t4_register_shared_cpl_handler(int, cpl_handler_t, int)
static void t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
#define IDXINCR(idx, incr, wrap)
void release_tid(struct adapter *, int, struct sge_wrq *)
#define CXGBE_UNIMPLEMENTED(s)
static int tx_len16_to_desc(int len16)
#define INIT_TP_WR(w, tid)
struct adapter_params params
__be32 aadstart_cipherstop_hi
__be32 cipherstop_lo_authinsert
uint64_t kern_tls_options
uint64_t kern_tls_partial
uint64_t kern_tls_records
uint64_t kern_tls_fin_short
unsigned short tx_modq[MAX_NCHAN]
struct ulptx_sge_pair sge[]
void t4_release_clip_entry(struct adapter *sc, struct clip_entry *ce)
struct clip_entry * t4_get_clip_entry(struct adapter *sc, struct in6_addr *in6, bool add)
#define SCMD_AUTH_MODE_NOP
#define SCMD_HMAC_CTRL_NOP
#define SCMD_PROTO_VERSION_GENERIC
#define SCMD_CIPH_MODE_AES_GCM
#define SCMD_ENCDECCTRL_ENCRYPT
#define SCMD_CIPH_MODE_AES_CTR
#define SCMD_CIPH_MODE_AES_CBC
#define LEN__SET_TCB_FIELD_ULP
int t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m, u_int nsegs, u_int available)
int cxgbe_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, struct m_snd_tag **pt)
void t6_ktls_modunload(void)
int t6_ktls_parse_pkt(struct mbuf *m, int *nsegsp, int *len16p)
void t6_ktls_modload(void)
struct l2t_entry * t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst, int *ndesc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
static void t4_l2t_release(struct l2t_entry *e)
int mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
#define V_CPL_TX_SEC_PDU_AADSTOP(x)
#define V_CPL_TX_SEC_PDU_CPLLEN(x)
#define V_SCMD_ENC_DEC_CTRL(x)
#define V_ULP_TXPKT_FID(x)
#define V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(x)
#define V_CPL_TX_SEC_PDU_AUTHINSERT(x)
#define V_SCMD_CIPH_AUTH_SEQ_CTRL(x)
#define V_SCMD_HDR_LEN(x)
#define V_SCMD_IV_GEN_CTRL(x)
#define V_TXPKT_CSUM_TYPE(x)
#define V_SCMD_HMAC_CTRL(x)
#define V_CPL_TX_SEC_PDU_AADSTART(x)
#define V_SCMD_CIPH_MODE(x)
#define V_ULP_TXPKT_CHANNELID(x)
#define V_SCMD_KEY_CTX_INLINE(x)
#define V_ULP_TXPKT_RO(x)
#define V_SCMD_AADIVDROP(x)
#define V_SCMD_NUM_IVS(x)
#define V_ULP_TXPKT_DEST(x)
#define V_SCMD_AUTH_MODE(x)
#define V_CPL_TX_SEC_PDU_AUTHSTART(x)
#define V_SCMD_PROTO_VERSION(x)
#define V_CPL_TX_SEC_PDU_IVINSRTOFST(x)
#define V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(x)
#define G_AOPEN_STATUS(x)
#define V_ULP_TX_SC_MORE(x)
#define V_SCMD_SEQ_NO_CTRL(x)
#define V_CPL_TX_SEC_PDU_PLACEHOLDER(x)
#define V_ULP_TXPKT_DATAMODIFY(x)
#define V_CPL_TX_SEC_PDU_AUTHSTOP(x)
#define V_TXPKT_IPHDR_LEN(x)
#define V_CPL_TX_SEC_PDU_CIPHERSTART(x)
#define MK_OPCODE_TID(opcode, tid)
#define V_CPL_TX_SEC_PDU_OPCODE(x)
#define V_T6_TXPKT_ETHHDR_LEN(x)
#define V_SCMD_TLS_FRAG_ENABLE(x)
#define V_SCMD_IV_SIZE(x)
static void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int)
#define V_TCB_SND_UNA_RAW(x)
#define V_TCB_SND_NXT_RAW(x)
#define M_TCB_TIMESTAMP_OFFSET
#define M_TCB_SND_UNA_RAW
#define W_TCB_TIMESTAMP_OFFSET
#define M_TCB_T_RTSEQ_RECENT
#define V_TCB_T_RTSEQ_RECENT(x)
#define V_TCB_TIMESTAMP_OFFSET(x)
#define M_TCB_SND_NXT_RAW
#define W_TCB_SND_NXT_RAW
#define W_TCB_SND_MAX_RAW
#define W_TCB_SND_UNA_RAW
#define V_TCB_SND_MAX_RAW(x)
#define V_TF_CORE_BYPASS(x)
#define W_TCB_T_RTSEQ_RECENT
#define M_TCB_SND_MAX_RAW
#define V_TF_NON_OFFLOAD(x)
#define TLS_HEADER_LENGTH
#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x)
#define F_FW_ULPTX_WR_DATA