FreeBSD kernel CXGBE device code
t4_crypto.c
1/*-
2 * Copyright (c) 2017 Chelsio Communications, Inc.
3 * Copyright (c) 2021 The FreeBSD Foundation
4 * All rights reserved.
5 * Written by: John Baldwin <jhb@FreeBSD.org>
6 *
7 * Portions of this software were developed by Ararat River
8 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#include <sys/types.h>
36#include <sys/bus.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/mutex.h>
40#include <sys/module.h>
41#include <sys/sglist.h>
42
43#include <opencrypto/cryptodev.h>
44#include <opencrypto/xform.h>
45
46#include "cryptodev_if.h"
47
48#include "common/common.h"
49#include "crypto/t4_crypto.h"
50
51/*
52 * Requests consist of:
53 *
54 * +-------------------------------+
55 * | struct fw_crypto_lookaside_wr |
56 * +-------------------------------+
57 * | struct ulp_txpkt |
58 * +-------------------------------+
59 * | struct ulptx_idata |
60 * +-------------------------------+
61 * | struct cpl_tx_sec_pdu |
62 * +-------------------------------+
63 * | struct cpl_tls_tx_scmd_fmt |
64 * +-------------------------------+
65 * | key context header |
66 * +-------------------------------+
67 * | AES key | ----- For requests with AES
68 * +-------------------------------+
69 * | Hash state | ----- For hash-only requests
70 * +-------------------------------+ -
71 * | IPAD (16-byte aligned) | \
72 * +-------------------------------+ +---- For requests with HMAC
73 * | OPAD (16-byte aligned) | /
74 * +-------------------------------+ -
75 * | GMAC H | ----- For AES-GCM
76 * +-------------------------------+ -
77 * | struct cpl_rx_phys_dsgl | \
78 * +-------------------------------+ +---- Destination buffer for
79 * | PHYS_DSGL entries | / non-hash-only requests
80 * +-------------------------------+ -
81 * | 16 dummy bytes | ----- Only for HMAC/hash-only requests
82 * +-------------------------------+
83 * | IV | ----- If immediate IV
84 * +-------------------------------+
85 * | Payload | ----- If immediate Payload
86 * +-------------------------------+ -
87 * | struct ulptx_sgl | \
88 * +-------------------------------+ +---- If payload via SGL
89 * | SGL entries | /
90 * +-------------------------------+ -
91 *
92 * Note that the key context must be padded to ensure 16-byte alignment.
93 * For HMAC requests, the key consists of the partial hash of the IPAD
94 * followed by the partial hash of the OPAD.
95 *
96 * Replies consist of:
97 *
98 * +-------------------------------+
99 * | struct cpl_fw6_pld |
100 * +-------------------------------+
101 * | hash digest | ----- For HMAC request with
102 * +-------------------------------+ 'hash_size' set in work request
103 *
104 * A 32-bit big-endian error status word is supplied in the last 4
105 * bytes of data[0] in the CPL_FW6_PLD message. Bit 0 indicates a
106 * "MAC" error and bit 1 indicates a "PAD" error.
107 *
108 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
109 * in the request is returned in data[1] of the CPL_FW6_PLD message.
110 *
111 * For block cipher replies, the updated IV is supplied in data[2] and
112 * data[3] of the CPL_FW6_PLD message.
113 *
114 * For hash replies where the work request set 'hash_size' to request
115 * a copy of the hash in the reply, the hash digest is supplied
116 * immediately following the CPL_FW6_PLD message.
117 */
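/*
 * Illustrative sketch (not part of the driver): given the reply layout
 * above, a completion handler could decode the status word and cookie
 * from the CPL_FW6_PLD message along these lines, assuming 'cpl'
 * points at the message:
 *
 *	uint32_t status = be64toh(cpl->data[0]) & 0xffffffff;
 *	bool mac_error = (status & (1 << 0)) != 0;
 *	bool pad_error = (status & (1 << 1)) != 0;
 *	struct cryptop *crp = (void *)(uintptr_t)be64toh(cpl->data[1]);
 */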
118
119/*
120 * The crypto engine supports a maximum AAD size of 511 bytes.
121 */
122#define MAX_AAD_LEN 511
123
124/*
125 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
126 * entries. While the CPL includes a 16-bit length field, the T6 can
127 * sometimes hang if an error occurs while processing a request with a
128 * single DSGL entry larger than 2k.
129 */
130#define MAX_RX_PHYS_DSGL_SGE 32
131#define DSGL_SGE_MAXLEN 2048
132
133/*
134 * The adapter only supports requests with a total input or output
135 * length of 64k-1 or smaller. Longer requests either result in hung
136 * requests or incorrect results.
137 */
138#define MAX_REQUEST_SIZE 65535
139
140static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
141
143 const struct auth_hash *auth_hash;
145 unsigned int partial_digest_len;
146 unsigned int auth_mode;
147 unsigned int mk_size;
149};
150
153 char ghash_h[GMAC_BLOCK_LEN];
154};
155
158};
159
161 unsigned int cipher_mode;
162 unsigned int key_len;
163 unsigned int iv_len;
167};
168
169struct ccr_port {
170 struct sge_wrq *txq;
171 struct sge_rxq *rxq;
175
176 counter_u64_t stats_queued;
177 counter_u64_t stats_completed;
178};
179
181#ifdef INVARIANTS
182 int pending;
183#endif
184 enum { HASH, HMAC, CIPHER, ETA, GCM, CCM } mode;
185 struct ccr_port *port;
186 union {
190 };
192 struct mtx lock;
193
194 /*
195 * A fallback software session is used for certain GCM/CCM
196 * requests that the hardware can't handle such as requests
197 * with only AAD and no payload.
198 */
199 crypto_session_t sw_session;
200
201 /*
202 * Pre-allocate S/G lists used when preparing a work request.
203 * 'sg_input' contains an sglist describing the entire input
204 * buffer for a 'struct cryptop'. 'sg_output' contains an
205 * sglist describing the entire output buffer. 'sg_ulptx' is
206 * used to describe the data the engine should DMA as input
207 * via ULPTX_SGL. 'sg_dsgl' is used to describe the
208 * destination that cipher text and a tag should be written
209 * to.
210 */
211 struct sglist *sg_input;
212 struct sglist *sg_output;
213 struct sglist *sg_ulptx;
214 struct sglist *sg_dsgl;
215};
216
217struct ccr_softc {
219 device_t dev;
220 uint32_t cid;
221 struct mtx lock;
226
227 /*
228 * Pre-allocate a dummy output buffer for the IV and AAD for
229 * AEAD requests.
230 */
232 struct sglist *sg_iv_aad;
233
234 /* Statistics. */
235 counter_u64_t stats_cipher_encrypt;
236 counter_u64_t stats_cipher_decrypt;
237 counter_u64_t stats_hash;
238 counter_u64_t stats_hmac;
239 counter_u64_t stats_eta_encrypt;
240 counter_u64_t stats_eta_decrypt;
241 counter_u64_t stats_gcm_encrypt;
242 counter_u64_t stats_gcm_decrypt;
243 counter_u64_t stats_ccm_encrypt;
244 counter_u64_t stats_ccm_decrypt;
245 counter_u64_t stats_wr_nomem;
246 counter_u64_t stats_inflight;
247 counter_u64_t stats_mac_error;
248 counter_u64_t stats_pad_error;
249 counter_u64_t stats_sglist_error;
250 counter_u64_t stats_process_error;
251 counter_u64_t stats_sw_fallback;
252
253 struct sysctl_ctx_list ctx;
254};
255
256/*
257 * Crypto requests involve two kinds of scatter/gather lists.
258 *
259 * Non-hash-only requests require a PHYS_DSGL that describes the
260 * location to store the results of the encryption or decryption
261 * operation. This SGL uses a different format (PHYS_DSGL) and should
262 * exclude the skip bytes at the start of the data as well as any AAD
263 * or IV. For authenticated encryption requests it should include the
264 * destination of the hash or tag.
265 *
266 * The input payload may either be supplied inline as immediate data,
267 * or via a standard ULP_TX SGL. This SGL should include AAD,
268 * ciphertext, and the hash or tag for authenticated decryption
269 * requests.
270 *
271 * These scatter/gather lists can describe different subsets of the
272 * buffers described by the crypto operation. ccr_populate_sglist()
273 * generates a scatter/gather list that covers an entire crypto
274 * operation buffer that is then used to construct the other
275 * scatter/gather lists.
276 */
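/*
 * Rough sketch (simplified; the actual call sites are in the request
 * handlers below and in the driver's dispatch path): a non-immediate
 * request builds its input SGL roughly as follows:
 *
 *	ccr_populate_sglist(s->sg_input, &crp->crp_buf);
 *	sglist_reset(s->sg_ulptx);
 *	sglist_append_sglist(s->sg_ulptx, s->sg_input,
 *	    crp->crp_payload_start, crp->crp_payload_length);
 *	sgl_len = ccr_ulptx_sgl_len(s->sg_ulptx->sg_nseg);
 *	...
 *	ccr_write_ulptx_sgl(s, dst, s->sg_ulptx->sg_nseg);
 */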
277static int
278ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
279{
280 int error;
281
282 sglist_reset(sg);
283 switch (cb->cb_type) {
284 case CRYPTO_BUF_MBUF:
285 error = sglist_append_mbuf(sg, cb->cb_mbuf);
286 break;
287 case CRYPTO_BUF_SINGLE_MBUF:
288 error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
289 break;
290 case CRYPTO_BUF_UIO:
291 error = sglist_append_uio(sg, cb->cb_uio);
292 break;
293 case CRYPTO_BUF_CONTIG:
294 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
295 break;
296 case CRYPTO_BUF_VMPAGE:
297 error = sglist_append_vmpages(sg, cb->cb_vm_page,
298 cb->cb_vm_page_len, cb->cb_vm_page_offset);
299 break;
300 default:
301 error = EINVAL;
302 }
303 return (error);
304}
305
306/*
307 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
308 * segments.
309 */
310static int
311ccr_count_sgl(struct sglist *sg, int maxsegsize)
312{
313 int i, nsegs;
314
315 nsegs = 0;
316 for (i = 0; i < sg->sg_nseg; i++)
317 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
318 return (nsegs);
319}
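/*
 * For example, a single 5000-byte segment counted against
 * DSGL_SGE_MAXLEN (2048) contributes howmany(5000, 2048) = 3 segments.
 */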
320
321/* These functions deal with PHYS_DSGL for the reply buffer. */
322static inline int
324{
325 int len;
326
327 len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
328 if ((nsegs % 8) != 0) {
329 len += sizeof(uint16_t) * 8;
330 len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
331 }
332 return (len);
333}
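/*
 * Worked example: for nsegs = 3 the first term is zero (3 / 8 == 0),
 * so the length is sizeof(uint16_t) * 8 + roundup2(3, 2) *
 * sizeof(uint64_t) = 16 + 32 = 48 bytes.
 */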
334
335static void
336ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs)
337{
338 struct sglist *sg;
339 struct cpl_rx_phys_dsgl *cpl;
340 struct phys_sge_pairs *sgl;
341 vm_paddr_t paddr;
342 size_t seglen;
343 u_int i, j;
344
345 sg = s->sg_dsgl;
346 cpl = dst;
349 cpl->pcirlxorder_to_noofsgentr = htobe32(
355 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
356 cpl->rss_hdr_int.hash_val = 0;
358 sgl = (struct phys_sge_pairs *)(cpl + 1);
359 j = 0;
360 for (i = 0; i < sg->sg_nseg; i++) {
361 seglen = sg->sg_segs[i].ss_len;
362 paddr = sg->sg_segs[i].ss_paddr;
363 do {
364 sgl->addr[j] = htobe64(paddr);
365 if (seglen > DSGL_SGE_MAXLEN) {
366 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
367 paddr += DSGL_SGE_MAXLEN;
368 seglen -= DSGL_SGE_MAXLEN;
369 } else {
370 sgl->len[j] = htobe16(seglen);
371 seglen = 0;
372 }
373 j++;
374 if (j == 8) {
375 sgl++;
376 j = 0;
377 }
378 } while (seglen != 0);
379 }
380 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
381}
382
383/* These functions deal with the ULPTX_SGL for input payload. */
384static inline int
386{
387 u_int n;
388
389 nsegs--; /* first segment is part of ulptx_sgl */
390 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
391 return (roundup2(n, 16));
392}
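/*
 * Worked example (assuming sizeof(struct ulptx_sgl) is 16 bytes): for
 * nsegs = 3, two segments remain after the first, giving
 * n = 16 + 8 * (3 * 2 / 2 + 0) = 40, which rounds up to 48 bytes.
 */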
393
394static void
395ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs)
396{
397 struct ulptx_sgl *usgl;
398 struct sglist *sg;
399 struct sglist_seg *ss;
400 int i;
401
402 sg = s->sg_ulptx;
403 MPASS(nsegs == sg->sg_nseg);
404 ss = &sg->sg_segs[0];
405 usgl = dst;
406 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
407 V_ULPTX_NSGE(nsegs));
408 usgl->len0 = htobe32(ss->ss_len);
409 usgl->addr0 = htobe64(ss->ss_paddr);
410 ss++;
411 for (i = 0; i < sg->sg_nseg - 1; i++) {
412 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
413 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
414 ss++;
415 }
416}
417
418static bool
419ccr_use_imm_data(u_int transhdr_len, u_int input_len)
420{
421
422 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
423 return (false);
424 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
426 return (false);
427 return (true);
428}
429
430static void
432 struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
433 u_int sgl_len, u_int hash_size, struct cryptop *crp)
434{
435 u_int cctx_size, idata_len;
436
437 cctx_size = sizeof(struct _key_ctx) + kctx_len;
438 crwr->wreq.op_to_cctx_size = htobe32(
443 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
444 crwr->wreq.len16_pkd = htobe32(
445 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
446 crwr->wreq.session_id = 0;
447 crwr->wreq.rx_chid_to_rx_q_id = htobe32(
453 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | /* unused in firmware */
455 crwr->wreq.key_addr = 0;
456 crwr->wreq.pld_size_hash_size = htobe32(
459 crwr->wreq.cookie = htobe64((uintptr_t)crp);
460
461 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
466 crwr->ulptx.len = htobe32(
467 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
468
469 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
470 V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
471 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
472 if (imm_len % 16 != 0)
473 idata_len -= 16 - imm_len % 16;
474 crwr->sc_imm.len = htobe32(idata_len);
475}
476
477static int
478ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
479{
480 struct chcr_wr *crwr;
481 struct wrqe *wr;
482 const struct auth_hash *axf;
483 char *dst;
484 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
485 u_int hmac_ctrl, imm_len, iopad_size;
486 int error, sgl_nsegs, sgl_len, use_opad;
487
488 /* Reject requests with too large of an input buffer. */
489 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
490 return (EFBIG);
491
492 axf = s->hmac.auth_hash;
493
494 if (s->mode == HMAC) {
495 use_opad = 1;
496 hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
497 } else {
498 use_opad = 0;
499 hmac_ctrl = SCMD_HMAC_CTRL_NOP;
500 }
501
502 /* PADs must be 128-bit aligned. */
503 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
504
505 /*
506 * The 'key' part of the context includes the aligned IPAD and
507 * OPAD.
508 */
509 kctx_len = iopad_size;
510 if (use_opad)
511 kctx_len += iopad_size;
512 hash_size_in_response = axf->hashsize;
513 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
514
515 if (crp->crp_payload_length == 0) {
516 imm_len = axf->blocksize;
517 sgl_nsegs = 0;
518 sgl_len = 0;
519 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
520 imm_len = crp->crp_payload_length;
521 sgl_nsegs = 0;
522 sgl_len = 0;
523 } else {
524 imm_len = 0;
525 sglist_reset(s->sg_ulptx);
526 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
527 crp->crp_payload_start, crp->crp_payload_length);
528 if (error)
529 return (error);
530 sgl_nsegs = s->sg_ulptx->sg_nseg;
531 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
532 }
533
534 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
535 if (wr_len > SGE_MAX_WR_LEN)
536 return (EFBIG);
537 wr = alloc_wrqe(wr_len, s->port->txq);
538 if (wr == NULL) {
539 counter_u64_add(sc->stats_wr_nomem, 1);
540 return (ENOMEM);
541 }
542 crwr = wrtod(wr);
543 memset(crwr, 0, wr_len);
544
545 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
546 hash_size_in_response, crp);
547
548 crwr->sec_cpl.op_ivinsrtofst = htobe32(
554
555 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
556 axf->blocksize : crp->crp_payload_length);
557
558 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
560
561 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
562 crwr->sec_cpl.seqno_numivs = htobe32(
567 V_SCMD_HMAC_CTRL(hmac_ctrl));
568 crwr->sec_cpl.ivgen_hdrlen = htobe32(
570 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
571 V_SCMD_MAC_ONLY(1));
572
573 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);
574
575 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
576 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
577 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
582
583 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
584 if (crp->crp_payload_length == 0) {
585 dst[0] = 0x80;
586 if (s->mode == HMAC)
587 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
588 htobe64(axf->blocksize << 3);
589 } else if (imm_len != 0)
590 crypto_copydata(crp, crp->crp_payload_start,
591 crp->crp_payload_length, dst);
592 else
593 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
594
595 /* XXX: TODO backpressure */
596 t4_wrq_tx(sc->adapter, wr);
597
598 return (0);
599}
600
601static int
602ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
603 const struct cpl_fw6_pld *cpl, int error)
604{
605 uint8_t hash[HASH_MAX_LEN];
606
607 if (error)
608 return (error);
609
610 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
611 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
612 hash);
613 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
614 return (EBADMSG);
615 } else
616 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
617 (cpl + 1));
618 return (0);
619}
620
621static int
622ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
623{
624 char iv[CHCR_MAX_CRYPTO_IV_LEN];
625 struct chcr_wr *crwr;
626 struct wrqe *wr;
627 char *dst;
628 u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
629 u_int imm_len, iv_len;
630 int dsgl_nsegs, dsgl_len;
631 int sgl_nsegs, sgl_len;
632 int error;
633
634 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
635 return (EINVAL);
637 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
638 return (EINVAL);
639
640 /* Reject requests with too large of an input buffer. */
641 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
642 return (EFBIG);
643
644 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
645 op_type = CHCR_ENCRYPT_OP;
646 else
647 op_type = CHCR_DECRYPT_OP;
648
649 sglist_reset(s->sg_dsgl);
650 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
651 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
652 crp->crp_payload_output_start, crp->crp_payload_length);
653 else
654 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
655 crp->crp_payload_start, crp->crp_payload_length);
656 if (error)
657 return (error);
658 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
659 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
660 return (EFBIG);
661 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
662
663 /* The 'key' must be 128-bit aligned. */
664 kctx_len = roundup2(s->cipher.key_len, 16);
665 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
666
667 /* For AES-XTS we send a 16-byte IV in the work request. */
669 iv_len = AES_BLOCK_LEN;
670 else
671 iv_len = s->cipher.iv_len;
672
673 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
674 imm_len = crp->crp_payload_length;
675 sgl_nsegs = 0;
676 sgl_len = 0;
677 } else {
678 imm_len = 0;
679 sglist_reset(s->sg_ulptx);
680 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
681 crp->crp_payload_start, crp->crp_payload_length);
682 if (error)
683 return (error);
684 sgl_nsegs = s->sg_ulptx->sg_nseg;
685 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
686 }
687
688 wr_len = roundup2(transhdr_len, 16) + iv_len +
689 roundup2(imm_len, 16) + sgl_len;
691 return (EFBIG);
692 wr = alloc_wrqe(wr_len, s->port->txq);
693 if (wr == NULL) {
694 counter_u64_add(sc->stats_wr_nomem, 1);
695 return (ENOMEM);
696 }
697 crwr = wrtod(wr);
698 memset(crwr, 0, wr_len);
699
700 crypto_read_iv(crp, iv);
701
702 /* Zero the remainder of the IV for AES-XTS. */
703 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
704
705 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
706 crp);
707
708 crwr->sec_cpl.op_ivinsrtofst = htobe32(
714
715 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);
716
717 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
718 V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
720 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
722
723 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
724 crwr->sec_cpl.seqno_numivs = htobe32(
727 V_SCMD_ENC_DEC_CTRL(op_type) |
731 V_SCMD_IV_SIZE(iv_len / 2) |
732 V_SCMD_NUM_IVS(0));
733 crwr->sec_cpl.ivgen_hdrlen = htobe32(
736 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
737
739 switch (s->cipher.cipher_mode) {
741 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
742 memcpy(crwr->key_ctx.key, s->cipher.enckey,
743 s->cipher.key_len);
744 else
745 memcpy(crwr->key_ctx.key, s->cipher.deckey,
746 s->cipher.key_len);
747 break;
749 memcpy(crwr->key_ctx.key, s->cipher.enckey,
750 s->cipher.key_len);
751 break;
753 key_half = s->cipher.key_len / 2;
754 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
755 key_half);
756 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
757 memcpy(crwr->key_ctx.key + key_half,
758 s->cipher.enckey, key_half);
759 else
760 memcpy(crwr->key_ctx.key + key_half,
761 s->cipher.deckey, key_half);
762 break;
763 }
764
765 dst = (char *)(crwr + 1) + kctx_len;
766 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
767 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
768 memcpy(dst, iv, iv_len);
769 dst += iv_len;
770 if (imm_len != 0)
771 crypto_copydata(crp, crp->crp_payload_start,
772 crp->crp_payload_length, dst);
773 else
774 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
775
776 /* XXX: TODO backpressure */
777 t4_wrq_tx(sc->adapter, wr);
778
779 explicit_bzero(iv, sizeof(iv));
780 return (0);
781}
782
783static int
785 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
786{
787
788 /*
789 * The updated IV to permit chained requests is at
790 * cpl->data[2], but OCF doesn't permit chained requests.
791 */
792 return (error);
793}
794
795/*
796 * 'hashsize' is the length of a full digest. 'authsize' is the
797 * requested digest length for this operation which may be less
798 * than 'hashsize'.
799 */
800static int
801ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
802{
803
804 if (authsize == 10)
806 if (authsize == 12)
808 if (authsize == hashsize / 2)
809 return (SCMD_HMAC_CTRL_DIV2);
811}
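/*
 * For example, SHA-256 truncated to a 16-byte digest has
 * authsize == hashsize / 2 and thus selects SCMD_HMAC_CTRL_DIV2.
 */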
812
813static int
814ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
815{
816 char iv[CHCR_MAX_CRYPTO_IV_LEN];
817 struct chcr_wr *crwr;
818 struct wrqe *wr;
819 const struct auth_hash *axf;
820 char *dst;
821 u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
822 u_int hash_size_in_response, imm_len, iopad_size, iv_len;
823 u_int aad_start, aad_stop;
824 u_int auth_insert;
825 u_int cipher_start, cipher_stop;
826 u_int hmac_ctrl, input_len;
827 int dsgl_nsegs, dsgl_len;
828 int sgl_nsegs, sgl_len;
829 int error;
830
831 /*
832 * If there is a need in the future, requests with an empty
833 * payload could be supported as HMAC-only requests.
834 */
835 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
836 return (EINVAL);
838 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
839 return (EINVAL);
840
841 /* For AES-XTS we send a 16-byte IV in the work request. */
843 iv_len = AES_BLOCK_LEN;
844 else
845 iv_len = s->cipher.iv_len;
846
847 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
848 return (EINVAL);
849
850 axf = s->hmac.auth_hash;
851 hash_size_in_response = s->hmac.hash_len;
852 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
853 op_type = CHCR_ENCRYPT_OP;
854 else
855 op_type = CHCR_DECRYPT_OP;
856
857 /*
858 * The output buffer consists of the cipher text followed by
859 * the hash when encrypting. For decryption it only contains
860 * the plain text.
861 *
862 * Due to a firmware bug, the output buffer must include a
863 * dummy output buffer for the IV and AAD prior to the real
864 * output buffer.
865 */
866 if (op_type == CHCR_ENCRYPT_OP) {
867 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
868 hash_size_in_response > MAX_REQUEST_SIZE)
869 return (EFBIG);
870 } else {
871 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
873 return (EFBIG);
874 }
875 sglist_reset(s->sg_dsgl);
876 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
877 iv_len + crp->crp_aad_length);
878 if (error)
879 return (error);
880 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
881 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
882 crp->crp_payload_output_start, crp->crp_payload_length);
883 else
884 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
885 crp->crp_payload_start, crp->crp_payload_length);
886 if (error)
887 return (error);
888 if (op_type == CHCR_ENCRYPT_OP) {
889 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
890 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
891 crp->crp_digest_start, hash_size_in_response);
892 else
893 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
894 crp->crp_digest_start, hash_size_in_response);
895 if (error)
896 return (error);
897 }
898 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
899 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
900 return (EFBIG);
901 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
902
903 /* PADs must be 128-bit aligned. */
904 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
905
906 /*
907 * The 'key' part of the key context consists of the key followed
908 * by the IPAD and OPAD.
909 */
910 kctx_len = roundup2(s->cipher.key_len, 16) + iopad_size * 2;
911 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
912
913 /*
914 * The input buffer consists of the IV, any AAD, and then the
915 * cipher/plain text. For decryption requests the hash is
916 * appended after the cipher text.
917 *
918 * The IV is always stored at the start of the input buffer
919 * even though it may be duplicated in the payload. The
920 * crypto engine doesn't work properly if the IV offset points
921 * inside of the AAD region, so a second copy is always
922 * required.
923 */
924 input_len = crp->crp_aad_length + crp->crp_payload_length;
925
926 /*
927 * The firmware hangs if sent a request that is only slightly
928 * smaller than MAX_REQUEST_SIZE. In particular, the
929 * firmware appears to require 512 - 16 bytes of spare room
930 * along with the size of the hash even if the hash isn't
931 * included in the input buffer.
932 */
933 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
935 return (EFBIG);
936 if (op_type == CHCR_DECRYPT_OP)
937 input_len += hash_size_in_response;
938
939 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
940 imm_len = input_len;
941 sgl_nsegs = 0;
942 sgl_len = 0;
943 } else {
944 imm_len = 0;
945 sglist_reset(s->sg_ulptx);
946 if (crp->crp_aad_length != 0) {
947 if (crp->crp_aad != NULL)
948 error = sglist_append(s->sg_ulptx,
949 crp->crp_aad, crp->crp_aad_length);
950 else
951 error = sglist_append_sglist(s->sg_ulptx,
952 s->sg_input, crp->crp_aad_start,
953 crp->crp_aad_length);
954 if (error)
955 return (error);
956 }
957 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
958 crp->crp_payload_start, crp->crp_payload_length);
959 if (error)
960 return (error);
961 if (op_type == CHCR_DECRYPT_OP) {
962 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
963 crp->crp_digest_start, hash_size_in_response);
964 if (error)
965 return (error);
966 }
967 sgl_nsegs = s->sg_ulptx->sg_nseg;
968 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
969 }
970
971 /* Any AAD comes after the IV. */
972 if (crp->crp_aad_length != 0) {
973 aad_start = iv_len + 1;
974 aad_stop = aad_start + crp->crp_aad_length - 1;
975 } else {
976 aad_start = 0;
977 aad_stop = 0;
978 }
979 cipher_start = iv_len + crp->crp_aad_length + 1;
980 if (op_type == CHCR_DECRYPT_OP)
981 cipher_stop = hash_size_in_response;
982 else
983 cipher_stop = 0;
984 if (op_type == CHCR_DECRYPT_OP)
985 auth_insert = hash_size_in_response;
986 else
987 auth_insert = 0;
988
989 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
990 sgl_len;
991 if (wr_len > SGE_MAX_WR_LEN)
992 return (EFBIG);
993 wr = alloc_wrqe(wr_len, s->port->txq);
994 if (wr == NULL) {
995 counter_u64_add(sc->stats_wr_nomem, 1);
996 return (ENOMEM);
997 }
998 crwr = wrtod(wr);
999 memset(crwr, 0, wr_len);
1000
1001 crypto_read_iv(crp, iv);
1002
1003 /* Zero the remainder of the IV for AES-XTS. */
1004 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
1005
1006 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
1007 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
1008
1009 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1015
1016 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1017
1018 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1019 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1020 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1021 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1022 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
1023 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1024 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
1025 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1026 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1027 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1028
1029 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1030 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1031 crwr->sec_cpl.seqno_numivs = htobe32(
1034 V_SCMD_ENC_DEC_CTRL(op_type) |
1035 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1038 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1039 V_SCMD_IV_SIZE(iv_len / 2) |
1040 V_SCMD_NUM_IVS(0));
1041 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1044 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1045
1046 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1047 switch (s->cipher.cipher_mode) {
1049 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1050 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1051 s->cipher.key_len);
1052 else
1053 memcpy(crwr->key_ctx.key, s->cipher.deckey,
1054 s->cipher.key_len);
1055 break;
1057 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1058 s->cipher.key_len);
1059 break;
1061 key_half = s->cipher.key_len / 2;
1062 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
1063 key_half);
1064 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1065 memcpy(crwr->key_ctx.key + key_half,
1066 s->cipher.enckey, key_half);
1067 else
1068 memcpy(crwr->key_ctx.key + key_half,
1069 s->cipher.deckey, key_half);
1070 break;
1071 }
1072
1073 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1074 memcpy(dst, s->hmac.pads, iopad_size * 2);
1075
1076 dst = (char *)(crwr + 1) + kctx_len;
1077 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1078 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1079 memcpy(dst, iv, iv_len);
1080 dst += iv_len;
1081 if (imm_len != 0) {
1082 if (crp->crp_aad_length != 0) {
1083 if (crp->crp_aad != NULL)
1084 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1085 else
1086 crypto_copydata(crp, crp->crp_aad_start,
1087 crp->crp_aad_length, dst);
1088 dst += crp->crp_aad_length;
1089 }
1090 crypto_copydata(crp, crp->crp_payload_start,
1091 crp->crp_payload_length, dst);
1092 dst += crp->crp_payload_length;
1093 if (op_type == CHCR_DECRYPT_OP)
1094 crypto_copydata(crp, crp->crp_digest_start,
1095 hash_size_in_response, dst);
1096 } else
1097 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1098
1099 /* XXX: TODO backpressure */
1100 t4_wrq_tx(sc->adapter, wr);
1101
1102 explicit_bzero(iv, sizeof(iv));
1103 return (0);
1104}
1105
1106static int
1107ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
1108 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1109{
1110
1111 /*
1112 * The updated IV to permit chained requests is at
1113 * cpl->data[2], but OCF doesn't permit chained requests.
1114 */
1115 return (error);
1116}
1117
1118static int
1119ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
1120{
1121 char iv[CHCR_MAX_CRYPTO_IV_LEN];
1122 struct chcr_wr *crwr;
1123 struct wrqe *wr;
1124 char *dst;
1125 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1126 u_int hash_size_in_response, imm_len;
1127 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1128 u_int hmac_ctrl, input_len;
1129 int dsgl_nsegs, dsgl_len;
1130 int sgl_nsegs, sgl_len;
1131 int error;
1132
1133 if (s->cipher.key_len == 0)
1134 return (EINVAL);
1135
1136 /*
1137 * The crypto engine doesn't handle GCM requests with an empty
1138 * payload, so handle those in software instead.
1139 */
1140 if (crp->crp_payload_length == 0)
1141 return (EMSGSIZE);
1142
1143 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
1144 return (EMSGSIZE);
1145
1146 hash_size_in_response = s->gmac.hash_len;
1147 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1148 op_type = CHCR_ENCRYPT_OP;
1149 else
1150 op_type = CHCR_DECRYPT_OP;
1151
1152 iv_len = AES_BLOCK_LEN;
1153
1154 /*
1155 * GCM requests should always provide an explicit IV.
1156 */
1157 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1158 return (EINVAL);
1159
1160 /*
1161 * The output buffer consists of the cipher text followed by
1162 * the tag when encrypting. For decryption it only contains
1163 * the plain text.
1164 *
1165 * Due to a firmware bug, the output buffer must include a
1166 * dummy output buffer for the IV and AAD prior to the real
1167 * output buffer.
1168 */
1169 if (op_type == CHCR_ENCRYPT_OP) {
1170 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
1171 hash_size_in_response > MAX_REQUEST_SIZE)
1172 return (EFBIG);
1173 } else {
1174 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
1176 return (EFBIG);
1177 }
1178 sglist_reset(s->sg_dsgl);
1179 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1180 crp->crp_aad_length);
1181 if (error)
1182 return (error);
1183 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1184 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1185 crp->crp_payload_output_start, crp->crp_payload_length);
1186 else
1187 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1188 crp->crp_payload_start, crp->crp_payload_length);
1189 if (error)
1190 return (error);
1191 if (op_type == CHCR_ENCRYPT_OP) {
1192 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1193 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1194 crp->crp_digest_start, hash_size_in_response);
1195 else
1196 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1197 crp->crp_digest_start, hash_size_in_response);
1198 if (error)
1199 return (error);
1200 }
1201 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1202 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1203 return (EFBIG);
1204 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1205
1206 /*
1207 * The 'key' part of the key context consists of the key followed
1208 * by the Galois hash key.
1209 */
1210 kctx_len = roundup2(s->cipher.key_len, 16) + GMAC_BLOCK_LEN;
1211 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1212
1213 /*
1214 * The input buffer consists of the IV, any AAD, and then the
1215 * cipher/plain text. For decryption requests the hash is
1216 * appended after the cipher text.
1217 *
1218 * The IV is always stored at the start of the input buffer
1219 * even though it may be duplicated in the payload. The
1220 * crypto engine doesn't work properly if the IV offset points
1221 * inside of the AAD region, so a second copy is always
1222 * required.
1223 */
1224 input_len = crp->crp_aad_length + crp->crp_payload_length;
1225 if (op_type == CHCR_DECRYPT_OP)
1226 input_len += hash_size_in_response;
1227 if (input_len > MAX_REQUEST_SIZE)
1228 return (EFBIG);
1229 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1230 imm_len = input_len;
1231 sgl_nsegs = 0;
1232 sgl_len = 0;
1233 } else {
1234 imm_len = 0;
1235 sglist_reset(s->sg_ulptx);
1236 if (crp->crp_aad_length != 0) {
1237 if (crp->crp_aad != NULL)
1238 error = sglist_append(s->sg_ulptx,
1239 crp->crp_aad, crp->crp_aad_length);
1240 else
1241 error = sglist_append_sglist(s->sg_ulptx,
1242 s->sg_input, crp->crp_aad_start,
1243 crp->crp_aad_length);
1244 if (error)
1245 return (error);
1246 }
1247 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1248 crp->crp_payload_start, crp->crp_payload_length);
1249 if (error)
1250 return (error);
1251 if (op_type == CHCR_DECRYPT_OP) {
1252 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1253 crp->crp_digest_start, hash_size_in_response);
1254 if (error)
1255 return (error);
1256 }
1257 sgl_nsegs = s->sg_ulptx->sg_nseg;
1258 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1259 }
1260
1261 if (crp->crp_aad_length != 0) {
1262 aad_start = iv_len + 1;
1263 aad_stop = aad_start + crp->crp_aad_length - 1;
1264 } else {
1265 aad_start = 0;
1266 aad_stop = 0;
1267 }
1268 cipher_start = iv_len + crp->crp_aad_length + 1;
1269 if (op_type == CHCR_DECRYPT_OP)
1270 cipher_stop = hash_size_in_response;
1271 else
1272 cipher_stop = 0;
1273 if (op_type == CHCR_DECRYPT_OP)
1274 auth_insert = hash_size_in_response;
1275 else
1276 auth_insert = 0;
1277
1278 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1279 sgl_len;
1280 if (wr_len > SGE_MAX_WR_LEN)
1281 return (EFBIG);
1282 wr = alloc_wrqe(wr_len, s->port->txq);
1283 if (wr == NULL) {
1284 counter_u64_add(sc->stats_wr_nomem, 1);
1285 return (ENOMEM);
1286 }
1287 crwr = wrtod(wr);
1288 memset(crwr, 0, wr_len);
1289
1290 crypto_read_iv(crp, iv);
1291 *(uint32_t *)&iv[12] = htobe32(1);
1292
1293 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1294 crp);
1295
1296 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1302
1303 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1304
1305 /*
1306 * NB: cipherstop is explicitly set to 0. On encrypt it
1307 * should normally be set to 0 anyway. However, for decrypt
1308 * the cipher ends before the tag in the ETA case (and
1309 * authstop is set to stop before the tag), but for GCM the
1310 * cipher still runs to the end of the buffer. Not sure if
1311 * this is intentional or a firmware quirk, but it is required
1312 * for working tag validation with GCM decryption.
1313 */
1314 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1315 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1316 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1317 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1319 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1321 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1322 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1323 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1324
1325 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1326 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1327 crwr->sec_cpl.seqno_numivs = htobe32(
1330 V_SCMD_ENC_DEC_CTRL(op_type) |
1331 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1334 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1335 V_SCMD_IV_SIZE(iv_len / 2) |
1336 V_SCMD_NUM_IVS(0));
1337 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1340 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1341
1342 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1343 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1344 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1345 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1346
1347 dst = (char *)(crwr + 1) + kctx_len;
1348 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1349 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1350 memcpy(dst, iv, iv_len);
1351 dst += iv_len;
1352 if (imm_len != 0) {
1353 if (crp->crp_aad_length != 0) {
1354 if (crp->crp_aad != NULL)
1355 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1356 else
1357 crypto_copydata(crp, crp->crp_aad_start,
1358 crp->crp_aad_length, dst);
1359 dst += crp->crp_aad_length;
1360 }
1361 crypto_copydata(crp, crp->crp_payload_start,
1362 crp->crp_payload_length, dst);
1363 dst += crp->crp_payload_length;
1364 if (op_type == CHCR_DECRYPT_OP)
1365 crypto_copydata(crp, crp->crp_digest_start,
1366 hash_size_in_response, dst);
1367 } else
1368 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1369
1370 /* XXX: TODO backpressure */
1371 t4_wrq_tx(sc->adapter, wr);
1372
1373 explicit_bzero(iv, sizeof(iv));
1374 return (0);
1375}
1376
1377static int
1378ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1379 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1380{
1381
1382 /*
1383 * The updated IV to permit chained requests is at
1384 * cpl->data[2], but OCF doesn't permit chained requests.
1385 *
1386 * Note that the hardware should always verify the GMAC hash.
1387 */
1388 return (error);
1389}
1390
1391static int
1392ccr_ccm_hmac_ctrl(unsigned int authsize)
1393{
1394 switch (authsize) {
1395 case 4:
1396 return (SCMD_HMAC_CTRL_PL1);
1397 case 6:
1398 return (SCMD_HMAC_CTRL_PL2);
1399 case 8:
1400 return (SCMD_HMAC_CTRL_DIV2);
1401 case 10:
1403 case 12:
1405 case 14:
1406 return (SCMD_HMAC_CTRL_PL3);
1407 case 16:
1408 return (SCMD_HMAC_CTRL_NO_TRUNC);
1409 default:
1410 __assert_unreachable();
1411 }
1412}
1413
1414static void
1415generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
1416 const char *iv, char *b0)
1417{
1418 u_int i, payload_len, L;
1419
1420 /* NB: L is already set in the first byte of the IV. */
1421 memcpy(b0, iv, CCM_B0_SIZE);
1422 L = iv[0] + 1;
1423
1424 /* Set length of hash in bits 3 - 5. */
1425 b0[0] |= (((hash_size_in_response - 2) / 2) << 3);
1426
1427 /* Store the payload length as a big-endian value. */
1428 payload_len = crp->crp_payload_length;
1429 for (i = 0; i < L; i++) {
1430 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
1431 payload_len >>= 8;
1432 }
1433
1434 /*
1435 * If there is AAD in the request, set bit 6 in the flags
1436 * field and store the AAD length as a big-endian value at the
1437 * start of block 1. This only assumes a 16-bit AAD length
1438 * since T6 doesn't support large AAD sizes.
1439 */
1440 if (crp->crp_aad_length != 0) {
1441 b0[0] |= (1 << 6);
1442 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
1443 }
1444}
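/*
 * Worked example: ccr_ccm() below builds the IV for a 12-byte nonce
 * with iv[0] = (15 - 12) - 1 = 2, so L = 3 and the payload length is
 * stored in the last 3 bytes of block 0.  With a 16-byte tag, the
 * flags byte is also or'ed with ((16 - 2) / 2) << 3 = 0x38, plus
 * (1 << 6) if the request has AAD.
 */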
1445
1446static int
1447ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
1448{
1449 char iv[CHCR_MAX_CRYPTO_IV_LEN];
1450 const struct crypto_session_params *csp;
1451 struct ulptx_idata *idata;
1452 struct chcr_wr *crwr;
1453 struct wrqe *wr;
1454 char *dst;
1455 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1456 u_int aad_len, b0_len, hash_size_in_response, imm_len;
1457 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1458 u_int hmac_ctrl, input_len;
1459 int dsgl_nsegs, dsgl_len;
1460 int sgl_nsegs, sgl_len;
1461 int error;
1462
1463 csp = crypto_get_params(crp->crp_session);
1464
1465 if (s->cipher.key_len == 0)
1466 return (EINVAL);
1467
1468 /*
1469 * The crypto engine doesn't handle CCM requests with an empty
1470 * payload, so handle those in software instead.
1471 */
1472 if (crp->crp_payload_length == 0)
1473 return (EMSGSIZE);
1474
1475 /* The length has to fit within the length field in block 0. */
1476 if (crp->crp_payload_length > ccm_max_payload_length(csp))
1477 return (EMSGSIZE);
1478
1479 /*
1480 * CCM always includes block 0 in the AAD before AAD from the
1481 * request.
1482 */
1483 b0_len = CCM_B0_SIZE;
1484 if (crp->crp_aad_length != 0)
1485 b0_len += CCM_AAD_FIELD_SIZE;
1486 aad_len = b0_len + crp->crp_aad_length;
1487
1488 /*
1489 * CCM requests should always provide an explicit IV (really
1490 * the nonce).
1491 */
1492 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1493 return (EINVAL);
1494
1495 /*
1496 * The IV in the work request is 16 bytes and not just the
1497 * nonce.
1498 */
1499 iv_len = AES_BLOCK_LEN;
1500
1501 if (iv_len + aad_len > MAX_AAD_LEN)
1502 return (EMSGSIZE);
1503
1504 hash_size_in_response = s->ccm_mac.hash_len;
1505 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1506 op_type = CHCR_ENCRYPT_OP;
1507 else
1508 op_type = CHCR_DECRYPT_OP;
1509
1510 /*
1511 * The output buffer consists of the cipher text followed by
1512 * the tag when encrypting. For decryption it only contains
1513 * the plain text.
1514 *
1515 * Due to a firmware bug, the output buffer must include a
1516 * dummy output buffer for the IV and AAD prior to the real
1517 * output buffer.
1518 */
1519 if (op_type == CHCR_ENCRYPT_OP) {
1520 if (iv_len + aad_len + crp->crp_payload_length +
1521 hash_size_in_response > MAX_REQUEST_SIZE)
1522 return (EFBIG);
1523 } else {
1524 if (iv_len + aad_len + crp->crp_payload_length >
1526 return (EFBIG);
1527 }
1528 sglist_reset(s->sg_dsgl);
1529 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1530 aad_len);
1531 if (error)
1532 return (error);
1533 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1534 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1535 crp->crp_payload_output_start, crp->crp_payload_length);
1536 else
1537 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1538 crp->crp_payload_start, crp->crp_payload_length);
1539 if (error)
1540 return (error);
1541 if (op_type == CHCR_ENCRYPT_OP) {
1542 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1543 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1544 crp->crp_digest_start, hash_size_in_response);
1545 else
1546 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1547 crp->crp_digest_start, hash_size_in_response);
1548 if (error)
1549 return (error);
1550 }
1551 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1552 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1553 return (EFBIG);
1554 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1555
1556 /*
1557 * The 'key' part of the key context consists of two copies of
1558 * the AES key.
1559 */
1560 kctx_len = roundup2(s->cipher.key_len, 16) * 2;
1561 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1562
1563 /*
1564 * The input buffer consists of the IV, AAD (including block
1565 * 0), and then the cipher/plain text. For decryption
1566 * requests the hash is appended after the cipher text.
1567 *
1568 * The IV is always stored at the start of the input buffer
1569 * even though it may be duplicated in the payload. The
1570 * crypto engine doesn't work properly if the IV offset points
1571 * inside of the AAD region, so a second copy is always
1572 * required.
1573 */
1574 input_len = aad_len + crp->crp_payload_length;
1575 if (op_type == CHCR_DECRYPT_OP)
1576 input_len += hash_size_in_response;
1577 if (input_len > MAX_REQUEST_SIZE)
1578 return (EFBIG);
1579 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1580 imm_len = input_len;
1581 sgl_nsegs = 0;
1582 sgl_len = 0;
1583 } else {
1584 /* Block 0 is passed as immediate data. */
1585 imm_len = b0_len;
1586
1587 sglist_reset(s->sg_ulptx);
1588 if (crp->crp_aad_length != 0) {
1589 if (crp->crp_aad != NULL)
1590 error = sglist_append(s->sg_ulptx,
1591 crp->crp_aad, crp->crp_aad_length);
1592 else
1593 error = sglist_append_sglist(s->sg_ulptx,
1594 s->sg_input, crp->crp_aad_start,
1595 crp->crp_aad_length);
1596 if (error)
1597 return (error);
1598 }
1599 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1600 crp->crp_payload_start, crp->crp_payload_length);
1601 if (error)
1602 return (error);
1603 if (op_type == CHCR_DECRYPT_OP) {
1604 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1605 crp->crp_digest_start, hash_size_in_response);
1606 if (error)
1607 return (error);
1608 }
1609 sgl_nsegs = s->sg_ulptx->sg_nseg;
1610 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1611 }
1612
1613 aad_start = iv_len + 1;
1614 aad_stop = aad_start + aad_len - 1;
1615 cipher_start = aad_stop + 1;
1616 if (op_type == CHCR_DECRYPT_OP)
1617 cipher_stop = hash_size_in_response;
1618 else
1619 cipher_stop = 0;
1620 if (op_type == CHCR_DECRYPT_OP)
1621 auth_insert = hash_size_in_response;
1622 else
1623 auth_insert = 0;
1624
1625 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1626 sgl_len;
1627 if (wr_len > SGE_MAX_WR_LEN)
1628 return (EFBIG);
1629 wr = alloc_wrqe(wr_len, s->port->txq);
1630 if (wr == NULL) {
1631 counter_u64_add(sc->stats_wr_nomem, 1);
1632 return (ENOMEM);
1633 }
1634 crwr = wrtod(wr);
1635 memset(crwr, 0, wr_len);
1636
1637 /*
1638 * Read the nonce from the request. Use the nonce to generate
1639 * the full IV with the counter set to 0.
1640 */
1641 memset(iv, 0, iv_len);
1642 iv[0] = (15 - csp->csp_ivlen) - 1;
1643 crypto_read_iv(crp, iv + 1);
1644
1645 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1646 crp);
1647
1648 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1654
1655 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1656
1657 /*
1658 * NB: cipherstop is explicitly set to 0. See comments above
1659 * in ccr_gcm().
1660 */
1661 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1662 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1663 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1664 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1666 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1668 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1669 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1670 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1671
1672 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1673 hmac_ctrl = ccr_ccm_hmac_ctrl(hash_size_in_response);
1674 crwr->sec_cpl.seqno_numivs = htobe32(
1677 V_SCMD_ENC_DEC_CTRL(op_type) |
1678 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
1681 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1682 V_SCMD_IV_SIZE(iv_len / 2) |
1683 V_SCMD_NUM_IVS(0));
1684 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1687 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1688
1689 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1690 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1691 memcpy(crwr->key_ctx.key + roundup(s->cipher.key_len, 16),
1692 s->cipher.enckey, s->cipher.key_len);
1693
1694 dst = (char *)(crwr + 1) + kctx_len;
1695 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1696 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1697 memcpy(dst, iv, iv_len);
1698 dst += iv_len;
1699 generate_ccm_b0(crp, hash_size_in_response, iv, dst);
1700 if (sgl_nsegs == 0) {
1701 dst += b0_len;
1702 if (crp->crp_aad_length != 0) {
1703 if (crp->crp_aad != NULL)
1704 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1705 else
1706 crypto_copydata(crp, crp->crp_aad_start,
1707 crp->crp_aad_length, dst);
1708 dst += crp->crp_aad_length;
1709 }
1710 crypto_copydata(crp, crp->crp_payload_start,
1711 crp->crp_payload_length, dst);
1712 dst += crp->crp_payload_length;
1713 if (op_type == CHCR_DECRYPT_OP)
1714 crypto_copydata(crp, crp->crp_digest_start,
1715 hash_size_in_response, dst);
1716 } else {
1717 dst += CCM_B0_SIZE;
1718 if (b0_len > CCM_B0_SIZE) {
1719 /*
1720 * If there is AAD, insert padding including a
1721 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
1722 * is 16-byte aligned.
1723 */
1724 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
1725 ("b0_len mismatch"));
1726 memset(dst + CCM_AAD_FIELD_SIZE, 0,
1727 8 - CCM_AAD_FIELD_SIZE);
1728 idata = (void *)(dst + 8);
1729 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1730 idata->len = htobe32(0);
1731 dst = (void *)(idata + 1);
1732 }
1733 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1734 }
1735
1736 /* XXX: TODO backpressure */
1737 t4_wrq_tx(sc->adapter, wr);
1738
1739 explicit_bzero(iv, sizeof(iv));
1740 return (0);
1741}
1742
1743static int
1744ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
1745 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1746{
1747
1748 /*
1749 * The updated IV to permit chained requests is at
1750 * cpl->data[2], but OCF doesn't permit chained requests.
1751 *
1752 * Note that the hardware should always verify the CBC MAC
1753 * hash.
1754 */
1755 return (error);
1756}
1757
1758/*
1759 * Use the software session for requests not supported by the crypto
1760 * engine (e.g. CCM and GCM requests with an empty payload).
1761 */
1762static int
1763ccr_soft_done(struct cryptop *crp)
1764{
1765 struct cryptop *orig;
1766
1767 orig = crp->crp_opaque;
1768 orig->crp_etype = crp->crp_etype;
1769 crypto_freereq(crp);
1770 crypto_done(orig);
1771 return (0);
1772}
1773
1774static void
1775ccr_soft(struct ccr_session *s, struct cryptop *crp)
1776{
1777 struct cryptop *new;
1778 int error;
1779
1780 new = crypto_clonereq(crp, s->sw_session, M_NOWAIT);
1781 if (new == NULL) {
1782 crp->crp_etype = ENOMEM;
1783 crypto_done(crp);
1784 return;
1785 }
1786
1787 /*
1788 * XXX: This only really needs CRYPTO_ASYNC_ORDERED if the
1789 * original request was dispatched that way. There is no way
1790 * to know that though since crypto_dispatch_async() discards
1791 * the flag for async backends (such as ccr(4)).
1792 */
1793 new->crp_opaque = crp;
1794 new->crp_callback = ccr_soft_done;
1795 error = crypto_dispatch_async(new, CRYPTO_ASYNC_ORDERED);
1796 if (error != 0) {
1797 crp->crp_etype = error;
1798 crypto_done(crp);
1799 }
1800}
1801
1802static void
1803ccr_identify(driver_t *driver, device_t parent)
1804{
1805 struct adapter *sc;
1806
1807 sc = device_get_softc(parent);
1809 device_find_child(parent, "ccr", -1) == NULL)
1810 device_add_child(parent, "ccr", -1);
1811}
1812
1813static int
1815{
1816
1817 device_set_desc(dev, "Chelsio Crypto Accelerator");
1818 return (BUS_PROBE_DEFAULT);
1819}
1820
1821static void
1823{
1824 struct sysctl_ctx_list *ctx = &sc->ctx;
1825 struct sysctl_oid *oid, *port_oid;
1826 struct sysctl_oid_list *children;
1827 char buf[16];
1828 int i;
1829
1830 /*
1831 * dev.ccr.X.
1832 */
1833 oid = device_get_sysctl_tree(sc->dev);
1834 children = SYSCTL_CHILDREN(oid);
1835
1836 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW,
1837 &sc->port_mask, 0, "Mask of enabled ports");
1838
1839 /*
1840 * dev.ccr.X.stats.
1841 */
1842 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1843 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1844 children = SYSCTL_CHILDREN(oid);
1845
1846 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
1847 &sc->stats_hash, "Hash requests submitted");
1848 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
1849 &sc->stats_hmac, "HMAC requests submitted");
1850 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt",
1851 CTLFLAG_RD, &sc->stats_cipher_encrypt,
1852 "Cipher encryption requests submitted");
1853 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt",
1854 CTLFLAG_RD, &sc->stats_cipher_decrypt,
1855 "Cipher decryption requests submitted");
1856 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt",
1857 CTLFLAG_RD, &sc->stats_eta_encrypt,
1858 "Combined AES+HMAC encryption requests submitted");
1859 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt",
1860 CTLFLAG_RD, &sc->stats_eta_decrypt,
1861 "Combined AES+HMAC decryption requests submitted");
1862 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt",
1863 CTLFLAG_RD, &sc->stats_gcm_encrypt,
1864 "AES-GCM encryption requests submitted");
1865 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt",
1866 CTLFLAG_RD, &sc->stats_gcm_decrypt,
1867 "AES-GCM decryption requests submitted");
1868 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt",
1869 CTLFLAG_RD, &sc->stats_ccm_encrypt,
1870 "AES-CCM encryption requests submitted");
1871 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt",
1872 CTLFLAG_RD, &sc->stats_ccm_decrypt,
1873 "AES-CCM decryption requests submitted");
1874 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
1875 &sc->stats_wr_nomem, "Work request memory allocation failures");
1876 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
1877 &sc->stats_inflight, "Requests currently pending");
1878 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
1879 &sc->stats_mac_error, "MAC errors");
1880 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
1881 &sc->stats_pad_error, "Padding errors");
1882 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error",
1883 CTLFLAG_RD, &sc->stats_sglist_error,
1884 "Requests for which DMA mapping failed");
1885 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
1886 CTLFLAG_RD, &sc->stats_process_error,
1887 "Requests failed during queueing");
1888 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
1889 CTLFLAG_RD, &sc->stats_sw_fallback,
1890 "Requests processed by falling back to software");
1891
1892 /*
1893 * dev.ccr.X.stats.port
1894 */
1895 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port",
1896 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");
1897
1898 for (i = 0; i < nitems(sc->ports); i++) {
1899 if (sc->ports[i].rxq == NULL)
1900 continue;
1901
1902 /*
1903 * dev.ccr.X.stats.port.Y
1904 */
1905 snprintf(buf, sizeof(buf), "%d", i);
1906 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO,
1907 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf);
1908 children = SYSCTL_CHILDREN(oid);
1909
1910 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions",
1911 CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
1912 "Count of active sessions");
1913 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "queued",
1914 CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued");
1915 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "completed",
1916 CTLFLAG_RD, &sc->ports[i].stats_completed,
1917 "Requests completed");
1918 }
1919}
1920
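/*
 * Set up the control and receive queues, channel IDs, and statistics
 * counters for one adapter port, and mark the port usable in port_mask
 * unless the firmware bug noted below rules it out.
 */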
1921static void
1922ccr_init_port(struct ccr_softc *sc, int port)
1923{
1924 struct port_info *pi;
1925
1926 pi = sc->adapter->port[port];
1927 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
1928 sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq];
1929 sc->ports[port].rx_channel_id = pi->rx_c_chan;
1930 sc->ports[port].tx_channel_id = pi->tx_chan;
1931 sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK);
1932 sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK);
1933 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1,
1934 "Too many ports to fit in port_mask");
1935
1936 /*
1937 * Completions for crypto requests on port 1 can sometimes
1938 * return a stale cookie value due to a firmware bug. Disable
1939 * requests on port 1 by default on affected firmware.
1940 */
1941 if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
1942 port == 0)
1943 sc->port_mask |= 1u << port;
1944}
1945
1946static int
1947ccr_attach(device_t dev)
1948{
1949 struct ccr_softc *sc;
1950 int32_t cid;
1951 int i;
1952
1953 sc = device_get_softc(dev);
1954 sc->dev = dev;
1955 sysctl_ctx_init(&sc->ctx);
1956 sc->adapter = device_get_softc(device_get_parent(dev));
1957 for_each_port(sc->adapter, i) {
1958 ccr_init_port(sc, i);
1959 }
1960 cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
1961 CRYPTOCAP_F_HARDWARE);
1962 if (cid < 0) {
1963 device_printf(dev, "could not get crypto driver id\n");
1964 return (ENXIO);
1965 }
1966 sc->cid = cid;
1967 sc->adapter->ccr_softc = sc;
1968
1969 /*
1970 * The FID must be the first RXQ for port 0 regardless of
1971 * which port is used to service the request.
1972 */
1973 sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id;
1974
1975 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1976 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
1977 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
1978 sc->stats_cipher_encrypt = counter_u64_alloc(M_WAITOK);
1979 sc->stats_cipher_decrypt = counter_u64_alloc(M_WAITOK);
1980 sc->stats_hash = counter_u64_alloc(M_WAITOK);
1981 sc->stats_hmac = counter_u64_alloc(M_WAITOK);
1982 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK);
1983 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK);
1984 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK);
1985 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK);
1986 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK);
1987 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK);
1988 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
1989 sc->stats_inflight = counter_u64_alloc(M_WAITOK);
1990 sc->stats_mac_error = counter_u64_alloc(M_WAITOK);
1991 sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
1992 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
1993 sc->stats_process_error = counter_u64_alloc(M_WAITOK);
1994 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
1995 ccr_sysctls(sc);
1996
1997 return (0);
1998}
1999
2000static void
2001ccr_free_port(struct ccr_softc *sc, int port)
2002{
2003
2004 counter_u64_free(sc->ports[port].stats_queued);
2005 counter_u64_free(sc->ports[port].stats_completed);
2006}
2007
2008static int
2009ccr_detach(device_t dev)
2010{
2011 struct ccr_softc *sc;
2012 int i;
2013
2014 sc = device_get_softc(dev);
2015
2016 mtx_lock(&sc->lock);
2017 sc->detaching = true;
2018 mtx_unlock(&sc->lock);
2019
2020 crypto_unregister_all(sc->cid);
2021
2022 sysctl_ctx_free(&sc->ctx);
2023 mtx_destroy(&sc->lock);
2024 counter_u64_free(sc->stats_cipher_encrypt);
2025 counter_u64_free(sc->stats_cipher_decrypt);
2026 counter_u64_free(sc->stats_hash);
2027 counter_u64_free(sc->stats_hmac);
2028 counter_u64_free(sc->stats_eta_encrypt);
2029 counter_u64_free(sc->stats_eta_decrypt);
2030 counter_u64_free(sc->stats_gcm_encrypt);
2031 counter_u64_free(sc->stats_gcm_decrypt);
2032 counter_u64_free(sc->stats_ccm_encrypt);
2033 counter_u64_free(sc->stats_ccm_decrypt);
2034 counter_u64_free(sc->stats_wr_nomem);
2035 counter_u64_free(sc->stats_inflight);
2036 counter_u64_free(sc->stats_mac_error);
2037 counter_u64_free(sc->stats_pad_error);
2038 counter_u64_free(sc->stats_sglist_error);
2039 counter_u64_free(sc->stats_process_error);
2040 counter_u64_free(sc->stats_sw_fallback);
2041 for_each_port(sc->adapter, i) {
2042 ccr_free_port(sc, i);
2043 }
2044 sglist_free(sc->sg_iv_aad);
2045 free(sc->iv_aad_buf, M_CCR);
2046 sc->adapter->ccr_softc = NULL;
2047 return (0);
2048}
2049
2050static void
2051ccr_init_hash_digest(struct ccr_session *s)
2052{
2053 union authctx auth_ctx;
2054 const struct auth_hash *axf;
2055
2056 axf = s->hmac.auth_hash;
2057 axf->Init(&auth_ctx);
2058 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
2059}
2060
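/*
 * AES-XTS uses a double-length key, so only 256- and 512-bit key blobs
 * are accepted for it; the other AES modes take 128-, 192-, or 256-bit
 * keys.
 */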
2061static bool
2062ccr_aes_check_keylen(int alg, int klen)
2063{
2064
2065 switch (klen * 8) {
2066 case 128:
2067 case 192:
2068 if (alg == CRYPTO_AES_XTS)
2069 return (false);
2070 break;
2071 case 256:
2072 break;
2073 case 512:
2074 if (alg != CRYPTO_AES_XTS)
2075 return (false);
2076 break;
2077 default:
2078 return (false);
2079 }
2080 return (true);
2081}
2082
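/*
 * Store the cipher key, derive the decryption key when the mode needs
 * one, and precompute the key context header describing the key
 * material copied into each work request.
 */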
2083static void
2084ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
2085{
2086 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
2087 unsigned int opad_present;
2088
2089 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
2090 kbits = (klen / 2) * 8;
2091 else
2092 kbits = klen * 8;
2093 switch (kbits) {
2094 case 128:
2095 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2096 break;
2097 case 192:
2098 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2099 break;
2100 case 256:
2101 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2102 break;
2103 default:
2104 panic("should not get here");
2105 }
2106
2107 s->cipher.key_len = klen;
2108 memcpy(s->cipher.enckey, key, s->cipher.key_len);
2109 switch (s->cipher.cipher_mode) {
2110 case SCMD_CIPH_MODE_AES_CBC:
2111 case SCMD_CIPH_MODE_AES_XTS:
2112 t4_aes_getdeckey(s->cipher.deckey, key, kbits);
2113 break;
2114 }
2115
2116 kctx_len = roundup2(s->cipher.key_len, 16);
2117 switch (s->mode) {
2118 case ETA:
2119 mk_size = s->hmac.mk_size;
2120 opad_present = 1;
2121 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
2122 kctx_len += iopad_size * 2;
2123 break;
2124 case GCM:
2125 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2126 opad_present = 0;
2127 kctx_len += GMAC_BLOCK_LEN;
2128 break;
2129 case CCM:
2130 switch (kbits) {
2131 case 128:
2132 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2133 break;
2134 case 192:
2135 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2136 break;
2137 case 256:
2138 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2139 break;
2140 default:
2141 panic("should not get here");
2142 }
2143 opad_present = 0;
2144 kctx_len *= 2;
2145 break;
2146 default:
2147 mk_size = CHCR_KEYCTX_NO_KEY;
2148 opad_present = 0;
2149 break;
2150 }
2151 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
2152 s->cipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
2153 V_KEY_CONTEXT_DUAL_CK(s->cipher.cipher_mode ==
2154 SCMD_CIPH_MODE_AES_XTS) |
2155 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
2156 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
2157 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
2158}
2159
2160static bool
2161ccr_auth_supported(const struct crypto_session_params *csp)
2162{
2163
2164 switch (csp->csp_auth_alg) {
2165 case CRYPTO_SHA1:
2166 case CRYPTO_SHA2_224:
2167 case CRYPTO_SHA2_256:
2168 case CRYPTO_SHA2_384:
2169 case CRYPTO_SHA2_512:
2170 case CRYPTO_SHA1_HMAC:
2171 case CRYPTO_SHA2_224_HMAC:
2172 case CRYPTO_SHA2_256_HMAC:
2173 case CRYPTO_SHA2_384_HMAC:
2174 case CRYPTO_SHA2_512_HMAC:
2175 break;
2176 default:
2177 return (false);
2178 }
2179 return (true);
2180}
2181
2182static bool
2183ccr_cipher_supported(const struct crypto_session_params *csp)
2184{
2185
2186 switch (csp->csp_cipher_alg) {
2187 case CRYPTO_AES_CBC:
2188 if (csp->csp_ivlen != AES_BLOCK_LEN)
2189 return (false);
2190 break;
2191 case CRYPTO_AES_ICM:
2192 if (csp->csp_ivlen != AES_BLOCK_LEN)
2193 return (false);
2194 break;
2195 case CRYPTO_AES_XTS:
2196 if (csp->csp_ivlen != AES_XTS_IV_LEN)
2197 return (false);
2198 break;
2199 default:
2200 return (false);
2201 }
2202 return (ccr_aes_check_keylen(csp->csp_cipher_alg,
2203 csp->csp_cipher_klen));
2204}
2205
2206static int
2207ccr_cipher_mode(const struct crypto_session_params *csp)
2208{
2209
2210 switch (csp->csp_cipher_alg) {
2211 case CRYPTO_AES_CBC:
2212 return (SCMD_CIPH_MODE_AES_CBC);
2213 case CRYPTO_AES_ICM:
2214 return (SCMD_CIPH_MODE_AES_CTR);
2215 case CRYPTO_AES_NIST_GCM_16:
2216 return (SCMD_CIPH_MODE_AES_GCM);
2217 case CRYPTO_AES_XTS:
2218 return (SCMD_CIPH_MODE_AES_XTS);
2219 case CRYPTO_AES_CCM_16:
2220 return (SCMD_CIPH_MODE_AES_CCM);
2221 default:
2222 return (SCMD_CIPH_MODE_NOP);
2223 }
2224}
2225
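/*
 * Advertise hardware support only for parameter combinations the
 * crypto engine can handle; anything else is rejected so another
 * driver can claim the session.
 */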
2226static int
2227ccr_probesession(device_t dev, const struct crypto_session_params *csp)
2228{
2229 unsigned int cipher_mode;
2230
2231 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
2232 0)
2233 return (EINVAL);
2234 switch (csp->csp_mode) {
2235 case CSP_MODE_DIGEST:
2236 if (!ccr_auth_supported(csp))
2237 return (EINVAL);
2238 break;
2239 case CSP_MODE_CIPHER:
2240 if (!ccr_cipher_supported(csp))
2241 return (EINVAL);
2242 break;
2243 case CSP_MODE_AEAD:
2244 switch (csp->csp_cipher_alg) {
2245 case CRYPTO_AES_NIST_GCM_16:
2246 case CRYPTO_AES_CCM_16:
2247 break;
2248 default:
2249 return (EINVAL);
2250 }
2251 break;
2252 case CSP_MODE_ETA:
2253 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
2254 return (EINVAL);
2255 break;
2256 default:
2257 return (EINVAL);
2258 }
2259
2260 if (csp->csp_cipher_klen != 0) {
2261 cipher_mode = ccr_cipher_mode(csp);
2262 if (cipher_mode == SCMD_CIPH_MODE_NOP)
2263 return (EINVAL);
2264 }
2265
2266 return (CRYPTODEV_PROBE_HARDWARE);
2267}
2268
2269/*
2270 * Select an available port with the lowest number of active sessions.
2271 */
2272static struct ccr_port *
2273ccr_choose_port(struct ccr_softc *sc)
2274{
2275 struct ccr_port *best, *p;
2276 int i;
2277
2278 mtx_assert(&sc->lock, MA_OWNED);
2279 best = NULL;
2280 for (i = 0; i < nitems(sc->ports); i++) {
2281 p = &sc->ports[i];
2282
2283 /* Ignore non-existent ports. */
2284 if (p->rxq == NULL)
2285 continue;
2286
2287 /*
2288 * XXX: Ignore ports whose queues aren't initialized.
2289 * This is racy as the rxq can be destroyed by the
2290 * associated VI detaching. Eventually ccr should use
2291 * dedicated queues.
2292 */
2293 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
2294 continue;
2295
2296 if ((sc->port_mask & (1u << i)) == 0)
2297 continue;
2298
2299 if (best == NULL ||
2300 p->active_sessions < best->active_sessions)
2301 best = p;
2302 }
2303 return (best);
2304}
2305
2306static void
2307ccr_delete_session(struct ccr_session *s)
2308{
2309 crypto_freesession(s->sw_session);
2310 sglist_free(s->sg_input);
2311 sglist_free(s->sg_output);
2312 sglist_free(s->sg_ulptx);
2313 sglist_free(s->sg_dsgl);
2314 mtx_destroy(&s->lock);
2315}
2316
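/*
 * Create the driver session: pick the least-loaded port, record the
 * mode-specific key material, and allocate the scatter/gather lists
 * used to describe each request.
 */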
2317static int
2318ccr_newsession(device_t dev, crypto_session_t cses,
2319 const struct crypto_session_params *csp)
2320{
2321 struct ccr_softc *sc;
2322 struct ccr_session *s;
2323 const struct auth_hash *auth_hash;
2324 unsigned int auth_mode, cipher_mode, mk_size;
2325 unsigned int partial_digest_len;
2326 int error;
2327
2328 switch (csp->csp_auth_alg) {
2329 case CRYPTO_SHA1:
2330 case CRYPTO_SHA1_HMAC:
2331 auth_hash = &auth_hash_hmac_sha1;
2332 auth_mode = SCMD_AUTH_MODE_SHA1;
2333 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
2334 partial_digest_len = SHA1_HASH_LEN;
2335 break;
2336 case CRYPTO_SHA2_224:
2337 case CRYPTO_SHA2_224_HMAC:
2338 auth_hash = &auth_hash_hmac_sha2_224;
2339 auth_mode = SCMD_AUTH_MODE_SHA224;
2340 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2341 partial_digest_len = SHA2_256_HASH_LEN;
2342 break;
2343 case CRYPTO_SHA2_256:
2344 case CRYPTO_SHA2_256_HMAC:
2345 auth_hash = &auth_hash_hmac_sha2_256;
2346 auth_mode = SCMD_AUTH_MODE_SHA256;
2347 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2348 partial_digest_len = SHA2_256_HASH_LEN;
2349 break;
2350 case CRYPTO_SHA2_384:
2351 case CRYPTO_SHA2_384_HMAC:
2352 auth_hash = &auth_hash_hmac_sha2_384;
2353 auth_mode = SCMD_AUTH_MODE_SHA512_384;
2354 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2355 partial_digest_len = SHA2_512_HASH_LEN;
2356 break;
2357 case CRYPTO_SHA2_512:
2358 case CRYPTO_SHA2_512_HMAC:
2359 auth_hash = &auth_hash_hmac_sha2_512;
2360 auth_mode = SCMD_AUTH_MODE_SHA512_512;
2361 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2362 partial_digest_len = SHA2_512_HASH_LEN;
2363 break;
2364 default:
2365 auth_hash = NULL;
2366 auth_mode = SCMD_AUTH_MODE_NOP;
2367 mk_size = 0;
2368 partial_digest_len = 0;
2369 break;
2370 }
2371
2372 cipher_mode = ccr_cipher_mode(csp);
2373
2374#ifdef INVARIANTS
2375 switch (csp->csp_mode) {
2376 case CSP_MODE_CIPHER:
2377 if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2378 cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2379 cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2380 panic("invalid cipher algo");
2381 break;
2382 case CSP_MODE_DIGEST:
2383 if (auth_mode == SCMD_AUTH_MODE_NOP)
2384 panic("invalid auth algo");
2385 break;
2386 case CSP_MODE_AEAD:
2387 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
2388 cipher_mode != SCMD_CIPH_MODE_AES_CCM)
2389 panic("invalid aead cipher algo");
2390 if (auth_mode != SCMD_AUTH_MODE_NOP)
2391 panic("invalid aead auth aglo");
2392 break;
2393 case CSP_MODE_ETA:
2394 if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2395 cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2396 cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2397 panic("invalid cipher algo");
2398 if (auth_mode == SCMD_AUTH_MODE_NOP)
2399 panic("invalid auth algo");
2400 break;
2401 default:
2402 panic("invalid csp mode");
2403 }
2404#endif
2405
2406 s = crypto_get_driver_session(cses);
2407 mtx_init(&s->lock, "ccr session", NULL, MTX_DEF);
2408 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2409 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2410 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2411 s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT);
2412 if (s->sg_input == NULL || s->sg_output == NULL ||
2413 s->sg_ulptx == NULL || s->sg_dsgl == NULL) {
2414 ccr_delete_session(s);
2415 return (ENOMEM);
2416 }
2417
2418 if (csp->csp_mode == CSP_MODE_AEAD) {
2419 error = crypto_newsession(&s->sw_session, csp,
2420 CRYPTOCAP_F_SOFTWARE);
2421 if (error) {
2422 ccr_delete_session(s);
2423 return (error);
2424 }
2425 }
2426
2427 sc = device_get_softc(dev);
2428
2429 mtx_lock(&sc->lock);
2430 if (sc->detaching) {
2431 mtx_unlock(&sc->lock);
2432 ccr_delete_session(s);
2433 return (ENXIO);
2434 }
2435
2436 s->port = ccr_choose_port(sc);
2437 if (s->port == NULL) {
2438 mtx_unlock(&sc->lock);
2439 ccr_delete_session(s);
2440 return (ENXIO);
2441 }
2442
2443 switch (csp->csp_mode) {
2444 case CSP_MODE_AEAD:
2445 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2446 s->mode = CCM;
2447 else
2448 s->mode = GCM;
2449 break;
2450 case CSP_MODE_ETA:
2451 s->mode = ETA;
2452 break;
2453 case CSP_MODE_DIGEST:
2454 if (csp->csp_auth_klen != 0)
2455 s->mode = HMAC;
2456 else
2457 s->mode = HASH;
2458 break;
2459 case CSP_MODE_CIPHER:
2460 s->mode = CIPHER;
2461 break;
2462 }
2463
2464 if (s->mode == GCM) {
2465 if (csp->csp_auth_mlen == 0)
2466 s->gmac.hash_len = AES_GMAC_HASH_LEN;
2467 else
2468 s->gmac.hash_len = csp->csp_auth_mlen;
2469 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
2470 s->gmac.ghash_h);
2471 } else if (s->mode == CCM) {
2472 if (csp->csp_auth_mlen == 0)
2473 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
2474 else
2475 s->ccm_mac.hash_len = csp->csp_auth_mlen;
2476 } else if (auth_mode != SCMD_AUTH_MODE_NOP) {
2477 s->hmac.auth_hash = auth_hash;
2478 s->hmac.auth_mode = auth_mode;
2479 s->hmac.mk_size = mk_size;
2480 s->hmac.partial_digest_len = partial_digest_len;
2481 if (csp->csp_auth_mlen == 0)
2482 s->hmac.hash_len = auth_hash->hashsize;
2483 else
2484 s->hmac.hash_len = csp->csp_auth_mlen;
2485 if (csp->csp_auth_key != NULL)
2486 t4_init_hmac_digest(auth_hash, partial_digest_len,
2487 csp->csp_auth_key, csp->csp_auth_klen,
2488 s->hmac.pads);
2489 else
2490 ccr_init_hash_digest(s);
2491 }
2492 if (cipher_mode != SCMD_CIPH_MODE_NOP) {
2493 s->cipher.cipher_mode = cipher_mode;
2494 s->cipher.iv_len = csp->csp_ivlen;
2495 if (csp->csp_cipher_key != NULL)
2496 ccr_aes_setkey(s, csp->csp_cipher_key,
2497 csp->csp_cipher_klen);
2498 }
2499
2500 s->port->active_sessions++;
2501 mtx_unlock(&sc->lock);
2502 return (0);
2503}
2504
2505static void
2506ccr_freesession(device_t dev, crypto_session_t cses)
2507{
2508 struct ccr_softc *sc;
2509 struct ccr_session *s;
2510
2511 sc = device_get_softc(dev);
2512 s = crypto_get_driver_session(cses);
2513#ifdef INVARIANTS
2514 if (s->pending != 0)
2515 device_printf(dev,
2516 "session %p freed with %d pending requests\n", s,
2517 s->pending);
2518#endif
2519 mtx_lock(&sc->lock);
2520 s->port->active_sessions--;
2521 mtx_unlock(&sc->lock);
2522 ccr_delete_session(s);
2523}
2524
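/*
 * Dispatch a request to the handler for the session's mode, refreshing
 * any per-request keys first; AEAD requests too large for the engine
 * fall back to the software session.
 */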
2525static int
2526ccr_process(device_t dev, struct cryptop *crp, int hint)
2527{
2528 const struct crypto_session_params *csp;
2529 struct ccr_softc *sc;
2530 struct ccr_session *s;
2531 int error;
2532
2533 csp = crypto_get_params(crp->crp_session);
2534 s = crypto_get_driver_session(crp->crp_session);
2535 sc = device_get_softc(dev);
2536
2537 mtx_lock(&s->lock);
2538 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
2539 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
2540 error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);
2541 if (error) {
2542 counter_u64_add(sc->stats_sglist_error, 1);
2543 goto out;
2544 }
2545
2546 switch (s->mode) {
2547 case HASH:
2548 error = ccr_hash(sc, s, crp);
2549 if (error == 0)
2550 counter_u64_add(sc->stats_hash, 1);
2551 break;
2552 case HMAC:
2553 if (crp->crp_auth_key != NULL)
2554 t4_init_hmac_digest(s->hmac.auth_hash,
2555 s->hmac.partial_digest_len, crp->crp_auth_key,
2556 csp->csp_auth_klen, s->hmac.pads);
2557 error = ccr_hash(sc, s, crp);
2558 if (error == 0)
2559 counter_u64_add(sc->stats_hmac, 1);
2560 break;
2561 case CIPHER:
2562 if (crp->crp_cipher_key != NULL)
2563 ccr_aes_setkey(s, crp->crp_cipher_key,
2564 csp->csp_cipher_klen);
2565 error = ccr_cipher(sc, s, crp);
2566 if (error == 0) {
2567 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2568 counter_u64_add(sc->stats_cipher_encrypt, 1);
2569 else
2570 counter_u64_add(sc->stats_cipher_decrypt, 1);
2571 }
2572 break;
2573 case ETA:
2574 if (crp->crp_auth_key != NULL)
2575 t4_init_hmac_digest(s->hmac.auth_hash,
2576 s->hmac.partial_digest_len, crp->crp_auth_key,
2577 csp->csp_auth_klen, s->hmac.pads);
2578 if (crp->crp_cipher_key != NULL)
2579 ccr_aes_setkey(s, crp->crp_cipher_key,
2580 csp->csp_cipher_klen);
2581 error = ccr_eta(sc, s, crp);
2582 if (error == 0) {
2583 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2584 counter_u64_add(sc->stats_eta_encrypt, 1);
2585 else
2586 counter_u64_add(sc->stats_eta_decrypt, 1);
2587 }
2588 break;
2589 case GCM:
2590 if (crp->crp_cipher_key != NULL) {
2591 t4_init_gmac_hash(crp->crp_cipher_key,
2592 csp->csp_cipher_klen, s->gmac.ghash_h);
2593 ccr_aes_setkey(s, crp->crp_cipher_key,
2594 csp->csp_cipher_klen);
2595 }
2596 error = ccr_gcm(sc, s, crp);
2597 if (error == EMSGSIZE || error == EFBIG) {
2598 counter_u64_add(sc->stats_sw_fallback, 1);
2599 mtx_unlock(&s->lock);
2600 ccr_soft(s, crp);
2601 return (0);
2602 }
2603 if (error == 0) {
2604 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2605 counter_u64_add(sc->stats_gcm_encrypt, 1);
2606 else
2607 counter_u64_add(sc->stats_gcm_decrypt, 1);
2608 }
2609 break;
2610 case CCM:
2611 if (crp->crp_cipher_key != NULL) {
2612 ccr_aes_setkey(s, crp->crp_cipher_key,
2613 csp->csp_cipher_klen);
2614 }
2615 error = ccr_ccm(sc, s, crp);
2616 if (error == EMSGSIZE || error == EFBIG) {
2617 counter_u64_add(sc->stats_sw_fallback, 1);
2618 mtx_unlock(&s->lock);
2619 ccr_soft(s, crp);
2620 return (0);
2621 }
2622 if (error == 0) {
2623 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2624 counter_u64_add(sc->stats_ccm_encrypt, 1);
2625 else
2626 counter_u64_add(sc->stats_ccm_decrypt, 1);
2627 }
2628 break;
2629 }
2630
2631 if (error == 0) {
2632#ifdef INVARIANTS
2633 s->pending++;
2634#endif
2635 counter_u64_add(sc->stats_inflight, 1);
2636 counter_u64_add(s->port->stats_queued, 1);
2637 } else
2638 counter_u64_add(sc->stats_process_error, 1);
2639
2640out:
2641 mtx_unlock(&s->lock);
2642
2643 if (error) {
2644 crp->crp_etype = error;
2645 crypto_done(crp);
2646 }
2647
2648 return (0);
2649}
2650
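/*
 * Completion handler: each CPL_FW6_PLD message returns the cryptop
 * pointer as its cookie along with MAC and padding error status.
 */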
2651static int
2652do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2653 struct mbuf *m)
2654{
2655 struct ccr_softc *sc = iq->adapter->ccr_softc;
2656 struct ccr_session *s;
2657 const struct cpl_fw6_pld *cpl;
2658 struct cryptop *crp;
2659 uint32_t status;
2660 int error;
2661
2662 if (m != NULL)
2663 cpl = mtod(m, const void *);
2664 else
2665 cpl = (const void *)(rss + 1);
2666
2667 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2668 s = crypto_get_driver_session(crp->crp_session);
2669 status = be64toh(cpl->data[0]);
2670 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2671 error = EBADMSG;
2672 else
2673 error = 0;
2674
2675#ifdef INVARIANTS
2676 mtx_lock(&s->lock);
2677 s->pending--;
2678 mtx_unlock(&s->lock);
2679#endif
2680 counter_u64_add(sc->stats_inflight, -1);
2681 counter_u64_add(s->port->stats_completed, 1);
2682
2683 switch (s->mode) {
2684 case HASH:
2685 case HMAC:
2686 error = ccr_hash_done(sc, s, crp, cpl, error);
2687 break;
2688 case CIPHER:
2689 error = ccr_cipher_done(sc, s, crp, cpl, error);
2690 break;
2691 case ETA:
2692 error = ccr_eta_done(sc, s, crp, cpl, error);
2693 break;
2694 case GCM:
2695 error = ccr_gcm_done(sc, s, crp, cpl, error);
2696 break;
2697 case CCM:
2698 error = ccr_ccm_done(sc, s, crp, cpl, error);
2699 break;
2700 }
2701
2702 if (error == EBADMSG) {
2703 if (CHK_MAC_ERR_BIT(status))
2704 counter_u64_add(sc->stats_mac_error, 1);
2705 if (CHK_PAD_ERR_BIT(status))
2706 counter_u64_add(sc->stats_pad_error, 1);
2707 }
2708 crp->crp_etype = error;
2709 crypto_done(crp);
2710 m_freem(m);
2711 return (0);
2712}
2713
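/*
 * Register the CPL_FW6_PLD completion handler on module load and clear
 * it again on unload.
 */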
2714static int
2715ccr_modevent(module_t mod, int cmd, void *arg)
2716{
2717
2718 switch (cmd) {
2719 case MOD_LOAD:
2720 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2721 return (0);
2722 case MOD_UNLOAD:
2723 t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2724 return (0);
2725 default:
2726 return (EOPNOTSUPP);
2727 }
2728}
2729
2730static device_method_t ccr_methods[] = {
2731 DEVMETHOD(device_identify, ccr_identify),
2732 DEVMETHOD(device_probe, ccr_probe),
2733 DEVMETHOD(device_attach, ccr_attach),
2734 DEVMETHOD(device_detach, ccr_detach),
2735
2736 DEVMETHOD(cryptodev_probesession, ccr_probesession),
2737 DEVMETHOD(cryptodev_newsession, ccr_newsession),
2738 DEVMETHOD(cryptodev_freesession, ccr_freesession),
2739 DEVMETHOD(cryptodev_process, ccr_process),
2740
2741 DEVMETHOD_END
2742};
2743
2744static driver_t ccr_driver = {
2745 "ccr",
2746 ccr_methods,
2747 sizeof(struct ccr_softc)
2748};
2749
2750static devclass_t ccr_devclass;
2751
2752DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
2753MODULE_VERSION(ccr, 1);
2754MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2755MODULE_DEPEND(ccr, t6nex, 1, 1, 1);