| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-04 17:11:08 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-04 17:11:08 -0700 |
| commit | 9eb31227cbccd3a37da0f42604f1ab5fc556bc53 (patch) | |
| tree | 9aa467e620e002bf01cecdd98e3908e0cc3e7221 /drivers/crypto/chelsio | |
| parent | 527cd20771888443b5d8707debe98f62c7a1f596 (diff) | |
| parent | f444ec106407d600f17fa1a4bd14f84577401dec (diff) | |
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- add AEAD support to crypto engine
- allow batch registration in simd
Algorithms:
- add CFB mode
- add speck block cipher
- add sm4 block cipher
- new test case for crct10dif
- improve scheduling latency on ARM
- scatter/gather support to gcm in aesni
- convert x86 crypto algorithms to skcipher
Drivers:
- hmac(sha224/sha256) support in inside-secure
- aes gcm/ccm support in stm32
- stm32mp1 support in stm32
- ccree driver from staging tree
- gcm support over QI in caam
- add ks-sa hwrng driver"
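The new algorithms in the list above (CFB mode, SM4, Speck) are registered with the generic crypto API, so any in-kernel user can reach them by name. Below is a minimal sketch, assuming a synchronous caller and a DMA-able linear buffer, of driving such an algorithm through the skcipher interface; `demo_cfb_encrypt` and its parameters are illustrative and not part of this merge.

```c
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt a linear buffer in place with cfb(aes). */
static int demo_cfb_encrypt(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf must not live on the stack: drivers may DMA from it. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait for completion even if the algorithm is asynchronous. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
```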
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (212 commits)
crypto: ccree - remove unused enums
crypto: ahash - Fix early termination in hash walk
crypto: brcm - explicitly cast cipher to hash type
crypto: talitos - don't leak pointers to authenc keys
crypto: qat - don't leak pointers to authenc keys
crypto: picoxcell - don't leak pointers to authenc keys
crypto: ixp4xx - don't leak pointers to authenc keys
crypto: chelsio - don't leak pointers to authenc keys
crypto: caam/qi - don't leak pointers to authenc keys
crypto: caam - don't leak pointers to authenc keys
crypto: lrw - Free rctx->ext with kzfree
crypto: talitos - fix IPsec cipher in length
crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
crypto: doc - clarify hash callbacks state machine
crypto: api - Keep failed instances alive
crypto: api - Make crypto_alg_lookup static
crypto: api - Remove unused crypto_type lookup function
crypto: chelsio - Remove declaration of static function from header
crypto: inside-secure - hmac(sha224) support
crypto: inside-secure - hmac(sha256) support
..
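One recurring fix in the commit list above is the group of "don't leak pointers to authenc keys" patches: `crypto_authenc_extractkeys()` parses the combined key into a stack-allocated `struct crypto_authenc_keys` whose members point into the caller's key blob, and that structure must be wiped on every exit path of the driver's setkey handler. A hedged sketch of the pattern follows; `my_aead_setkey` and the hardware-programming step are placeholders, and the real chelsio change appears in the diff further down.

```c
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/string.h>

/* Illustrative setkey callback showing the key-wiping pattern. */
static int my_aead_setkey(struct crypto_aead *aead, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		goto badkey;

	/* ... program keys.enckey / keys.authkey into the hardware ... */

	memzero_explicit(&keys, sizeof(keys));	/* drop pointers to key material */
	return 0;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
```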
Diffstat (limited to 'drivers/crypto/chelsio')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/crypto/chelsio/chcr_algo.c | 577 |
| -rw-r--r-- | drivers/crypto/chelsio/chcr_algo.h | 11 |
| -rw-r--r-- | drivers/crypto/chelsio/chcr_core.h | 6 |
| -rw-r--r-- | drivers/crypto/chelsio/chcr_crypto.h | 31 |
| -rw-r--r-- | drivers/crypto/chelsio/chcr_ipsec.c | 5 |
5 files changed, 413 insertions, 217 deletions
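A mechanical cleanup repeated throughout the `chcr_algo.c` hunks below replaces open-coded 16-byte alignment, `DIV_ROUND_UP(x, 16) * 16` and the local `ROUND_16()` macro, with the generic `roundup()`/`rounddown()` helpers from `<linux/kernel.h>`; the two forms are equivalent for a power-of-two boundary. A small illustrative helper, with a hypothetical name, shows the replacement:

```c
#include <linux/kernel.h>

/*
 * For a 16-byte boundary:
 *   DIV_ROUND_UP(len, 16) * 16  ==  roundup(len, 16)
 *   len & 0xFFFFFFF0            ==  rounddown(len, 16)
 */
static inline unsigned int chcr_pad_keylen(unsigned int enckey_len)
{
	/* key-context entries are padded to a multiple of 16 bytes */
	return roundup(enckey_len, 16);
}
```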
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 34a02d690548..59fe6631e73e 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb) return (skb->len <= SGE_MAX_WR_LEN); } +static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) +{ + memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); +} + static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, unsigned int entlen, unsigned int skip) @@ -160,41 +165,6 @@ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, return nents; } -static inline void chcr_handle_ahash_resp(struct ahash_request *req, - unsigned char *input, - int err) -{ - struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); - int digestsize, updated_digestsize; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); - - if (input == NULL) - goto out; - digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); - if (reqctx->is_sg_map) - chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); - if (reqctx->dma_addr) - dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr, - reqctx->dma_len, DMA_TO_DEVICE); - reqctx->dma_addr = 0; - updated_digestsize = digestsize; - if (digestsize == SHA224_DIGEST_SIZE) - updated_digestsize = SHA256_DIGEST_SIZE; - else if (digestsize == SHA384_DIGEST_SIZE) - updated_digestsize = SHA512_DIGEST_SIZE; - if (reqctx->result == 1) { - reqctx->result = 0; - memcpy(req->result, input + sizeof(struct cpl_fw6_pld), - digestsize); - } else { - memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), - updated_digestsize); - } -out: - req->base.complete(&req->base, err); -} - static inline int get_aead_subtype(struct crypto_aead *aead) { struct aead_alg *alg = crypto_aead_alg(aead); @@ -247,34 +217,6 @@ static inline void chcr_handle_aead_resp(struct aead_request *req, req->base.complete(&req->base, err); } -/* - * chcr_handle_resp - Unmap the DMA buffers associated with the request - * @req: crypto request - */ -int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, - int err) -{ - struct crypto_tfm *tfm = req->tfm; - struct chcr_context *ctx = crypto_tfm_ctx(tfm); - struct adapter *adap = padap(ctx->dev); - - switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_AEAD: - chcr_handle_aead_resp(aead_request_cast(req), input, err); - break; - - case CRYPTO_ALG_TYPE_ABLKCIPHER: - err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), - input, err); - break; - - case CRYPTO_ALG_TYPE_AHASH: - chcr_handle_ahash_resp(ahash_request_cast(req), input, err); - } - atomic_inc(&adap->chcr_stats.complete); - return err; -} - static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength) @@ -563,7 +505,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk, if (!len) return; - while (sg && skip) { if (sg_dma_len(sg) <= skip) { skip -= sg_dma_len(sg); @@ -653,6 +594,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx, } return 0; } + +static int chcr_hash_ent_in_wr(struct scatterlist *src, + unsigned int minsg, + unsigned int space, + unsigned int srcskip) +{ + int srclen = 0; + int srcsg = minsg; + int soffset = 0, sless; + + if (sg_dma_len(src) == srcskip) { + src = sg_next(src); + srcskip = 0; + } + while (src && space > (sgl_ent_len[srcsg + 1])) { + sless = min_t(unsigned int, 
sg_dma_len(src) - soffset - srcskip, + CHCR_SRC_SG_SIZE); + srclen += sless; + soffset += sless; + srcsg++; + if (sg_dma_len(src) == (soffset + srcskip)) { + src = sg_next(src); + soffset = 0; + srcskip = 0; + } + } + return srclen; +} + static int chcr_sg_ent_in_wr(struct scatterlist *src, struct scatterlist *dst, unsigned int minsg, @@ -662,7 +632,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, { int srclen = 0, dstlen = 0; int srcsg = minsg, dstsg = minsg; - int offset = 0, less; + int offset = 0, soffset = 0, less, sless = 0; if (sg_dma_len(src) == srcskip) { src = sg_next(src); @@ -676,7 +646,9 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, while (src && dst && space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { - srclen += (sg_dma_len(src) - srcskip); + sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset, + CHCR_SRC_SG_SIZE); + srclen += sless; srcsg++; offset = 0; while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && @@ -687,15 +659,20 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, dstskip, CHCR_DST_SG_SIZE); dstlen += less; offset += less; - if (offset == sg_dma_len(dst)) { + if ((offset + dstskip) == sg_dma_len(dst)) { dst = sg_next(dst); offset = 0; } dstsg++; dstskip = 0; } - src = sg_next(src); - srcskip = 0; + soffset += sless; + if ((soffset + srcskip) == sg_dma_len(src)) { + src = sg_next(src); + srcskip = 0; + soffset = 0; + } + } return min(srclen, dstlen); } @@ -784,14 +761,14 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); dst_size = get_space_for_phys_dsgl(nents + 1); - kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); + kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, CHCR_SRC_SG_SIZE, reqctx->src_ofst); - temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16) - * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8); + temp = reqctx->imm ? 
roundup(IV + wrparam->req->nbytes, 16) : + (sgl_len(nents + MIN_CIPHER_SG) * 8); transhdr_len += temp; - transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; + transhdr_len = roundup(transhdr_len, 16); skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { error = -ENOMEM; @@ -847,6 +824,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) transhdr_len, temp, ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); reqctx->skb = skb; + + if (reqctx->op && (ablkctx->ciph_mode == + CHCR_SCMD_CIPHER_MODE_AES_CBC)) + sg_pcopy_to_buffer(wrparam->req->src, + sg_nents(wrparam->req->src), wrparam->req->info, 16, + reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE); + return skb; err: return ERR_PTR(error); @@ -1070,9 +1054,8 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req, ret = chcr_update_tweak(req, iv, 0); else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { if (reqctx->op) - sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, - 16, - reqctx->processed - AES_BLOCK_SIZE); + /*Updated before sending last WR*/ + memcpy(iv, req->info, AES_BLOCK_SIZE); else memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); } @@ -1100,11 +1083,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req, else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) ret = chcr_update_tweak(req, iv, 1); else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { - if (reqctx->op) - sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, - 16, - reqctx->processed - AES_BLOCK_SIZE); - else + /*Already updated for Decrypt*/ + if (!reqctx->op) memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); } @@ -1143,12 +1123,12 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, } if (!reqctx->imm) { bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, - SPACE_LEFT(ablkctx->enckey_len), + CIP_SPACE_LEFT(ablkctx->enckey_len), reqctx->src_ofst, reqctx->dst_ofst); if ((bytes + reqctx->processed) >= req->nbytes) bytes = req->nbytes - reqctx->processed; else - bytes = ROUND_16(bytes); + bytes = rounddown(bytes, 16); } else { /*CTR mode counter overfloa*/ bytes = req->nbytes - reqctx->processed; @@ -1234,7 +1214,7 @@ static int process_cipher(struct ablkcipher_request *req, CHCR_DST_SG_SIZE, 0); dnents += 1; // IV phys_dsgl = get_space_for_phys_dsgl(dnents); - kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); + kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); reqctx->imm = (transhdr_len + IV + req->nbytes) <= SGE_MAX_WR_LEN; @@ -1247,12 +1227,12 @@ static int process_cipher(struct ablkcipher_request *req, if (!reqctx->imm) { bytes = chcr_sg_ent_in_wr(req->src, req->dst, MIN_CIPHER_SG, - SPACE_LEFT(ablkctx->enckey_len), + CIP_SPACE_LEFT(ablkctx->enckey_len), 0, 0); if ((bytes + reqctx->processed) >= req->nbytes) bytes = req->nbytes - reqctx->processed; else - bytes = ROUND_16(bytes); + bytes = rounddown(bytes, 16); } else { bytes = req->nbytes; } @@ -1282,7 +1262,7 @@ static int process_cipher(struct ablkcipher_request *req, req->src, req->dst, req->nbytes, - req->info, + reqctx->iv, op_type); goto error; } @@ -1503,35 +1483,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); struct chcr_wr *chcr_req; struct ulptx_sgl *ulptx; - unsigned int nents = 0, transhdr_len, iopad_alignment = 0; - unsigned int digestsize = crypto_ahash_digestsize(tfm); - unsigned int kctx_len = 0, temp = 0; - u8 hash_size_in_response = 0; + unsigned int nents = 0, transhdr_len; + unsigned int temp = 0; gfp_t flags = 
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(h_ctx(tfm)->dev); int error = 0; - iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); - kctx_len = param->alg_prm.result_size + iopad_alignment; - if (param->opad_needed) - kctx_len += param->alg_prm.result_size + iopad_alignment; - - if (req_ctx->result) - hash_size_in_response = digestsize; - else - hash_size_in_response = param->alg_prm.result_size; - transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); - req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <= - SGE_MAX_WR_LEN; - nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0); + transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); + req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + + param->sg_len) <= SGE_MAX_WR_LEN; + nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len, + CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst); nents += param->bfr_len ? 1 : 0; - transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len + - param->sg_len), 16) * 16) : - (sgl_len(nents) * 8); - transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; + transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len + + param->sg_len, 16) : (sgl_len(nents) * 8); + transhdr_len = roundup(transhdr_len, 16); - skb = alloc_skb(SGE_MAX_WR_LEN, flags); + skb = alloc_skb(transhdr_len, flags); if (!skb) return ERR_PTR(-ENOMEM); chcr_req = __skb_put_zero(skb, transhdr_len); @@ -1563,33 +1532,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, param->alg_prm.mk_size, 0, param->opad_needed, - ((kctx_len + + ((param->kctx_len + sizeof(chcr_req->key_ctx)) >> 4)); chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); - ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len + + ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len + DUMMY_BYTES); if (param->bfr_len != 0) { - req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, - req_ctx->reqbfr, param->bfr_len, - DMA_TO_DEVICE); + req_ctx->hctx_wr.dma_addr = + dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr, + param->bfr_len, DMA_TO_DEVICE); if (dma_mapping_error(&u_ctx->lldi.pdev->dev, - req_ctx->dma_addr)) { + req_ctx->hctx_wr. dma_addr)) { error = -ENOMEM; goto err; } - req_ctx->dma_len = param->bfr_len; + req_ctx->hctx_wr.dma_len = param->bfr_len; } else { - req_ctx->dma_addr = 0; + req_ctx->hctx_wr.dma_addr = 0; } chcr_add_hash_src_ent(req, ulptx, param); /* Request upto max wr size */ - temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len - + param->bfr_len) : 0); + temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ? 
+ (param->sg_len + param->bfr_len) : 0); atomic_inc(&adap->chcr_stats.digest_rqst); - create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm, - hash_size_in_response, transhdr_len, + create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm, + param->hash_size, transhdr_len, temp, 0); - req_ctx->skb = skb; + req_ctx->hctx_wr.skb = skb; return skb; err: kfree_skb(skb); @@ -1608,7 +1577,6 @@ static int chcr_ahash_update(struct ahash_request *req) int error; bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); - u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { @@ -1625,17 +1593,26 @@ static int chcr_ahash_update(struct ahash_request *req) req_ctx->reqlen += nbytes; return 0; } + chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); if (error) return -ENOMEM; + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + params.kctx_len = roundup(params.alg_prm.result_size, 16); + params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, + HASH_SPACE_LEFT(params.kctx_len), 0); + if (params.sg_len > req->nbytes) + params.sg_len = req->nbytes; + params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - + req_ctx->reqlen; params.opad_needed = 0; params.more = 1; params.last = 0; - params.sg_len = nbytes - req_ctx->reqlen; params.bfr_len = req_ctx->reqlen; params.scmd1 = 0; - get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); - req_ctx->result = 0; + req_ctx->hctx_wr.srcsg = req->src; + + params.hash_size = params.alg_prm.result_size; req_ctx->data_len += params.sg_len + params.bfr_len; skb = create_hash_wr(req, ¶ms); if (IS_ERR(skb)) { @@ -1643,6 +1620,7 @@ static int chcr_ahash_update(struct ahash_request *req) goto unmap; } + req_ctx->hctx_wr.processed += params.sg_len; if (remainder) { /* Swap buffers */ swap(req_ctx->reqbfr, req_ctx->skbfr); @@ -1680,16 +1658,27 @@ static int chcr_ahash_final(struct ahash_request *req) struct uld_ctx *u_ctx = NULL; u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + chcr_init_hctx_per_wr(req_ctx); u_ctx = ULD_CTX(h_ctx(rtfm)); if (is_hmac(crypto_ahash_tfm(rtfm))) params.opad_needed = 1; else params.opad_needed = 0; params.sg_len = 0; + req_ctx->hctx_wr.isfinal = 1; get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); - req_ctx->result = 1; + params.kctx_len = roundup(params.alg_prm.result_size, 16); + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.opad_needed = 1; + params.kctx_len *= 2; + } else { + params.opad_needed = 0; + } + + req_ctx->hctx_wr.result = 1; params.bfr_len = req_ctx->reqlen; req_ctx->data_len += params.bfr_len + params.sg_len; + req_ctx->hctx_wr.srcsg = req->src; if (req_ctx->reqlen == 0) { create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); params.last = 0; @@ -1702,10 +1691,11 @@ static int chcr_ahash_final(struct ahash_request *req) params.last = 1; params.more = 0; } + params.hash_size = crypto_ahash_digestsize(rtfm); skb = create_hash_wr(req, ¶ms); if (IS_ERR(skb)) return PTR_ERR(skb); - + req_ctx->reqlen = 0; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); @@ -1730,37 +1720,59 @@ static int chcr_ahash_finup(struct ahash_request *req) if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; } + chcr_init_hctx_per_wr(req_ctx); + error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); + if (error) + return -ENOMEM; - if (is_hmac(crypto_ahash_tfm(rtfm))) + get_alg_config(¶ms.alg_prm, 
crypto_ahash_digestsize(rtfm)); + params.kctx_len = roundup(params.alg_prm.result_size, 16); + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len *= 2; params.opad_needed = 1; - else + } else { params.opad_needed = 0; + } - params.sg_len = req->nbytes; + params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, + HASH_SPACE_LEFT(params.kctx_len), 0); + if (params.sg_len < req->nbytes) { + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len /= 2; + params.opad_needed = 0; + } + params.last = 0; + params.more = 1; + params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) + - req_ctx->reqlen; + params.hash_size = params.alg_prm.result_size; + params.scmd1 = 0; + } else { + params.last = 1; + params.more = 0; + params.sg_len = req->nbytes; + params.hash_size = crypto_ahash_digestsize(rtfm); + params.scmd1 = req_ctx->data_len + req_ctx->reqlen + + params.sg_len; + } params.bfr_len = req_ctx->reqlen; - get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); req_ctx->data_len += params.bfr_len + params.sg_len; - req_ctx->result = 1; + req_ctx->hctx_wr.result = 1; + req_ctx->hctx_wr.srcsg = req->src; if ((req_ctx->reqlen + req->nbytes) == 0) { create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); params.last = 0; params.more = 1; params.scmd1 = 0; params.bfr_len = bs; - } else { - params.scmd1 = req_ctx->data_len; - params.last = 1; - params.more = 0; } - error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); - if (error) - return -ENOMEM; - skb = create_hash_wr(req, ¶ms); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto unmap; } + req_ctx->reqlen = 0; + req_ctx->hctx_wr.processed += params.sg_len; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); @@ -1791,21 +1803,42 @@ static int chcr_ahash_digest(struct ahash_request *req) return -EBUSY; } - if (is_hmac(crypto_ahash_tfm(rtfm))) - params.opad_needed = 1; - else - params.opad_needed = 0; + chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); if (error) return -ENOMEM; - params.last = 0; - params.more = 0; - params.sg_len = req->nbytes; - params.bfr_len = 0; - params.scmd1 = 0; get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); - req_ctx->result = 1; + params.kctx_len = roundup(params.alg_prm.result_size, 16); + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len *= 2; + params.opad_needed = 1; + } else { + params.opad_needed = 0; + } + params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, + HASH_SPACE_LEFT(params.kctx_len), 0); + if (params.sg_len < req->nbytes) { + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len /= 2; + params.opad_needed = 0; + } + params.last = 0; + params.more = 1; + params.scmd1 = 0; + params.sg_len = rounddown(params.sg_len, bs); + params.hash_size = params.alg_prm.result_size; + } else { + params.sg_len = req->nbytes; + params.hash_size = crypto_ahash_digestsize(rtfm); + params.last = 1; + params.more = 0; + params.scmd1 = req->nbytes + req_ctx->data_len; + + } + params.bfr_len = 0; + req_ctx->hctx_wr.result = 1; + req_ctx->hctx_wr.srcsg = req->src; req_ctx->data_len += params.bfr_len + params.sg_len; if (req->nbytes == 0) { @@ -1819,6 +1852,7 @@ static int chcr_ahash_digest(struct ahash_request *req) error = PTR_ERR(skb); goto unmap; } + req_ctx->hctx_wr.processed += params.sg_len; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); @@ -1828,6 +1862,151 @@ unmap: return error; } +static int 
chcr_ahash_continue(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); + struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + struct hash_wr_param params; + u8 bs; + int error; + + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + u_ctx = ULD_CTX(h_ctx(rtfm)); + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + h_ctx(rtfm)->tx_qidx))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); + params.kctx_len = roundup(params.alg_prm.result_size, 16); + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len *= 2; + params.opad_needed = 1; + } else { + params.opad_needed = 0; + } + params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0, + HASH_SPACE_LEFT(params.kctx_len), + hctx_wr->src_ofst); + if ((params.sg_len + hctx_wr->processed) > req->nbytes) + params.sg_len = req->nbytes - hctx_wr->processed; + if (!hctx_wr->result || + ((params.sg_len + hctx_wr->processed) < req->nbytes)) { + if (is_hmac(crypto_ahash_tfm(rtfm))) { + params.kctx_len /= 2; + params.opad_needed = 0; + } + params.last = 0; + params.more = 1; + params.sg_len = rounddown(params.sg_len, bs); + params.hash_size = params.alg_prm.result_size; + params.scmd1 = 0; + } else { + params.last = 1; + params.more = 0; + params.hash_size = crypto_ahash_digestsize(rtfm); + params.scmd1 = reqctx->data_len + params.sg_len; + } + params.bfr_len = 0; + reqctx->data_len += params.sg_len; + skb = create_hash_wr(req, ¶ms); + if (IS_ERR(skb)) { + error = PTR_ERR(skb); + goto err; + } + hctx_wr->processed += params.sg_len; + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); + chcr_send_wr(skb); + return 0; +err: + return error; +} + +static inline void chcr_handle_ahash_resp(struct ahash_request *req, + unsigned char *input, + int err) +{ + struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); + struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; + int digestsize, updated_digestsize; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); + + if (input == NULL) + goto out; + digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); + updated_digestsize = digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + updated_digestsize = SHA256_DIGEST_SIZE; + else if (digestsize == SHA384_DIGEST_SIZE) + updated_digestsize = SHA512_DIGEST_SIZE; + + if (hctx_wr->dma_addr) { + dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr, + hctx_wr->dma_len, DMA_TO_DEVICE); + hctx_wr->dma_addr = 0; + } + if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) == + req->nbytes)) { + if (hctx_wr->result == 1) { + hctx_wr->result = 0; + memcpy(req->result, input + sizeof(struct cpl_fw6_pld), + digestsize); + } else { + memcpy(reqctx->partial_hash, + input + sizeof(struct cpl_fw6_pld), + updated_digestsize); + + } + goto unmap; + } + memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), + updated_digestsize); + + err = chcr_ahash_continue(req); + if (err) + goto unmap; + return; +unmap: + if (hctx_wr->is_sg_map) + chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); + + +out: + req->base.complete(&req->base, err); +} + +/* + * chcr_handle_resp - Unmap the DMA buffers associated with the request + * @req: crypto request + */ +int chcr_handle_resp(struct crypto_async_request *req, unsigned char 
*input, + int err) +{ + struct crypto_tfm *tfm = req->tfm; + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct adapter *adap = padap(ctx->dev); + + switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_AEAD: + chcr_handle_aead_resp(aead_request_cast(req), input, err); + break; + + case CRYPTO_ALG_TYPE_ABLKCIPHER: + err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), + input, err); + break; + + case CRYPTO_ALG_TYPE_AHASH: + chcr_handle_ahash_resp(ahash_request_cast(req), input, err); + } + atomic_inc(&adap->chcr_stats.complete); + return err; +} static int chcr_ahash_export(struct ahash_request *areq, void *out) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); @@ -1835,11 +2014,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out) state->reqlen = req_ctx->reqlen; state->data_len = req_ctx->data_len; - state->is_sg_map = 0; - state->result = 0; memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); memcpy(state->partial_hash, req_ctx->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); + chcr_init_hctx_per_wr(state); return 0; } @@ -1852,11 +2030,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in) req_ctx->data_len = state->data_len; req_ctx->reqbfr = req_ctx->bfr1; req_ctx->skbfr = req_ctx->bfr2; - req_ctx->is_sg_map = 0; - req_ctx->result = 0; memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); memcpy(req_ctx->partial_hash, state->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); + chcr_init_hctx_per_wr(req_ctx); return 0; } @@ -1953,10 +2130,8 @@ static int chcr_sha_init(struct ahash_request *areq) req_ctx->reqlen = 0; req_ctx->reqbfr = req_ctx->bfr1; req_ctx->skbfr = req_ctx->bfr2; - req_ctx->skb = NULL; - req_ctx->result = 0; - req_ctx->is_sg_map = 0; copy_hash_init_values(req_ctx->partial_hash, digestsize); + return 0; } @@ -2124,11 +2299,11 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < SGE_MAX_WR_LEN; - temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16) - * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents + temp = reqctx->imm ? 
roundup(assoclen + IV + req->cryptlen, 16) + : (sgl_len(reqctx->src_nents + reqctx->aad_nents + MIN_GCM_SG) * 8); transhdr_len += temp; - transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; + transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, transhdr_len, op_type)) { @@ -2187,9 +2362,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, aeadctx->enckey_len); - memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << - 4), actx->h_iopad, kctx_len - - (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); + memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), + actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); @@ -2398,22 +2572,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_walk ulp_walk; struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); - if (reqctx->imm) { + if (reqctx->hctx_wr.imm) { u8 *buf = (u8 *)ulptx; if (param->bfr_len) { memcpy(buf, reqctx->reqbfr, param->bfr_len); buf += param->bfr_len; } - sg_pcopy_to_buffer(req->src, sg_nents(req->src), - buf, param->sg_len, 0); + + sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg, + sg_nents(reqctx->hctx_wr.srcsg), buf, + param->sg_len, 0); } else { ulptx_walk_init(&ulp_walk, ulptx); if (param->bfr_len) ulptx_walk_add_page(&ulp_walk, param->bfr_len, - &reqctx->dma_addr); - ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, - 0); + &reqctx->hctx_wr.dma_addr); + ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, + param->sg_len, reqctx->hctx_wr.src_ofst); + reqctx->hctx_wr.srcsg = ulp_walk.last_sg; + reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len; ulptx_walk_end(&ulp_walk); } } @@ -2430,7 +2608,7 @@ int chcr_hash_dma_map(struct device *dev, DMA_TO_DEVICE); if (!error) return -ENOMEM; - req_ctx->is_sg_map = 1; + req_ctx->hctx_wr.is_sg_map = 1; return 0; } @@ -2444,7 +2622,7 @@ void chcr_hash_dma_unmap(struct device *dev, dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - req_ctx->is_sg_map = 0; + req_ctx->hctx_wr.is_sg_map = 0; } @@ -2636,10 +2814,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, 0, dst_size); } -int aead_ccm_validate_input(unsigned short op_type, - struct aead_request *req, - struct chcr_aead_ctx *aeadctx, - unsigned int sub_type) +static int aead_ccm_validate_input(unsigned short op_type, + struct aead_request *req, + struct chcr_aead_ctx *aeadctx, + unsigned int sub_type) { if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { if (crypto_ccm_check_iv(req->iv)) { @@ -2696,16 +2874,16 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_CCM_SG; // For IV and B0 dst_size = get_space_for_phys_dsgl(dnents); - kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; + kctx_len = roundup(aeadctx->enckey_len, 16) * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + reqctx->b0_len) <= SGE_MAX_WR_LEN; - temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen + - reqctx->b0_len), 16) * 16) : + temp = reqctx->imm ? 
roundup(assoclen + IV + req->cryptlen + + reqctx->b0_len, 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents + MIN_CCM_SG) * 8); transhdr_len += temp; - transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; + transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - reqctx->b0_len, transhdr_len, op_type)) { @@ -2727,8 +2905,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); - memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * - 16), aeadctx->key, aeadctx->enckey_len); + memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), + aeadctx->key, aeadctx->enckey_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); @@ -2798,16 +2976,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_GCM_SG; // For IV dst_size = get_space_for_phys_dsgl(dnents); - kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + - AEAD_H_SIZE; + kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= SGE_MAX_WR_LEN; - temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + - req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents + - reqctx->aad_nents + MIN_GCM_SG) * 8); + temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) : + (sgl_len(reqctx->src_nents + + reqctx->aad_nents + MIN_GCM_SG) * 8); transhdr_len += temp; - transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; + transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, transhdr_len, op_type)) { atomic_inc(&adap->chcr_stats.fallback); @@ -2846,8 +3023,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, 0, 0, dst_size); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); - memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * - 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); + memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), + GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); /* prepare a 16 byte iv */ /* S A L T | IV | 0x00000001 */ @@ -3067,11 +3244,10 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, unsigned char ck_size, mk_size; int key_ctx_size = 0; - key_ctx_size = sizeof(struct _key_ctx) + - ((DIV_ROUND_UP(keylen, 16)) << 4) * 2; + key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2; if (keylen == AES_KEYSIZE_128) { - mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; + mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; } else if (keylen == AES_KEYSIZE_192) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; @@ -3178,10 +3354,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, memcpy(aeadctx->key, key, keylen); aeadctx->enckey_len = keylen; - key_ctx_size = sizeof(struct _key_ctx) + - ((DIV_ROUND_UP(keylen, 16)) << 4) + + key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + AEAD_H_SIZE; - aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, + aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_MAC_KEY_SIZE_128, 0, 0, key_ctx_size >> 4); @@ -3281,6 +3456,7 @@ static int chcr_authenc_setkey(struct crypto_aead 
*authenc, const u8 *key, if (IS_ERR(base_hash)) { pr_err("chcr : Base driver cannot be loaded\n"); aeadctx->enckey_len = 0; + memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } { @@ -3325,17 +3501,19 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, chcr_change_order(actx->h_iopad, param.result_size); chcr_change_order(o_ptr, param.result_size); key_ctx_len = sizeof(struct _key_ctx) + - ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) + + roundup(keys.enckeylen, 16) + (param.result_size + align) * 2; aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1, key_ctx_len >> 4); actx->auth_mode = param.auth_mode; chcr_free_shash(base_hash); + memzero_explicit(&keys, sizeof(keys)); return 0; } out: aeadctx->enckey_len = 0; + memzero_explicit(&keys, sizeof(keys)); if (!IS_ERR(base_hash)) chcr_free_shash(base_hash); return -EINVAL; @@ -3393,15 +3571,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, aeadctx->enckey_len << 3); } - key_ctx_len = sizeof(struct _key_ctx) - + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); + key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16); aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 0, key_ctx_len >> 4); actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; + memzero_explicit(&keys, sizeof(keys)); return 0; out: aeadctx->enckey_len = 0; + memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index f263cd42a84f..dba3dff1e209 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -258,15 +258,16 @@ #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ ULP_TX_SC_MORE_V((immdatalen))) #define MAX_NK 8 -#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) #define MAX_DSGL_ENT 32 #define MIN_CIPHER_SG 1 /* IV */ #define MIN_AUTH_SG 1 /* IV */ #define MIN_GCM_SG 1 /* IV */ #define MIN_DIGEST_SG 1 /*Partial Buffer*/ #define MIN_CCM_SG 2 /*IV+B0*/ -#define SPACE_LEFT(len) \ - ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) +#define CIP_SPACE_LEFT(len) \ + ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len))) +#define HASH_SPACE_LEFT(len) \ + ((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len))) struct algo_param { unsigned int auth_mode; @@ -275,12 +276,14 @@ struct algo_param { }; struct hash_wr_param { + struct algo_param alg_prm; unsigned int opad_needed; unsigned int more; unsigned int last; - struct algo_param alg_prm; + unsigned int kctx_len; unsigned int sg_len; unsigned int bfr_len; + unsigned int hash_size; u64 scmd1; }; diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 77056a90c8e1..1a20424e18c6 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -54,10 +54,14 @@ #define MAC_ERROR_BIT 0 #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) #define MAX_SALT 4 -#define WR_MIN_LEN (sizeof(struct chcr_wr) + \ +#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \ sizeof(struct cpl_rx_phys_dsgl) + \ sizeof(struct ulptx_sgl)) +#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \ + DUMMY_BYTES + \ + sizeof(struct ulptx_sgl)) + #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) struct uld_ctx; diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 7daf0a17a7d2..c8e8972af283 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -258,21 +258,32 @@ struct 
chcr_context { struct __crypto_ctx crypto_ctx[0]; }; -struct chcr_ahash_req_ctx { +struct chcr_hctx_per_wr { + struct scatterlist *srcsg; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 dma_len; + unsigned int src_ofst; + unsigned int processed; u32 result; - u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128]; - u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; + u8 is_sg_map; + u8 imm; + /*Final callback called. Driver cannot rely on nbytes to decide + * final call + */ + u8 isfinal; +}; + +struct chcr_ahash_req_ctx { + struct chcr_hctx_per_wr hctx_wr; u8 *reqbfr; u8 *skbfr; - dma_addr_t dma_addr; - u32 dma_len; + /* SKB which is being sent to the hardware for processing */ + u64 data_len; /* Data len till time */ u8 reqlen; - u8 imm; - u8 is_sg_map; u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; - u64 data_len; /* Data len till time */ - /* SKB which is being sent to the hardware for processing */ - struct sk_buff *skb; + u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128]; + u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; }; struct chcr_blkcipher_req_ctx { diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index db1e241104ed..8e0aa3f175c9 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -360,8 +360,7 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb, cpl = (struct cpl_tx_pkt_core *)pos; - if (skb->ip_summed == CHECKSUM_PARTIAL) - cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | TXPKT_PF_V(adap->pf); if (skb_vlan_tag_present(skb)) { @@ -475,7 +474,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); /* Sub-command */ - wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); + wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen); wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + sizeof(wr->req.key_ctx) + kctx_len + |