author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-03 10:40:14 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-03 10:40:14 -0700
commit    | ab5c60b79ab6cc50b39bbb21b2f9fb55af900b84
tree      | 71fa895fbf01e3b88f26cf257d9105f9d286b631 /drivers/crypto
parent    | 5577416c39652d395a6045677f4f598564aba1cf
parent    | 3cbfe80737c18ac6e635421ab676716a393d3074
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add support for allocating transforms on a specific NUMA Node
- Introduce the flag CRYPTO_ALG_ALLOCATES_MEMORY for storage users
Algorithms:
- Drop PMULL based ghash on arm64
- Fixes for building with clang on x86
- Add sha256 helper that does the digest in one go
- Add SP800-56A rev 3 validation checks to dh
Drivers:
- Permit users to specify NUMA node in hisilicon/zip
- Add support for i.MX6 in imx-rngc
- Add sa2ul crypto driver
- Add BA431 hwrng driver
- Add Ingenic JZ4780 and X1000 hwrng driver
- Spread IRQ affinity in inside-secure and marvell/cesa"
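Sketch 1 — the NUMA item pairs with the hisilicon/zip change: callers can now hint which node a transform (and its working memory) should live on. A minimal sketch of one plausible caller; the helper name and the "zlib-deflate" algorithm choice are illustrative, not taken from this pull:

```c
#include <linux/device.h>	/* dev_to_node() */
#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <crypto/acompress.h>	/* crypto_alloc_acomp_node() */

/* Allocate an acomp transform with the device's NUMA node as a hint;
 * NUMA_NO_NODE preserves the old node-agnostic behaviour. */
static struct crypto_acomp *demo_alloc_acomp_near(struct device *dev)
{
	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;

	return crypto_alloc_acomp_node("zlib-deflate", 0, 0, node);
}
```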
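Sketch 2 — the intended storage-side use of the new flag (a hedged sketch in the dm-crypt spirit, not code from this pull) is to pass it in the lookup *mask*, so only implementations that never allocate memory while handling a request can be returned:

```c
#include <linux/crypto.h>	/* CRYPTO_ALG_ALLOCATES_MEMORY */
#include <crypto/skcipher.h>

/* With the flag set in the mask but clear in the type, the lookup
 * rejects any implementation whose cra_flags include
 * CRYPTO_ALG_ALLOCATES_MEMORY -- what a writeback path needs to
 * guarantee forward progress under memory pressure. */
static struct crypto_skcipher *demo_alloc_storage_cipher(const char *name)
{
	return crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}
```

This is also why so many driver hunks below OR CRYPTO_ALG_ALLOCATES_MEMORY into their cra_flags: drivers that kmalloc per request must now say so.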
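Sketch 3 — the one-go sha256 helper removes the shash init/update/final boilerplate for callers that already hold the whole buffer. Minimal sketch; the wrapper name is illustrative:

```c
#include <crypto/sha.h>	/* sha256(), SHA256_DIGEST_SIZE */

static void demo_digest(const u8 *data, unsigned int len,
			u8 out[SHA256_DIGEST_SIZE])
{
	sha256(data, len, out);	/* one call: no tfm, no descriptor */
}
```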
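For reference, the full finite-field public-key check that the SP800-56A rev 3 dh/ecc patches implement reduces to the following (stated from the standard, not quoted from the patches):

```latex
% Full public-key validation, finite-field DH (SP800-56A rev 3):
% domain parameters (p, q, g), candidate public key y -- accept iff
2 \le y \le p - 2 \qquad \text{and} \qquad y^{q} \bmod p = 1
```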
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (157 commits)
crypto: sa2ul - Fix inconsistent IS_ERR and PTR_ERR
hwrng: core - remove redundant initialization of variable ret
crypto: x86/curve25519 - Remove unused carry variables
crypto: ingenic - Add hardware RNG for Ingenic JZ4780 and X1000
dt-bindings: RNG: Add Ingenic RNG bindings.
crypto: caam/qi2 - add module alias
crypto: caam - add more RNG hw error codes
crypto: caam/jr - remove incorrect reference to caam_jr_register()
crypto: caam - silence .setkey in case of bad key length
crypto: caam/qi2 - create ahash shared descriptors only once
crypto: caam/qi2 - fix error reporting for caam_hash_alloc
crypto: caam - remove deadcode on 32-bit platforms
crypto: ccp - use generic power management
crypto: xts - Replace memcpy() invocation with simple assignment
crypto: marvell/cesa - irq balance
crypto: inside-secure - irq balance
crypto: ecc - SP800-56A rev 3 local public key validation
crypto: dh - SP800-56A rev 3 local public key validation
crypto: dh - check validity of Z before export
lib/mpi: Add mpi_sub_ui()
...
Diffstat (limited to 'drivers/crypto')
154 files changed, 4778 insertions, 3941 deletions
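A large share of the allwinner and amlogic hunks below repeat one conversion: the on-stack SYNC_SKCIPHER_REQUEST_ON_STACK fallback is replaced by a full (possibly async) fallback skcipher whose request is embedded at the tail of the driver's request context, with the reqsize enlarged to carry it. A minimal sketch of that shape — all names are illustrative, not from any single driver:

```c
#include <linux/err.h>
#include <crypto/internal/skcipher.h>

struct demo_tfm_ctx {
	struct crypto_skcipher *fallback_tfm;
};

struct demo_req_ctx {
	u32 op_dir;
	struct skcipher_request fallback_req;	/* keep at the end: the
		fallback's own request ctx is laid out right behind it */
};

/* 'name' would be the algorithm's cra_name in a real driver. */
static int demo_init(struct crypto_skcipher *tfm, const char *name)
{
	struct demo_tfm_ctx *op = crypto_skcipher_ctx(tfm);

	op->fallback_tfm = crypto_alloc_skcipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm))
		return PTR_ERR(op->fallback_tfm);

	/* Room for our context plus the fallback's request tail. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));
	return 0;
}

static int demo_do_fallback(struct skcipher_request *areq)
{
	struct demo_tfm_ctx *op =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(areq));
	struct demo_req_ctx *rctx = skcipher_request_ctx(areq);

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	/* Forward the caller's completion instead of NULL/NULL, so an
	 * asynchronous fallback completes the original request. */
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	return crypto_skcipher_encrypt(&rctx->fallback_req);
}
```

Embedding the request rather than stacking it drops a large on-stack object and, by forwarding the original completion, lets the fallback run fully asynchronously — which is also why the converted entries below grow a `struct skcipher_request fallback_req; // keep at the end` member and drop skcipher_request_zero().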
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 802b9ada4e9e..aa3a4ed07a66 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER config CRYPTO_DEV_QCE_SHA bool depends on CRYPTO_DEV_QCE + select CRYPTO_SHA1 + select CRYPTO_SHA256 choice prompt "Algorithms enabled for QCE acceleration" @@ -756,10 +758,9 @@ config CRYPTO_DEV_ZYNQMP_AES config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST - select CRYPTO_AES + select CRYPTO_LIB_AES select CRYPTO_AEAD select CRYPTO_SKCIPHER - select CRYPTO_CTR select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 @@ -865,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig" source "drivers/crypto/amlogic/Kconfig" +config CRYPTO_DEV_SA2UL + tristate "Support for TI security accelerator" + depends on ARCH_K3 || COMPILE_TEST + select ARM64_CRYPTO + select CRYPTO_AES + select CRYPTO_AES_ARM64 + select CRYPTO_ALGAPI + select HW_RANDOM + select SG_SPLIT + help + K3 devices include a security accelerator engine that may be + used for crypto offload. Select this if you want to use hardware + acceleration for cryptographic algorithms on these devices. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 944ed7226e37..53fc115cf459 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o +obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index 7f22d305178e..b72de8939497 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); int err; - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (ctx->mode & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&ctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&ctx->fallback_req); return err; } @@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm) alg.crypto.base); op->ss = algt->ss; - crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), - sizeof(struct sun4i_cipher_req_ctx)); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if 
(IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct sun4i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm)); + + err = pm_runtime_get_sync(op->ss->dev); if (err < 0) goto error_pm; return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put(op->ss->dev); } @@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the DES key, prepare the mode to be used */ @@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the 3DES key, prepare the mode to be used */ @@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, op->keylen = keylen; memcpy(op->key, key, keylen); - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h index 2b4c6333eb67..163962f9e284 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -170,11 +170,12 @@ struct sun4i_tfm_ctx { u32 keylen; u32 keymode; struct sun4i_ss_ctx *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; struct sun4i_cipher_req_ctx { u32 mode; + struct skcipher_request fallback_req; // keep at the end }; struct sun4i_req_ctx { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index a6abb701bfc6..1e4f9a58bb24 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -58,23 +58,20 @@ static int 
sun8i_ce_cipher_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & CE_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); op->ce = algt->ce; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ce->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -358,7 +357,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + pm_runtime_put_noidle(op->ce->dev); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -370,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync_suspend(op->ce->dev); } @@ -400,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -425,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & 
CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index b957061424a1..138759dc8190 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 0e9eac397e1b..963645fe4adb 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -181,12 +181,14 @@ struct sun8i_ce_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ce_dev *ce; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index c89cb2ee2496..7a131675a41c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; - 
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; @@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); - skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & SS_DECRYPTION) - err = crypto_skcipher_decrypt(subreq); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); op->ss = algt->ss; - sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + dev_info(op->ss->dev, "Fallback for %s is %s\n", crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; op->enginectx.op.prepare_request = NULL; @@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) return 0; error_pm: - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); return err; } @@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync(op->ss->dev); } @@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(op->fallback_tfm, 
CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 5d9d0fedcb06..9a23515783a6 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index 29c44f279112..0405767f1f7e 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -135,17 +135,18 @@ struct sun8i_ss_dev { /* * struct sun8i_cipher_req_ctx - context for a skcipher request - * @t_src: list of mapped SGs with their size - * @t_dst: list of mapped SGs with their size - * @p_key: DMA address of the key - * @p_iv: DMA address of the IV - * @method: current algorithm for this request - * @op_mode: op_mode for this request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request - * @ivlen: size of biv - * @keylen: keylen for this request - * @biv: buffer which contain the IV + * @t_src: list of mapped SGs with their size + * @t_dst: list of mapped SGs with their size + * @p_key: DMA address of the key + * @p_iv: DMA address of the IV + * @method: current algorithm for this request + * @op_mode: op_mode for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @ivlen: size of biv + * @keylen: keylen for this request + * @biv: buffer which contain the IV + * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { struct sginfo t_src[MAX_SG]; @@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx { unsigned int 
ivlen; unsigned int keylen; void *biv; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx { u32 *key; u32 keylen; struct sun8i_ss_dev *ss; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig index cf9547602670..cf2c676a7093 100644 --- a/drivers/crypto/amlogic/Kconfig +++ b/drivers/crypto/amlogic/Kconfig @@ -1,7 +1,7 @@ config CRYPTO_DEV_AMLOGIC_GXL tristate "Support for amlogic cryptographic offloader" depends on HAS_IOMEM - default y if ARCH_MESON + default m if ARCH_MESON select CRYPTO_SKCIPHER select CRYPTO_ENGINE select CRYPTO_ECB diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 9819dd50fbad..5880b94dcb32 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq) #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct meson_alg_template *algt; -#endif - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm); -#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG algt = container_of(alg, struct meson_alg_template, alg.skcipher); algt->stat_fb++; #endif - skcipher_request_set_sync_tfm(req, op->fallback_tfm); - skcipher_request_set_callback(req, areq->base.flags, NULL, NULL); - skcipher_request_set_crypt(req, areq->src, areq->dst, + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); + if (rctx->op_dir == MESON_DECRYPT) - err = crypto_skcipher_decrypt(req); + err = crypto_skcipher_decrypt(&rctx->fallback_req); else - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } @@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm) algt = container_of(alg, struct meson_alg_template, alg.skcipher); op->mc = algt->mc; - sktfm->reqsize = sizeof(struct meson_cipher_req_ctx); - - op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } + sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + op->enginectx.op.do_one_request = meson_handle_cipher_request; op->enginectx.op.prepare_request = NULL; op->enginectx.op.unprepare_request = NULL; @@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm) memzero_explicit(op->key, op->keylen); kfree(op->key); } - crypto_free_sync_skcipher(op->fallback_tfm); + crypto_free_skcipher(op->fallback_tfm); } int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (!op->key) return -ENOMEM; - return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 
411857fad8ba..466552acbbbb 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, @@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index b7f2de91ab76..dc0f142324a3 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -109,6 +109,7 @@ struct meson_dev { struct meson_cipher_req_ctx { u32 op_dir; int flow; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx { u32 keylen; u32 keymode; struct meson_dev *mc; - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; }; /* diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 62ba0325a618..1a46eeddf082 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha1", .cra_driver_name = "artpec-sha1", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "sha256", .cra_driver_name = "artpec-sha256", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "artpec-hmac-sha256", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, @@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "ecb(aes)", .cra_driver_name = "artpec6-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = { .cra_driver_name = "artpec6-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), @@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "cbc(aes)", .cra_driver_name = "artpec6-cbc-aes", .cra_priority = 300, - .cra_flags = 
CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = { .cra_name = "xts(aes)", .cra_driver_name = "artpec6-xts-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, @@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = { .cra_driver_name = "artpec-gcm-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index a353217a0d33..8a7fa1ae1ade 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, @@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = 
"authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3394,7 +3408,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, @@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK | 
CRYPTO_ALG_ASYNC + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, @@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = { .cra_name = "md5", .cra_driver_name = "md5-iproc", .cra_blocksize = MD5_BLOCK_WORDS * 4, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, } }, .cipher_info = { @@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg) crypto->base.cra_priority = cipher_pri; crypto->base.cra_alignmask = 0; crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + crypto->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; crypto->init = skcipher_init_tfm; crypto->exit = skcipher_exit_tfm; @@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); hash->halg.base.cra_init = ahash_cra_init; hash->halg.base.cra_exit = generic_cra_exit; - hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { @@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg) aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); - aead->base.cra_flags |= CRYPTO_ALG_ASYNC; + aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; /* setkey set in alg initialization */ aead->setauthsize = aead_setauthsize; aead->encrypt = aead_encrypt; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b2f9882bc010..91feda5b63f6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } -static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, - const u8 *key, unsigned int keylen) -{ - return skcipher_setkey(skcipher, key, keylen, 0); -} - static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -838,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } @@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, - { - .skcipher = { - .base = { - .cra_name = "ecb(arc4)", - .cra_driver_name = "ecb-arc4-caam", - .cra_blocksize = ARC4_BLOCK_SIZE, - }, - .setkey = arc4_skcipher_setkey, - .encrypt = skcipher_encrypt, - .decrypt = skcipher_decrypt, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - }, - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, - }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3433,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = 
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -3446,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; @@ -3457,7 +3438,6 @@ int caam_algapi_init(struct device *ctrldev) struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; - u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false, gcm_support; @@ -3477,8 +3457,6 @@ int caam_algapi_init(struct device *ctrldev) CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; - arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> - CHA_ID_LS_ARC4_SHIFT; ccha_inst = 0; ptha_inst = 0; @@ -3499,7 +3477,6 @@ int caam_algapi_init(struct device *ctrldev) md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; - arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; gcm_support = aesa & CHA_VER_MISC_AES_GCM; } @@ -3522,10 +3499,6 @@ int caam_algapi_init(struct device *ctrldev) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; - /* Skip ARC4 algorithms if not supported by device */ - if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) - continue; - /* * Check support for AES modes not available * on LP devices. 
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 27e36bdf6163..bb1c0106a95c 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); + dev_dbg(jrdev, "key size mismatch\n"); return -EINVAL; } @@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; @@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 28669cbecf77..66ae1d581168 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -1058,7 +1058,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(dev, "key size mismatch\n"); + dev_dbg(dev, "key size mismatch\n"); return -EINVAL; } @@ -2912,7 +2912,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_skcipher; alg->exit = caam_cra_exit; @@ -2925,7 +2926,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_aead; alg->exit = caam_cra_exit_aead; @@ -4004,7 +4006,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; - int ret; + int ret = -ENOMEM; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -4017,7 +4019,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); - return -ENOMEM; + return ret; } } else { mapped_nents = 0; @@ -4027,7 +4029,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) edesc = qi_cache_zalloc(GFP_DMA | flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); - return -ENOMEM; + return ret; } edesc->src_nents = src_nents; @@ 
-4082,7 +4084,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); - return -ENOMEM; + return ret; } static int ahash_update_first(struct ahash_request *req) @@ -4498,7 +4500,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -4547,7 +4553,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->dev = dev; @@ -4697,6 +4703,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int err; + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpseci_reset() failed\n"); + } dpaa2_dpseci_congestion_free(priv); dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); @@ -4894,6 +4907,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_reset() failed\n"); + goto err_get_vers; + } + } + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, &priv->dpseci_attr); if (err) { @@ -5221,7 +5242,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s hash alg allocation failed: %d\n", - alg->driver_name, err); + alg->hmac_driver_name, err); continue; } @@ -5384,6 +5405,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { }, { .vendor = 0x0 } }; +MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); static struct fsl_mc_driver dpaa2_caam_driver = { .driver = { diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 27ff4a3d037e..e8a6d8bc43b5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1927,7 +1927,7 @@ caam_hash_alloc(struct caam_hash_template *template, alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 60e2a54c19f1..c3c22a8de4c0 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -43,7 +43,6 @@ #include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> -#include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/rsa.h> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f3d20b7645e0..94502f1d4b48 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -469,7 +469,7 @@ 
static int caam_get_era(struct caam_ctrl __iomem *ctrl) * pipeline to a depth of 1 (from it's default of 4) to preclude this situation * from occurring. */ -static void handle_imx6_err005766(u32 *mcr) +static void handle_imx6_err005766(u32 __iomem *mcr) { if (of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6dl") || @@ -527,11 +527,21 @@ static const struct caam_imx_data caam_imx6ul_data = { .num_clks = ARRAY_SIZE(caam_imx6ul_clks), }; +static const struct clk_bulk_data caam_vf610_clks[] = { + { .id = "ipg" }, +}; + +static const struct caam_imx_data caam_vf610_data = { + .clks = caam_vf610_clks, + .num_clks = ARRAY_SIZE(caam_vf610_clks), +}; + static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, + { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c index 8a68531ded0b..039df6c5790c 100644 --- a/drivers/crypto/caam/dpseci.c +++ b/drivers/crypto/caam/dpseci.c @@ -104,6 +104,24 @@ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) } /** + * dpseci_reset() - Reset the DPSECI, returns the object to initial state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + +/** * dpseci_is_enabled() - Check if the DPSECI is enabled. 
* @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h index 4550e134d166..6dcd9be8144b 100644 --- a/drivers/crypto/caam/dpseci.h +++ b/drivers/crypto/caam/dpseci.h @@ -59,6 +59,8 @@ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int *en); diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h index 6ab77ead6e3d..71a007c85adb 100644 --- a/drivers/crypto/caam/dpseci_cmd.h +++ b/drivers/crypto/caam/dpseci_cmd.h @@ -33,6 +33,7 @@ #define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002) #define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003) #define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004) +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005) #define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006) #define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194) diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 17c6108b6d41..72db90176b1a 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -212,6 +212,9 @@ static const char * const rng_err_id_list[] = { "Prediction resistance and test request", "Uninstantiate", "Secure key generation", + "", + "Hardware error", + "Continuous check" }; static int report_ccb_status(struct device *jrdev, const u32 status, diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 4af22e7ceb4f..bf6b03b17251 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -339,8 +339,7 @@ EXPORT_SYMBOL(caam_jr_free); * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's * descriptor. - * @dev: device of the job ring to be used. This device should have - * been assigned prior by caam_jr_register(). + * @dev: struct device of the job ring to be used * @desc: points to a job descriptor that execute our request. 
All * descriptors (and all referenced data) must be in a DMAable * region, and all data references must be physical addresses diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 0f810bc13b2b..af61f3a2c0d4 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -173,9 +173,14 @@ static inline u64 rd_reg64(void __iomem *reg) static inline u64 cpu_to_caam_dma64(dma_addr_t value) { - if (caam_imx) - return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | - (u64)cpu_to_caam32(upper_32_bits(value))); + if (caam_imx) { + u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + ret_val |= (u64)cpu_to_caam32(upper_32_bits(value)); + + return ret_val; + } return cpu_to_caam64(value); } diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1be1adffff1d..5af0dc2a8909 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -99,10 +99,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm); struct cvm_req_ctx *rctx = skcipher_request_ctx(req); struct fc_context *fctx = &rctx->fctx; - u64 *offset_control = &rctx->control_word; u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct cpt_request_info *req_info = &rctx->cpt_req; - u64 *ctrl_flags = NULL; + __be64 *ctrl_flags = NULL; + __be64 *offset_control; req_info->ctrl.s.grp = 0; req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER; @@ -126,9 +126,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); else memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags; + *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + offset_control = (__be64 *)&rctx->control_word; *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16)); /* Storing Packet Data Information in offset * Control Word First 8 bytes @@ -200,6 +201,7 @@ static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); @@ -339,7 +341,8 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm) } static struct skcipher_alg algs[] = { { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -356,7 +359,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -373,7 +377,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -389,7 +394,8 @@ static struct skcipher_alg algs[] = 
{ { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, @@ -406,7 +412,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, @@ -423,7 +430,8 @@ static struct skcipher_alg algs[] = { { .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7a24019356b5..3878b01e19e1 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -4,6 +4,7 @@ */ #include "cptvf.h" +#include "cptvf_algs.h" #include "request_manager.h" /** @@ -133,7 +134,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -150,7 +151,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -167,17 +168,16 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; } - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; - ((u16 *)info->in_buffer)[2] = 0; - ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); + ((__be16 *)info->in_buffer)[2] = 0; + ((__be16 *)info->in_buffer)[3] = 0; memcpy(&info->in_buffer[8], info->gather_components, g_sz_bytes); @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC);
 	if (!info->out_buffer) {
 		ret = -ENOMEM;
 		goto scatter_gather_clean;
 	}
@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
 	struct cpt_vq_command vq_cmd;
 	union cpt_inst_s cptinst;
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
 	if (unlikely(!info)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
 		return -ENOMEM;
@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
 	 * Get buffer for union cpt_res_s response
 	 * structure and its physical address
 	 */
-	info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
 	if (unlikely(!info->completion_addr)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
 		ret = -ENOMEM;
@@ -470,8 +470,6 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
 	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
 	vq_cmd.cmd.s.dlen   = cpu_to_be16(cpt_req->dlen);
 
-	/* 64-bit swap for microcode data reads, not needed for addresses*/
-	vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
 	vq_cmd.dptr = info->dptr_baddr;
 	vq_cmd.rptr = info->rptr_baddr;
 	vq_cmd.cptr.u64 = 0;
diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
index 3514b082eca7..8d40e4ba3af1 100644
--- a/drivers/crypto/cavium/cpt/request_manager.h
+++ b/drivers/crypto/cavium/cpt/request_manager.h
@@ -62,6 +62,8 @@ struct cpt_request_info {
 	union ctrl_info ctrl; /* User control information */
 	struct cptvf_request req; /* Request Information (Core specific) */
 
+	bool may_sleep;
+
 	struct buf_ptr in[MAX_BUF_CNT];
 	struct buf_ptr out[MAX_BUF_CNT];
 
@@ -73,16 +75,16 @@ struct sglist_component {
 	union {
 		u64 len;
 		struct {
-			u16 len0;
-			u16 len1;
-			u16 len2;
-			u16 len3;
+			__be16 len0;
+			__be16 len1;
+			__be16 len2;
+			__be16 len3;
 		} s;
 	} u;
-	u64 ptr0;
-	u64 ptr1;
-	u64 ptr2;
-	u64 ptr3;
+	__be64 ptr0;
+	__be64 ptr1;
+	__be64 ptr2;
+	__be64 ptr3;
 };
 
 struct cpt_info_buffer {
@@ -112,10 +114,10 @@ struct cpt_info_buffer {
 union vq_cmd_word0 {
 	u64 u64;
 	struct {
-		u16 opcode;
-		u16 param1;
-		u16 param2;
-		u16 dlen;
+		__be16 opcode;
+		__be16 param1;
+		__be16 param2;
+		__be16 dlen;
 	} s;
 };
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index dce5423a5883..1be2571363fe 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -522,7 +522,7 @@ static struct aead_alg nitrox_aeads[] = { {
 		.cra_name = "gcm(aes)",
 		.cra_driver_name = "n5_aes_gcm",
 		.cra_priority = PRIO,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
 		.cra_alignmask = 0,
@@ -541,7 +541,7 @@ static struct aead_alg nitrox_aeads[] = { {
 		.cra_name = "rfc4106(gcm(aes))",
 		.cra_driver_name = "n5_rfc4106",
 		.cra_priority = PRIO,
-		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
 		.cra_alignmask = 0,
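[Editor's note] Many hunks in this pull tag hardware drivers with CRYPTO_ALG_ALLOCATES_MEMORY, the new flag for storage users. The consumer side is the allocation mask: setting the flag in the mask while leaving it clear in the type asks for an implementation that never allocates memory per request. A sketch, assuming a caller such as a disk-encryption path (alloc_no_alloc_skcipher() is a hypothetical name); the nitrox skciphers below get the same flag treatment as the AEADs above:

#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_no_alloc_skcipher(void)
{
	/* Implementations flagged CRYPTO_ALG_ALLOCATES_MEMORY are skipped. */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}

diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 18088b0a2257..a553ac65f324 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -388,7 +388,7 @@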
static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "n5_cbc(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -407,7 +407,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "n5_ecb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -426,7 +426,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cfb(aes)", .cra_driver_name = "n5_cfb(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -445,7 +445,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "xts(aes)", .cra_driver_name = "n5_xts(aes)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -464,7 +464,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "n5_rfc3686(ctr(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -483,7 +483,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cts(cbc(aes))", .cra_driver_name = "n5_cts(cbc(aes))", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -502,7 +502,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "cbc(des3_ede)", .cra_driver_name = "n5_cbc(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, @@ -521,7 +521,7 @@ static struct skcipher_alg nitrox_skciphers[] = { { .cra_name = "ecb(des3_ede)", .cra_driver_name = "n5_ecb(des3_ede)", .cra_priority = PRIO, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 5eba7ee49e81..11a305fa19e6 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -378,6 +378,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head) snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp"); base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 
9e8f07c1afac..1c1c939f5c39 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -172,6 +172,7 @@ static struct aead_alg ccp_aes_gcm_defaults = { .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 04b2517df955..6849261ca47d 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -98,7 +98,7 @@ static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); - return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); + return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); } static int ccp_aes_xts_crypt(struct skcipher_request *req, @@ -145,20 +145,19 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req, (ctx->u.aes.key_len != AES_KEYSIZE_256)) fallback = 1; if (fallback) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, - ctx->u.aes.tfm_skcipher); - /* Use the fallback to process the request for any * unsupported unit sizes or key sizes */ - skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, + ctx->u.aes.tfm_skcipher); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -198,13 +197,12 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req) static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; ctx->u.aes.key_len = 0; - fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0, - CRYPTO_ALG_ASYNC | + fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { pr_warn("could not load fallback driver xts(aes)\n"); @@ -212,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) } ctx->u.aes.tfm_skcipher = fallback_tfm; - crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) + + crypto_skcipher_reqsize(fallback_tfm)); return 0; } @@ -221,7 +220,7 @@ static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher); + crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } static int ccp_register_aes_xts_alg(struct list_head *head, @@ -243,6 +242,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head, snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; alg->base.cra_blocksize = AES_BLOCK_SIZE; diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 51e12fbd1159..e6dcd8cedd53 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -212,6 +212,7 @@ static const struct skcipher_alg ccp_aes_defaults = { .init = ccp_aes_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, @@ -229,6 +230,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = { .init = ccp_aes_rfc3686_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 9c129defdb50..ec97daf0fcb7 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -136,6 +136,7 @@ static const struct skcipher_alg ccp_des3_defaults = { .init = ccp_des3_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index b0cc2bd73af8..8fbfdb9e8cd3 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -19,6 +19,7 @@ #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <crypto/scatterwalk.h> +#include <linux/string.h> #include "ccp-crypto.h" @@ -424,7 +425,7 @@ static int ccp_register_hmac_alg(struct list_head *head, *ccp_alg = *base_alg; INIT_LIST_HEAD(&ccp_alg->entry); - strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); alg = &ccp_alg->alg; alg->setkey = 
ccp_sha_setkey;
@@ -486,6 +487,7 @@ static int ccp_register_sha_alg(struct list_head *head,
 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 		 def->drv_name);
 	base->cra_flags = CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_ALLOCATES_MEMORY |
 			  CRYPTO_ALG_KERN_DRIVER_ONLY |
 			  CRYPTO_ALG_NEED_FALLBACK;
 	base->cra_blocksize = def->block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 90a009e6b5c1..aed3d2192d01 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -89,7 +89,7 @@ static inline struct ccp_crypto_ahash_alg *
 /***** AES related defines *****/
 struct ccp_aes_ctx {
 	/* Fallback cipher for XTS with unsupported unit sizes */
-	struct crypto_sync_skcipher *tfm_skcipher;
+	struct crypto_skcipher *tfm_skcipher;
 
 	enum ccp_engine engine;
 	enum ccp_aes_type type;
@@ -121,6 +121,8 @@ struct ccp_aes_req_ctx {
 	u8 rfc3686_iv[AES_BLOCK_SIZE];
 
 	struct ccp_cmd cmd;
+
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct ccp_aes_cmac_req_ctx {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 82ac4c14c04c..7838f63bab32 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -221,8 +221,8 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
 static int ccp5_do_cmd(struct ccp5_desc *desc,
 		       struct ccp_cmd_queue *cmd_q)
 {
-	u32 *mP;
-	__le32 *dP;
+	__le32 *mP;
+	u32 *dP;
 	u32 tail;
 	int i;
 	int ret = 0;
@@ -235,8 +235,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
 	}
 	mutex_lock(&cmd_q->q_mutex);
 
-	mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
-	dP = (__le32 *) desc;
+	mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
+	dP = (u32 *)desc;
 	for (i = 0; i < 8; i++)
 		mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
 
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 19ac509ed76e..0971ee60f840 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -531,7 +531,6 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 	return len;
 }
 
-#ifdef CONFIG_PM
 bool ccp_queues_suspended(struct ccp_device *ccp)
 {
 	unsigned int suspended = 0;
@@ -549,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 	return ccp->cmd_q_count == suspended;
 }
 
-int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+int ccp_dev_suspend(struct sp_device *sp)
 {
 	struct ccp_device *ccp = sp->ccp_data;
 	unsigned long flags;
@@ -601,7 +600,6 @@ int ccp_dev_resume(struct sp_device *sp)
 
 	return 0;
 }
-#endif
 
 int ccp_dev_init(struct sp_device *sp)
 {
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 3f68262d9ab4..a5d9123a22ea 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -469,6 +469,7 @@ struct ccp_sg_workarea {
 	unsigned int sg_used;
 
 	struct scatterlist *dma_sg;
+	struct scatterlist *dma_sg_head;
 	struct device *dma_dev;
 	unsigned int dma_count;
 	enum dma_data_direction dma_dir;
@@ -596,8 +597,8 @@ struct dword3 {
 };
 
 union dword4 {
-	__le32 dst_lo;		/* NON-SHA */
-	__le32 sha_len_lo;	/* SHA */
+	u32 dst_lo;		/* NON-SHA */
+	u32 sha_len_lo;		/* SHA */
 };
 
 union dword5 {
@@ -607,7 +608,7 @@ union dword5 {
 		unsigned int rsvd1:13;
 		unsigned int fixed:1;
 	} fields;
-	__le32 sha_len_hi;
+	u32 sha_len_hi;
 };
 
 struct dword7 {
@@ -618,12 +619,12 @@ struct dword7 {
 
 struct ccp5_desc {
 	struct dword0 dw0;
-	__le32 length;
-	__le32 src_lo;
+	u32 length;
+	u32 src_lo;
 	struct dword3 dw3;
 	union dword4 dw4;
 	union dword5 dw5;
-	__le32 key_lo;
+	u32 key_lo;
 	struct dword7 dw7;
 };
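[Editor's note] The ccp5_do_cmd and ccp5_desc hunks above are sparse-annotation fixes: descriptor words are built in CPU byte order (u32) and only become little-endian (__le32) at the single point where they are copied into the queue memory the CCP reads. A minimal sketch of that rule (queue_write_desc() is a hypothetical name, not the driver's API):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert once, at the hardware boundary: __le32 destination, u32 source. */
static void queue_write_desc(__le32 *slot, const u32 *desc_words)
{
	int i;

	for (i = 0; i < 8; i++)
		slot[i] = cpu_to_le32(desc_words[i]);
}

diff --git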
a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 422193690fd4..bd270e66185e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. 
 */
-	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
 	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
 
 	if (dst) {
-		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
 		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
 	} else {
@@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 		/* Enough data in the sg element, but we need to
 		 * adjust for any previously copied data
 		 */
-		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
 		op->src.u.dma.offset = src->sg_wa.sg_used;
 		op->src.u.dma.length = op_len & ~(block_size - 1);
 
@@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 			/* Enough room in the sg element, but we need to
 			 * adjust for any previously used area
 			 */
-			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
 			op->dst.u.dma.offset = dst->sg_wa.sg_used;
 			op->dst.u.dma.length = op->src.u.dma.length;
 		}
@@ -617,13 +632,12 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct ccp_data src, dst;
 	struct ccp_data aad;
 	struct ccp_op op;
-
-	unsigned long long *final;
 	unsigned int dm_offset;
 	unsigned int authsize;
 	unsigned int jobid;
 	unsigned int ilen;
 	bool in_place = true; /* Default value */
+	__be64 *final;
 	int ret;
 
 	struct scatterlist *p_inp, sg_inp[2];
@@ -825,7 +839,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_dst;
-	final = (unsigned long long *) final_wa.address;
+	final = (__be64 *)final_wa.address;
 	final[0] = cpu_to_be64(aes->aad_len * 8);
 	final[1] = cpu_to_be64(ilen * 8);
 
@@ -1308,7 +1322,6 @@ ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		return -EINVAL;
 	}
 
-	ret = -EIO;
 	/* Zero out all the fields of the command desc */
 	memset(&op, 0, sizeof(op));
 
@@ -2028,7 +2041,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	dst.sg_wa.sg_used = 0;
 	for (i = 1; i <= src.sg_wa.dma_count; i++) {
 		if (!dst.sg_wa.sg ||
-		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
 			ret = -EINVAL;
 			goto e_dst;
 		}
@@ -2054,8 +2067,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			goto e_dst;
 		}
 
-		dst.sg_wa.sg_used += src.sg_wa.sg->length;
-		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
 			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
 			dst.sg_wa.sg_used = 0;
 		}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index ce42675d3274..6284a15e5047 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -211,13 +211,12 @@ void sp_destroy(struct sp_device *sp)
 	sp_del_device(sp);
 }
 
-#ifdef CONFIG_PM
-int sp_suspend(struct sp_device *sp, pm_message_t state)
+int sp_suspend(struct sp_device *sp)
 {
 	int ret;
 
 	if (sp->dev_vdata->ccp_vdata) {
-		ret = ccp_dev_suspend(sp, state);
+		ret = ccp_dev_suspend(sp);
 		if (ret)
 			return ret;
 	}
@@ -237,7 +236,6 @@ int sp_resume(struct sp_device *sp)
 
 	return 0;
 }
-#endif
 
 struct sp_device *sp_get_psp_master_device(void)
 {
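[Editor's note] The sp/ccp suspend path above drops pm_message_t because the driver moves to generic power management; the sp-pci.c hunks below finish the conversion with dev_pm_ops. A hedged sketch of the target shape (the mydrv_* names are hypothetical):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	/* quiesce the device; retrieve driver state via dev_get_drvdata(dev) */
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	return 0;
}

/* No CONFIG_PM #ifdefs needed: __maybe_unused and the macro handle both cases. */
static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);
/* wired up as .driver.pm = &mydrv_pm_ops in the struct pci_driver */

diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index f913f1494af9..0218d0670eee 100644
---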
a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -119,7 +119,7 @@ int sp_init(struct sp_device *sp); void sp_destroy(struct sp_device *sp); struct sp_device *sp_get_master(void); -int sp_suspend(struct sp_device *sp, pm_message_t state); +int sp_suspend(struct sp_device *sp); int sp_resume(struct sp_device *sp); int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); @@ -134,7 +134,7 @@ struct sp_device *sp_get_psp_master_device(void); int ccp_dev_init(struct sp_device *sp); void ccp_dev_destroy(struct sp_device *sp); -int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); +int ccp_dev_suspend(struct sp_device *sp); int ccp_dev_resume(struct sp_device *sp); #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ @@ -145,7 +145,7 @@ static inline int ccp_dev_init(struct sp_device *sp) } static inline void ccp_dev_destroy(struct sp_device *sp) { } -static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +static inline int ccp_dev_suspend(struct sp_device *sp) { return 0; } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index cb6cb47053f4..f471dbaef1fb 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -252,23 +252,19 @@ static void sp_pci_remove(struct pci_dev *pdev) sp_free_irqs(sp); } -#ifdef CONFIG_PM -static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused sp_pci_suspend(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } -static int sp_pci_resume(struct pci_dev *pdev) +static int __maybe_unused sp_pci_resume(struct device *dev) { - struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); return sp_resume(sp); } -#endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP static const struct sev_vdata sevv1 = { @@ -365,15 +361,14 @@ static const struct pci_device_id sp_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, sp_pci_table); +static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume); + static struct pci_driver sp_pci_driver = { .name = "ccp", .id_table = sp_pci_table, .probe = sp_pci_probe, .remove = sp_pci_remove, -#ifdef CONFIG_PM - .suspend = sp_pci_suspend, - .resume = sp_pci_resume, -#endif + .driver.pm = &sp_pci_pm_ops, }; int sp_pci_init(void) diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 831aac1393a2..9dba52fbee99 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -207,7 +207,7 @@ static int sp_platform_suspend(struct platform_device *pdev, struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); - return sp_suspend(sp, state); + return sp_suspend(sp); } static int sp_platform_resume(struct platform_device *pdev) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 872ea3ff1c6b..076669dc1035 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -45,7 +45,6 @@ enum cc_key_type { struct cc_cipher_ctx { struct cc_drvdata *drvdata; int keylen; - int key_round_number; int cipher_mode; int flow_mode; unsigned int flags; @@ -56,6 +55,8 @@ struct cc_cipher_ctx { struct cc_cpp_key_info cpp; }; struct crypto_shash *shash_tfm; + struct crypto_skcipher *fallback_tfm; + bool fallback_on; }; static void cc_cipher_complete(struct device *dev, void *cc_req, int err); @@ -75,7 +76,6 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 
size)
 	case CC_AES_128_BIT_KEY_SIZE:
 	case CC_AES_192_BIT_KEY_SIZE:
 		if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
-		    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
 		    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
 			return 0;
 		break;
@@ -159,22 +159,49 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
 			     skcipher_alg.base);
 	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
-	int rc = 0;
+	unsigned int fallback_req_size = 0;
 
 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
 		crypto_tfm_alg_name(tfm));
 
-	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
-				    sizeof(struct cipher_req_ctx));
-
 	ctx_p->cipher_mode = cc_alg->cipher_mode;
 	ctx_p->flow_mode = cc_alg->flow_mode;
 	ctx_p->drvdata = cc_alg->drvdata;
 
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		const char *name = crypto_tfm_alg_name(tfm);
+
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+		max_key_buf_size <<= 1;
+
+		/* Alloc fallback tfm for essiv when key size != 256 bit */
+		ctx_p->fallback_tfm =
+			crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+
+		if (IS_ERR(ctx_p->fallback_tfm)) {
+			/* Note we're still allowing registration with no fallback since it's
+			 * better to have most modes supported than none at all.
+			 */
+			dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n",
+				 name);
+			ctx_p->fallback_tfm = NULL;
+		} else {
+			fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm);
+		}
+	}
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct cipher_req_ctx) + fallback_req_size);
+
 	/* Allocate key buffer, cache line aligned */
-	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+	ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL);
 	if (!ctx_p->user.key)
-		return -ENOMEM;
+		goto free_fallback;
 
 	dev_dbg(dev, "Allocated key buffer in context.
key=@%p\n", ctx_p->user.key); @@ -186,21 +213,20 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_fallback: + crypto_free_skcipher(ctx_p->fallback_tfm); + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) @@ -220,6 +246,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) /* Free hash tfm for essiv */ crypto_free_shash(ctx_p->shash_tfm); ctx_p->shash_tfm = NULL; + crypto_free_skcipher(ctx_p->fallback_tfm); + ctx_p->fallback_tfm = NULL; } /* Unmap key buffer */ @@ -303,6 +331,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, } ctx_p->keylen = keylen; + ctx_p->fallback_on = false; switch (cc_slot_to_key_type(hki.hw_key1)) { case CC_HW_PROTECTED_KEY: @@ -388,10 +417,33 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (validate_keys_sizes(ctx_p, keylen)) { - dev_dbg(dev, "Unsupported key size %d.\n", keylen); + dev_dbg(dev, "Invalid key size %d.\n", keylen); return -EINVAL; } + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + + /* We only support 256 bit ESSIV-CBC-AES keys */ + if (keylen != AES_KEYSIZE_256) { + unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK; + + if (likely(ctx_p->fallback_tfm)) { + ctx_p->fallback_on = true; + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags); + return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen); + } + + dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen); + return -EINVAL; + } + + /* Internal ESSIV key buffer is double sized */ + max_key_buf_size <<= 1; + } + + ctx_p->fallback_on = false; ctx_p->key_type = CC_UNPROTECTED_KEY; /* @@ -419,21 +471,20 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, max_key_buf_size, DMA_TO_DEVICE); memcpy(ctx_p->user.key, key, keylen); - if (keylen == 24) - memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* sha256 for key2 - use sw implementation */ - int key_len = keylen >> 1; int err; err = crypto_shash_tfm_digest(ctx_p->shash_tfm, - ctx_p->user.key, key_len, - ctx_p->user.key + key_len); + ctx_p->user.key, keylen, + ctx_p->user.key + keylen); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; } + + keylen <<= 1; } dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); @@ -571,9 +622,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; - unsigned int key_len = ctx_p->keylen; + unsigned int key_len = (ctx_p->keylen / 2); dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int du_size = nbytes; + 
unsigned int key_offset = key_len;
 	struct cc_crypto_alg *cc_alg = container_of(tfm->__crt_alg, struct cc_crypto_alg,
@@ -593,6 +645,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
 	case DRV_CIPHER_XTS:
 	case DRV_CIPHER_ESSIV:
 	case DRV_CIPHER_BITLOCKER:
+
+		if (cipher_mode == DRV_CIPHER_ESSIV)
+			key_len = SHA256_DIGEST_SIZE;
+
 		/* load XEX key */
 		hw_desc_init(&desc[*seq_size]);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -602,12 +658,12 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
 					  ctx_p->hw.key2_slot);
 		} else {
 			set_din_type(&desc[*seq_size], DMA_DLLI,
-				     (key_dma_addr + (key_len / 2)),
-				     (key_len / 2), NS_BIT);
+				     (key_dma_addr + key_offset),
+				     key_len, NS_BIT);
 		}
 		set_xex_data_unit_size(&desc[*seq_size], du_size);
 		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
-		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_key_size_aes(&desc[*seq_size], key_len);
 		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
 		(*seq_size)++;
 
@@ -616,7 +672,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
 		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_config0(&desc[*seq_size], direction);
-		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_key_size_aes(&desc[*seq_size], key_len);
 		set_flow_mode(&desc[*seq_size], flow_mode);
 		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
 			     CC_AES_BLOCK_SIZE, NS_BIT);
@@ -867,6 +923,17 @@ static int cc_cipher_process(struct skcipher_request *req,
 		goto exit_process;
 	}
 
+	if (ctx_p->fallback_on) {
+		struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+		*subreq = *req;
+		skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm);
+		if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT)
+			return crypto_skcipher_encrypt(subreq);
+		else
+			return crypto_skcipher_decrypt(subreq);
+	}
+
 	/* The IV we are handed may be allocated from the stack so
 	 * we must copy it to a DMAable buffer before use.
*/ @@ -1010,7 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv(paes)", + .name = "essiv(cbc(paes),sha256)", .driver_name = "essiv-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1028,7 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv512(paes)", + .name = "essiv512(cbc(paes),sha256)", .driver_name = "essiv-paes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1047,7 +1114,7 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { - .name = "essiv4096(paes)", + .name = "essiv4096(cbc(paes),sha256)", .driver_name = "essiv-paes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { @@ -1269,15 +1336,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv(aes)", + .name = "essiv(cbc(aes),sha256)", .driver_name = "essiv-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1286,15 +1353,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv512(aes)", + .name = "essiv512(cbc(aes),sha256)", .driver_name = "essiv-aes-du512-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, @@ -1304,15 +1371,15 @@ static const struct cc_alg_template skcipher_algs[] = { .std_body = CC_STD_NIST, }, { - .name = "essiv4096(aes)", + .name = "essiv4096(cbc(aes),sha256)", .driver_name = "essiv-aes-du4096-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, - .min_keysize = AES_MIN_KEY_SIZE * 2, - .max_keysize = AES_MAX_KEY_SIZE * 2, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 4c2553672b6f..13b908ea4873 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, return min(srclen, dstlen); } -static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher, - u32 flags, - struct scatterlist *src, - struct scatterlist *dst, - unsigned int nbytes, +static int chcr_cipher_fallback(struct crypto_skcipher *cipher, + struct skcipher_request *req, u8 *iv, unsigned short op_type) { + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int err; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher); - - skcipher_request_set_sync_tfm(subreq, cipher); - skcipher_request_set_callback(subreq, flags, NULL, NULL); - skcipher_request_set_crypt(subreq, src, dst, - nbytes, iv); + skcipher_request_set_tfm(&reqctx->fallback_req, cipher); + skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + 
skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst, + req->cryptlen, iv); - err = op_type ? crypto_skcipher_decrypt(subreq) : - crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); + err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) : + crypto_skcipher_encrypt(&reqctx->fallback_req); return err; @@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, + crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, + crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); + return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -1206,13 +1202,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, req); memcpy(req->iv, reqctx->init_iv, IV); atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, - req->iv, - reqctx->op); + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv, + reqctx->op); goto complete; } @@ -1224,7 +1215,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req, wrparam.bytes = bytes; skb = create_cipher_wr(&wrparam); if (IS_ERR(skb)) { - pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + pr_err("%s : Failed to form WR. No memory\n", __func__); err = PTR_ERR(skb); goto unmap; } @@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req, chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); fallback: atomic_inc(&adap->chcr_stats.fallback); - err = chcr_cipher_fallback(ablkctx->sw_cipher, - req->base.flags, - req->src, - req->dst, - req->cryptlen, + err = chcr_cipher_fallback(ablkctx->sw_cipher, req, subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ? 
reqctx->iv : req->iv, @@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0, + ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } init_completion(&ctx->cbc_aes_aio_done); - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm) /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) * cannot be used as fallback in chcr_handle_cipher_response */ - ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0, + ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } @@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm) struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); - crypto_free_sync_skcipher(ablkctx->sw_cipher); + crypto_free_skcipher(ablkctx->sw_cipher); } static int get_alg_config(struct algo_param *params, @@ -1556,7 +1545,7 @@ static int get_alg_config(struct algo_param *params, params->result_size = SHA512_DIGEST_SIZE; break; default: - pr_err("chcr : ERROR, unsupported digest size\n"); + pr_err("ERROR, unsupported digest size\n"); return -EINVAL; } return 0; @@ -3571,7 +3560,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, goto out; if (get_alg_config(¶m, max_authsize)) { - pr_err("chcr : Unsupported digest size\n"); + pr_err("Unsupported digest size\n"); goto out; } subtype = get_aead_subtype(authenc); @@ -3590,7 +3579,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported cipher key\n"); + pr_err("Unsupported cipher key\n"); goto out; } @@ -3608,10 +3597,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } base_hash = chcr_alloc_shash(max_authsize); if (IS_ERR(base_hash)) { - pr_err("chcr : Base driver cannot be loaded\n"); - aeadctx->enckey_len = 0; - memzero_explicit(&keys, sizeof(keys)); - return -EINVAL; + pr_err("Base driver cannot be loaded\n"); + goto out; } { SHASH_DESC_ON_STACK(shash, base_hash); @@ -3626,7 +3613,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, keys.authkeylen, o_ptr); if (err) { - pr_err("chcr : Base driver cannot be loaded\n"); + pr_err("Base driver cannot be loaded\n"); goto out; } keys.authkeylen = max_authsize; @@ -3711,7 +3698,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - pr_err("chcr : Unsupported 
cipher key %d\n", keys.enckeylen); + pr_err("Unsupported cipher key %d\n", keys.enckeylen); goto out; } memcpy(aeadctx->key, keys.enckey, keys.enckeylen); @@ -3747,7 +3734,7 @@ static int chcr_aead_op(struct aead_request *req, cdev = a_ctx(tfm)->dev; if (!cdev) { - pr_err("chcr : %s : No crypto device.\n", __func__); + pr_err("%s : No crypto device.\n", __func__); return -ENXIO; } @@ -4445,6 +4432,7 @@ static int chcr_register_alg(void) driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; driver_algs[i].alg.skcipher.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK; driver_algs[i].alg.skcipher.base.cra_ctxsize = sizeof(struct chcr_context) + @@ -4456,7 +4444,8 @@ static int chcr_register_alg(void) break; case CRYPTO_ALG_TYPE_AEAD: driver_algs[i].alg.aead.base.cra_flags = - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY; driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; driver_algs[i].alg.aead.init = chcr_aead_cra_init; @@ -4476,7 +4465,8 @@ static int chcr_register_alg(void) a_hash->halg.statesize = SZ_AHASH_REQ_CTX; a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; a_hash->halg.base.cra_module = THIS_MODULE; - a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + a_hash->halg.base.cra_flags = + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; a_hash->halg.base.cra_alignmask = 0; a_hash->halg.base.cra_exit = NULL; @@ -4497,8 +4487,7 @@ static int chcr_register_alg(void) break; } if (err) { - pr_err("chcr : %s : Algorithm registration failed\n", - name); + pr_err("%s : Algorithm registration failed\n", name); goto register_err; } else { driver_algs[i].is_registered = 1; diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 31e427e273f8..e89f9e0094b4 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) } struct ablk_ctx { - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; __be32 key_ctx_hdr; unsigned int enckey_len; unsigned char ciph_mode; @@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx { u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN]; u16 txqidx; u16 rxqidx; + struct skcipher_request fallback_req; // keep at the end }; struct chcr_alg_template { diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index a3ee127a70e3..b135c74fb619 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -12,7 +12,6 @@ #include <linux/topology.h> #include "hpre.h" -#define HPRE_VF_NUM 63 #define HPRE_QUEUE_NUM_V2 1024 #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -46,9 +45,9 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90 #define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_HAC_RAS_CE_ENABLE 0x1 #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_HAC_RAS_FE_ENABLE 0 @@ -83,6 +82,10 @@ #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5) +#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 @@ -231,6 +234,22 @@ 
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
 	return 0;
 }
 
+/*
+ * For Hi1620, we should disable FLR triggered by hardware (BME/PM/SRIOV).
+ * Or it may stay in D3 state when we bind and unbind hpre quickly,
+ * as it does FLR triggered by hardware.
+ */
+static void disable_flr_of_bme(struct hisi_qm *qm)
+{
+	u32 val;
+
+	val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
+	val |= HPRE_QM_PM_FLR;
+	writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+}
+
 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
@@ -242,10 +261,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
 	writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
 
-	/* disable FLR triggered by BME(bus master enable) */
-	writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
-	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
-
 	/* HPRE need more time, we close this interrupt */
 	val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
 	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
@@ -264,7 +279,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
 	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
 	ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
-					 val & BIT(0),
+					val & BIT(0),
 					HPRE_REG_RD_INTVRL_US,
 					HPRE_REG_RD_TMOUT_US);
 	if (ret) {
@@ -296,6 +311,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 	if (ret)
 		dev_err(dev, "acpi_evaluate_dsm err.\n");
 
+	disable_flr_of_bme(qm);
+
 	return ret;
 }
 
@@ -372,7 +389,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
 	u32 num_vfs = qm->vfs_num;
 	u32 vfq_num, tmp;
 
-
 	if (val > num_vfs)
 		return -EINVAL;
 
@@ -449,7 +465,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
 }
 
 static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
-				    size_t count, loff_t *pos)
+				   size_t count, loff_t *pos)
 {
 	struct hpre_debugfs_file *file = filp->private_data;
 	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -477,7 +493,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
 }
 
 static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
-				     size_t count, loff_t *pos)
+				    size_t count, loff_t *pos)
 {
 	struct hpre_debugfs_file *file = filp->private_data;
 	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -548,13 +564,15 @@ static int hpre_debugfs_atomic64_get(void *data, u64 *val)
 static int hpre_debugfs_atomic64_set(void *data, u64 val)
 {
 	struct hpre_dfx *dfx_item = data;
-	struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+	struct hpre_dfx *hpre_dfx = NULL;
 
-	if (val)
+	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+	} else if (val) {
 		return -EINVAL;
+	}
 
-	if (dfx_item->type == HPRE_OVERTIME_THRHLD)
-		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
 	atomic64_set(&dfx_item->value, val);
 
 	return 0;
@@ -563,15 +581,17 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");
 
-static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx) { + struct hpre *hpre = container_of(qm, struct hpre, qm); + struct hpre_debug *dbg = &hpre->debug; struct dentry *file_dir; if (dir) file_dir = dir; else - file_dir = dbg->debug_root; + file_dir = qm->debug.debug_root; if (type >= HPRE_DEBUG_FILE_NUM) return -EINVAL; @@ -586,10 +606,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, return 0; } -static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) +static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; @@ -601,14 +619,12 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); regset->base = qm->io_base; - debugfs_create_regset32("regs", 0444, debug->debug_root, regset); + debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset); return 0; } -static int hpre_cluster_debugfs_init(struct hpre_debug *debug) +static int hpre_cluster_debugfs_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; char buf[HPRE_DBGFS_VAL_MAX_LEN]; struct debugfs_regset32 *regset; @@ -619,7 +635,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); if (ret < 0) return -EINVAL; - tmp_d = debugfs_create_dir(buf, debug->debug_root); + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) @@ -630,7 +646,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) regset->base = qm->io_base + hpre_cluster_offsets[i]; debugfs_create_regset32("regs", 0444, tmp_d, regset); - ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, + ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, i + HPRE_CLUSTER_CTRL); if (ret) return ret; @@ -639,32 +655,31 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) return 0; } -static int hpre_ctrl_debug_init(struct hpre_debug *debug) +static int hpre_ctrl_debug_init(struct hisi_qm *qm) { int ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, HPRE_CURRENT_QM); if (ret) return ret; - ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE, + ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) return ret; - ret = hpre_pf_comm_regs_debugfs_init(debug); + ret = hpre_pf_comm_regs_debugfs_init(qm); if (ret) return ret; - return hpre_cluster_debugfs_init(debug); + return hpre_cluster_debugfs_init(qm); } -static void hpre_dfx_debug_init(struct hpre_debug *debug) +static void hpre_dfx_debug_init(struct hisi_qm *qm) { - struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; - struct hisi_qm *qm = &hpre->qm; struct dentry *parent; int i; @@ -676,30 +691,27 @@ static void hpre_dfx_debug_init(struct hpre_debug *debug) } } -static int hpre_debugfs_init(struct hpre *hpre) +static int hpre_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; - struct dentry *dir; int ret; - dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); - qm->debug.debug_root = dir; + 
qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hpre_debugfs_root); + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; - ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { - hpre->debug.debug_root = dir; - ret = hpre_ctrl_debug_init(&hpre->debug); + ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; } - hpre_dfx_debug_init(&hpre->debug); + hpre_dfx_debug_init(qm); return 0; @@ -708,10 +720,8 @@ failed_to_create: return ret; } -static void hpre_debugfs_exit(struct hpre *hpre) +static void hpre_debugfs_exit(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; - debugfs_remove_recursive(qm->debug.debug_root); } @@ -732,6 +742,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; } @@ -849,7 +860,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_with_err_init; - ret = hpre_debugfs_init(hpre); + ret = hpre_debugfs_init(qm); if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); @@ -874,6 +885,7 @@ err_with_crypto_register: err_with_qm_start: hisi_qm_del_from_list(qm, &hpre_devices); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); err_with_err_init: @@ -905,7 +917,7 @@ static void hpre_remove(struct pci_dev *pdev) qm->debug.curr_qm_qp_num = 0; } - hpre_debugfs_exit(hpre); + hpre_debugfs_exit(qm); hisi_qm_stop(qm); hisi_qm_dev_err_uninit(qm); hisi_qm_uninit(qm); @@ -924,7 +936,8 @@ static struct pci_driver hpre_pci_driver = { .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, - .sriov_configure = hisi_qm_sriov_configure, + .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? 
+ hisi_qm_sriov_configure : NULL, .err_handler = &hpre_err_handler, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9bb263cec6c3..6527c53b073f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1064,19 +1064,10 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, char buf[QM_DBG_READ_LEN]; int len; - if (*pos) - return 0; - - if (count < QM_DBG_READ_LEN) - return -ENOSPC; - - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", - "Please echo help to cmd to get help information"); + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); - if (copy_to_user(buffer, buf, len)) - return -EFAULT; - - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, @@ -1741,7 +1732,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp) } EXPORT_SYMBOL_GPL(hisi_qm_release_qp); -static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; @@ -1813,7 +1804,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; int qp_id = qp->qp_id; - int pasid = arg; + u32 pasid = arg; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) @@ -2179,8 +2170,12 @@ static int qm_alloc_uacce(struct hisi_qm *qm) .flags = UACCE_DEV_SVA, .ops = &uacce_qm_ops, }; + int ret; - strncpy(interface.name, pdev->driver->name, sizeof(interface.name)); + ret = strscpy(interface.name, pdev->driver->name, + sizeof(interface.name)); + if (ret < 0) + return -ENAMETOOLONG; uacce = uacce_alloc(&pdev->dev, &interface); if (IS_ERR(uacce)) @@ -2691,24 +2686,12 @@ static ssize_t qm_status_read(struct file *filp, char __user *buffer, { struct hisi_qm *qm = filp->private_data; char buf[QM_DBG_READ_LEN]; - int val, cp_len, len; - - if (*pos) - return 0; - - if (count < QM_DBG_READ_LEN) - return -ENOSPC; + int val, len; val = atomic_read(&qm->status.flags); - len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - if (!len) - return -EFAULT; - - cp_len = copy_to_user(buffer, buf, len); - if (cp_len) - return -EFAULT; + len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); - return (*pos = len); + return simple_read_from_buffer(buffer, count, pos, buf, len); } static const struct file_operations qm_status_fops = { diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 0a351de8d838..6c1d3c7d64ee 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -44,6 +44,7 @@ #define QM_AXI_M_CFG 0x1000ac #define AXI_M_CFG 0xffff #define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 #define AXI_M_CFG_ENABLE 0xffffffff #define QM_PEH_AXUSER_CFG 0x1000cc #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index c27e7160d2df..8ca945ac297e 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int 
sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -630,13 +631,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -668,7 +669,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -679,7 +680,7 @@ static struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; @@ -932,7 +934,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(aes)", .cra_driver_name = "hisi_sec_aes_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -951,7 +954,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(aes)", .cra_driver_name = "hisi_sec_aes_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -970,7 +974,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ctr(aes)", .cra_driver_name = "hisi_sec_aes_ctr", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -989,7 +994,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "xts(aes)", .cra_driver_name = "hisi_sec_aes_xts", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1009,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des)", .cra_driver_name = "hisi_sec_des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1028,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des)", .cra_driver_name = "hisi_sec_des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, @@ -1047,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "hisi_sec_3des_cbc", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), 
.cra_alignmask = 0, @@ -1066,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "hisi_sec_3des_ecb", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 7b64aca704d6..037762b531e2 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -46,9 +46,11 @@ struct sec_req { struct sec_cipher_req c_req; struct sec_aead_req aead_req; + struct list_head backlog_head; int err_type; int req_id; + int flag; /* Status of the SEC request */ bool fake_busy; @@ -104,6 +106,7 @@ struct sec_qp_ctx { struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; struct mutex req_lock; + struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; atomic_t pending_reqs; @@ -161,6 +164,7 @@ struct sec_dfx { atomic64_t send_cnt; atomic64_t recv_cnt; atomic64_t send_busy_cnt; + atomic64_t recv_busy_cnt; atomic64_t err_bd_cnt; atomic64_t invalid_req_cnt; atomic64_t done_flag_cnt; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 64614a9bdf21..497969ae8b23 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); + atomic_inc(&qp->qp_status.used); return; } req->err_type = bd->type2.error_type; @@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) struct sec_qp_ctx *qp_ctx = req->qp_ctx; int ret; + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && + !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { + list_add_tail(&req->backlog_head, &qp_ctx->backlog); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); + mutex_unlock(&qp_ctx->req_lock); + return -EBUSY; + } mutex_unlock(&qp_ctx->req_lock); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (unlikely(ret == -EBUSY)) return -ENOBUFS; - if (!ret) { - if (req->fake_busy) { - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - ret = -EBUSY; - } else { - ret = -EINPROGRESS; - } + if (likely(!ret)) { + ret = -EINPROGRESS; + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); } return ret; @@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp_ctx->ctx = ctx; mutex_init(&qp_ctx->req_lock); - atomic_set(&qp_ctx->pending_reqs, 0); idr_init(&qp_ctx->req_idr); + INIT_LIST_HEAD(&qp_ctx->backlog); qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, SEC_SGL_SGE_NR); @@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); } +static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct sec_req *backlog_req = NULL; + + mutex_lock(&qp_ctx->req_lock); + if (ctx->fake_req_limit >= + atomic_read(&qp_ctx->qp->qp_status.used) && + !list_empty(&qp_ctx->backlog)) { + 
backlog_req = list_first_entry(&qp_ctx->backlog, + typeof(*backlog_req), backlog_head); + list_del(&backlog_req->backlog_head); + } + mutex_unlock(&qp_ctx->req_lock); + + return backlog_req; +} + static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct skcipher_request *backlog_sk_req; + struct sec_req *backlog_req; - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); /* IV output at encrypto of CBC mode */ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); - if (req->fake_busy) - sk_req->base.complete(&sk_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(ctx, qp_ctx); + if (!backlog_req) + break; + + backlog_sk_req = backlog_req->c_req.sk_req; + backlog_sk_req->base.complete(&backlog_sk_req->base, + -EINPROGRESS); + atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); + } + sk_req->base.complete(&sk_req->base, err); } @@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct aead_request *backlog_aead_req; + struct sec_req *backlog_req; size_t sz; - atomic_dec(&qp_ctx->pending_reqs); - if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) sec_update_iv(req, SEC_AEAD); @@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) sec_free_req_id(req); - if (req->fake_busy) - a_req->base.complete(&a_req->base, -EINPROGRESS); + while (1) { + backlog_req = sec_back_req_clear(c, qp_ctx); + if (!backlog_req) + break; + + backlog_aead_req = backlog_req->aead_req.aead_req; + backlog_aead_req->base.complete(&backlog_aead_req->base, + -EINPROGRESS); + atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); + } a_req->base.complete(&a_req->base, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - - atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); sec_free_queue_id(ctx, req); } @@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) return req->req_id; } - if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = true; - else - req->fake_busy = false; - return 0; } @@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) { + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || + (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); goto err_send_req; } @@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) if (!sk_req->cryptlen) return 0; + req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; @@ -1435,7 +1475,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1530,6 +1570,7 
@@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; + req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; @@ -1558,7 +1599,7 @@ static int sec_aead_decrypt(struct aead_request *a_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index a4cb58b54b25..2297425486cb 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -22,17 +22,15 @@ #define SEC_PF_PCI_DEVICE_ID 0xa255 #define SEC_VF_PCI_DEVICE_ID 0xa256 -#define SEC_XTS_MIV_ENABLE_REG 0x301384 -#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF -#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF -#define SEC_BD_ERR_CHK_EN1 0xfffff7fd -#define SEC_BD_ERR_CHK_EN2 0xffffbfff +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd +#define SEC_BD_ERR_CHK_EN3 0xffffbfff #define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) -#define SEC_PF_DEF_Q_NUM 64 +#define SEC_PF_DEF_Q_NUM 256 #define SEC_PF_DEF_Q_BASE 0 -#define SEC_CTX_Q_NUM_DEF 24 +#define SEC_CTX_Q_NUM_DEF 2 #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 @@ -47,17 +45,18 @@ #define SEC_ECC_ADDR(err) ((err) >> 0) #define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff +#define SEC_CORE_INT_CLEAR 0x1ff +#define SEC_SAA_ENABLE 0x17f -#define SEC_RAS_CE_REG 0x50 -#define SEC_RAS_FE_REG 0x54 -#define SEC_RAS_NFE_REG 0x58 +#define SEC_RAS_CE_REG 0x301050 +#define SEC_RAS_FE_REG 0x301054 +#define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_RAS_NFE_ENB_MSK 0x177 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x0100 #define SEC_MEM_INIT_DONE_REG 0x0104 -#define SEC_QM_ABNORMAL_INT_MASK 0x100004 #define SEC_CONTROL_REG 0x0200 #define SEC_TRNG_EN_SHIFT 8 @@ -68,8 +67,10 @@ #define SEC_INTERFACE_USER_CTRL0_REG 0x0220 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224 +#define SEC_SAA_EN_REG 0x0270 +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 #define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG2 0x038c +#define SEC_BD_ERR_CHK_EN_REG3 0x038c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -77,8 +78,8 @@ #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 -#define SEC_VF_CNT_MASK 0xffffffc0 #define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SINGLE_PORT_MAX_TRANS 0x2060 #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 @@ -122,6 +123,7 @@ static struct sec_dfx_item sec_dfx_labels[] = { {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, @@ -191,7 +193,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = { }; static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); 
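/*
 * A minimal caller-side sketch of the backlog semantics introduced above in
 * sec_bd_send()/sec_back_req_clear(): when the queue is saturated, a request
 * flagged CRYPTO_TFM_REQ_MAY_BACKLOG is parked on qp_ctx->backlog and the
 * submit path returns -EBUSY; once the request is re-issued, its completion
 * callback first fires with -EINPROGRESS and later with the final status.
 * Everything below except those semantics is an illustrative assumption
 * (the helper name, the use of the crypto_wait_req() helpers), not code
 * from this patch.
 */
#include <linux/crypto.h>
#include <crypto/skcipher.h>

static int example_encrypt_may_backlog(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/*
	 * crypto_wait_req() treats both -EINPROGRESS and the "accepted into
	 * the backlog" -EBUSY as "sleep until the callback reports a final
	 * status", so a backlogged request is handled transparently.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}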
-MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); +MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, @@ -280,7 +282,7 @@ static int sec_engine_init(struct hisi_qm *qm) reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { - dev_err(&qm->pdev->dev, "fail to init sec mem\n"); + pci_err(qm->pdev, "fail to init sec mem\n"); return ret; } @@ -296,25 +298,25 @@ static int sec_engine_init(struct hisi_qm *qm) reg |= SEC_USER1_SMMU_NORMAL; writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel(SEC_SINGLE_PORT_MAX_TRANS, + qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); + + writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + + /* Enable sm4 extra mode, as ctr/ecb */ + writel_relaxed(SEC_BD_ERR_CHK_EN0, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); - writel_relaxed(SEC_BD_ERR_CHK_EN2, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2)); - - /* enable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); - reg |= SEC_CLK_GATE_ENABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); /* config endian */ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); reg |= sec_get_endian(qm); writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_XTS_MIV_ENABLE_MSK, - qm->io_base + SEC_XTS_MIV_ENABLE_REG); - return 0; } @@ -346,10 +348,17 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm) /* sec_debug_regs_clear() - clear the sec debug regs */ static void sec_debug_regs_clear(struct hisi_qm *qm) { + int i; + /* clear current_qm */ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + /* clear sec dfx regs */ + writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + readl(qm->io_base + sec_dfx_regs[i].offset); + /* clear rdclr_en */ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); @@ -362,14 +371,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); - dev_info(&qm->pdev->dev, "V1 not support hw error handle\n"); + pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE); + writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); /* enable SEC hw error interrupts */ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -382,14 +391,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, qm->io_base + SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 val; - val = readl(qm->io_base + SEC_CONTROL_REG); + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -402,7 +411,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, qm->io_base + 
SEC_CONTROL_REG); + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); } static u32 sec_current_qm_read(struct sec_debug_file *file) @@ -577,20 +586,20 @@ static int sec_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, sec_debugfs_atomic64_set, "%lld\n"); -static int sec_core_debug_init(struct sec_dev *sec) +static int sec_core_debug_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; struct debugfs_regset32 *regset; struct dentry *tmp_d; int i; - tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); + tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) - return -ENOENT; + return -ENOMEM; regset->regs = sec_dfx_regs; regset->nregs = ARRAY_SIZE(sec_dfx_regs); @@ -609,44 +618,44 @@ static int sec_core_debug_init(struct sec_dev *sec) return 0; } -static int sec_debug_init(struct sec_dev *sec) +static int sec_debug_init(struct hisi_qm *qm) { + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; - for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { - spin_lock_init(&sec->debug.files[i].lock); - sec->debug.files[i].index = i; - sec->debug.files[i].qm = &sec->qm; - - debugfs_create_file(sec_dbg_file_name[i], 0600, - sec->qm.debug.debug_root, - sec->debug.files + i, - &sec_dbg_fops); + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + spin_lock_init(&sec->debug.files[i].lock); + sec->debug.files[i].index = i; + sec->debug.files[i].qm = qm; + + debugfs_create_file(sec_dbg_file_name[i], 0600, + qm->debug.debug_root, + sec->debug.files + i, + &sec_dbg_fops); + } } - return sec_core_debug_init(sec); + return sec_core_debug_init(qm); } -static int sec_debugfs_init(struct sec_dev *sec) +static int sec_debugfs_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; struct device *dev = &qm->pdev->dev; int ret; qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); - qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { - ret = sec_debug_init(sec); - if (ret) - goto failed_to_create; - } + ret = sec_debug_init(qm); + if (ret) + goto failed_to_create; + return 0; @@ -656,9 +665,9 @@ failed_to_create: return ret; } -static void sec_debugfs_exit(struct sec_dev *sec) +static void sec_debugfs_exit(struct hisi_qm *qm) { - debugfs_remove_recursive(sec->qm.debug.debug_root); + debugfs_remove_recursive(qm->debug.debug_root); } static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) @@ -677,8 +686,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", SEC_ECC_NUM(err_val)); - dev_err(dev, "multi ecc sram addr=0x%x\n", - SEC_ECC_ADDR(err_val)); } } errs++; @@ -868,7 +875,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_uninit; } - ret = sec_debugfs_init(sec); + ret = sec_debugfs_init(qm); if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); @@ -893,7 +900,7 @@ err_crypto_unregister: err_remove_from_list: hisi_qm_del_from_list(qm, &sec_devices); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); hisi_qm_stop(qm); err_probe_uninit: @@ -917,7 +924,7 @@ 
static void sec_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev); - sec_debugfs_exit(sec); + sec_debugfs_exit(qm); (void)hisi_qm_stop(qm); @@ -987,5 +994,6 @@ module_exit(sec_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>"); +MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>"); MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>"); MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index f3ed4c0e5493..4484be13812b 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -76,7 +76,7 @@ struct hisi_zip_sqe { u32 rsvd1[4]; }; -int zip_create_qps(struct hisi_qp **qps, int ctx_num); +int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); int hisi_zip_register_to_crypto(void); void hisi_zip_unregister_from_crypto(void); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index c73707c2e539..01fd6a78111d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -158,13 +158,13 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) hisi_qm_release_qp(ctx->qp); } -static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; struct hisi_zip *hisi_zip; int ret, i, j; - ret = zip_create_qps(qps, HZIP_CTX_Q_NUM); + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); if (ret) { pr_err("Can not create zip qps!\n"); return -ENODEV; @@ -379,7 +379,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm) struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); int ret; - ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); + ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node); if (ret) return ret; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 2229a21ae7c8..e2845b2c963d 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -234,9 +234,10 @@ static const struct pci_device_id hisi_zip_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); -int zip_create_qps(struct hisi_qp **qps, int qp_num) +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) { - int node = cpu_to_node(smp_processor_id()); + if (node == NUMA_NO_NODE) + node = cpu_to_node(smp_processor_id()); return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 0e25fc3087f3..87226b7c2795 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -330,7 +330,7 @@ static int img_hash_write_via_dma(struct img_hash_dev *hdev) static int img_hash_dma_init(struct img_hash_dev *hdev) { struct dma_slave_config dma_conf; - int err = -EINVAL; + int err; hdev->dma_lch = dma_request_chan(hdev->dev, "tx"); if (IS_ERR(hdev->dma_lch)) { diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 2cb53fbae841..fa7398e68858 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) static int safexcel_request_ring_irq(void *pdev, int 
irqid, int is_pci_dev, + int ring_id, irq_handler_t handler, irq_handler_t threaded_handler, struct safexcel_ring_irq_data *ring_irq_priv) { - int ret, irq; + int ret, irq, cpu; struct device *dev; if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) { @@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid, return ret; } + /* Set affinity */ + cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + return irq; } @@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev, irq = safexcel_request_ring_irq(pdev, EIP197_IRQ_NUMBER(i, is_pci_dev), is_pci_dev, + i, safexcel_irq_ring, safexcel_irq_ring_thread, ring_irq); @@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev, return irq; } + priv->ring[i].irq = irq; priv->ring[i].work_data.priv = priv; priv->ring[i].work_data.ring = i; INIT_WORK(&priv->ring[i].work_data.work, @@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev) clk_disable_unprepare(priv->reg_clk); clk_disable_unprepare(priv->clk); - for (i = 0; i < priv->config.rings; i++) + for (i = 0; i < priv->config.rings; i++) { + irq_set_affinity_hint(priv->ring[i].irq, NULL); destroy_workqueue(priv->ring[i].workqueue); + } return 0; } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 94016c505abb..7c5fe382d272 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -707,6 +707,9 @@ struct safexcel_ring { */ struct crypto_async_request *req; struct crypto_async_request *backlog; + + /* irq of this ring */ + int irq; }; /* EIP integration context flags */ diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 0c5e80c3f6e3..1ac3253b7903 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1300,6 +1300,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .cra_driver_name = "safexcel-ecb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1337,6 +1338,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .cra_driver_name = "safexcel-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1374,6 +1376,7 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = { .cra_driver_name = "safexcel-cfb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1411,6 +1414,7 @@ struct safexcel_alg_template safexcel_alg_ofb_aes = { .cra_driver_name = "safexcel-ofb-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1485,6 +1489,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = { .cra_driver_name = "safexcel-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 
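/*
 * Context for the cra_flags churn across this series: the new
 * CRYPTO_ALG_ALLOCATES_MEMORY flag marks implementations that may allocate
 * memory per request. A storage user that must guarantee forward progress
 * under memory pressure can pass the flag in the allocation mask to skip
 * such implementations. A sketch under assumptions: the algorithm name and
 * the wrapper function are illustrative, not part of the patch.
 */
#include <crypto/skcipher.h>

static struct crypto_skcipher *pick_nonallocating_cbc_aes(void)
{
	/*
	 * type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only algorithms with
	 * the flag clear can match, so implementations tagged with it in
	 * this diff (hisi_sec, safexcel, cesa, ...) are skipped.
	 */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}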
@@ -1545,6 +1550,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = { .cra_driver_name = "safexcel-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1582,6 +1588,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = { .cra_driver_name = "safexcel-ecb-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1642,6 +1649,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { .cra_driver_name = "safexcel-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1679,6 +1687,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { .cra_driver_name = "safexcel-ecb-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1751,6 +1760,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1786,6 +1796,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1821,6 +1832,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1856,6 +1868,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1891,6 +1904,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1927,6 +1941,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct safexcel_cipher_ctx), @@ -1963,6 +1978,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1999,6 +2015,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2035,6 +2052,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2071,6 +2089,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2107,6 +2126,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2143,6 +2163,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2179,6 +2200,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2215,6 +2237,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2251,6 +2274,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = { .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2285,6 +2309,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = { .cra_driver_name = 
"safexcel-authenc-hmac-sha1-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2319,6 +2344,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2353,6 +2379,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2387,6 +2414,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2421,6 +2449,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = { .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2534,6 +2563,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = { .cra_driver_name = "safexcel-xts-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = XTS_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2646,6 +2676,7 @@ struct safexcel_alg_template safexcel_alg_gcm = { .cra_driver_name = "safexcel-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2769,6 +2800,7 @@ struct safexcel_alg_template safexcel_alg_ccm = { .cra_driver_name = "safexcel-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2832,6 +2864,7 @@ struct safexcel_alg_template safexcel_alg_chacha20 = { .cra_driver_name = "safexcel-chacha20", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -2993,6 +3026,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, @@ -3032,6 +3066,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly_esp = { /* +1 to put it above HW chacha + SW poly */ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, 
.cra_blocksize = 1, @@ -3110,6 +3145,7 @@ struct safexcel_alg_template safexcel_alg_ecb_sm4 = { .cra_driver_name = "safexcel-ecb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3147,6 +3183,7 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = { .cra_driver_name = "safexcel-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3184,6 +3221,7 @@ struct safexcel_alg_template safexcel_alg_ofb_sm4 = { .cra_driver_name = "safexcel-ofb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3221,6 +3259,7 @@ struct safexcel_alg_template safexcel_alg_cfb_sm4 = { .cra_driver_name = "safexcel-cfb-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3273,6 +3312,7 @@ struct safexcel_alg_template safexcel_alg_ctr_sm4 = { .cra_driver_name = "safexcel-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3332,6 +3372,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3441,6 +3482,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SM4_BLOCK_SIZE, @@ -3476,6 +3518,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3510,6 +3553,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = { .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3578,6 +3622,7 @@ struct safexcel_alg_template safexcel_alg_rfc4106_gcm = { .cra_driver_name = "safexcel-rfc4106-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3622,6 +3667,7 @@ struct safexcel_alg_template safexcel_alg_rfc4543_gcm = { .cra_driver_name = "safexcel-rfc4543-gcm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -3713,6 +3759,7 @@ struct safexcel_alg_template safexcel_alg_rfc4309_ccm = { .cra_driver_name = "safexcel-rfc4309-ccm-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 43962bc709c6..16a467969d8e 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -992,6 +992,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = { .cra_driver_name = "safexcel-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1235,6 +1236,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .cra_driver_name = "safexcel-hmac-sha1", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1291,6 +1293,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = { .cra_driver_name = "safexcel-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1347,6 +1350,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = { .cra_driver_name = "safexcel-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1418,6 +1422,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .cra_driver_name = "safexcel-hmac-sha224", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1489,6 +1494,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .cra_driver_name = "safexcel-hmac-sha256", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1545,6 +1551,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = { .cra_driver_name = "safexcel-sha512", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1601,6 +1608,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = { .cra_driver_name = "safexcel-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1672,6 +1680,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = { .cra_driver_name = "safexcel-hmac-sha512", .cra_priority = 
SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1743,6 +1752,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = { .cra_driver_name = "safexcel-hmac-sha384", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1799,6 +1809,7 @@ struct safexcel_alg_template safexcel_alg_md5 = { .cra_driver_name = "safexcel-md5", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1871,6 +1882,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = { .cra_driver_name = "safexcel-hmac-md5", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -1952,6 +1964,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = { .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2041,6 +2054,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = { .cra_driver_name = "safexcel-cbcmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2136,6 +2150,7 @@ struct safexcel_alg_template safexcel_alg_xcbcmac = { .cra_driver_name = "safexcel-xcbc-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2232,6 +2247,7 @@ struct safexcel_alg_template safexcel_alg_cmac = { .cra_driver_name = "safexcel-cmac-aes", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2288,6 +2304,7 @@ struct safexcel_alg_template safexcel_alg_sm3 = { .cra_driver_name = "safexcel-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), @@ -2359,6 +2376,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sm3 = { .cra_driver_name = "safexcel-hmac-sm3", .cra_priority = SAFEXCEL_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index ad73fc946682..f478bb0a566a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1402,7 +1402,8 @@ static int __init ixp_module_init(void) /* block ciphers */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; if 
(!cra->setkey) cra->setkey = ablk_setkey; if (!cra->encrypt) @@ -1435,7 +1436,8 @@ static int __init ixp_module_init(void) /* authenc */ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC; + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; cra->setkey = cra->setkey ?: aead_setkey; cra->setauthsize = aead_setauthsize; cra->encrypt = aead_encrypt; diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 8a5f0b0bdf77..d63bca9718dc 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -438,7 +438,7 @@ static int mv_cesa_probe(struct platform_device *pdev) struct mv_cesa_dev *cesa; struct mv_cesa_engine *engines; struct resource *res; - int irq, ret, i; + int irq, ret, i, cpu; u32 sram_size; if (cesa_dev) { @@ -505,6 +505,8 @@ static int mv_cesa_probe(struct platform_device *pdev) goto err_cleanup; } + engine->irq = irq; + /* * Not all platforms can gate the CESA clocks: do not complain * if the clock does not exist. @@ -548,6 +550,10 @@ static int mv_cesa_probe(struct platform_device *pdev) if (ret) goto err_cleanup; + /* Set affinity */ + cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE); + irq_set_affinity_hint(irq, get_cpu_mask(cpu)); + crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN); atomic_set(&engine->load, 0); INIT_LIST_HEAD(&engine->complete_queue); @@ -570,6 +576,8 @@ err_cleanup: clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + if (cesa->engines[i].irq > 0) + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return ret; @@ -586,6 +594,7 @@ static int mv_cesa_remove(struct platform_device *pdev) clk_disable_unprepare(cesa->engines[i].zclk); clk_disable_unprepare(cesa->engines[i].clk); mv_cesa_put_sram(pdev, i); + irq_set_affinity_hint(cesa->engines[i].irq, NULL); } return 0; diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index e8632d5f343f..0c9cbb681e49 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ b/drivers/crypto/marvell/cesa/cesa.h @@ -457,6 +457,7 @@ struct mv_cesa_engine { atomic_t load; struct mv_cesa_tdma_chain chain; struct list_head complete_queue; + int irq; }; /** diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index f133c2ccb5ae..45b4d7a29833 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -508,7 +508,8 @@ struct skcipher_alg mv_cesa_ecb_des_alg = { .cra_name = "ecb(des)", .cra_driver_name = "mv-ecb-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -558,7 +559,8 @@ struct skcipher_alg mv_cesa_cbc_des_alg = { .cra_name = "cbc(des)", .cra_driver_name = "mv-cbc-des", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), .cra_alignmask = 0, @@ -616,7 +618,8 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "mv-ecb-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -669,7 +672,8 @@ struct skcipher_alg mv_cesa_cbc_des3_ede_alg = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "mv-cbc-des3-ede", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), .cra_alignmask = 0, @@ -741,7 +745,8 @@ struct skcipher_alg mv_cesa_ecb_aes_alg = { .cra_name = "ecb(aes)", .cra_driver_name = "mv-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, @@ -790,7 +795,8 @@ struct skcipher_alg mv_cesa_cbc_aes_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "mv-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), .cra_alignmask = 0, diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index b971284332b6..bd0bd9ffd6e9 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -921,6 +921,7 @@ struct ahash_alg mv_md5_alg = { .cra_driver_name = "mv-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -991,6 +992,7 @@ struct ahash_alg mv_sha1_alg = { .cra_driver_name = "mv-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1064,6 +1066,7 @@ struct ahash_alg mv_sha256_alg = { .cra_driver_name = "mv-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), @@ -1298,6 +1301,7 @@ struct ahash_alg mv_ahmac_md5_alg = { .cra_driver_name = "mv-hmac-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1368,6 +1372,7 @@ struct ahash_alg mv_ahmac_sha1_alg = { .cra_driver_name = "mv-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), @@ -1438,6 +1443,7 @@ struct ahash_alg mv_ahmac_sha256_alg = { .cra_driver_name = "mv-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index fec8f3b9b112..cc103b1bc224 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ 
b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -878,11 +878,11 @@ static int copy_ucode_to_dma_mem(struct device *dev, /* Byte swap 64-bit */ for (i = 0; i < (ucode->size / 8); i++) - ((u64 *)ucode->align_va)[i] = + ((__be64 *)ucode->align_va)[i] = cpu_to_be64(((u64 *)ucode->align_va)[i]); /* Ucode needs 16-bit swap */ for (i = 0; i < (ucode->size / 2); i++) - ((u16 *)ucode->align_va)[i] = + ((__be16 *)ucode->align_va)[i] = cpu_to_be16(((u16 *)ucode->align_va)[i]); return 0; } @@ -1463,8 +1463,8 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev, struct otx_cpt_eng_grps *eng_grps, int pf_type) { - struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 }; - struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} }; + struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; + struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; struct tar_arch_info_t *tar_arch = NULL; char *tar_filename; int i, ret = 0; diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h index 14f02b60d0c2..8620ac87a447 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h @@ -74,7 +74,7 @@ struct otx_cpt_ucode_ver_num { struct otx_cpt_ucode_hdr { struct otx_cpt_ucode_ver_num ver_num; u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ]; - u32 code_length; + __be32 code_length; u32 padding[3]; }; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 1e0a1d70ebd3..90bb31329d4b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -239,7 +239,6 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, struct otx_cpt_fc_ctx *fctx = &rctx->fctx; int ivsize = crypto_skcipher_ivsize(stfm); u32 start = req->cryptlen - ivsize; - u64 *ctrl_flags = NULL; gfp_t flags; flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
@@ -280,8 +279,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm)); - ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags; - *ctrl_flags = cpu_to_be64(*ctrl_flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset @@ -692,20 +690,17 @@ static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg) static inline void swap_data32(void *buf, u32 len) { - u32 *store = (u32 *) buf; - int i = 0; - - for (i = 0 ; i < len/sizeof(u32); i++, store++) - *store = cpu_to_be32(*store); + cpu_to_be32_array(buf, buf, len / 4); } static inline void swap_data64(void *buf, u32 len) { - u64 *store = (u64 *) buf; + __be64 *dst = buf; + u64 *src = buf; int i = 0; - for (i = 0 ; i < len/sizeof(u64); i++, store++) - *store = cpu_to_be64(*store); + for (i = 0 ; i < len / 8; i++, src++, dst++) + *dst = cpu_to_be64p(src); } static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) @@ -1012,7 +1007,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, /* Unknown cipher type */ return -EINVAL; } - rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags); + rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags); req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER; req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ; @@ -1032,7 +1027,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc, fctx->enc.enc_ctrl.e.aes_key = ctx->key_type; fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type; fctx->enc.enc_ctrl.e.mac_len = mac_len; - fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); + fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags); /* * Storing Packet Data Information in offset @@ -1306,7 +1301,7 @@ static int otx_cpt_aead_null_decrypt(struct aead_request *req) static struct skcipher_alg otx_cpt_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "cpt_xts_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1323,7 +1318,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cpt_cbc_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1340,7 +1335,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "cpt_ecb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1357,7 +1352,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cfb(aes)", .base.cra_driver_name = "cpt_cfb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), .base.cra_alignmask = 7, @@ -1374,7 +1369,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cpt_cbc_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = 
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1391,7 +1386,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { }, { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "cpt_ecb_des3_ede", - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx), .base.cra_alignmask = 7, @@ -1412,7 +1407,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "cpt_hmac_sha1_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1431,7 +1426,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "cpt_hmac_sha256_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1450,7 +1445,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "cpt_hmac_sha384_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1469,7 +1464,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "cpt_hmac_sha512_cbc_aes", .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1488,7 +1483,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha1),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha1_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1507,7 +1502,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha256),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha256_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1526,7 +1521,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha384),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha384_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, @@ -1545,7 +1540,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "authenc(hmac(sha512),ecb(cipher_null))", .cra_driver_name = "cpt_hmac_sha512_ecb_null", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, 
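Every cra_flags hunk in this file adds CRYPTO_ALG_ALLOCATES_MEMORY, advertising that the implementation may allocate memory while processing a request. The flag is designed to be used as an allocation mask: a caller that cannot tolerate allocations on its I/O path (the storage users named in the pull message) passes it in the mask argument so only implementations with the flag clear can match. A minimal sketch, where "xts(aes)" is just an example algorithm name:

	struct crypto_skcipher *tfm;

	/* type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: match only
	 * implementations that never allocate while handling a request */
	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);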
@@ -1564,7 +1559,7 @@ static struct aead_alg otx_cpt_aeads[] = { { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "cpt_rfc4106_gcm_aes", .cra_blocksize = 1, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx), .cra_priority = 4001, .cra_alignmask = 0, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h index 67cc0025f5d5..4181b5c5c356 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h @@ -66,7 +66,8 @@ enum otx_cpt_aes_key_len { }; union otx_cpt_encr_ctrl { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 enc_cipher:4; @@ -138,7 +139,8 @@ struct otx_cpt_des3_ctx { }; union otx_cpt_offset_ctrl_word { - u64 flags; + __be64 flags; + u64 cflags; struct { #if defined(__BIG_ENDIAN_BITFIELD) u64 reserved:32; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index 239195cccf93..cbc3d7869ebe 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -202,11 +202,10 @@ static inline int setup_sgio_list(struct pci_dev *pdev, info->dlen = dlen; info->in_buffer = (u8 *)info + info_len; - ((u16 *)info->in_buffer)[0] = req->outcnt; - ((u16 *)info->in_buffer)[1] = req->incnt; + ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); + ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); ((u16 *)info->in_buffer)[2] = 0; ((u16 *)info->in_buffer)[3] = 0; - *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer); /* Setup gather (input) components */ if (setup_sgio_components(pdev, req->in, req->incnt, @@ -367,8 +366,6 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); - /* 64-bit swap for microcode data reads, not needed for addresses*/ - iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64); iq_cmd.dptr = info->dptr_baddr; iq_cmd.rptr = info->rptr_baddr; iq_cmd.cptr.u64 = 0; @@ -436,7 +433,7 @@ static int cpt_process_ccode(struct pci_dev *pdev, u8 ccode = cpt_status->s.compcode; union otx_cpt_error_code ecode; - ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer)); + ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer); switch (ccode) { case CPT_COMP_E_FAULT: dev_err(&pdev->dev, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h index a4c9ff730b13..d912fe0c532d 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h @@ -92,10 +92,10 @@ union otx_cpt_ctrl_info { union otx_cpt_iq_cmd_word0 { u64 u64; struct { - u16 opcode; - u16 param1; - u16 param2; - u16 dlen; + __be16 opcode; + __be16 param1; + __be16 param2; + __be16 dlen; } s; }; @@ -123,16 +123,16 @@ struct otx_cpt_sglist_component { union { u64 len; struct { - u16 len0; - u16 len1; - u16 len2; - u16 len3; + __be16 len0; + __be16 len1; + __be16 len2; + __be16 len3; } s; } u; - u64 ptr0; - u64 ptr1; - u64 ptr2; - u64 ptr3; + __be64 ptr0; + __be64 ptr1; + __be64 ptr2; + __be64 ptr3; }; struct otx_cpt_pending_entry { diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 78d660d963e2..4ad3571ab6af 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ 
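The octeontx changes that end here are sparse-annotation fixes: fields holding big-endian wire data are retyped from u16/u32/u64 to __be16/__be32/__be64, and every conversion goes through cpu_to_be*()/be*_to_cpup(), so a sparse run (make C=1) can flag a missing or doubled byte swap. The shape of the idiom, shown on an illustrative struct rather than the driver's real one:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct wire_hdr {
		__be16 opcode;		/* big-endian on the wire */
		__be64 addr;
	};

	static void fill_hdr(struct wire_hdr *hdr, u16 opcode, u64 addr)
	{
		hdr->opcode = cpu_to_be16(opcode);	/* CPU -> BE */
		hdr->addr   = cpu_to_be64(addr);
	}

	static u64 read_addr(const struct wire_hdr *hdr)
	{
		return be64_to_cpu(hdr->addr);		/* BE -> CPU */
	}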
b/drivers/crypto/mediatek/mtk-aes.c @@ -137,8 +137,6 @@ struct mtk_aes_gcm_ctx { u32 authsize; size_t textlen; - - struct crypto_skcipher *ctr; }; struct mtk_aes_drv { @@ -996,17 +994,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, u32 keylen) { struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - struct crypto_skcipher *ctr = gctx->ctr; - struct { - u32 hash[4]; - u8 iv[8]; - - struct crypto_wait wait; - - struct scatterlist sg[1]; - struct skcipher_request req; - } *data; + u8 hash[AES_BLOCK_SIZE] __aligned(4) = {}; + struct crypto_aes_ctx aes_ctx; int err; switch (keylen) { @@ -1026,39 +1015,18 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, ctx->keylen = SIZE_IN_WORDS(keylen); - /* Same as crypto_gcm_setkey() from crypto/gcm.c */ - crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(ctr, key, keylen); + err = aes_expandkey(&aes_ctx, key, keylen); if (err) return err; - data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - crypto_init_wait(&data->wait); - sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); - skcipher_request_set_tfm(&data->req, ctr); - skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &data->wait); - skcipher_request_set_crypt(&data->req, data->sg, data->sg, - AES_BLOCK_SIZE, data->iv); - - err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), - &data->wait); - if (err) - goto out; + aes_encrypt(&aes_ctx, hash, hash); + memzero_explicit(&aes_ctx, sizeof(aes_ctx)); mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen); - mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash, + mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash, AES_BLOCK_SIZE); -out: - kzfree(data); - return err; + + return 0; } static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead, @@ -1095,32 +1063,17 @@ static int mtk_aes_gcm_init(struct crypto_aead *aead) { struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->ctr)) { - pr_err("Error allocating ctr(aes)\n"); - return PTR_ERR(ctx->ctr); - } - crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx)); ctx->base.start = mtk_aes_gcm_start; return 0; } -static void mtk_aes_gcm_exit(struct crypto_aead *aead) -{ - struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - - crypto_free_skcipher(ctx->ctr); -} - static struct aead_alg aes_gcm_alg = { .setkey = mtk_aes_gcm_setkey, .setauthsize = mtk_aes_gcm_setauthsize, .encrypt = mtk_aes_gcm_encrypt, .decrypt = mtk_aes_gcm_decrypt, .init = mtk_aes_gcm_init, - .exit = mtk_aes_gcm_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d84530293036..909a7eb748e3 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -97,7 +97,7 @@ struct dcp_async_ctx { unsigned int hot:1; /* Crypto-specific context */ - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; }; @@ -105,6 +105,7 @@ struct dcp_async_ctx { struct dcp_aes_req_ctx { unsigned int enc:1; unsigned int ecb:1; + struct skcipher_request fallback_req; // keep at the end }; struct dcp_sha_req_ctx { @@ -426,21 +427,20 
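The mtk-aes hunk above removes an entire ctr(aes) transform that existed only to encrypt one block at setkey time. GCM's hash subkey is H = E_K(0^128), so the driver can now derive it synchronously with the AES library instead of allocating, keying, and waiting on a full skcipher. The core of the derivation, condensed from the hunk (error handling trimmed):

	#include <crypto/aes.h>
	#include <linux/string.h>

	u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};	/* start from the zero block */
	struct crypto_aes_ctx aes_ctx;

	if (aes_expandkey(&aes_ctx, key, keylen))
		return -EINVAL;
	aes_encrypt(&aes_ctx, hash, hash);		/* hash = E_K(0^128) */
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));	/* scrub the round keys */

Dropping the ctr(aes) transform also lets the .exit callback and the ctx->ctr pointer disappear entirely.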
@@ static int dcp_chan_thread_aes(void *data) static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); int ret; - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); if (enc) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -510,24 +510,25 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, * but is supported by in-kernel software implementation, we use * software fallback. */ - crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(actx->fallback, + crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(actx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(actx->fallback, key, len); + return crypto_skcipher_setkey(actx->fallback, key, len); } static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); actx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) + + crypto_skcipher_reqsize(blk)); return 0; } @@ -535,7 +536,7 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm) { struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(actx->fallback); + crypto_free_skcipher(actx->fallback); } /* diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 6a828bbecea4..d8aec5153b21 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1382,7 +1382,8 @@ static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl) snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); alg->base.cra_priority = N2_CRA_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY; alg->base.cra_blocksize = tmpl->block_size; p->enc_type = tmpl->enc_type; alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index b5aff20c5900..4fd14d90cc40 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -139,7 +139,7 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd) for (i = 0; i < 
key32; i++) { omap_aes_write(dd, AES_REG_KEY(dd, i), - __le32_to_cpu(dd->ctx->key[i])); + (__force u32)cpu_to_le32(dd->ctx->key[i])); } if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv) @@ -363,7 +363,7 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) { int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -409,7 +409,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zu\n", dd->total); omap_aes_dma_stop(dd); @@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode) !!(mode & FLAGS_CBC)); if (req->cryptlen < aes_fallback_sz) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, NULL, - NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); if (mode & FLAGS_ENCRYPT) - ret = crypto_skcipher_encrypt(subreq); + ret = crypto_skcipher_encrypt(&rctx->fallback_req); else - ret = crypto_skcipher_decrypt(subreq); - - skcipher_request_zero(subreq); + ret = crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } dd = omap_aes_find_dev(rctx); @@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) return 0; @@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(&tfm->base); struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(blk)) return PTR_ERR(blk); ctx->fallback = blk; - crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) + + crypto_skcipher_reqsize(blk)); ctx->enginectx.op.prepare_request = omap_aes_prepare_req; ctx->enginectx.op.unprepare_request = NULL; @@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->fallback) - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 2d111bf906e1..23d073e87bb8 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -97,7 +97,7 @@ struct omap_aes_ctx { int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; u8 nonce[4]; - struct crypto_sync_skcipher 
*fallback; + struct crypto_skcipher *fallback; }; struct omap_aes_gcm_ctx { @@ -110,6 +110,7 @@ struct omap_aes_reqctx { unsigned long mode; u8 iv[AES_BLOCK_SIZE]; u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)]; + struct skcipher_request fallback_req; // keep at the end }; #define OMAP_AES_QUEUE_LENGTH 1 diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 8eda43319204..c9d38bcfd1c7 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -87,7 +87,7 @@ struct omap_des_ctx { struct omap_des_dev *dd; int keylen; - u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; + __le32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; unsigned long flags; }; @@ -461,7 +461,7 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd) crypto_skcipher_reqtfm(dd->req)); int err; - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); if (!dd->pio_only) { err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, @@ -504,7 +504,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) { - pr_debug("total: %d\n", dd->total); + pr_debug("total: %zd\n", dd->total); omap_des_dma_stop(dd); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 82691a057d2a..954d703f2981 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -357,10 +357,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) if (big_endian) for (i = 0; i < d; i++) - hash[i] = be32_to_cpu(in[i]); + hash[i] = be32_to_cpup((__be32 *)in + i); else for (i = 0; i < d; i++) - hash[i] = le32_to_cpu(in[i]); + hash[i] = le32_to_cpup((__le32 *)in + i); } static int omap_sham_hw_init(struct omap_sham_dev *dd) @@ -522,7 +522,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, int mlen; struct sg_mapping_iter mi; - dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); dd->pdata->write_ctrl(dd, length, final, 0); @@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, struct dma_slave_config cfg; int ret; - dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", + dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n", ctx->digcnt, length, final); if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { @@ -871,7 +871,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) nbytes += req->nbytes - rctx->offset; dev_dbg(rctx->dd->dev, - "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n", + "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n", __func__, nbytes, bs, rctx->total, rctx->offset, rctx->bufcnt); @@ -932,7 +932,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } -struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) { struct omap_sham_dev *dd; @@ -1023,7 +1023,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) bool final = (ctx->flags & BIT(FLAGS_FINUP)) && !(dd->flags & BIT(FLAGS_HUGE)); - dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d", + dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d", ctx->total, ctx->digcnt, final); if (ctx->total < get_block_size(ctx) || @@ -1036,7 +1036,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) err = omap_sham_xmit_dma(dd, ctx->total, final); /* wait 
for dma completion before can take more data */ - dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); + dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt); return err; } @@ -1097,7 +1097,7 @@ static int omap_sham_finish(struct ahash_request *req) err = omap_sham_finish_hmac(req); } - dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); + dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt); return err; } diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 7384e91c8b32..dac6eb37fff9 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -86,6 +86,7 @@ struct spacc_req { dma_addr_t src_addr, dst_addr; struct spacc_ddt *src_ddt, *dst_ddt; void (*complete)(struct spacc_req *req); + struct skcipher_request fallback_req; // keep at the end }; struct spacc_aead { @@ -158,7 +159,7 @@ struct spacc_ablk_ctx { * The fallback cipher. If the operation can't be done in hardware, * fallback to a software version. */ - struct crypto_sync_skcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; }; /* AEAD cipher context. */ @@ -792,13 +793,13 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, * Set the fallback transform to use the same request flags as * the hardware transform. */ - crypto_sync_skcipher_clear_flags(ctx->sw_cipher, + crypto_skcipher_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->sw_cipher, + crypto_skcipher_set_flags(ctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len); + err = crypto_skcipher_setkey(ctx->sw_cipher, key, len); if (err) goto sw_setkey_failed; } @@ -900,7 +901,7 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, struct crypto_tfm *old_tfm = crypto_skcipher_tfm(crypto_skcipher_reqtfm(req)); struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher); + struct spacc_req *dev_req = skcipher_request_ctx(req); int err; /* @@ -908,13 +909,13 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req, * the ciphering has completed, put the old transform back into the * request. */ - skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher); - skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, + skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher); + skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst, req->cryptlen, req->iv); - err = is_encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + err = is_encrypt ? 
crypto_skcipher_encrypt(&dev_req->fallback_req) : + crypto_skcipher_decrypt(&dev_req->fallback_req); return err; } @@ -1007,19 +1008,24 @@ static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm) ctx->generic.flags = spacc_alg->type; ctx->generic.engine = engine; if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - ctx->sw_cipher = crypto_alloc_sync_skcipher( - alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); + ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->sw_cipher)) { dev_warn(engine->dev, "failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ctx->sw_cipher); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) + + crypto_skcipher_reqsize(ctx->sw_cipher)); + } else { + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req, + fallback_req)); } + ctx->generic.key_offs = spacc_alg->key_offs; ctx->generic.iv_offs = spacc_alg->iv_offs; - crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req)); - return 0; } @@ -1027,7 +1033,7 @@ static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->sw_cipher); + crypto_free_skcipher(ctx->sw_cipher); } static int spacc_ablk_encrypt(struct skcipher_request *req) @@ -1226,6 +1232,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1251,6 +1258,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1274,7 +1282,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1298,7 +1307,8 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), .base.cra_module = THIS_MODULE, @@ -1321,6 +1331,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "cbc-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), @@ -1345,6 +1356,7 @@ static struct spacc_alg ipsec_engine_algs[] = { .base.cra_driver_name = "ecb-des3-ede-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = 
sizeof(struct spacc_ablk_ctx), @@ -1376,6 +1388,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1406,6 +1419,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1436,6 +1450,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-aes-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, @@ -1466,6 +1481,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1497,6 +1513,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1527,6 +1544,7 @@ static struct spacc_aead ipsec_engine_aeads[] = { "cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, @@ -1556,6 +1574,7 @@ static struct spacc_alg l2_engine_algs[] = { .base.cra_driver_name = "f8-kasumi-picoxcell", .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .base.cra_blocksize = 8, .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx), diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 6bc68bc00d76..aee494d3da52 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_pf2vf_msg.h> diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index afc9a0a86747..8b5dd2c94ebf 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_C3XXX_HW_DATA_H_ #define ADF_C3XXX_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c index d937cc7248a5..020d099409e5 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
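From this point the QAT hunks are a mechanical license cleanup: each roughly 45-line dual BSD/GPLv2 boilerplate block collapses into a single SPDX tag plus an updated copyright line. The two comment styles visible in these hunks are deliberate and follow the kernel's license-rules documentation, which keeps /* */ in headers because they can be pulled into contexts where // comments are not accepted:

	.c files:  // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
	.h files:  /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */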
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index d2d0ae445fd8..d2fedbd7113c 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h index 934f216acf39..7945a9cd1c60 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_C3XXXVF_HW_DATA_H_ #define ADF_C3XXXVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 1dc5ac859f7b..11039fe55f61 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index 618cec360b39..844ad5ed33fc 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_pf2vf_msg.h> diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h index 17a8a32d5c63..88504d2bf30d 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_C62X_HW_DATA_H_ #define ADF_C62X_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 2bc06c89d2fe..4ba9c14383af 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 38e4bc04f407..29fd3f1091ab 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h index a28d83e77422..a6c04cf7a43c 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_C62XVF_HW_DATA_H_ #define ADF_C62XVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index a68358b31292..b8b021d54bb5 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 33f0a6251e38..c1db8c26afb6 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ #include <linux/interrupt.h> @@ -103,8 +59,8 @@ struct adf_accel_pci { struct pci_dev *pci_dev; struct adf_accel_msix msix_entries; struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; - uint8_t revid; - uint8_t sku; + u8 revid; + u8 sku; } __packed; enum dev_state { @@ -144,7 +100,7 @@ static inline const char *get_sku_info(enum dev_sku_info info) struct adf_hw_device_class { const char *name; const enum adf_device_type type; - uint32_t instances; + u32 instances; } __packed; struct adf_cfg_device_data; @@ -154,15 +110,15 @@ struct adf_etr_ring_data; struct adf_hw_device_data { struct adf_hw_device_class *dev_class; - uint32_t (*get_accel_mask)(uint32_t fuse); - uint32_t (*get_ae_mask)(uint32_t fuse); - uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); - uint32_t (*get_num_aes)(struct adf_hw_device_data *self); - uint32_t (*get_num_accels)(struct adf_hw_device_data *self); - uint32_t (*get_pf2vf_offset)(uint32_t i); - uint32_t (*get_vintmsk_offset)(uint32_t i); + u32 (*get_accel_mask)(u32 fuse); + u32 (*get_ae_mask)(u32 fuse); + u32 (*get_sram_bar_id)(struct adf_hw_device_data *self); + u32 (*get_misc_bar_id)(struct adf_hw_device_data *self); + u32 (*get_etr_bar_id)(struct adf_hw_device_data *self); + u32 (*get_num_aes)(struct adf_hw_device_data *self); + u32 (*get_num_accels)(struct adf_hw_device_data *self); + u32 (*get_pf2vf_offset)(u32 i); + u32 (*get_vintmsk_offset)(u32 i); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); @@ -173,25 +129,25 @@ struct adf_hw_device_data { int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, - const uint32_t **cfg); + const u32 **cfg); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); void (*reset_device)(struct adf_accel_dev *accel_dev); const char *fw_name; const char *fw_mmp_name; - uint32_t fuses; - uint32_t accel_capabilities_mask; - uint32_t instance_id; - uint16_t accel_mask; - uint16_t ae_mask; - uint16_t tx_rings_mask; - uint8_t tx_rx_gap; - uint8_t num_banks; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t num_engines; - uint8_t min_iov_compat_ver; + u32 fuses; + u32 accel_capabilities_mask; + u32 instance_id; + u16 accel_mask; + u16 ae_mask; + u16 tx_rings_mask; + u8 tx_rx_gap; + u8 num_banks; + u8 num_accel; + u8 num_logical_accel; + u8 num_engines; + u8 min_iov_compat_ver; } __packed; /* CSR write macro */ @@ -248,8 +204,8 @@ struct adf_accel_dev { struct tasklet_struct pf2vf_bh_tasklet; struct mutex vf2pf_lock; /* protect CSR access */ struct completion iov_msg_completion; - uint8_t compatible; - uint8_t pf_version; + u8 compatible; + u8 pf_version; } vf; }; bool is_vf; diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index a42fc42704be..c8ad85b882be 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 
license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/firmware.h> #include <linux/pci.h> #include "adf_cfg.h" @@ -118,7 +74,7 @@ int adf_ae_start(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; @@ -139,7 +95,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); if (!hw_data->fw_name) return 0; diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index d28cba34773e..1c8ca151a963 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -1,53 +1,9 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
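Beyond the SPDX swap, the adf_accel_devices.h and adf_accel_engine.c hunks above convert the driver-internal structures from the <stdint.h> spellings (uint8_t, uint16_t, uint32_t) to the kernel's own u8/u16/u32 from <linux/types.h>, the preferred style for code that never crosses the user/kernel boundary. A minimal sketch of the resulting pattern; the struct name is hypothetical, the fields echo adf_accel_pci:

#include <linux/types.h>

/* Kernel-internal state: u8/u16/u32, never uint8_t and friends. */
struct example_accel_info {	/* hypothetical illustration */
	u32 fuses;		/* fuse bits read from config space */
	u16 accel_mask;		/* one bit per accelerator */
	u8 revid;		/* PCI revision ID */
	u8 sku;
} __packed;			/* laid out without padding, as in adf_accel_pci */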
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/delay.h> +#include <linux/iopoll.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" @@ -60,6 +16,9 @@ #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 +#define ADF_CONST_TABLE_SIZE 1024 +#define ADF_ADMIN_POLL_DELAY_US 20 +#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC) static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -154,11 +113,13 @@ struct adf_admin_comms { static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out) { + int ret; + u32 status; struct adf_admin_comms *admin = accel_dev->admin; int offset = ae * ADF_ADMINMSG_LEN * 2; void __iomem *mailbox = admin->mailbox_addr; int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; - int times, received; + struct icp_qat_fw_init_admin_req *request = in; mutex_lock(&admin->lock); @@ -169,46 +130,71 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); - received = 0; - for (times = 0; times < 50; times++) { - msleep(20); - if (ADF_CSR_RD(mailbox, mb_offset) == 0) { - received = 1; - break; - } - } - if (received) + + ret = readl_poll_timeout(mailbox + mb_offset, status, + status == 0, ADF_ADMIN_POLL_DELAY_US, + ADF_ADMIN_POLL_TIMEOUT_US); + if (ret < 0) { + /* Response timeout */ + dev_err(&GET_DEV(accel_dev), + "Failed to send admin msg %d to accelerator %d\n", + request->cmd_id, ae); + } else { + /* Response received from admin message, we can now + * make response data available in "out" parameter. + */ memcpy(out, admin->virt_addr + offset + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); - else - dev_err(&GET_DEV(accel_dev), - "Failed to send admin msg to accelerator\n"); + } mutex_unlock(&admin->lock); - return received ? 
0 : -EFAULT; + return ret; +} + +static int adf_send_admin(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req, + struct icp_qat_fw_init_admin_resp *resp, + const unsigned long ae_mask) +{ + u32 ae; + + for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER) + if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) || + resp->status) + return -EFAULT; + + return 0; } -static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +static int adf_init_me(struct adf_accel_dev *accel_dev) { + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_INIT_ME; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + +static int adf_set_fw_constants(struct adf_accel_dev *accel_dev) +{ struct icp_qat_fw_init_admin_req req; struct icp_qat_fw_init_admin_resp resp; - int i; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + u32 ae_mask = hw_device->ae_mask; - memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); - req.init_admin_cmd_id = cmd; + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG; - if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { - req.init_cfg_sz = 1024; - req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; - } - for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { - memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); - if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || - resp.init_resp_hdr.status) - return -EFAULT; - } - return 0; + req.init_cfg_sz = ADF_CONST_TABLE_SIZE; + req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); } /** @@ -221,11 +207,13 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { - int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); + int ret; + ret = adf_init_me(accel_dev); if (ret) return ret; - return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); + + return adf_set_fw_constants(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index f5e960d23a7a..32102e27e559 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/aer.h> @@ -86,7 +42,7 @@ void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; - uint16_t bridge_ctl = 0; + u16 bridge_ctl = 0; if (!parent) parent = pdev; diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index 5c7fdb0fc53d..ac462796cefc 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/list.h> diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h index 6a9c6f6b5ec9..376cde61a60e 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.h +++ b/drivers/crypto/qat/qat_common/adf_cfg.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
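The adf_admin.c hunk further up is the one real behavioural change in this stretch: the hand-rolled mailbox poll (up to 50 rounds of msleep(20)) becomes a single readl_poll_timeout() call from <linux/iopoll.h>, polling every 20 microseconds with a 5-second ceiling and reporting -ETIMEDOUT instead of a blanket -EFAULT. A self-contained sketch of the call using the same constants; the wrapper function is hypothetical:

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)

/* Wait until the firmware clears the mailbox doorbell register. */
static int example_wait_for_mailbox(void __iomem *mailbox, int mb_offset)
{
	u32 status;

	/*
	 * readl_poll_timeout(addr, val, cond, sleep_us, timeout_us)
	 * re-reads *addr into val until cond is true; it returns 0 on
	 * success and -ETIMEDOUT if timeout_us expires first.
	 */
	return readl_poll_timeout(mailbox + mb_offset, status,
				  status == 0,
				  ADF_ADMIN_POLL_DELAY_US,
				  ADF_ADMIN_POLL_TIMEOUT_US);
}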
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_H_ #define ADF_CFG_H_ diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 1211261de7c2..1ef46ccfba47 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
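The same adf_admin.c hunk also splits the old adf_send_admin_cmd() into per-command helpers (adf_init_me(), adf_set_fw_constants()) layered over a shared adf_send_admin(), which walks the engine mask with for_each_set_bit() rather than looping over every index up to get_num_aes(). A sketch of that iteration idiom, with a hypothetical mask width and a debug print standing in for the real message send:

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_walk_engines(unsigned long ae_mask)
{
	u32 ae;

	/*
	 * for_each_set_bit(bit, addr, size) visits only the bits that
	 * are set, so engines fused off in ae_mask are skipped without
	 * an explicit test in the loop body.
	 */
	for_each_set_bit(ae, &ae_mask, BITS_PER_LONG)
		pr_debug("would send admin message to AE %u\n", ae);
}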
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_COMMON_H_ #define ADF_CFG_COMMON_H_ @@ -81,16 +37,16 @@ enum adf_device_type { struct adf_dev_status_info { enum adf_device_type type; - u32 accel_id; - u32 instance_id; - uint8_t num_ae; - uint8_t num_accel; - uint8_t num_logical_accel; - uint8_t banks_per_accel; - uint8_t state; - uint8_t bus; - uint8_t dev; - uint8_t fun; + __u32 accel_id; + __u32 instance_id; + __u8 num_ae; + __u8 num_accel; + __u8 num_logical_accel; + __u8 banks_per_accel; + __u8 state; + __u8 bus; + __u8 dev; + __u8 fun; char name[MAX_DEVICE_NAME_SIZE]; }; @@ -101,6 +57,6 @@ struct adf_dev_status_info { struct adf_user_cfg_ctl_data) #define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \ struct adf_user_cfg_ctl_data) -#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t) -#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t) +#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32) +#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32) #endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h index 7632ed0f25c5..314790f5b0af 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_STRINGS_H_ #define ADF_CFG_STRINGS_H_ diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index b5484bfa6996..421f4fb8b4dd 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
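adf_cfg_common.h above moves in the opposite direction from the driver-internal headers: because it defines an ioctl interface shared with userspace, its fields switch to the double-underscore UAPI types (__u8, __u32, __s32) from <linux/types.h>, the only fixed-width spellings exported to userspace, and the ioctl size arguments follow suit. A sketch of the convention; the magic number, command and struct are hypothetical:

#include <linux/ioctl.h>
#include <linux/types.h>

struct example_dev_status {	/* hypothetical ioctl payload */
	__u32 accel_id;		/* __u32, never u32, in a UAPI header */
	__u8 state;
	__u8 bus;
	__u8 dev;
	__u8 fun;
};

#define EXAMPLE_IOC_MAGIC 'x'
/* _IOW(magic, nr, type) encodes direction, number and payload size. */
#define EXAMPLE_IOC_STATUS _IOW(EXAMPLE_IOC_MAGIC, 3, __u32)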
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_CFG_USER_H_ #define ADF_CFG_USER_H_ @@ -55,7 +11,7 @@ struct adf_user_cfg_key_val { char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *next; - uint64_t padding3; + __u64 padding3; }; enum adf_cfg_val_type type; } __packed; @@ -64,19 +20,19 @@ struct adf_user_cfg_section { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; union { struct adf_user_cfg_key_val *params; - uint64_t padding1; + __u64 padding1; }; union { struct adf_user_cfg_section *next; - uint64_t padding3; + __u64 padding3; }; } __packed; struct adf_user_cfg_ctl_data { union { struct adf_user_cfg_section *config_section; - uint64_t padding; + __u64 padding; }; - uint8_t device_id; + __u8 device_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index d78f8d5c89c3..ebfcb4ea618d 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
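adf_cfg_user.h keeps its unions of a pointer and a pad word but retypes the pads as __u64. Pairing every userspace pointer with a 64-bit member pins the union, and therefore the whole __packed structure, to one size whether the process is 32-bit or 64-bit, so a compat caller sees the same layout as a native one. A minimal sketch of the idiom; the struct is hypothetical and mirrors adf_user_cfg_section:

#include <linux/types.h>

struct example_cfg_section {	/* hypothetical illustration */
	char name[64];
	union {
		struct example_cfg_section *next; /* native-width pointer */
		__u64 padding;			  /* forces 8 bytes on 32-bit too */
	};
} __packed;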
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_DRV_H #define ADF_DRV_H @@ -123,11 +79,11 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); +struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id); struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev); -int adf_devmgr_verify_id(uint32_t id); -void adf_devmgr_get_num_dev(uint32_t *num); +int adf_devmgr_verify_id(u32 id); +void adf_devmgr_get_num_dev(u32 *num); int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev); int adf_dev_started(struct adf_accel_dev *accel_dev); int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev); @@ -198,7 +154,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask, unsigned int upc); void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword); + unsigned int words_num, u64 *uword); void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uword_addr, unsigned int words_num, unsigned int *data); @@ -233,9 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - uint32_t vf_mask); + u32 vf_mask); void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index ef0e482ee04f..71d0c44aacca 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -270,7 +226,7 @@ static int adf_ctl_is_device_in_use(int id) return 0; } -static void adf_ctl_stop_devices(uint32_t id) +static void adf_ctl_stop_devices(u32 id) { struct adf_accel_dev *accel_dev; @@ -374,7 +330,7 @@ out: static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, unsigned long arg) { - uint32_t num_devices = 0; + u32 num_devices = 0; adf_devmgr_get_num_dev(&num_devices); if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 2d06409bd3c4..72753af056b3 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/list.h> #include "adf_cfg.h" @@ -52,7 +8,7 @@ static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); -static uint32_t num_devices; +static u32 num_devices; static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { @@ -355,7 +311,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) } EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); -struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) +struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id) { struct list_head *itr; int real_id; @@ -380,7 +336,7 @@ unlock: return NULL; } -int adf_devmgr_verify_id(uint32_t id) +int adf_devmgr_verify_id(u32 id) { if (id == ADF_CFG_ALL_DEVICES) return 0; @@ -407,7 +363,7 @@ static int adf_get_num_dettached_vfs(void) return vfs; } -void adf_devmgr_get_num_dev(uint32_t *num) +void adf_devmgr_get_num_dev(u32 *num) { *num = num_devices - adf_get_num_dettached_vfs(); } diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index d7dd18d9bef8..d4162783f970 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport_internal.h" diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 26556c713049..42029153408e 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
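The pattern repeating through these qat_common hunks is the point of the whole series: roughly forty lines of dual BSD/GPLv2 boilerplate per file collapse into one machine-readable SPDX tag plus a copyright line. The kernel convention is visible in the hunks themselves and worth spelling out: .c source files carry the tag as a C99 //-comment on the very first line, while the headers later in this patch (adf_pf2vf_msg.h, adf_transport.h, the icp_qat_fw_*.h family) use the block-comment form instead. A minimal sketch of the two forms:

// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
/* ... first lines of a .c source file, //-comment form ... */

/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
/* ... first lines of a .h header, block-comment form ... */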
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/list.h> #include <linux/bitops.h> diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index cd1cdf5305bc..36136f7db509 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index b3875fdf6cd7..519fd5acf713 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h index 5acd531a11ff..0690c031bfce 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_PF2VF_MSG_H #define ADF_PF2VF_MSG_H diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index b36d8653b1ba..8827aa139f96 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/device.h> diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 2136cbe4bf6c..2ad774017200 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/delay.h> #include "adf_accel_devices.h" #include "adf_transport_internal.h" @@ -51,22 +7,22 @@ #include "adf_cfg.h" #include "adf_common_drv.h" -static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) +static inline u32 adf_modulo(u32 data, u32 shift) { - uint32_t div = data >> shift; - uint32_t mult = div << shift; + u32 div = data >> shift; + u32 mult = div << shift; return data - mult; } -static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size) +static inline int adf_check_ring_alignment(u64 addr, u64 size) { if (((size - 1) & addr) != 0) return -EFAULT; return 0; } -static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) +static int adf_verify_ring_size(u32 msg_size, u32 msg_num) { int i = ADF_MIN_RING_SIZE; @@ -77,7 +33,7 @@ static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num) return ADF_DEFAULT_RING_SIZE; } -static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); if (bank->ring_mask & (1 << ring)) { @@ -89,14 +45,14 @@ static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) return 0; } -static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) { spin_lock(&bank->lock); bank->ring_mask &= ~(1 << ring); spin_unlock(&bank->lock); } -static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask |= (1 << ring); @@ -106,7 +62,7 @@ static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) bank->irq_coalesc_timer); } -static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) +static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) { spin_lock_bh(&bank->lock); bank->irq_mask &= ~(1 << ring); @@ -114,7 +70,7 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask); } -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { if (atomic_add_return(1, ring->inflights) > ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) { @@ -136,18 +92,18 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) static int adf_handle_response(struct adf_etr_ring_data *ring) { - uint32_t msg_counter = 0; - uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + u32 msg_counter = 0; + u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { - ring->callback((uint32_t *)msg); + ring->callback((u32 *)msg); atomic_dec(ring->inflights); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; - msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); + msg = (u32 *)((uintptr_t)ring->base_addr + ring->head); } if (msg_counter > 0) WRITE_CSR_RING_HEAD(ring->bank->csr_addr, @@ -158,7 +114,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = 
BUILD_RING_CONFIG(ring->ring_size); + u32 ring_config = BUILD_RING_CONFIG(ring->ring_size); WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring_config); @@ -166,7 +122,7 @@ static void adf_configure_tx_ring(struct adf_etr_ring_data *ring) static void adf_configure_rx_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_config = + u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size, ADF_RING_NEAR_WATERMARK_512, ADF_RING_NEAR_WATERMARK_0); @@ -180,8 +136,8 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) struct adf_etr_bank_data *bank = ring->bank; struct adf_accel_dev *accel_dev = bank->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint64_t ring_base; - uint32_t ring_size_bytes = + u64 ring_base; + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -215,7 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) static void adf_cleanup_ring(struct adf_etr_ring_data *ring) { - uint32_t ring_size_bytes = + u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size); ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes); @@ -228,8 +184,8 @@ static void adf_cleanup_ring(struct adf_etr_ring_data *ring) } int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_msgs, - uint32_t msg_size, const char *ring_name, + u32 bank_num, u32 num_msgs, + u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr) { @@ -237,7 +193,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; - uint32_t ring_num; + u32 ring_num; int ret; if (bank_num >= GET_MAX_BANKS(accel_dev)) { @@ -330,7 +286,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring) static void adf_ring_response_handler(struct adf_etr_bank_data *bank) { - uint32_t empty_rings, i; + u32 empty_rings, i; empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number); empty_rings = ~empty_rings & bank->irq_mask; @@ -353,7 +309,7 @@ void adf_response_handler(uintptr_t bank_addr) static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, - uint32_t key, uint32_t *value) + u32 key, u32 *value) { char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; @@ -370,7 +326,7 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, const char *section, - uint32_t bank_num_in_accel) + u32 bank_num_in_accel) { if (adf_get_cfg_int(bank->accel_dev, section, ADF_ETRMGR_COALESCE_TIMER_FORMAT, @@ -384,12 +340,12 @@ static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank, static int adf_init_bank(struct adf_accel_dev *accel_dev, struct adf_etr_bank_data *bank, - uint32_t bank_num, void __iomem *csr_addr) + u32 bank_num, void __iomem *csr_addr) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_etr_ring_data *ring; struct adf_etr_ring_data *tx_ring; - uint32_t i, coalesc_enabled = 0; + u32 i, coalesc_enabled = 0; memset(bank, 0, sizeof(*bank)); bank->bank_number = bank_num; @@ -461,8 +417,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) struct adf_etr_data *etr_data; struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; - 
uint32_t size; - uint32_t num_banks = 0; + u32 size; + u32 num_banks = 0; int i, ret; etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, @@ -508,7 +464,7 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data); static void cleanup_bank(struct adf_etr_bank_data *bank) { - uint32_t i; + u32 i; for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { struct adf_accel_dev *accel_dev = bank->accel_dev; @@ -528,7 +484,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank) static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data = accel_dev->transport; - uint32_t i, num_banks = GET_MAX_BANKS(accel_dev); + u32 i, num_banks = GET_MAX_BANKS(accel_dev); for (i = 0; i < num_banks; i++) cleanup_bank(&etr_data->banks[i]); diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 386485bd9c95..2c95f1697c76 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
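Beyond the type churn in the adf_transport.c hunks above (uint32_t and uint64_t become the kernel-internal u32 and u64), two small power-of-two tricks pass through unchanged and are worth spelling out: adf_modulo() computes data % (1 << shift) with shifts and a subtraction, and adf_check_ring_alignment() tests alignment with a mask, both valid only because ring sizes are powers of two. A standalone sketch, illustrative userspace C rather than the driver code itself:

#include <assert.h>
#include <stdint.h>

/* data % (1u << shift), as adf_modulo() does: round down, subtract */
static uint32_t modulo_pow2(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;   /* data / 2^shift    */
	uint32_t mult = div << shift;   /* rounded back down */
	return data - mult;             /* the remainder     */
}

/* addr is size-aligned iff its low log2(size) bits are all clear,
 * the test used by adf_check_ring_alignment() */
static int is_pow2_aligned(uint64_t addr, uint64_t size)
{
	return ((size - 1) & addr) == 0;
}

int main(void)
{
	assert(modulo_pow2(1000, 4) == 1000 % 16);
	assert(is_pow2_aligned(0x4000, 0x1000));    /* on a 4K boundary  */
	assert(!is_pow2_aligned(0x4010, 0x1000));   /* 16 bytes past it  */
	return 0;
}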
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_H #define ADF_TRANSPORT_H @@ -54,10 +10,10 @@ struct adf_etr_ring_data; typedef void (*adf_callback_fn)(void *resp_msg); int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size, + u32 bank_num, u32 num_mgs, u32 msg_size, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr); -int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg); +int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); void adf_remove_ring(struct adf_etr_ring_data *ring); #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 80e02a2a0a09..950d1988556c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
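The adf_transport.h prototypes just converted above are the entire ring interface the QAT service code builds on: create a ring inside a bank, post fixed-size messages to it, remove it. A hedged sketch of a caller follows; the section string, ring names, counts and sizes are invented for illustration (real users derive them from the device configuration tables), and the handler matches the adf_callback_fn typedef shown in the hunk:

/* Hypothetical user of the transport API; names and sizes are made up. */
static void example_resp_handler(void *resp_msg)
{
	/* invoked from the bank's response handling for each reply */
}

static int example_create_rings(struct adf_accel_dev *accel_dev,
				struct adf_etr_ring_data **tx,
				struct adf_etr_ring_data **rx)
{
	int ret;

	/* bank 0, 512 messages of 64 bytes, interrupt (non-polled) mode */
	ret = adf_create_ring(accel_dev, "EXAMPLE_SECTION", 0, 512, 64,
			      "example_tx", NULL, 0, tx);
	if (ret)
		return ret;

	ret = adf_create_ring(accel_dev, "EXAMPLE_SECTION", 0, 512, 64,
			      "example_rx", example_resp_handler, 0, rx);
	if (ret)
		adf_remove_ring(*tx);
	return ret;
}

Posting a request is then adf_send_message(*tx, (u32 *)&req), which fails with -EAGAIN once the ring's in-flight budget is exhausted, per the atomic_add_return() check in the adf_send_message() hunk above.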
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_ACCESS_MACROS_H #define ADF_TRANSPORT_ACCESS_MACROS_H @@ -132,9 +88,9 @@ ADF_RING_CSR_RING_CONFIG + (ring << 2), value) #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ do { \ - uint32_t l_base = 0, u_base = 0; \ - l_base = (uint32_t)(value & 0xFFFFFFFF); \ - u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)(value & 0xFFFFFFFF); \ + u_base = (u32)((value & 0xFFFFFFFF00000000ULL) >> 32); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index e794e9d97b2c..2a2eccbf56ec 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
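The WRITE_CSR_RING_BASE macro in the hunk above shows the usual pattern for programming a 64-bit DMA address through 32-bit device registers: mask off the low word, shift down the high word, and issue two CSR writes (LBASE, then UBASE). A freestanding sketch of just the split:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit ring base into the two 32-bit halves the LBASE/UBASE
 * registers take, as WRITE_CSR_RING_BASE() does before ADF_CSR_WR(). */
static void split_ring_base(uint64_t base, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(base & 0xFFFFFFFFu);
	*hi = (uint32_t)(base >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_ring_base(0x0000001234abcd00ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0x34abcd00 hi=0x00000012 */
	return 0;
}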
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/seq_file.h> diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index bb883368ac01..c7faf4e2d302 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_TRANSPORT_INTRN_H #define ADF_TRANSPORT_INTRN_H @@ -59,32 +15,31 @@ struct adf_etr_ring_debug_entry { struct adf_etr_ring_data { void *base_addr; atomic_t *inflights; - spinlock_t lock; /* protects ring data struct */ adf_callback_fn callback; struct adf_etr_bank_data *bank; dma_addr_t dma_addr; - uint16_t head; - uint16_t tail; - uint8_t ring_number; - uint8_t ring_size; - uint8_t msg_size; - uint8_t reserved; struct adf_etr_ring_debug_entry *ring_debug; -} __packed; + spinlock_t lock; /* protects ring data struct */ + u16 head; + u16 tail; + u8 ring_number; + u8 ring_size; + u8 msg_size; +}; struct adf_etr_bank_data { struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; struct tasklet_struct resp_handler; void __iomem *csr_addr; - struct adf_accel_dev *accel_dev; - uint32_t irq_coalesc_timer; - uint16_t ring_mask; - uint16_t irq_mask; + u32 irq_coalesc_timer; + u32 bank_number; + u16 ring_mask; + u16 irq_mask; spinlock_t lock; /* protects bank data struct */ + struct adf_accel_dev *accel_dev; struct dentry *bank_debug_dir; struct dentry *bank_debug_cfg; - uint32_t bank_number; -} __packed; +}; struct adf_etr_data { struct adf_etr_bank_data *banks; diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index cd5f37dffe8a..2c98fb63f7b7 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_pf2vf_msg.h" diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 4a73fc70f7a9..c4a44dc6af3e 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
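The adf_transport_internal.h hunk above does more than swap types: it drops __packed from adf_etr_ring_data and adf_etr_bank_data and instead reorders the members (pointers and the spinlock grouped, u16 and u8 fields packed together) so the compiler leaves no meaningful holes on its own. That is presumably safe here because these are host-side bookkeeping structures rather than a device ABI, and __packed on structs containing pointers and spinlocks can misalign fields and penalize access on some architectures. A small illustration of why ordering alone can replace __packed, with invented field names:

#include <stdint.h>
#include <stdio.h>

struct holey {            /* large members after small ones -> padding */
	uint8_t  a;
	uint64_t b;
	uint8_t  c;
	uint16_t d;
};

struct dense {            /* same fields, sorted roughly by size */
	uint64_t b;
	uint16_t d;
	uint8_t  a;
	uint8_t  c;
};

int main(void)
{
	/* typically 24 vs 16 on LP64; exact numbers are ABI-dependent */
	printf("holey=%zu dense=%zu\n",
	       sizeof(struct holey), sizeof(struct dense));
	return 0;
}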
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index 46747f01b1d1..6dc09d270082 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_H_ #define _ICP_QAT_FW_H_ #include <linux/types.h> @@ -89,41 +45,41 @@ enum icp_qat_fw_comn_request_id { struct icp_qat_fw_comn_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t serv_specif_fields[4]; + __u32 serv_specif_fields[4]; } s1; } u; }; struct icp_qat_fw_comn_req_mid { - uint64_t opaque_data; - uint64_t src_data_addr; - uint64_t dest_data_addr; - uint32_t src_length; - uint32_t dst_length; + __u64 opaque_data; + __u64 src_data_addr; + __u64 dest_data_addr; + __u32 src_length; + __u32 dst_length; }; struct icp_qat_fw_comn_req_cd_ctrl { - uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; + __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; }; struct icp_qat_fw_comn_req_hdr { - uint8_t resrvd1; - uint8_t service_cmd_id; - uint8_t service_type; - uint8_t hdr_flags; - uint16_t serv_specif_flags; - uint16_t comn_req_flags; + __u8 resrvd1; + __u8 service_cmd_id; + __u8 service_type; + __u8 hdr_flags; + __u16 serv_specif_flags; + __u16 comn_req_flags; }; struct icp_qat_fw_comn_req_rqpars { - uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; + __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; }; struct icp_qat_fw_comn_req { @@ -135,24 +91,24 @@ struct icp_qat_fw_comn_req { }; struct icp_qat_fw_comn_error { - uint8_t xlat_err_code; - uint8_t cmp_err_code; + __u8 xlat_err_code; + __u8 cmp_err_code; }; struct icp_qat_fw_comn_resp_hdr { - uint8_t resrvd1; - uint8_t service_id; - uint8_t response_type; - uint8_t hdr_flags; + __u8 resrvd1; + __u8 service_id; + __u8 response_type; + __u8 hdr_flags; struct icp_qat_fw_comn_error comn_error; - uint8_t comn_status; - uint8_t cmd_id; + __u8 comn_status; + __u8 cmd_id; }; struct icp_qat_fw_comn_resp { struct icp_qat_fw_comn_resp_hdr comn_hdr; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index 72a59faa9005..d4d188cd7ed0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_INIT_ADMIN_H_ #define _ICP_QAT_FW_INIT_ADMIN_H_ @@ -67,50 +23,75 @@ enum icp_qat_fw_init_admin_resp_status { }; struct icp_qat_fw_init_admin_req { - uint16_t init_cfg_sz; - uint8_t resrvd1; - uint8_t init_admin_cmd_id; - uint32_t resrvd2; - uint64_t opaque_data; - uint64_t init_cfg_ptr; - uint64_t resrvd3; -}; - -struct icp_qat_fw_init_admin_resp_hdr { - uint8_t flags; - uint8_t resrvd1; - uint8_t status; - uint8_t init_admin_cmd_id; -}; + __u16 init_cfg_sz; + __u8 resrvd1; + __u8 cmd_id; + __u32 resrvd2; + __u64 opaque_data; + __u64 init_cfg_ptr; -struct icp_qat_fw_init_admin_resp_pars { union { - uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint32_t version_patch_num; - uint8_t context_id; - uint8_t ae_id; - uint16_t resrvd1; - uint64_t resrvd2; - } s1; - struct { - uint64_t req_rec_count; - uint64_t resp_sent_count; - } s2; - } u; + __u16 ibuf_size_in_kb; + __u16 resrvd3; + }; + __u32 idle_filter; + }; + + __u32 resrvd4; }; struct icp_qat_fw_init_admin_resp { - struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr; + __u8 flags; + __u8 resrvd1; + __u8 status; + __u8 cmd_id; union { - uint32_t resrvd2; + __u32 resrvd2; + struct { + __u16 version_minor_num; + __u16 version_major_num; + }; + }; + __u64 opaque_data; + union { + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4]; + struct { + __u32 version_patch_num; + __u8 context_id; + __u8 ae_id; + __u16 resrvd4; + __u64 resrvd5; + }; + struct { + __u64 req_rec_count; + __u64 resp_sent_count; + }; + struct { + __u16 compression_algos; + __u16 checksum_algos; + __u32 deflate_capabilities; + __u32 resrvd6; + __u32 lzs_capabilities; + }; + struct { + __u32 cipher_algos; + __u32 hash_algos; + __u16 keygen_algos; + __u16 other; + __u16 public_key_algos; + __u16 prime_algos; + }; + struct { + __u64 timestamp; + __u64 resrvd7; + }; struct { - uint16_t version_minor_num; - uint16_t version_major_num; - } s; - } u; - uint64_t opaque_data; - struct 
icp_qat_fw_init_admin_resp_pars init_resp_pars; + __u32 successful_count; + __u32 unsuccessful_count; + __u64 resrvd8; + }; + }; }; #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h index c8d26697e8ea..6757ec09d81f 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
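The icp_qat_fw_init_admin.h rework above replaces the named icp_qat_fw_init_admin_resp_pars wrapper and its .u/.s access paths with anonymous structs and unions, so a caller reads resp->version_major_num directly instead of resp->init_resp_pars.u.s.version_major_num. A short sketch of the idiom, with field names shortened for illustration (compile as C11; older GCC accepts it as an extension):

#include <stdint.h>
#include <stdio.h>

/* Anonymous unions/structs let overlapping payload layouts share one
 * struct without intermediate member names, as in the reworked
 * icp_qat_fw_init_admin_resp. */
struct admin_resp {
	uint8_t status;
	uint8_t cmd_id;
	union {                      /* payload interpreted per cmd_id */
		struct {
			uint16_t version_minor;
			uint16_t version_major;
		};
		struct {
			uint64_t req_count;
			uint64_t resp_count;
		};
	};
};

int main(void)
{
	struct admin_resp r = { .status = 0, .cmd_id = 1 };

	r.version_major = 4;         /* no .u.s. path needed */
	r.version_minor = 2;
	printf("fw %u.%u\n", r.version_major, r.version_minor);
	return 0;
}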
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_LA_H_ #define _ICP_QAT_FW_LA_H_ #include "icp_qat_fw.h" @@ -226,14 +182,14 @@ struct icp_qat_fw_la_bulk_req { struct icp_qat_fw_cipher_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } s1; } u; }; @@ -241,70 +197,70 @@ struct icp_qat_fw_cipher_req_hdr_cd_pars { struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { union { struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; + __u64 content_desc_addr; + __u16 content_desc_resrvd1; + __u8 content_desc_params_sz; + __u8 content_desc_hdr_resrvd2; + __u32 content_desc_resrvd3; } s; struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } sl; } u; }; struct icp_qat_fw_cipher_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id; - uint8_t cipher_padding_sz; - uint8_t resrvd1; - uint16_t resrvd2; - uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id; + __u8 cipher_padding_sz; + __u8 resrvd1; + __u16 resrvd2; + __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; }; struct icp_qat_fw_auth_cd_ctrl_hdr { - uint32_t resrvd1; - uint8_t resrvd2; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id; - uint8_t resrvd3; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd4; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u32 resrvd1; + __u8 resrvd2; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id; + __u8 resrvd3; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd4; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id_cipher; - uint8_t cipher_padding_sz; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id_auth; - uint8_t resrvd1; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd2; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; + __u8 cipher_state_sz; + __u8 cipher_key_sz; + __u8 cipher_cfg_offset; + __u8 next_curr_id_cipher; + __u8 cipher_padding_sz; + __u8 hash_flags; + __u8 hash_cfg_offset; + __u8 next_curr_id_auth; + __u8 resrvd1; + __u8 outer_prefix_sz; + __u8 final_sz; + __u8 inner_res_sz; + __u8 resrvd2; + __u8 inner_state1_sz; + __u8 inner_state2_offset; + __u8 
inner_state2_sz; + __u8 outer_config_offset; + __u8 outer_state1_sz; + __u8 outer_res_sz; + __u8 outer_prefix_offset; }; #define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 @@ -315,48 +271,48 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) struct icp_qat_fw_la_cipher_req_params { - uint32_t cipher_offset; - uint32_t cipher_length; + __u32 cipher_offset; + __u32 cipher_length; union { - uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { - uint64_t cipher_IV_ptr; - uint64_t resrvd1; + __u64 cipher_IV_ptr; + __u64 resrvd1; } s; } u; }; struct icp_qat_fw_la_auth_req_params { - uint32_t auth_off; - uint32_t auth_len; + __u32 auth_off; + __u32 auth_len; union { - uint64_t auth_partial_st_prefix; - uint64_t aad_adr; + __u64 auth_partial_st_prefix; + __u64 aad_adr; } u1; - uint64_t auth_res_addr; + __u64 auth_res_addr; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint8_t hash_state_sz; - uint8_t auth_res_sz; + __u8 resrvd1; + __u8 hash_state_sz; + __u8 auth_res_sz; } __packed; struct icp_qat_fw_la_auth_req_params_resrvd_flds { - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; + __u8 inner_prefix_sz; + __u8 aad_sz; } u2; - uint8_t resrvd1; - uint16_t resrvd2; + __u8 resrvd1; + __u16 resrvd2; }; struct icp_qat_fw_la_resp { struct icp_qat_fw_comn_resp_hdr comn_resp; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; + __u64 opaque_data; + __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 2ffef3e4fd68..3e8e291cd122 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
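The icp_qat_fw_la.h descriptor structs converted above are shared with the QAT firmware, so their in-memory layout is part of the device ABI; moving to fixed-width kernel types keeps that layout explicit on every host architecture. As a rough stand-alone illustration (the struct below is a hypothetical user-space mirror, not the kernel definition), a compile-time size check is a common way to pin such a layout down:

	#include <stdint.h>

	/* Hypothetical mirror of a firmware descriptor: layout must not drift. */
	struct fw_cd_pars {
		uint64_t content_desc_addr;
		uint16_t content_desc_resrvd1;
		uint8_t  content_desc_params_sz;
		uint8_t  content_desc_hdr_resrvd2;
		uint32_t content_desc_resrvd3;
	};

	/* 8 + 2 + 1 + 1 + 4 = 16 bytes, with no padding on common ABIs. */
	_Static_assert(sizeof(struct fw_cd_pars) == 16,
		       "descriptor layout changed - firmware would misread it");

	int main(void) { return 0; }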
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_FW_LOADER_HANDLE_H__ #define __ICP_QAT_FW_LOADER_HANDLE_H__ #include "icp_qat_uclo.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h index 0d7a9b51ce9f..9dddae0009fc 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -1,100 +1,56 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_FW_PKE_ #define _ICP_QAT_FW_PKE_ #include "icp_qat_fw.h" struct icp_qat_fw_req_hdr_pke_cd_pars { - u64 content_desc_addr; - u32 content_desc_resrvd; - u32 func_id; + __u64 content_desc_addr; + __u32 content_desc_resrvd; + __u32 func_id; }; struct icp_qat_fw_req_pke_mid { - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; struct icp_qat_fw_req_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 service_type; - u8 hdr_flags; - u16 comn_req_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 service_type; + __u8 hdr_flags; + __u16 comn_req_flags; + __u16 resrvd4; struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; }; struct icp_qat_fw_pke_request { struct icp_qat_fw_req_pke_hdr pke_hdr; struct icp_qat_fw_req_pke_mid pke_mid; - u8 output_param_count; - u8 input_param_count; - u16 resrvd1; - u32 resrvd2; - u64 next_req_adr; + __u8 output_param_count; + __u8 input_param_count; + __u16 resrvd1; + __u32 resrvd2; + __u64 next_req_adr; }; struct icp_qat_fw_resp_pke_hdr { - u8 resrvd1; - u8 resrvd2; - u8 response_type; - u8 hdr_flags; - u16 comn_resp_flags; - u16 resrvd4; + __u8 resrvd1; + __u8 resrvd2; + __u8 response_type; + __u8 hdr_flags; + __u16 comn_resp_flags; + __u16 resrvd4; }; struct icp_qat_fw_pke_resp { struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; - u64 opaque; - u64 src_data_addr; - u64 dest_data_addr; + __u64 opaque; + __u64 src_data_addr; + __u64 dest_data_addr; }; #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 7187917533d0..c0e9fc0c93dd 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_HAL_H #define __ICP_QAT_HAL_H #include "icp_qat_fw_loader_handle.h" diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 121d5e6e46ca..c4b6ef1506ab 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ @@ -105,8 +61,8 @@ enum icp_qat_hw_auth_mode { }; struct icp_qat_hw_auth_config { - uint32_t config; - uint32_t reserved; + __u32 config; + __u32 reserved; }; #define QAT_AUTH_MODE_BITPOS 4 @@ -131,7 +87,7 @@ struct icp_qat_hw_auth_config { struct icp_qat_hw_auth_counter { __be32 counter; - uint32_t reserved; + __u32 reserved; }; #define QAT_AUTH_COUNT_MASK 0xFFFFFFFF @@ -191,9 +147,9 @@ struct icp_qat_hw_auth_setup { struct icp_qat_hw_auth_sha512 { struct icp_qat_hw_auth_setup inner_setup; - uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ]; struct icp_qat_hw_auth_setup outer_setup; - uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; + __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ]; }; struct icp_qat_hw_auth_algo_blk { @@ -227,8 +183,8 @@ enum icp_qat_hw_cipher_mode { }; struct icp_qat_hw_cipher_config { - uint32_t val; - uint32_t reserved; + __u32 val; + __u32 reserved; }; enum icp_qat_hw_cipher_dir { @@ -296,7 +252,7 @@ enum icp_qat_hw_cipher_convert { struct icp_qat_hw_cipher_aes256_f8 { struct icp_qat_hw_cipher_config cipher_config; - uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; + __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ]; }; struct icp_qat_hw_cipher_algo_blk { diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 5d1ee7e53492..8fe1ec344fa2 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
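The icp_qat_hw.h auth and cipher config structs above each pack several fields into a single 32-bit config word via *_BITPOS/*_MASK macros. A minimal stand-alone sketch of that shift-and-mask packing style follows; the field positions here are invented for illustration and are not the QAT encoding:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions only - not the real QAT layout. */
	#define CFG_MODE_BITPOS  4
	#define CFG_MODE_MASK    0xF
	#define CFG_ALGO_BITPOS  0
	#define CFG_ALGO_MASK    0xF

	static uint32_t build_config(uint32_t mode, uint32_t algo)
	{
		return ((mode & CFG_MODE_MASK) << CFG_MODE_BITPOS) |
		       ((algo & CFG_ALGO_MASK) << CFG_ALGO_BITPOS);
	}

	int main(void)
	{
		/* mode 2, algo 7 -> 0x27 */
		printf("config = 0x%x\n", build_config(2, 7));
		return 0;
	}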
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ @@ -176,7 +132,7 @@ struct icp_qat_uof_encap_obj { struct icp_qat_uclo_encap_uwblock { unsigned int start_addr; unsigned int words_num; - uint64_t micro_words; + u64 micro_words; }; struct icp_qat_uclo_encap_page { @@ -215,7 +171,7 @@ struct icp_qat_uclo_objhdr { struct icp_qat_uof_strtable { unsigned int table_len; unsigned int reserved; - uint64_t strings; + u64 strings; }; struct icp_qat_uclo_objhandle { @@ -235,7 +191,7 @@ struct icp_qat_uclo_objhandle { unsigned int ae_num; unsigned int ustore_phy_size; void *obj_buf; - uint64_t *uword_buf; + u64 *uword_buf; }; struct icp_qat_uof_uword_block { diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index e14d3dd291f0..72753b84dc95 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/slab.h> #include <linux/crypto.h> @@ -55,6 +11,7 @@ #include <crypto/hmac.h> #include <crypto/algapi.h> #include <crypto/authenc.h> +#include <crypto/xts.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" #include "adf_transport.h" @@ -78,15 +35,15 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; struct qat_alg_buf { - uint32_t len; - uint32_t resrvd; - uint64_t addr; + u32 len; + u32 resrvd; + u64 addr; } __packed; struct qat_alg_buf_list { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; struct qat_alg_buf bufers[]; } __packed __aligned(64); @@ -131,7 +88,8 @@ struct qat_alg_skcipher_ctx { struct icp_qat_fw_la_bulk_req enc_fw_req; struct icp_qat_fw_la_bulk_req dec_fw_req; struct qat_crypto_instance *inst; - struct crypto_skcipher *tfm; + struct crypto_skcipher *ftfm; + bool fallback; }; static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) @@ -151,7 +109,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, struct qat_alg_aead_ctx *ctx, - const uint8_t *auth_key, + const u8 *auth_key, unsigned int auth_keylen) { SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); @@ -467,7 +425,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, struct icp_qat_fw_la_bulk_req *req, struct icp_qat_hw_cipher_algo_blk *cd, - const uint8_t *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; @@ -487,7 +445,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; @@ -500,7 +458,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, } static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx, - int alg, const uint8_t *key, + int alg, const u8 *key, unsigned int keylen, int mode) { struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; @@ -578,7 +536,7 @@ error: } static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, - const uint8_t *key, + const u8 *key, unsigned int keylen, int mode) { @@ -592,7 +550,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, return 0; } -static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = 
crypto_aead_ctx(tfm); @@ -606,7 +564,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, ICP_QAT_HW_CIPHER_CBC_MODE); } -static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -658,7 +616,7 @@ out_free_inst: return ret; } -static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, +static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -820,7 +778,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; struct qat_crypto_instance *inst = ctx->inst; struct aead_request *areq = qat_req->aead_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); qat_alg_free_bufl(inst, qat_req); @@ -835,7 +793,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx; struct qat_crypto_instance *inst = ctx->inst; struct skcipher_request *sreq = qat_req->skcipher_req; - uint8_t stat_filed = qat_resp->comn_resp.comn_status; + u8 stat_filed = qat_resp->comn_resp.comn_status; struct device *dev = &GET_DEV(ctx->inst->accel_dev); int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); @@ -880,18 +838,18 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param->cipher_length = areq->cryptlen - digst_size; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -910,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; - uint8_t *iv = areq->iv; + u8 *iv = areq->iv; int ret, ctr = 0; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); @@ -922,11 +880,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) qat_req->aead_ctx = ctx; qat_req->aead_req = areq; qat_req->cb = qat_aead_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + 
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); cipher_param->cipher_length = areq->cryptlen; @@ -936,7 +894,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) auth_param->auth_len = areq->assoclen + areq->cryptlen; do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1038,6 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm, static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + ret = xts_verify_key(tfm, key, keylen); + if (ret) + return ret; + + if (keylen >> 1 == AES_KEYSIZE_192) { + ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen); + if (ret) + return ret; + + ctx->fallback = true; + + return 0; + } + + ctx->fallback = false; + return qat_alg_skcipher_setkey(tfm, key, keylen, ICP_QAT_HW_CIPHER_XTS_MODE); } @@ -1073,7 +1050,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1082,7 +1059,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1102,6 +1079,24 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) return qat_alg_skcipher_encrypt(req); } +static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_encrypt(nreq); + } + + return qat_alg_skcipher_encrypt(req); +} + static int qat_alg_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); @@ -1133,7 +1128,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) qat_req->skcipher_ctx = ctx; qat_req->skcipher_req = req; qat_req->cb = qat_skcipher_alg_callback; - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; @@ -1142,7 +1137,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); do { - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->sym_tx, (u32 
*)msg); } while (ret == -EAGAIN && ctr++ < 10); if (ret == -EAGAIN) { @@ -1161,6 +1156,25 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) return qat_alg_skcipher_decrypt(req); } + +static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); + struct skcipher_request *nreq = skcipher_request_ctx(req); + + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + skcipher_request_set_tfm(nreq, ctx->ftfm); + return crypto_skcipher_decrypt(nreq); + } + + return qat_alg_skcipher_decrypt(req); +} + static int qat_alg_aead_init(struct crypto_aead *tfm, enum icp_qat_hw_auth_algo hash, const char *hash_name) @@ -1217,10 +1231,25 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm) static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm) { + crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); + return 0; +} + +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm) +{ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int reqsize; + + ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + reqsize = max(sizeof(struct qat_crypto_request), + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(ctx->ftfm)); + crypto_skcipher_set_reqsize(tfm, reqsize); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); - ctx->tfm = tfm; return 0; } @@ -1251,13 +1280,22 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm) qat_crypto_put_instance(inst); } +static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm) +{ + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->ftfm) + crypto_free_skcipher(ctx->ftfm); + + qat_alg_skcipher_exit_tfm(tfm); +} static struct aead_alg qat_aeads[] = { { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha1", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1274,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha256", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1291,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "qat_aes_cbc_hmac_sha512", .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), .cra_module = THIS_MODULE, @@ -1309,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "qat_aes_cbc", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1327,7 +1365,7 @@ static struct skcipher_alg 
qat_skciphers[] = { { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "qat_aes_ctr", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, @@ -1345,17 +1383,18 @@ static struct skcipher_alg qat_skciphers[] = { { .base.cra_name = "xts(aes)", .base.cra_driver_name = "qat_aes_xts", .base.cra_priority = 4001, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, - .init = qat_alg_skcipher_init_tfm, - .exit = qat_alg_skcipher_exit_tfm, + .init = qat_alg_skcipher_init_xts_tfm, + .exit = qat_alg_skcipher_exit_xts_tfm, .setkey = qat_alg_skcipher_xts_setkey, - .decrypt = qat_alg_skcipher_blk_decrypt, - .encrypt = qat_alg_skcipher_blk_encrypt, + .decrypt = qat_alg_skcipher_xts_decrypt, + .encrypt = qat_alg_skcipher_xts_encrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 692a7aaee749..846569ec9066 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -1,50 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
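The xts(aes) changes above route AES-192 keys to a software fallback transform, since the engine handles only 128- and 256-bit XTS keys: setkey records the decision in ctx->fallback, init allocates the fallback tfm and sizes the request context for whichever path is larger, and encrypt/decrypt re-target the request accordingly. A rough stand-alone model of that dispatch pattern, with plain function pointers standing in for the crypto API and all names invented:

	#include <stdbool.h>
	#include <stdio.h>

	struct ctx {
		bool fallback;                   /* set once at key-set time */
		int (*hw_encrypt)(const char *);
		int (*sw_encrypt)(const char *); /* software path for key sizes
						  * the engine cannot handle */
	};

	static int hw_enc(const char *msg) { printf("hw: %s\n", msg); return 0; }
	static int sw_enc(const char *msg) { printf("sw: %s\n", msg); return 0; }

	static int set_key(struct ctx *c, int keybits)
	{
		/* XTS carries two AES keys, so a 384-bit blob means AES-192. */
		c->fallback = (keybits / 2 == 192);
		return 0;
	}

	static int do_encrypt(struct ctx *c, const char *msg)
	{
		return c->fallback ? c->sw_encrypt(msg) : c->hw_encrypt(msg);
	}

	int main(void)
	{
		struct ctx c = { .hw_encrypt = hw_enc, .sw_encrypt = sw_enc };

		set_key(&c, 512); do_encrypt(&c, "aes-256-xts"); /* hardware */
		set_key(&c, 384); do_encrypt(&c, "aes-192-xts"); /* fallback */
		return 0;
	}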
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> @@ -384,12 +339,12 @@ static int qat_dh_compute_value(struct kpp_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = n_input_params; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -779,11 +734,11 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) @@ -927,7 +882,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->pke_mid.opaque = (u64)(__force long)qat_req; if (ctx->crt_mode) msg->input_param_count = 6; else @@ -935,7 +890,7 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->output_param_count = 1; do { - ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); } while (ret == -EBUSY && ctr++ < 100); if (!ret) diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index fb504cee0305..ab621b7dbd20 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
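The pke_mid.opaque assignments above use the firmware's 64-bit opaque field to carry the request pointer through the hardware: it is written before the message is sent and comes back unmodified in the response, so the completion path recovers its context without any lookup table. A small stand-alone sketch of the round-trip, where uintptr_t plays the role of the kernel's (u64)(__force long) cast:

	#include <stdint.h>
	#include <stdio.h>

	struct request { const char *name; };

	/* Model of a response descriptor carrying opaque data back unchanged. */
	struct response { uint64_t opaque; };

	static void complete(const struct response *resp)
	{
		/* Recover the originating request from the opaque field. */
		struct request *req = (struct request *)(uintptr_t)resp->opaque;

		printf("completed: %s\n", req->name);
	}

	int main(void)
	{
		struct request req = { .name = "rsa-dec" };
		struct response resp = { .opaque = (uint64_t)(uintptr_t)&req };

		complete(&resp);	/* prints "completed: rsa-dec" */
		return 0;
	}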
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/module.h> #include <linux/slab.h> #include "adf_accel_devices.h" diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index 300bb919a33a..12682d1e9f5f 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. 
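The adf_send_message() call sites above all share one bounded-retry shape: the symmetric paths retry up to 10 times on -EAGAIN, the PKE paths up to 100 times on -EBUSY, and the request is unwound if the ring stays full. A minimal model of that loop, with a stub standing in for the real send path:

	#include <errno.h>
	#include <stdio.h>

	static int attempts;

	/* Stub: pretend the ring is full for the first two submissions. */
	static int send_message(void)
	{
		return (++attempts <= 2) ? -EBUSY : 0;
	}

	int main(void)
	{
		int ret, ctr = 0;

		do {
			ret = send_message();
		} while (ret == -EBUSY && ctr++ < 100);

		if (ret)
			fprintf(stderr, "ring still full, dropping request\n");
		else
			printf("submitted after %d attempts\n", attempts);
		return 0;
	}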
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef _QAT_CRYPTO_INSTANCE_H_ #define _QAT_CRYPTO_INSTANCE_H_ diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index ff149e176f64..fa467e0f8285 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/slab.h> #include <linux/delay.h> @@ -78,13 +34,13 @@ #define AE(handle, ae) handle->hal_handle->aes[ae] -static const uint64_t inst_4b[] = { +static const u64 inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull, 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A021000000ull }; -static const uint64_t inst[] = { +static const u64 inst[] = { 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull, @@ -546,7 +502,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } -static uint64_t qat_hal_parity_64bit(uint64_t word) +static u64 qat_hal_parity_64bit(u64 word) { word ^= word >> 1; word ^= word >> 2; @@ -557,9 +513,9 @@ static uint64_t qat_hal_parity_64bit(uint64_t word) return word & 1; } -static uint64_t qat_hal_set_uword_ecc(uint64_t uword) +static u64 qat_hal_set_uword_ecc(u64 uword) { - uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, + u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL, bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL, bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL, bit6_mask = 0xdaf69a46910ULL; @@ -578,7 +534,7 @@ static uint64_t qat_hal_set_uword_ecc(uint64_t uword) void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int ustore_addr; unsigned int i; @@ -588,7 +544,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < words_num; i++) { unsigned int uwrd_lo, uwrd_hi; - uint64_t tmp; + u64 tmp; tmp = qat_hal_set_uword_ecc(uword[i]); uwrd_lo = (unsigned int)(tmp & 0xffffffff); @@ -644,7 +600,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) csr_val |= CE_NN_MODE; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst), - (uint64_t *)inst); + (u64 *)inst); qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, handle->hal_handle->upc_mask & INIT_PC_VALUE); @@ -821,7 +777,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int uaddr, - unsigned int words_num, uint64_t *uword) + unsigned int words_num, u64 *uword) { unsigned int i, uwrd_lo, uwrd_hi; unsigned int ustore_addr, misc_control; @@ -871,11 +827,11 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, #define MAX_EXEC_INST 100 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - uint64_t *micro_inst, unsigned int inst_num, + u64 *micro_inst, unsigned int inst_num, int code_off, unsigned int max_cycle, unsigned int *endpc) { - uint64_t savuwords[MAX_EXEC_INST]; + u64 savuwords[MAX_EXEC_INST]; unsigned int ind_lm_addr0, ind_lm_addr1; unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1; unsigned int ind_cnt_sig; @@ -972,7 +928,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned int ctxarb_cntl, ustore_addr, ctx_enables; unsigned short 
reg_addr; int status = 0; - uint64_t insts, savuword; + u64 insts, savuword; reg_addr = qat_hal_get_reg_addr(reg_type, reg_num); if (reg_addr == BAD_REGADDR) { @@ -984,7 +940,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, insts = 0xA070000000ull | (reg_addr & 0x3ff); break; default: - insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); + insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10); break; } savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); @@ -1030,7 +986,7 @@ static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle, unsigned short reg_num, unsigned int data) { unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo; - uint64_t insts[] = { + u64 insts[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0F0000C0300ull, @@ -1076,13 +1032,13 @@ int qat_hal_get_ins_num(void) return ARRAY_SIZE(inst_4b); } -static int qat_hal_concat_micro_code(uint64_t *micro_inst, +static int qat_hal_concat_micro_code(u64 *micro_inst, unsigned int inst_num, unsigned int size, unsigned int addr, unsigned int *value) { int i; unsigned int cur_value; - const uint64_t *inst_arr; + const u64 *inst_arr; int fixup_offset; int usize = 0; int orig_num; @@ -1107,7 +1063,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst, static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char ctx, - int *pfirst_exec, uint64_t *micro_inst, + int *pfirst_exec, u64 *micro_inst, unsigned int inst_num) { int stat = 0; @@ -1140,7 +1096,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_batch_init *lm_init_header) { struct icp_qat_uof_batch_init *plm_init; - uint64_t *micro_inst_arry; + u64 *micro_inst_arry; int micro_inst_num; int alloc_inst_size; int first_exec = 1; @@ -1150,7 +1106,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle, alloc_inst_size = lm_init_header->size; if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore) alloc_inst_size = handle->hal_handle->max_ustore; - micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t), + micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64), GFP_KERNEL); if (!micro_inst_arry) return -ENOMEM; @@ -1229,7 +1185,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, data16low; unsigned short reg_mask; int status = 0; - uint64_t micro_inst[] = { + u64 micro_inst[] = { 0x0F440000000ull, 0x0F040000000ull, 0x0A000000000ull, diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..bff759e2f811 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
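qat_hal_parity_64bit() above computes the parity of a 64-bit microword by repeatedly folding the word onto itself with XOR, halving the span each step until bit 0 holds the parity of all 64 bits; qat_hal_set_uword_ecc() then applies it per ECC mask to build the check bits. The fold in isolation, checked against the GCC/Clang parity builtin:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t parity64(uint64_t word)
	{
		/* Each XOR folds the upper half onto the lower half, so after
		 * six steps bit 0 is the XOR of all 64 input bits. */
		word ^= word >> 1;
		word ^= word >> 2;
		word ^= word >> 4;
		word ^= word >> 8;
		word ^= word >> 16;
		word ^= word >> 32;
		return word & 1;
	}

	int main(void)
	{
		uint64_t samples[] = { 0x0, 0x1, 0xff, 0x0F0400C0000ull };

		for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			assert(parity64(samples[i]) ==
			       (uint64_t)__builtin_parityll(samples[i]));
			printf("parity(%#llx) = %llu\n",
			       (unsigned long long)samples[i],
			       (unsigned long long)parity64(samples[i]));
		}
		return 0;
	}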
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> @@ -332,13 +288,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated it. 
*/ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } @@ -411,16 +372,16 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, unsigned int ustore_size; unsigned int patt_pos; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t *fill_data; + u64 *fill_data; uof_image = image->img_ptr; - fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t), + fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64), GFP_KERNEL); if (!fill_data) return -ENOMEM; for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) memcpy(&fill_data[i], &uof_image->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); page = image->page; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { @@ -981,7 +942,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) pr_err("QAT: UOF incompatible\n"); return -EINVAL; } - obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), GFP_KERNEL); if (!obj_handle->uword_buf) return -ENOMEM; @@ -1185,7 +1146,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, return 0; } -#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) +#define ADD_ADDR(high, low) ((((u64)high) << 32) + low) #define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, @@ -1514,10 +1475,10 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, struct icp_qat_uclo_encap_page *encap_page, - uint64_t *uword, unsigned int addr_p, - unsigned int raddr, uint64_t fill) + u64 *uword, unsigned int addr_p, + unsigned int raddr, u64 fill) { - uint64_t uwrd = 0; + u64 uwrd = 0; unsigned int i; if (!encap_page) { @@ -1547,12 +1508,12 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, { unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; - uint64_t fill_pat; + u64 fill_pat; /* load the page starting at appropriate ustore address */ /* get fill-pattern from an image -- they are all the same */ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, - sizeof(uint64_t)); + sizeof(u64)); uw_physical_addr = encap_page->beg_addr_p; uw_relative_addr = 0; words_num = encap_page->micro_words_num; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 1dfcab317bed..b975c263446d 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,62 +1,18 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. 
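The qat_uclo_create_batch_init_list() error path above was reworked so the caller-owned list head is freed at most once: the walk now starts at head->next, and the head itself is freed (and the caller's pointer cleared) only when this function allocated it. A stand-alone model of that ownership rule, with illustrative names:

	#include <stdbool.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	/*
	 * Free a singly linked list on the error path. The head may belong
	 * to the caller; free it only if we allocated it here, and clear
	 * the caller's pointer so it cannot be freed again later.
	 */
	static void unwind(struct node **head, bool we_allocated_head)
	{
		struct node *cur = (*head)->next;	/* skip the head for now */

		if (we_allocated_head) {
			free(*head);
			*head = NULL;			/* prevent a double free */
		}

		while (cur) {
			struct node *next = cur->next;

			free(cur);
			cur = next;
		}
	}

	int main(void)
	{
		struct node *head = calloc(1, sizeof(*head));

		if (!head)
			return 1;
		head->next = calloc(1, sizeof(*head));

		unwind(&head, true);	/* frees both nodes, head now NULL */
		return 0;
	}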
- - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> #include "adf_dh895xcc_hw_data.h" /* Worker thread to service arbiter mappings based on dev SKUs */ -static const uint32_t thrd_to_arb_map_sku4[] = { +static const u32 thrd_to_arb_map_sku4[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; -static const uint32_t thrd_to_arb_map_sku6[] = { +static const u32 thrd_to_arb_map_sku6[] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222 @@ -68,20 +24,20 @@ static struct adf_hw_device_class dh895xcc_class = { .instances = 0 }; -static uint32_t get_accel_mask(uint32_t fuse) +static u32 get_accel_mask(u32 fuse) { return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; } -static uint32_t get_ae_mask(uint32_t fuse) +static u32 get_ae_mask(u32 fuse) { return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK; } -static uint32_t get_num_accels(struct adf_hw_device_data *self) +static u32 get_num_accels(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->accel_mask) return 0; @@ -93,9 +49,9 @@ static uint32_t get_num_accels(struct adf_hw_device_data *self) return ctr; } -static uint32_t get_num_aes(struct adf_hw_device_data *self) +static u32 get_num_aes(struct adf_hw_device_data *self) { - uint32_t i, ctr = 0; + u32 i, ctr = 0; if (!self || !self->ae_mask) return 0; @@ -107,17 +63,17 @@ static uint32_t get_num_aes(struct adf_hw_device_data *self) return ctr; } -static uint32_t get_misc_bar_id(struct adf_hw_device_data *self) +static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_PMISC_BAR; } -static uint32_t get_etr_bar_id(struct 
adf_hw_device_data *self) +static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_ETR_BAR; } -static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) +static u32 get_sram_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_SRAM_BAR; } @@ -161,12 +117,12 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, } } -static uint32_t get_pf2vf_offset(uint32_t i) +static u32 get_pf2vf_offset(u32 i) { return ADF_DH895XCC_PF2VF_OFFSET(i); } -static uint32_t get_vintmsk_offset(uint32_t i) +static u32 get_vintmsk_offset(u32 i) { return ADF_DH895XCC_VINTMSK_OFFSET(i); } diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 092f7353ed23..082a04466dca 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
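/*
 * Editor's note on the uint32_t/uint64_t -> u32/u64 churn in these qat
 * hunks: kernel style prefers the native fixed-width types from
 * <linux/types.h> over the C99 <stdint.h> spellings in driver code, and
 * checkpatch warns about the latter; the conversion is purely cosmetic.
 * Equivalent before/after, as a sketch:
 */
#include <linux/types.h>

static u32 example_mask(u32 fuse)	/* was: static uint32_t example_mask(uint32_t fuse) */
{
	return ~fuse & 0xff;		/* identical semantics, kernel-native type */
}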
-*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2014 - 2020 Intel Corporation */ #ifndef ADF_DH895x_HW_DATA_H_ #define ADF_DH895x_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index b11bf8c0e683..4e877b75822b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a3b4dd8099a7..5246f0524ca3 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
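/*
 * Editor's note: the SPDX replacement follows the kernel's license-rules
 * convention, visible throughout this series -- the tag is the very first
 * line of the file, written with a C99 comment in .c files and a block
 * comment in .h files. Both forms below carry the same dual-license meaning:
 */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)	<- form used in the .c files
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */	/* form used in the .h files */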
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2015 - 2020 Intel Corporation */ #include <adf_accel_devices.h> #include <adf_pf2vf_msg.h> #include <adf_common_drv.h> diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 6ddc19bd4410..2bfcc67f8f39 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2015 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2015 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2015 - 2020 Intel Corporation */ #ifndef ADF_DH895XVF_HW_DATA_H_ #define ADF_DH895XVF_HW_DATA_H_ diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 1b762eefc6c1..7d6e1db272c2 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -1,49 +1,5 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h index 7770660bc853..cffa9fc628ff 100644 --- a/drivers/crypto/qce/cipher.h +++ b/drivers/crypto/qce/cipher.h @@ -14,7 +14,7 @@ struct qce_cipher_ctx { u8 enc_key[QCE_MAX_KEY_SIZE]; unsigned int enc_keylen; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; /** @@ -43,6 +43,7 @@ struct qce_cipher_reqctx { struct sg_table src_tbl; struct scatterlist *src_sg; unsigned int cryptlen; + struct skcipher_request fallback_req; // keep at the end }; static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm) diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 9f989cba0f1b..85ba16418a04 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -87,6 +87,8 @@ struct qce_alg_template { struct ahash_alg ahash; } alg; struct qce_device *qce; + const u8 *hash_zero; + const u32 digest_size; }; void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 1ab62e7d5f3c..c230843e2ffb 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -203,10 +203,18 @@ static int qce_import_common(struct ahash_request *req, u64 in_count, static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - bool hmac = IS_SHA_HMAC(flags); - int ret = -EINVAL; + struct qce_sha_reqctx *rctx; + unsigned long flags; + bool hmac; + int ret; + + ret = qce_ahash_init(req); + if (ret) + return ret; + + rctx = ahash_request_ctx(req); + flags = rctx->flags; + hmac = IS_SHA_HMAC(flags); if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { const struct sha1_state *state = in; @@ -284,8 +292,6 @@ static int qce_ahash_update(struct ahash_request *req) if (!sg_last) return -EINVAL; - sg_mark_end(sg_last); - if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); @@ -305,8 +311,12 @@ static int qce_ahash_final(struct ahash_request *req) struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; - if (!rctx->buflen) + if (!rctx->buflen) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); return 0; + } rctx->last_blk = true; @@ -338,6 +348,13 @@ static int qce_ahash_digest(struct ahash_request *req) rctx->first_blk = true; rctx->last_blk = true; + if (!rctx->nbytes_orig) { + if (tmpl->hash_zero) + memcpy(req->result, tmpl->hash_zero, + tmpl->alg.ahash.halg.digestsize); + return 0; + } + return qce->async_req_enqueue(tmpl->qce, &req->base); } @@ -490,6 +507,11 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, alg->halg.digestsize = 
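/*
 * Editor's note: a minimal sketch (hypothetical names) of the fallback
 * pattern the qce hunks above and below move to. Rather than driving a sync
 * skcipher through SYNC_SKCIPHER_REQUEST_ON_STACK, the fallback
 * skcipher_request is embedded at the end of the request context ("keep at
 * the end"), and the tfm's reqsize is grown by the fallback's reqsize so the
 * fallback's own variable-size request context fits behind it:
 */
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

struct my_reqctx {
	int my_state;				/* driver-private state */
	struct skcipher_request fallback_req;	/* must stay last */
};

static void my_set_reqsize(struct crypto_skcipher *tfm,
			   struct crypto_skcipher *fallback)
{
	/* room for our state plus the fallback request and its context */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
					 crypto_skcipher_reqsize(fallback));
}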
def->digestsize; alg->halg.statesize = def->statesize; + if (IS_SHA1(def->flags)) + tmpl->hash_zero = sha1_zero_message_hash; + else if (IS_SHA256(def->flags)) + tmpl->hash_zero = sha256_zero_message_hash; + base = &alg->halg.base; base->cra_blocksize = def->blocksize; base->cra_priority = 300; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 9412433f3b21..5630c5addd28 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -178,7 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, break; } - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) ctx->enc_keylen = keylen; return ret; @@ -235,16 +235,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) req->cryptlen <= aes_sw_max_len) || (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && req->cryptlen % QCE_SECTOR_SIZE))) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - ret = encrypt ? crypto_skcipher_encrypt(subreq) : - crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } @@ -263,10 +262,9 @@ static int qce_skcipher_decrypt(struct skcipher_request *req) static int qce_skcipher_init(struct crypto_skcipher *tfm) { - struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - - memset(ctx, 0, sizeof(*ctx)); - crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + /* take the size without the fallback skcipher_request at the end */ + crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx, + fallback_req)); return 0; } @@ -274,17 +272,21 @@ static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - qce_skcipher_init(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), - 0, CRYPTO_ALG_NEED_FALLBACK); - return PTR_ERR_OR_ZERO(ctx->fallback); + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), + 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback)) + return PTR_ERR(ctx->fallback); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); + return 0; } static void qce_skcipher_exit(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } struct qce_skcipher_def { @@ -404,6 +406,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->base.cra_priority = 300; alg->base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); alg->base.cra_alignmask = 0; diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c new file mode 100644 index 000000000000..5bc099052bd2 --- /dev/null +++ 
b/drivers/crypto/sa2ul.c @@ -0,0 +1,2420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * K3 SA2UL crypto accelerator driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Keerthy + * Vitaly Andrianov + * Tero Kristo + */ +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dmapool.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <crypto/aes.h> +#include <crypto/authenc.h> +#include <crypto/des.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> + +#include "sa2ul.h" + +/* Byte offset for key in encryption security context */ +#define SC_ENC_KEY_OFFSET (1 + 27 + 4) +/* Byte offset for Aux-1 in encryption security context */ +#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32) + +#define SA_CMDL_UPD_ENC 0x0001 +#define SA_CMDL_UPD_AUTH 0x0002 +#define SA_CMDL_UPD_ENC_IV 0x0004 +#define SA_CMDL_UPD_AUTH_IV 0x0008 +#define SA_CMDL_UPD_AUX_KEY 0x0010 + +#define SA_AUTH_SUBKEY_LEN 16 +#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF +#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000 + +#define MODE_CONTROL_BYTES 27 +#define SA_HASH_PROCESSING 0 +#define SA_CRYPTO_PROCESSING 0 +#define SA_UPLOAD_HASH_TO_TLR BIT(6) + +#define SA_SW0_FLAGS_MASK 0xF0000 +#define SA_SW0_CMDL_INFO_MASK 0x1F00000 +#define SA_SW0_CMDL_PRESENT BIT(4) +#define SA_SW0_ENG_ID_MASK 0x3E000000 +#define SA_SW0_DEST_INFO_PRESENT BIT(30) +#define SA_SW2_EGRESS_LENGTH 0xFF000000 +#define SA_BASIC_HASH 0x10 + +#define SHA256_DIGEST_WORDS 8 +/* Make 32-bit word from 4 bytes */ +#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \ + ((b2) << 8) | (b3)) + +/* size of SCCTL structure in bytes */ +#define SA_SCCTL_SZ 16 + +/* Max Authentication tag size */ +#define SA_MAX_AUTH_TAG_SZ 64 + +#define PRIV_ID 0x1 +#define PRIV 0x1 + +static struct device *sa_k3_dev; + +/** + * struct sa_cmdl_cfg - Command label configuration descriptor + * @aalg: authentication algorithm ID + * @enc_eng_id: Encryption Engine ID supported by the SA hardware + * @auth_eng_id: Authentication Engine ID + * @iv_size: Initialization Vector size + * @akey: Authentication key + * @akey_len: Authentication key length + * @enc: True, if this is an encode request + */ +struct sa_cmdl_cfg { + int aalg; + u8 enc_eng_id; + u8 auth_eng_id; + u8 iv_size; + const u8 *akey; + u16 akey_len; + bool enc; +}; + +/** + * struct algo_data - Crypto algorithm specific data + * @enc_eng: Encryption engine info structure + * @auth_eng: Authentication engine info structure + * @auth_ctrl: Authentication control word + * @hash_size: Size of digest + * @iv_idx: iv index in psdata + * @iv_out_size: iv out size + * @ealg_id: Encryption Algorithm ID + * @aalg_id: Authentication algorithm ID + * @mci_enc: Mode Control Instruction for Encryption algorithm + * @mci_dec: Mode Control Instruction for Decryption + * @inv_key: Whether the encryption algorithm demands key inversion + * @ctx: Pointer to the algorithm context + * @keyed_mac: Whether the authentication algorithm has key + * @prep_iopad: Function pointer to generate intermediate ipad/opad + */ +struct algo_data { + struct sa_eng_info enc_eng; + struct sa_eng_info auth_eng; + u8 auth_ctrl; + u8 hash_size; + u8 iv_idx; + u8 iv_out_size; + u8 ealg_id; + u8 aalg_id; + u8 *mci_enc; + u8 *mci_dec; + bool inv_key; + struct sa_tfm_ctx *ctx; + bool keyed_mac; + void 
(*prep_iopad)(struct algo_data *algo, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad); +}; + +/** + * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms + * @type: Type of the crypto algorithm. + * @alg: Union of crypto algorithm definitions. + * @registered: Flag indicating if the crypto algorithm is already registered + */ +struct sa_alg_tmpl { + u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */ + union { + struct skcipher_alg skcipher; + struct ahash_alg ahash; + struct aead_alg aead; + } alg; + bool registered; +}; + +/** + * struct sa_rx_data: RX Packet miscellaneous data place holder + * @req: crypto request data pointer + * @ddev: pointer to the DMA device + * @tx_in: dma_async_tx_descriptor pointer for rx channel + * @split_src_sg: Set if the src sg is split and needs to be freed up + * @split_dst_sg: Set if the dst sg is split and needs to be freed up + * @enc: Flag indicating either encryption or decryption + * @enc_iv_size: Initialisation vector size + * @iv_idx: Initialisation vector index + * @rx_sg: Static scatterlist entry for overriding RX data + * @tx_sg: Static scatterlist entry for overriding TX data + * @src: Source data pointer + * @dst: Destination data pointer + */ +struct sa_rx_data { + void *req; + struct device *ddev; + struct dma_async_tx_descriptor *tx_in; + struct scatterlist *split_src_sg; + struct scatterlist *split_dst_sg; + u8 enc; + u8 enc_iv_size; + u8 iv_idx; + struct scatterlist rx_sg; + struct scatterlist tx_sg; + struct scatterlist *src; + struct scatterlist *dst; +}; + +/** + * struct sa_req: SA request definition + * @dev: device for the request + * @size: total data to be xmitted via DMA + * @enc_offset: offset of cipher data + * @enc_size: data to be passed to cipher engine + * @enc_iv: cipher IV + * @auth_offset: offset of the authentication data + * @auth_size: size of the authentication data + * @auth_iv: authentication IV + * @type: algorithm type for the request + * @cmdl: command label pointer + * @base: pointer to the base request + * @ctx: pointer to the algorithm context data + * @enc: true if this is an encode request + * @src: source data + * @dst: destination data + * @callback: DMA callback for the request + * @mdata_size: metadata size passed to DMA + */ +struct sa_req { + struct device *dev; + u16 size; + u8 enc_offset; + u16 enc_size; + u8 *enc_iv; + u8 auth_offset; + u16 auth_size; + u8 *auth_iv; + u32 type; + u32 *cmdl; + struct crypto_async_request *base; + struct sa_tfm_ctx *ctx; + bool enc; + struct scatterlist *src; + struct scatterlist *dst; + dma_async_tx_callback callback; + u16 mdata_size; +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = { + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = { + {
0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for encryption + */ +static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For CBC (Cipher Block Chaining) mode for decryption + */ +static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for encryption + */ +static u8 mci_ecb_enc_array[3][27] = { + { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for various Key lengths 128, 192, 256 + * For ECB (Electronic Code Book) mode for decryption + */ +static u8 mci_ecb_dec_array[3][27] = { + { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, +}; + +/* + * Mode Control Instructions for DES algorithm + * For CBC (Cipher Block Chaining) mode and ECB mode + * encryption and for decryption respectively + */ +static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = 
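/*
 * Editor's note: the three rows of each AES table above differ in exactly
 * one byte, and that byte encodes the key size (128/192/256): the CBC
 * encrypt tables step byte 5 through 0x0a/0x4a/0x8a, while the decrypt and
 * ECB tables step byte 3 through 0x80/0x84/0x88. The rest of each row is
 * presumably fixed mode-control opcode material for the engine, which is
 * why the setkey handlers further down can simply index a row by key_idx.
 */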
{ + 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = { + 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = { + 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +/* + * Perform 16 byte or 128 bit swizzling + * The SA2UL Expects the security context to + * be in little Endian and the bus width is 128 bits or 16 bytes + * Hence swap 16 bytes at a time from higher to lower address + */ +static void sa_swiz_128(u8 *in, u16 len) +{ + u8 data[16]; + int i, j; + + for (i = 0; i < len; i += 16) { + memcpy(data, &in[i], 16); + for (j = 0; j < 16; j++) + in[i + j] = data[15 - j]; + } +} + +/* Prepare the ipad and opad from key as per SHA algorithm step 1*/ +static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz) +{ + int i; + + for (i = 0; i < key_sz; i++) { + k_ipad[i] = key[i] ^ 0x36; + k_opad[i] = key[i] ^ 0x5c; + } + + /* Instead of XOR with 0 */ + for (; i < SHA1_BLOCK_SIZE; i++) { + k_ipad[i] = 0x36; + k_opad[i] = 0x5c; + } +} + +static void sa_export_shash(struct shash_desc *hash, int block_size, + int digest_size, __be32 *out) +{ + union { + struct sha1_state sha1; + struct sha256_state sha256; + struct sha512_state sha512; + } sha; + void *state; + u32 *result; + int i; + + switch (digest_size) { + case SHA1_DIGEST_SIZE: + state = &sha.sha1; + result = sha.sha1.state; + break; + case SHA256_DIGEST_SIZE: + state = &sha.sha256; + result = sha.sha256.state; + break; + default: + dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__, + digest_size); + return; + } + + crypto_shash_export(hash, state); + + for (i = 0; i < digest_size >> 2; i++) + out[i] = cpu_to_be32(result[i]); +} + +static void sa_prepare_iopads(struct algo_data *data, const u8 *key, + u16 key_sz, __be32 *ipad, __be32 *opad) +{ + SHASH_DESC_ON_STACK(shash, data->ctx->shash); + int block_size = crypto_shash_blocksize(data->ctx->shash); + int digest_size = crypto_shash_digestsize(data->ctx->shash); + u8 k_ipad[SHA1_BLOCK_SIZE]; + u8 k_opad[SHA1_BLOCK_SIZE]; + + shash->tfm = data->ctx->shash; + + prepare_kiopad(k_ipad, k_opad, key, key_sz); + + memzero_explicit(ipad, block_size); + memzero_explicit(opad, block_size); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_ipad, block_size); + sa_export_shash(shash, block_size, digest_size, ipad); + + crypto_shash_init(shash); + crypto_shash_update(shash, k_opad, block_size); + + sa_export_shash(shash, block_size, digest_size, opad); +} + +/* Derive the inverse key used in AES-CBC decryption operation */ +static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz) +{ + struct crypto_aes_ctx ctx; + int key_pos; + + if (aes_expandkey(&ctx, key, key_sz)) { + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + /* work around to get the right inverse for AES_KEYSIZE_192 size keys */ + if (key_sz == 
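/*
 * Editor's note: prepare_kiopad() above is step 1 of RFC 2104 HMAC -- XOR
 * the key with the 0x36/0x5c constants, zero-padding to the block size --
 * and sa_prepare_iopads() then hashes one block of each to produce the
 * intermediate digests the engine consumes. A worked byte example
 * (illustrative key byte):
 *
 *	key[0] = 0x0b  ->  k_ipad[0] = 0x0b ^ 0x36 = 0x3d
 *	                   k_opad[0] = 0x0b ^ 0x5c = 0x57
 *
 * Past key_sz the key is implicitly zero, so the pad bytes degenerate to
 * the constants themselves (0x36 and 0x5c), which is what the second loop
 * writes directly "instead of XOR with 0". The pad buffers are sized
 * SHA1_BLOCK_SIZE (64 bytes), which equals the SHA-256 block size as well,
 * so one size serves both hashes handled here.
 */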
AES_KEYSIZE_192) { + ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46]; + ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47]; + } + + /* Based crypto_aes_expand_key logic */ + switch (key_sz) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + key_pos = key_sz + 24; + break; + + case AES_KEYSIZE_256: + key_pos = key_sz + 24 - 4; + break; + + default: + dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz); + return -EINVAL; + } + + memcpy(inv_key, &ctx.key_enc[key_pos], key_sz); + return 0; +} + +/* Set Security context for the encryption engine */ +static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 enc, u8 *sc_buf) +{ + const u8 *mci = NULL; + + /* Set Encryption mode selector to crypto processing */ + sc_buf[0] = SA_CRYPTO_PROCESSING; + + if (enc) + mci = ad->mci_enc; + else + mci = ad->mci_dec; + /* Set the mode control instructions in security context */ + if (mci) + memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES); + + /* For AES-CBC decryption get the inverse key */ + if (ad->inv_key && !enc) { + if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz)) + return -EINVAL; + /* For all other cases: key is used */ + } else { + memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz); + } + + return 0; +} + +/* Set Security context for the authentication engine */ +static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz, + u8 *sc_buf) +{ + __be32 ipad[64], opad[64]; + + /* Set Authentication mode selector to hash processing */ + sc_buf[0] = SA_HASH_PROCESSING; + /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */ + sc_buf[1] = SA_UPLOAD_HASH_TO_TLR; + sc_buf[1] |= ad->auth_ctrl; + + /* Copy the keys or ipad/opad */ + if (ad->keyed_mac) { + ad->prep_iopad(ad, key, key_sz, ipad, opad); + + /* Copy ipad to AuthKey */ + memcpy(&sc_buf[32], ipad, ad->hash_size); + /* Copy opad to Aux-1 */ + memcpy(&sc_buf[64], opad, ad->hash_size); + } else { + /* basic hash */ + sc_buf[1] |= SA_BASIC_HASH; + } +} + +static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16) +{ + int j; + + for (j = 0; j < ((size16) ? 
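/*
 * Editor's note: the key_pos values in sa_aes_inv_key() above index u32
 * words of the expanded key schedule, which holds 4 * (rounds + 1) words.
 * Worked out per key size:
 *
 *	AES-128: 44-word schedule; key_pos = 16 + 24 = 40, copying 4 words
 *	         (40..43) -- the final round key.
 *	AES-256: 60-word schedule; key_pos = 32 + 24 - 4 = 52, copying
 *	         8 words (52..59) -- the last two round keys.
 *	AES-192: 52-word schedule; key_pos = 24 + 24 = 48 with a 6-word
 *	         copy needs words 48..53, two past the end of what
 *	         aes_expandkey() fills in -- hence the workaround, which
 *	         extends the schedule using the AES-192 recurrence
 *	         w[i] = w[i-1] ^ w[i-6] for w[52] and w[53].
 */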
4 : 2); j++) { + *out = cpu_to_be32(*((u32 *)iv)); + iv += 4; + out++; + } +} + +/* Format general command label */ +static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + u8 enc_offset = 0, auth_offset = 0, total = 0; + u8 enc_next_eng = SA_ENG_ID_OUTPORT2; + u8 auth_next_eng = SA_ENG_ID_OUTPORT2; + u32 *word_ptr = (u32 *)cmdl; + int i; + + /* Clear the command label */ + memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32))); + + /* Initialize the command update structure */ + memzero_explicit(upd_info, sizeof(*upd_info)); + + if (cfg->enc_eng_id && cfg->auth_eng_id) { + if (cfg->enc) { + auth_offset = SA_CMDL_HEADER_SIZE_BYTES; + enc_next_eng = cfg->auth_eng_id; + + if (cfg->iv_size) + auth_offset += cfg->iv_size; + } else { + enc_offset = SA_CMDL_HEADER_SIZE_BYTES; + auth_next_eng = cfg->enc_eng_id; + } + } + + if (cfg->enc_eng_id) { + upd_info->flags |= SA_CMDL_UPD_ENC; + upd_info->enc_size.index = enc_offset >> 2; + upd_info->enc_offset.index = upd_info->enc_size.index + 1; + /* Encryption command label */ + cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng; + + /* Encryption modes requiring IV */ + if (cfg->iv_size) { + upd_info->flags |= SA_CMDL_UPD_ENC_IV; + upd_info->enc_iv.index = + (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2; + upd_info->enc_iv.size = cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + + cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] = + (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3)); + total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size; + } else { + cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; + } + } + + if (cfg->auth_eng_id) { + upd_info->flags |= SA_CMDL_UPD_AUTH; + upd_info->auth_size.index = auth_offset >> 2; + upd_info->auth_offset.index = upd_info->auth_size.index + 1; + cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng; + cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] = + SA_CMDL_HEADER_SIZE_BYTES; + total += SA_CMDL_HEADER_SIZE_BYTES; + } + + total = roundup(total, 8); + + for (i = 0; i < total / 4; i++) + word_ptr[i] = swab32(word_ptr[i]); + + return total; +} + +/* Update Command label */ +static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl, + struct sa_cmdl_upd_info *upd_info) +{ + int i = 0, j; + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) { + cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->enc_size.index] |= req->enc_size; + cmdl[upd_info->enc_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->enc_offset.index] |= + ((u32)req->enc_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + + if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) { + __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index]; + u32 *enc_iv = (u32 *)req->enc_iv; + + for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) { + data[j] = cpu_to_be32(*enc_iv); + enc_iv++; + } + } + } + + if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) { + cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK; + cmdl[upd_info->auth_size.index] |= req->auth_size; + cmdl[upd_info->auth_offset.index] &= + ~SA_CMDL_SOP_BYPASS_LEN_MASK; + cmdl[upd_info->auth_offset.index] |= + ((u32)req->auth_offset << + __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK)); + if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) { + sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index], + req->auth_iv, + (upd_info->auth_iv.size > 8)); + } + if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) { + int offset =
(req->auth_size & 0xF) ? 4 : 0; + + memcpy(&cmdl[upd_info->aux_key_info.index], + &upd_info->aux_key[offset], 16); + } + } +} + +/* Format SWINFO words to be sent to SA */ +static +void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys, + u8 cmdl_present, u8 cmdl_offset, u8 flags, + u8 hash_size, u32 *swinfo) +{ + swinfo[0] = sc_id; + swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK)); + if (likely(cmdl_present)) + swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) << + __ffs(SA_SW0_CMDL_INFO_MASK)); + swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK)); + + swinfo[0] |= SA_SW0_DEST_INFO_PRESENT; + swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL); + swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32); + swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH)); +} + +/* Dump the security context */ +static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) +{ +#ifdef DEBUG + dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, buf, SA_CTX_MAX_SZ, false); +#endif +} + +static +int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, + u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz, + struct algo_data *ad, u8 enc, u32 *swinfo) +{ + int enc_sc_offset = 0; + int auth_sc_offset = 0; + u8 *sc_buf = ctx->sc; + u16 sc_id = ctx->sc_id; + u8 first_engine = 0; + + memzero_explicit(sc_buf, SA_CTX_MAX_SZ); + + if (ad->auth_eng.eng_id) { + if (enc) + first_engine = ad->enc_eng.eng_id; + else + first_engine = ad->auth_eng.eng_id; + + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size; + sc_buf[1] = SA_SCCTL_FE_AUTH_ENC; + if (!ad->hash_size) + return -EINVAL; + ad->hash_size = roundup(ad->hash_size, 8); + + } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) { + enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ; + first_engine = ad->enc_eng.eng_id; + sc_buf[1] = SA_SCCTL_FE_ENC; + ad->hash_size = ad->iv_out_size; + } + + /* SCCTL Owner info: 0=host, 1=CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; + memcpy(&sc_buf[2], &sc_id, 2); + sc_buf[4] = 0x0; + sc_buf[5] = PRIV_ID; + sc_buf[6] = PRIV; + sc_buf[7] = 0x0; + + /* Prepare context for encryption engine */ + if (ad->enc_eng.sc_size) { + if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc, + &sc_buf[enc_sc_offset])) + return -EINVAL; + } + + /* Prepare context for authentication engine */ + if (ad->auth_eng.sc_size) + sa_set_sc_auth(ad, auth_key, auth_key_sz, + &sc_buf[auth_sc_offset]); + + /* Set the ownership of context to CP_ACE */ + sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80; + + /* swizzle the security context */ + sa_swiz_128(sc_buf, SA_CTX_MAX_SZ); + + sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0, + SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo); + + sa_dump_sc(sc_buf, ctx->sc_phys); + + return 0; +} + +/* Free the per direction context memory */ +static void sa_free_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + + bn = ctx->sc_id - data->sc_id_start; + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + if (ctx->sc) { + dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys); + ctx->sc = NULL; + } +} + +static int sa_init_ctx_info(struct sa_ctx_info *ctx, + struct sa_crypto_data *data) +{ + unsigned long bn; + int err; + + spin_lock(&data->scid_lock); + bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX); + __set_bit(bn, data->ctx_bm); + data->sc_id++; + spin_unlock(&data->scid_lock); + + ctx->sc_id = (u16)(data->sc_id_start 
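/*
 * Editor's note: sa_update_cmdl() and sa_set_swinfo() above pack fields
 * with the (value << __ffs(mask)) idiom, so the mask macro is the single
 * source of truth for the field position. A self-contained sketch of the
 * idiom (the helper name is illustrative; <linux/bitfield.h> offers
 * FIELD_PREP() for the compile-time-constant case):
 */
#include <linux/bitops.h>
#include <linux/types.h>

static inline u32 sa_field_prep(u32 mask, u32 val)
{
	/* shift the value up to the mask's lowest set bit, clamp to mask */
	return (val << __ffs(mask)) & mask;
}

/*
 * Example with SA_SW0_CMDL_INFO_MASK (0x1F00000): __ffs() yields 20, so
 * sa_field_prep(0x1F00000, 0x13) == 0x13 << 20 == 0x1300000.
 */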
+ bn); + + ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys); + if (!ctx->sc) { + dev_err(&data->pdev->dev, "Failed to allocate SC memory\n"); + err = -ENOMEM; + goto scid_rollback; + } + + return 0; + +scid_rollback: + spin_lock(&data->scid_lock); + __clear_bit(bn, data->ctx_bm); + data->sc_id--; + spin_unlock(&data->scid_lock); + + return err; +} + +static void sa_cipher_cra_exit(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); + + crypto_free_sync_skcipher(ctx->fallback.skcipher); +} + +static int sa_cipher_cra_init(struct crypto_skcipher *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + const char *name = crypto_tfm_alg_name(&tfm->base); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + ctx->dev_data = data; + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + ctx->fallback.skcipher = + crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.skcipher)) { + dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name); + return PTR_ERR(ctx->fallback.skcipher); + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + return 0; +} + +static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen, struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int cmdl_len; + struct sa_cmdl_cfg cfg; + int ret; + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + + memzero_explicit(&cfg, sizeof(cfg)); + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.iv_size = crypto_skcipher_ivsize(tfm); + + crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->fallback.skcipher, + tfm->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen); + if (ret) + return ret; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1, + &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security Context & Command label template */ + if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0, + &ctx->dec.epib[1])) + goto badkey; + + cfg.enc_eng_id = ad->enc_eng.eng_id; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->dec.cmdl_size = cmdl_len; + ctx->iv_idx = ad->iv_idx; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_aes_cbc_setkey(struct 
crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_cbc_enc_array[key_idx]; + ad.mci_dec = mci_cbc_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.iv_idx = 4; + ad.iv_out_size = 16; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + int key_idx = (keylen >> 3) - 2; + + if (key_idx >= 3) + return -EINVAL; + + ad.mci_enc = mci_ecb_enc_array[key_idx]; + ad.mci_dec = mci_ecb_dec_array[key_idx]; + ad.inv_key = true; + ad.ealg_id = SA_EALG_ID_AES_ECB; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_cbc_3des_enc_array; + ad.mci_dec = mci_cbc_3des_dec_array; + ad.ealg_id = SA_EALG_ID_3DES_CBC; + ad.iv_idx = 6; + ad.iv_out_size = 8; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.mci_enc = mci_ecb_3des_enc_array; + ad.mci_dec = mci_ecb_3des_dec_array; + + return sa_cipher_setkey(tfm, key, keylen, &ad); +} + +static void sa_aes_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct skcipher_request *req; + int sglen; + u32 *result; + __be32 *mdptr; + size_t ml, pl; + int i; + enum dma_data_direction dir_src; + bool diff_dst; + + req = container_of(rxd->req, struct skcipher_request, base); + sglen = sg_nents_for_len(req->src, req->cryptlen); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + if (req->iv) { + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, + &ml); + result = (u32 *)req->iv; + + for (i = 0; i < (rxd->enc_iv_size / 4); i++) + result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]); + } + + dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(req->dst, req->cryptlen); + + dma_unmap_sg(rxd->ddev, req->dst, sglen, + DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + skcipher_request_complete(req, 0); +} + +static void +sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib) +{ + u32 *out, *in; + int i; + + for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++) + *out++ = *in++; + + mdptr[4] = (0xFFFF << 16); + for (out = &mdptr[5], in = psdata, i = 0; + i < pslen / sizeof(u32); i++) + *out++ = *in++; +} + +static int sa_run(struct sa_req *req) +{ + struct sa_rx_data *rxd; + gfp_t gfp_flags; + u32 cmdl[SA_MAX_CMDL_WORDS]; + struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev); + struct device *ddev; + struct dma_chan *dma_rx; + int sg_nents, src_nents, dst_nents; + int mapped_src_nents, mapped_dst_nents; + struct scatterlist *src, *dst; + size_t pl, ml, split_size; + struct sa_ctx_info *sa_ctx = req->enc ? 
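/*
 * Editor's note: the key-size-to-row conversion in the AES setkey handlers
 * above is plain arithmetic on the byte length: (16 >> 3) - 2 = 0,
 * (24 >> 3) - 2 = 1 and (32 >> 3) - 2 = 2 select the matching row of the
 * three-row mode control tables; larger keys fail the key_idx >= 3 check,
 * and other invalid lengths are rejected again by the explicit keylen test
 * in sa_cipher_setkey().
 */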
&req->ctx->enc : &req->ctx->dec; + int ret; + struct dma_async_tx_descriptor *tx_out; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + rxd = kzalloc(sizeof(*rxd), gfp_flags); + if (!rxd) + return -ENOMEM; + + if (req->src != req->dst) { + diff_dst = true; + dir_src = DMA_TO_DEVICE; + } else { + diff_dst = false; + dir_src = DMA_BIDIRECTIONAL; + } + + /* + * SA2UL has an interesting feature where the receive DMA channel + * is selected based on the data passed to the engine. Within the + * transition range, there is also a space where it is impossible + * to determine where the data will end up, and this should be + * avoided. This will be handled by the SW fallback mechanism by + * the individual algorithm implementations. + */ + if (req->size >= 256) + dma_rx = pdata->dma_rx2; + else + dma_rx = pdata->dma_rx1; + + ddev = dma_rx->device->dev; + + memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size); + + sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info); + + if (req->type != CRYPTO_ALG_TYPE_AHASH) { + if (req->enc) + req->type |= + (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT); + else + req->type |= + (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT); + } + + cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type; + + /* + * Map the packets, first we check if the data fits into a single + * sg entry and use that if possible. If it does not fit, we check + * if we need to do sg_split to align the scatterlist data on the + * actual data size being processed by the crypto engine. + */ + src = req->src; + sg_nents = sg_nents_for_len(src, req->size); + + split_size = req->size; + + if (sg_nents == 1 && split_size <= req->src->length) { + src = &rxd->rx_sg; + sg_init_table(src, 1); + sg_set_page(src, sg_page(req->src), split_size, + req->src->offset); + src_nents = 1; + dma_map_sg(ddev, src, sg_nents, dir_src); + } else { + mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents, + dir_src); + ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size, + &src, &src_nents, gfp_flags); + if (ret) { + src_nents = sg_nents; + src = req->src; + } else { + rxd->split_src_sg = src; + } + } + + if (!diff_dst) { + dst_nents = src_nents; + dst = src; + } else { + dst_nents = sg_nents_for_len(req->dst, req->size); + + if (dst_nents == 1 && split_size <= req->dst->length) { + dst = &rxd->tx_sg; + sg_init_table(dst, 1); + sg_set_page(dst, sg_page(req->dst), split_size, + req->dst->offset); + dst_nents = 1; + dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents, + DMA_FROM_DEVICE); + ret = sg_split(req->dst, mapped_dst_nents, 0, 1, + &split_size, &dst, &dst_nents, + gfp_flags); + if (ret) { + dst_nents = dst_nents; + dst = req->dst; + } else { + rxd->split_dst_sg = dst; + } + } + } + + if (unlikely(src_nents != sg_nents)) { + dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n"); + ret = -EIO; + goto err_cleanup; + } + + rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxd->tx_in) { + dev_err(pdata->dev, "IN prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + rxd->req = (void *)req->base; + rxd->enc = req->enc; + rxd->ddev = ddev; + rxd->src = src; + rxd->dst = dst; + rxd->iv_idx = req->ctx->iv_idx; + rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size; + rxd->tx_in->callback = req->callback; + rxd->tx_in->callback_param = rxd; + + tx_out = 
dmaengine_prep_slave_sg(pdata->dma_tx, src, + src_nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!tx_out) { + dev_err(pdata->dev, "OUT prep_slave_sg() failed\n"); + ret = -EINVAL; + goto err_cleanup; + } + + /* + * Prepare metadata for the DMA engine. This essentially describes + * the crypto algorithm to be used, data sizes, different keys etc. + */ + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml); + + sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * + sizeof(u32))), cmdl, sizeof(sa_ctx->epib), + sa_ctx->epib); + + dmaengine_desc_set_metadata_len(tx_out, req->mdata_size); + + dmaengine_submit(tx_out); + dmaengine_submit(rxd->tx_in); + + dma_async_issue_pending(dma_rx); + dma_async_issue_pending(pdata->dma_tx); + + return -EINPROGRESS; + +err_cleanup: + dma_unmap_sg(ddev, req->src, sg_nents, dir_src); + kfree(rxd->split_src_sg); + + if (req->src != req->dst) { + dst_nents = sg_nents_for_len(req->dst, req->size); + dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + kfree(rxd); + + return ret; +} + +static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc) +{ + struct sa_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct crypto_alg *alg = req->base.tfm->__crt_alg; + struct sa_req sa_req = { 0 }; + int ret; + + if (!req->cryptlen) + return 0; + + if (req->cryptlen % alg->cra_blocksize) + return -EINVAL; + + /* Use SW fallback if the data size is not supported */ + if (req->cryptlen > SA_MAX_DATA_SZ || + (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN && + req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) { + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher); + + skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + if (enc) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + return ret; + } + + sa_req.size = req->cryptlen; + sa_req.enc_size = req->cryptlen; + sa_req.src = req->src; + sa_req.dst = req->dst; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER; + sa_req.enc = enc; + sa_req.callback = sa_aes_dma_in_callback; + sa_req.mdata_size = 44; + sa_req.base = &req->base; + sa_req.ctx = ctx; + + return sa_run(&sa_req); +} + +static int sa_encrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 1); +} + +static int sa_decrypt(struct skcipher_request *req) +{ + return sa_cipher_run(req, req->iv, 0); +} + +static void sa_sha_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct ahash_request *req; + struct crypto_ahash *tfm; + unsigned int authsize; + int i, sg_nents; + size_t ml, pl; + u32 *result; + __be32 *mdptr; + + req = container_of(rxd->req, struct ahash_request, base); + tfm = crypto_ahash_reqtfm(req); + authsize = crypto_ahash_digestsize(tfm); + + mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + result = (u32 *)req->result; + + for (i = 0; i < (authsize / 4); i++) + result[i] = be32_to_cpu(mdptr[i + 4]); + + sg_nents = sg_nents_for_len(req->src, req->nbytes); + dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_BIDIRECTIONAL); + + kfree(rxd->split_src_sg); + + kfree(rxd); + + ahash_request_complete(req, 0); +} + +static int zero_message_process(struct
ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int sa_digest_size = crypto_ahash_digestsize(tfm); + + switch (sa_digest_size) { + case SHA1_DIGEST_SIZE: + memcpy(req->result, sha1_zero_message_hash, sa_digest_size); + break; + case SHA256_DIGEST_SIZE: + memcpy(req->result, sha256_zero_message_hash, sa_digest_size); + break; + case SHA512_DIGEST_SIZE: + memcpy(req->result, sha512_zero_message_hash, sa_digest_size); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sa_sha_run(struct ahash_request *req) +{ + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_req sa_req = { 0 }; + size_t auth_len; + + auth_len = req->nbytes; + + if (!auth_len) + return zero_message_process(req); + + if (auth_len > SA_MAX_DATA_SZ || + (auth_len >= SA_UNSAFE_DATA_SZ_MIN && + auth_len <= SA_UNSAFE_DATA_SZ_MAX)) { + struct ahash_request *subreq = &rctx->fallback_req; + int ret = 0; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + crypto_ahash_init(subreq); + + subreq->nbytes = auth_len; + subreq->src = req->src; + subreq->result = req->result; + + ret |= crypto_ahash_update(subreq); + + subreq->nbytes = 0; + + ret |= crypto_ahash_final(subreq); + + return ret; + } + + sa_req.size = auth_len; + sa_req.auth_size = auth_len; + sa_req.src = req->src; + sa_req.dst = req->src; + sa_req.enc = true; + sa_req.type = CRYPTO_ALG_TYPE_AHASH; + sa_req.callback = sa_sha_dma_in_callback; + sa_req.mdata_size = 28; + sa_req.ctx = ctx; + sa_req.base = &req->base; + + return sa_run(&sa_req); +} + +static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad) +{ + int bs = crypto_shash_blocksize(ctx->shash); + int cmdl_len; + struct sa_cmdl_cfg cfg; + + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + + memset(ctx->authkey, 0, bs); + memset(&cfg, 0, sizeof(cfg)); + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = 0; + cfg.akey = NULL; + cfg.akey_len = 0; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0, + &ctx->enc.epib[1])) + goto badkey; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + goto badkey; + + ctx->enc.cmdl_size = cmdl_len; + + return 0; + +badkey: + dev_err(sa_k3_dev, "%s: badkey\n", __func__); + return -EINVAL; +} + +static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memset(ctx, 0, sizeof(*ctx)); + ctx->dev_data = data; + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + if (alg_base) { + ctx->shash = crypto_alloc_shash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", + alg_base); + return PTR_ERR(ctx->shash); + } + /* for fallback */ + ctx->fallback.ahash = + crypto_alloc_ahash(alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback.ahash)) { + dev_err(ctx->dev_data->dev, + "Could not load fallback driver\n"); + return PTR_ERR(ctx->fallback.ahash); + } + } + + dev_dbg(sa_k3_dev, "%s(0x%p) 
sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sa_sha_req_ctx) + + crypto_ahash_reqsize(ctx->fallback.ahash)); + + return 0; +} + +static int sa_sha_digest(struct ahash_request *req) +{ + return sa_sha_run(req); +} + +static int sa_sha_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n", + crypto_ahash_digestsize(tfm), (u64)rctx); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int sa_sha_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int sa_sha_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int sa_sha_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sa_sha_import(struct ahash_request *req, const void *in) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int sa_sha_export(struct ahash_request *req, void *out) +{ + struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); + struct ahash_request *subreq = &rctx->fallback_req; + + ahash_request_set_tfm(subreq, ctx->fallback.ahash); + subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(subreq, out); +} + +static int sa_sha1_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha1"); + + ad.aalg_id = SA_AALG_ID_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + 
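/*
 * The ahash entry points above (sa_sha_init/update/final/finup/import)
 * simply drive a software fallback whose request lives at the tail of
 * this driver's request context; that is why sa_sha_cra_init_alg() sets
 * the reqsize to sizeof(struct sa_sha_req_ctx) +
 * crypto_ahash_reqsize(ctx->fallback.ahash). A minimal sketch of the
 * same pattern (names hypothetical):
 *
 *	struct my_req_ctx {
 *		u32 driver_state;
 *		struct ahash_request fallback_req;	// must stay last
 *	};
 *
 *	crypto_ahash_set_reqsize(tfm, sizeof(struct my_req_ctx) +
 *				 crypto_ahash_reqsize(fallback_tfm));
 *
 * Embedding the fallback request this way avoids an extra allocation on
 * every hash call.
 */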
ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha256_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha256"); + + ad.aalg_id = SA_AALG_ID_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static int sa_sha512_cra_init(struct crypto_tfm *tfm) +{ + struct algo_data ad = { 0 }; + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + sa_sha_cra_init_alg(tfm, "sha512"); + + ad.aalg_id = SA_AALG_ID_SHA2_512; + ad.hash_size = SHA512_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512; + + sa_sha_setup(ctx, &ad); + + return 0; +} + +static void sa_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH) + sa_free_ctx_info(&ctx->enc, data); + + crypto_free_shash(ctx->shash); + crypto_free_ahash(ctx->fallback.ahash); +} + +static void sa_aead_dma_in_callback(void *data) +{ + struct sa_rx_data *rxd = (struct sa_rx_data *)data; + struct aead_request *req; + struct crypto_aead *tfm; + unsigned int start; + unsigned int authsize; + u8 auth_tag[SA_MAX_AUTH_TAG_SZ]; + size_t pl, ml; + int i, sglen; + int err = 0; + u16 auth_len; + u32 *mdptr; + bool diff_dst; + enum dma_data_direction dir_src; + + req = container_of(rxd->req, struct aead_request, base); + tfm = crypto_aead_reqtfm(req); + start = req->assoclen + req->cryptlen; + authsize = crypto_aead_authsize(tfm); + + diff_dst = (req->src != req->dst) ? true : false; + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml); + for (i = 0; i < (authsize / 4); i++) + mdptr[i + 4] = swab32(mdptr[i + 4]); + + auth_len = req->assoclen + req->cryptlen; + if (!rxd->enc) + auth_len -= authsize; + + sglen = sg_nents_for_len(rxd->src, auth_len); + dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src); + kfree(rxd->split_src_sg); + + if (diff_dst) { + sglen = sg_nents_for_len(rxd->dst, auth_len); + dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE); + kfree(rxd->split_dst_sg); + } + + if (rxd->enc) { + scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize, + 1); + } else { + start -= authsize; + scatterwalk_map_and_copy(auth_tag, req->src, start, authsize, + 0); + + err = memcmp(&mdptr[4], auth_tag, authsize) ? 
-EBADMSG : 0; + } + + kfree(rxd); + + aead_request_complete(req, err); +} + +static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash, + const char *fallback) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + int ret; + + memzero_explicit(ctx, sizeof(*ctx)); + + ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->shash)) { + dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash); + return PTR_ERR(ctx->shash); + } + + ctx->fallback.aead = crypto_alloc_aead(fallback, 0, + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback.aead)) { + dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n", + fallback); + return PTR_ERR(ctx->fallback.aead); + } + + crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->fallback.aead)); + + ret = sa_init_ctx_info(&ctx->enc, data); + if (ret) + return ret; + + ret = sa_init_ctx_info(&ctx->dec, data); + if (ret) { + sa_free_ctx_info(&ctx->enc, data); + return ret; + } + + dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n", + __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys, + ctx->dec.sc_id, &ctx->dec.sc_phys); + + return ret; +} + +static int sa_cra_init_aead_sha1(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha1", + "authenc(hmac(sha1-ce),cbc(aes-ce))"); +} + +static int sa_cra_init_aead_sha256(struct crypto_aead *tfm) +{ + return sa_cra_init_aead(tfm, "sha256", + "authenc(hmac(sha256-ce),cbc(aes-ce))"); +} + +static void sa_exit_tfm_aead(struct crypto_aead *tfm) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev); + + crypto_free_shash(ctx->shash); + crypto_free_aead(ctx->fallback.aead); + + sa_free_ctx_info(&ctx->enc, data); + sa_free_ctx_info(&ctx->dec, data); +} + +/* AEAD algorithm configuration interface function */ +static int sa_aead_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen, + struct algo_data *ad) +{ + struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_authenc_keys keys; + int cmdl_len; + struct sa_cmdl_cfg cfg; + int key_idx; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + return -EINVAL; + + /* Convert the key size (16/24/32) to the key size index (0/1/2) */ + key_idx = (keys.enckeylen >> 3) - 2; + if (key_idx >= 3) + return -EINVAL; + + ad->ctx = ctx; + ad->enc_eng.eng_id = SA_ENG_ID_EM1; + ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ; + ad->auth_eng.eng_id = SA_ENG_ID_AM1; + ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ; + ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx]; + ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx]; + ad->inv_key = true; + ad->keyed_mac = true; + ad->ealg_id = SA_EALG_ID_AES_CBC; + ad->prep_iopad = sa_prepare_iopads; + + memset(&cfg, 0, sizeof(cfg)); + cfg.enc = true; + cfg.aalg = ad->aalg_id; + cfg.enc_eng_id = ad->enc_eng.eng_id; + cfg.auth_eng_id = ad->auth_eng.eng_id; + cfg.iv_size = crypto_aead_ivsize(authenc); + cfg.akey = keys.authkey; + cfg.akey_len = keys.authkeylen; + + /* Setup Encryption Security Context & Command label template */ + if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 1, &ctx->enc.epib[1])) + return -EINVAL; + + cmdl_len = sa_format_cmdl_gen(&cfg, + (u8 *)ctx->enc.cmdl, + &ctx->enc.cmdl_upd_info); + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->enc.cmdl_size = cmdl_len; + + /* Setup Decryption Security 
Context & Command label template */ + if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen, + keys.authkey, keys.authkeylen, + ad, 0, &ctx->dec.epib[1])) + return -EINVAL; + + cfg.enc = false; + cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl, + &ctx->dec.cmdl_upd_info); + + if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32))) + return -EINVAL; + + ctx->dec.cmdl_size = cmdl_len; + + crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(ctx->fallback.aead, + crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + + return crypto_aead_setkey(ctx->fallback.aead, key, keylen); +} + +static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); + + return crypto_aead_setauthsize(ctx->fallback.aead, authsize); +} + +static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA1; + ad.hash_size = SHA1_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc, + const u8 *key, unsigned int keylen) +{ + struct algo_data ad = { 0 }; + + ad.ealg_id = SA_EALG_ID_AES_CBC; + ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256; + ad.hash_size = SHA256_DIGEST_SIZE; + ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256; + + return sa_aead_setkey(authenc, key, keylen, &ad); +} + +static int sa_aead_run(struct aead_request *req, u8 *iv, int enc) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm); + struct sa_req sa_req = { 0 }; + size_t auth_size, enc_size; + + enc_size = req->cryptlen; + auth_size = req->assoclen + req->cryptlen; + + if (!enc) { + enc_size -= crypto_aead_authsize(tfm); + auth_size -= crypto_aead_authsize(tfm); + } + + if (auth_size > SA_MAX_DATA_SZ || + (auth_size >= SA_UNSAFE_DATA_SZ_MIN && + auth_size <= SA_UNSAFE_DATA_SZ_MAX)) { + struct aead_request *subreq = aead_request_ctx(req); + int ret; + + aead_request_set_tfm(subreq, ctx->fallback.aead); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + ret = enc ?
crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); + return ret; + } + + sa_req.enc_offset = req->assoclen; + sa_req.enc_size = enc_size; + sa_req.auth_size = auth_size; + sa_req.size = auth_size; + sa_req.enc_iv = iv; + sa_req.type = CRYPTO_ALG_TYPE_AEAD; + sa_req.enc = enc; + sa_req.callback = sa_aead_dma_in_callback; + sa_req.mdata_size = 52; + sa_req.base = &req->base; + sa_req.ctx = ctx; + sa_req.src = req->src; + sa_req.dst = req->dst; + + return sa_run(&sa_req); +} + +/* AEAD algorithm encrypt interface function */ +static int sa_aead_encrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 1); +} + +/* AEAD algorithm decrypt interface function */ +static int sa_aead_decrypt(struct aead_request *req) +{ + return sa_aead_run(req, req->iv, 0); +} + +static struct sa_alg_tmpl sa_algs[] = { + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sa_aes_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sa_aes_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = sa_3des_cbc_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.skcipher = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-sa2ul", + .base.cra_priority = 30000, + .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct sa_tfm_ctx), + .base.cra_module = THIS_MODULE, + .init = sa_cipher_cra_init, + .exit = sa_cipher_cra_exit, + .min_keysize = 3 * DES_KEY_SIZE, + .max_keysize = 3 * DES_KEY_SIZE, + .setkey = sa_3des_ecb_setkey, + .encrypt = sa_encrypt, + .decrypt = sa_decrypt, + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = 
"sha1-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha1_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha1_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha256_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha256_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .alg.ahash = { + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-sa2ul", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sa_sha512_cra_init, + .cra_exit = sa_sha_cra_exit, + }, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct sa_sha_req_ctx) + + sizeof(struct sha512_state), + .init = sa_sha_init, + .update = sa_sha_update, + .final = sa_sha_final, + .finup = sa_sha_finup, + .digest = sa_sha_digest, + .export = sa_sha_export, + .import = sa_sha_import, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha1, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha1_setkey, + .setauthsize = sa_aead_setauthsize, + .encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, + { + .type = CRYPTO_ALG_TYPE_AEAD, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256),cbc(aes))-sa2ul", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sa_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0, + .cra_priority = 3000, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + + .init = sa_cra_init_aead_sha256, + .exit = sa_exit_tfm_aead, + .setkey = sa_aead_cbc_sha256_setkey, + .setauthsize = sa_aead_setauthsize, + 
.encrypt = sa_aead_encrypt, + .decrypt = sa_aead_decrypt, + }, + }, +}; + +/* Register the algorithms in crypto framework */ +static void sa_register_algos(const struct device *dev) +{ + char *alg_name; + u32 type; + int i, err; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + alg_name = sa_algs[i].alg.skcipher.base.cra_name; + err = crypto_register_skcipher(&sa_algs[i].alg.skcipher); + } else if (type == CRYPTO_ALG_TYPE_AHASH) { + alg_name = sa_algs[i].alg.ahash.halg.base.cra_name; + err = crypto_register_ahash(&sa_algs[i].alg.ahash); + } else if (type == CRYPTO_ALG_TYPE_AEAD) { + alg_name = sa_algs[i].alg.aead.base.cra_name; + err = crypto_register_aead(&sa_algs[i].alg.aead); + } else { + dev_err(dev, + "unsupported crypto algorithm (%d)\n", + sa_algs[i].type); + continue; + } + + if (err) + dev_err(dev, "Failed to register '%s'\n", alg_name); + else + sa_algs[i].registered = true; + } +} + +/* Unregister the algorithms in crypto framework */ +static void sa_unregister_algos(const struct device *dev) +{ + u32 type; + int i; + + for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + type = sa_algs[i].type; + if (!sa_algs[i].registered) + continue; + if (type == CRYPTO_ALG_TYPE_SKCIPHER) + crypto_unregister_skcipher(&sa_algs[i].alg.skcipher); + else if (type == CRYPTO_ALG_TYPE_AHASH) + crypto_unregister_ahash(&sa_algs[i].alg.ahash); + else if (type == CRYPTO_ALG_TYPE_AEAD) + crypto_unregister_aead(&sa_algs[i].alg.aead); + + sa_algs[i].registered = false; + } +} + +static int sa_init_mem(struct sa_crypto_data *dev_data) +{ + struct device *dev = &dev_data->pdev->dev; + /* Setup dma pool for security context buffers */ + dev_data->sc_pool = dma_pool_create("keystone-sc", dev, + SA_CTX_MAX_SZ, 64, 0); + if (!dev_data->sc_pool) { + dev_err(dev, "Failed to create dma pool\n"); + return -ENOMEM; + } + + return 0; +} + +static int sa_dma_init(struct sa_crypto_data *dd) +{ + int ret; + struct dma_slave_config cfg; + + dd->dma_rx1 = NULL; + dd->dma_tx = NULL; + dd->dma_rx2 = NULL; + + ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48)); + if (ret) + return ret; + + dd->dma_rx1 = dma_request_chan(dd->dev, "rx1"); + if (IS_ERR(dd->dma_rx1)) { + if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx1 DMA channel\n"); + return PTR_ERR(dd->dma_rx1); + } + + dd->dma_rx2 = dma_request_chan(dd->dev, "rx2"); + if (IS_ERR(dd->dma_rx2)) { + dma_release_channel(dd->dma_rx1); + if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request rx2 DMA channel\n"); + return PTR_ERR(dd->dma_rx2); + } + + dd->dma_tx = dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->dma_tx)) { + if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER) + dev_err(dd->dev, "Unable to request tx DMA channel\n"); + ret = PTR_ERR(dd->dma_tx); + goto err_dma_tx; + } + + memzero_explicit(&cfg, sizeof(cfg)); + + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 4; + cfg.dst_maxburst = 4; + + ret = dmaengine_slave_config(dd->dma_rx1, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + goto err_dma_cfg; + } + + ret = dmaengine_slave_config(dd->dma_rx2, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", + ret); + goto err_dma_cfg; + } + + ret = dmaengine_slave_config(dd->dma_tx, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + goto err_dma_cfg; + } + + return 0; + +err_dma_cfg: + dma_release_channel(dd->dma_tx); +err_dma_tx: +
dma_release_channel(dd->dma_rx1); + dma_release_channel(dd->dma_rx2); + + return ret; +} + +static int sa_link_child(struct device *dev, void *data) +{ + struct device *parent = data; + + device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER); + + return 0; +} + +static int sa_ul_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + void __iomem *saul_base; + struct sa_crypto_data *dev_data; + u32 val; + int ret; + + dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); + if (!dev_data) + return -ENOMEM; + + sa_k3_dev = dev; + dev_data->dev = dev; + dev_data->pdev = pdev; + platform_set_drvdata(pdev, dev_data); + dev_set_drvdata(sa_k3_dev, dev_data); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__, + ret); + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + ret = sa_init_mem(dev_data); + if (ret) + goto disable_pm_runtime; + + ret = sa_dma_init(dev_data); + if (ret) + goto destroy_pool; + + spin_lock_init(&dev_data->scid_lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + saul_base = devm_ioremap_resource(dev, res); + if (IS_ERR(saul_base)) { + ret = PTR_ERR(saul_base); + goto release_dma; + } + + dev_data->base = saul_base; + val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; + + writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); + + sa_register_algos(dev); + + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + goto release_dma; + + device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child); + + return 0; + +release_dma: + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + +destroy_pool: + dma_pool_destroy(dev_data->sc_pool); + +disable_pm_runtime: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int sa_ul_remove(struct platform_device *pdev) +{ + struct sa_crypto_data *dev_data = platform_get_drvdata(pdev); + + sa_unregister_algos(&pdev->dev); + + dma_release_channel(dev_data->dma_rx2); + dma_release_channel(dev_data->dma_rx1); + dma_release_channel(dev_data->dma_tx); + + dma_pool_destroy(dev_data->sc_pool); + + platform_set_drvdata(pdev, NULL); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id of_match[] = { + {.compatible = "ti,j721e-sa2ul",}, + {.compatible = "ti,am654-sa2ul",}, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver sa_ul_driver = { + .probe = sa_ul_probe, + .remove = sa_ul_remove, + .driver = { + .name = "saul-crypto", + .of_match_table = of_match, + }, +}; +module_platform_driver(sa_ul_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h new file mode 100644 index 000000000000..7f7e3fe60d11 --- /dev/null +++ b/drivers/crypto/sa2ul.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * K3 SA2UL crypto accelerator driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Keerthy + * Vitaly Andrianov + * Tero Kristo + */ + +#ifndef _K3_SA2UL_ +#define _K3_SA2UL_ + +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include <linux/hw_random.h> +#include <crypto/aes.h> + +#define SA_ENGINE_ENABLE_CONTROL 0x1000 + +struct sa_tfm_ctx; +/* + * SA_ENGINE_ENABLE_CONTROL register bits + */ +#define SA_EEC_ENCSS_EN 0x00000001 +#define SA_EEC_AUTHSS_EN
0x00000002 +#define SA_EEC_TRNG_EN 0x00000008 +#define SA_EEC_PKA_EN 0x00000010 +#define SA_EEC_CTXCACH_EN 0x00000080 +#define SA_EEC_CPPI_PORT_IN_EN 0x00000200 +#define SA_EEC_CPPI_PORT_OUT_EN 0x00000800 + +/* + * Encoding used to identify the type of crypto operation + * performed on the packet when the packet is returned + * by SA + */ +#define SA_REQ_SUBTYPE_ENC 0x0001 +#define SA_REQ_SUBTYPE_DEC 0x0002 +#define SA_REQ_SUBTYPE_SHIFT 16 +#define SA_REQ_SUBTYPE_MASK 0xffff + +/* Number of 32 bit words in EPIB */ +#define SA_DMA_NUM_EPIB_WORDS 4 + +/* Number of 32 bit words in PS data */ +#define SA_DMA_NUM_PS_WORDS 16 +#define NKEY_SZ 3 +#define MCI_SZ 27 + +/* + * Maximum number of simultaneous security contexts + * supported by the driver + */ +#define SA_MAX_NUM_CTX 512 + +/* + * Assumption: CTX size is multiple of 32 + */ +#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \ + ((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0) + +#define SA_CTX_ENC_KEY_OFFSET 32 +#define SA_CTX_ENC_AUX1_OFFSET 64 +#define SA_CTX_ENC_AUX2_OFFSET 96 +#define SA_CTX_ENC_AUX3_OFFSET 112 +#define SA_CTX_ENC_AUX4_OFFSET 128 + +/* Next Engine Select code in CP_ACE */ +#define SA_ENG_ID_EM1 2 /* Enc/Dec engine with AES/DES core */ +#define SA_ENG_ID_EM2 3 /* Encryption/Decryption engine for pass 2 */ +#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */ +#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */ +#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */ + +/* + * Command Label Definitions + */ +#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */ +#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */ +/* 16-bit Length of Data to be processed */ +#define SA_CMDL_OFFSET_DATA_LEN 2 +#define SA_CMDL_OFFSET_DATA_OFFSET 4 /* Start Data Offset */ +#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */ +#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */ +#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */ +#define SA_CMDL_OFFSET_OPTION_BYTE 8 + +#define SA_CMDL_HEADER_SIZE_BYTES 8 + +#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72 +#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \ + SA_CMDL_OPTION_BYTES_MAX_SIZE) + +/* SWINFO word-0 flags */ +#define SA_SW_INFO_FLAG_EVICT 0x0001 +#define SA_SW_INFO_FLAG_TEAR 0x0002 +#define SA_SW_INFO_FLAG_NOPD 0x0004 + +/* + * This type represents the various packet types to be processed + * by the PHP engine in SA. + * It is used to identify the corresponding PHP processing function. + */
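/*
 * The SA_REQ_SUBTYPE_* values above occupy the top half of the
 * request-type word that sa_run() appends after the command label.
 * A minimal sketch of the implied encode/decode helpers (hypothetical;
 * the driver open-codes these shifts in sa_run()):
 */
static inline u32 sa_req_type_encode(u32 type, u16 subtype)
{
	/* e.g. sa_req_type_encode(type, SA_REQ_SUBTYPE_ENC) */
	return type | ((u32)subtype << SA_REQ_SUBTYPE_SHIFT);
}

static inline u16 sa_req_type_decode(u32 word)
{
	return (word >> SA_REQ_SUBTYPE_SHIFT) & SA_REQ_SUBTYPE_MASK;
}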
+#define SA_CTX_PE_PKT_TYPE_3GPP_AIR 0 /* 3GPP Air Cipher */ +#define SA_CTX_PE_PKT_TYPE_SRTP 1 /* SRTP */ +#define SA_CTX_PE_PKT_TYPE_IPSEC_AH 2 /* IPSec Authentication Header */ +/* IPSec Encapsulating Security Payload */ +#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP 3 +/* Indicates that it is in data mode; it may not be used by PHP */ +#define SA_CTX_PE_PKT_TYPE_NONE 4 +#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */ +#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */ + +#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */ +#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */ +/* Size of security context for PHP engine */ +#define SA_CTX_PHP_PE_CTX_SZ 64 + +#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ) + +/* + * Encoding of F/E control in SCCTL + * Bit 0-1: Fetch PHP Bytes + * Bit 2-3: Fetch Encryption/Air Ciphering Bytes + * Bit 4-5: Fetch Authentication Bytes or Encr pass 2 + * Bit 6-7: Evict PHP Bytes + * + * where 00 = 0 bytes + * 01 = 64 bytes + * 10 = 96 bytes + * 11 = 128 bytes + */ +#define SA_CTX_DMA_SIZE_0 0 +#define SA_CTX_DMA_SIZE_64 1 +#define SA_CTX_DMA_SIZE_96 2 +#define SA_CTX_DMA_SIZE_128 3 + +/* + * Byte offset of the owner word in SCCTL + * in the security context + */ +#define SA_CTX_SCCTL_OWNER_OFFSET 0 + +#define SA_SCCTL_FE_AUTH_ENC 0x65 +#define SA_SCCTL_FE_ENC 0x8D + +#define SA_ALIGN_MASK (sizeof(u32) - 1) +#define SA_ALIGNED __aligned(32) + +#define SA_AUTH_SW_CTRL_MD5 1 +#define SA_AUTH_SW_CTRL_SHA1 2 +#define SA_AUTH_SW_CTRL_SHA224 3 +#define SA_AUTH_SW_CTRL_SHA256 4 +#define SA_AUTH_SW_CTRL_SHA384 5 +#define SA_AUTH_SW_CTRL_SHA512 6 + +/* SA2UL can only handle a maximum data size of 64KB */ +#define SA_MAX_DATA_SZ U16_MAX + +/* + * SA2UL can provide unpredictable results with packet sizes that fall in + * the following range, so avoid using it. + */ +#define SA_UNSAFE_DATA_SZ_MIN 240 +#define SA_UNSAFE_DATA_SZ_MAX 256
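/*
 * SA_MAX_DATA_SZ and the unsafe window above drive the software
 * fallback decision in sa_cipher_run(), sa_sha_run() and sa_aead_run();
 * the window sits just below the 256-byte mark at which sa_run()
 * switches from dma_rx1 to dma_rx2. A hypothetical helper capturing the
 * checks those functions open-code:
 */
static inline bool sa_size_needs_fallback(size_t len)
{
	/* Too big for the engine, or inside the unpredictable window */
	return len > SA_MAX_DATA_SZ ||
	       (len >= SA_UNSAFE_DATA_SZ_MIN && len <= SA_UNSAFE_DATA_SZ_MAX);
}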
+ +/** + * struct sa_crypto_data - Crypto driver instance data + * @base: Base address of the register space + * @pdev: Platform device pointer + * @sc_pool: security context pool + * @dev: Device pointer + * @scid_lock: secure context ID lock + * @sc_id_start: starting index for SC ID + * @sc_id_end: Ending index for SC ID + * @sc_id: Security Context ID + * @ctx_bm: Bitmap to keep track of Security context IDs + * @ctx: SA tfm context pointer + * @dma_rx1: Pointer to DMA rx channel for sizes < 256 bytes + * @dma_rx2: Pointer to DMA rx channel for sizes >= 256 bytes + * @dma_tx: Pointer to DMA TX channel + */ +struct sa_crypto_data { + void __iomem *base; + struct platform_device *pdev; + struct dma_pool *sc_pool; + struct device *dev; + spinlock_t scid_lock; /* lock for SC-ID allocation */ + /* Security context data */ + u16 sc_id_start; + u16 sc_id_end; + u16 sc_id; + unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX, + BITS_PER_LONG)]; + struct sa_tfm_ctx *ctx; + struct dma_chan *dma_rx1; + struct dma_chan *dma_rx2; + struct dma_chan *dma_tx; +}; + +/** + * struct sa_cmdl_param_info: Command label parameters info + * @index: Index of the parameter in the command label format + * @offset: the offset of the parameter + * @size: Size of the parameter + */ +struct sa_cmdl_param_info { + u16 index; + u16 offset; + u16 size; +}; + +/* Maximum length of Auxiliary data in 32bit words */ +#define SA_MAX_AUX_DATA_WORDS 8 + +/** + * struct sa_cmdl_upd_info: Command label update info + * @flags: flags in command label + * @submode: Encryption submodes + * @enc_size: First pass encryption size + * @enc_size2: Second pass encryption size + * @enc_offset: Encryption payload offset in the packet + * @enc_iv: Encryption initialization vector for pass 1 + * @enc_iv2: Encryption initialization vector for pass 2 + * @aad: Associated data + * @payload: Payload info + * @auth_size: Authentication size for pass 1 + * @auth_size2: Authentication size for pass 2 + * @auth_offset: Authentication payload offset + * @auth_iv: Authentication initialization vector + * @aux_key_info: Authentication aux key information + * @aux_key: Aux key for authentication + */ +struct sa_cmdl_upd_info { + u16 flags; + u16 submode; + struct sa_cmdl_param_info enc_size; + struct sa_cmdl_param_info enc_size2; + struct sa_cmdl_param_info enc_offset; + struct sa_cmdl_param_info enc_iv; + struct sa_cmdl_param_info enc_iv2; + struct sa_cmdl_param_info aad; + struct sa_cmdl_param_info payload; + struct sa_cmdl_param_info auth_size; + struct sa_cmdl_param_info auth_size2; + struct sa_cmdl_param_info auth_offset; + struct sa_cmdl_param_info auth_iv; + struct sa_cmdl_param_info aux_key_info; + u32 aux_key[SA_MAX_AUX_DATA_WORDS]; +}; + +/* + * Number of 32-bit words appended after the command label + * in PSDATA to identify the crypto request context. + * word-0: Request type + * word-1: pointer to request + */ +#define SA_PSDATA_CTX_WORDS 4 + +/* Maximum size of Command label in 32-bit words */ +#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS)
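/*
 * Worked example of the sizing above: SA_DMA_NUM_PS_WORDS is 16 and
 * SA_PSDATA_CTX_WORDS is 4, so a command label can use at most
 * 16 - 4 = 12 32-bit words (48 bytes). sa_run() stores the request-type
 * word in the first context word immediately after the command label:
 *
 *	cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
 */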
+ +/** + * struct sa_ctx_info: SA context information + * @sc: Pointer to security context + * @sc_phys: Security context physical address that is passed on to SA2UL + * @sc_id: Security context ID + * @cmdl_size: Command label size + * @cmdl: Command label for a particular iteration + * @cmdl_upd_info: structure holding command label update info + * @epib: Extended protocol information block words + */ +struct sa_ctx_info { + u8 *sc; + dma_addr_t sc_phys; + u16 sc_id; + u16 cmdl_size; + u32 cmdl[SA_MAX_CMDL_WORDS]; + struct sa_cmdl_upd_info cmdl_upd_info; + /* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */ + u32 epib[SA_DMA_NUM_EPIB_WORDS]; +}; + +/** + * struct sa_tfm_ctx: TFM context structure + * @dev_data: struct sa_crypto_data pointer + * @enc: struct sa_ctx_info for encryption + * @dec: struct sa_ctx_info for decryption + * @auth: struct sa_ctx_info for authentication + * @keylen: encryption/decryption key length + * @iv_idx: Initialization vector index + * @key: encryption key + * @authkey: authentication key + * @shash: base hash transform used for key processing + * @fallback: SW fallback algorithm + */ +struct sa_tfm_ctx { + struct sa_crypto_data *dev_data; + struct sa_ctx_info enc; + struct sa_ctx_info dec; + struct sa_ctx_info auth; + int keylen; + int iv_idx; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u8 authkey[SHA512_BLOCK_SIZE]; + struct crypto_shash *shash; + /* for fallback */ + union { + struct crypto_sync_skcipher *skcipher; + struct crypto_ahash *ahash; + struct crypto_aead *aead; + } fallback; +}; + +/** + * struct sa_sha_req_ctx: Structure used for sha request + * @dev_data: struct sa_crypto_data pointer + * @cmdl: Complete command label with psdata and epib included + * @fallback_req: SW fallback request container + */ +struct sa_sha_req_ctx { + struct sa_crypto_data *dev_data; + u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS]; + struct ahash_request fallback_req; +}; + +enum sa_submode { + SA_MODE_GEN = 0, + SA_MODE_CCM, + SA_MODE_GCM, + SA_MODE_GMAC +}; + +/* Encryption algorithms */ +enum sa_ealg_id { + SA_EALG_ID_NONE = 0, /* No encryption */ + SA_EALG_ID_NULL, /* NULL encryption */ + SA_EALG_ID_AES_CTR, /* AES Counter mode */ + SA_EALG_ID_AES_F8, /* AES F8 mode */ + SA_EALG_ID_AES_CBC, /* AES CBC mode */ + SA_EALG_ID_DES_CBC, /* DES CBC mode */ + SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */ + SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */ + SA_EALG_ID_GCM, /* Galois Counter mode */ + SA_EALG_ID_AES_ECB, + SA_EALG_ID_LAST +}; + +/* Authentication algorithms */ +enum sa_aalg_id { + SA_AALG_ID_NONE = 0, /* No Authentication */ + SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */ + SA_AALG_ID_MD5, /* MD5 mode */ + SA_AALG_ID_SHA1, /* SHA1 mode */ + SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */ + SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */ + SA_AALG_ID_SHA2_512, /* 512-bit SHA2 mode */ + SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */ + SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */ + SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */ + SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */ + SA_AALG_ID_GMAC, /* Galois Message Auth. Code mode */ + SA_AALG_ID_CMAC, /* Cipher-based Message Auth.
Code mode */ + SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */ + SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */ +}; + +/* + * Mode control engine algorithms used to index the + * mode control instruction tables + */ +enum sa_eng_algo_id { + SA_ENG_ALGO_ECB = 0, + SA_ENG_ALGO_CBC, + SA_ENG_ALGO_CFB, + SA_ENG_ALGO_OFB, + SA_ENG_ALGO_CTR, + SA_ENG_ALGO_F8, + SA_ENG_ALGO_F8F9, + SA_ENG_ALGO_GCM, + SA_ENG_ALGO_GMAC, + SA_ENG_ALGO_CCM, + SA_ENG_ALGO_CMAC, + SA_ENG_ALGO_CBCMAC, + SA_NUM_ENG_ALGOS +}; + +/** + * struct sa_eng_info: Security accelerator engine info + * @eng_id: Engine ID + * @sc_size: security context size + */ +struct sa_eng_info { + u8 eng_id; + u16 sc_size; +}; + +#endif /* _K3_SA2UL_ */ diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 466e30bd529c..0c8cb23ae708 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -146,11 +146,12 @@ struct sahara_ctx { /* AES-specific context */ int keylen; u8 key[AES_KEYSIZE_128]; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; struct sahara_aes_reqctx { unsigned long mode; + struct skcipher_request fallback_req; // keep at the end }; /* @@ -617,10 +618,10 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, /* * The requested key size is not supported by HW, do a fallback. */ - crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); + return crypto_skcipher_setkey(ctx->fallback, key, keylen); } static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) @@ -651,21 +652,19 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) static int sahara_aes_ecb_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT); @@ -673,21 +672,19 @@ static int sahara_aes_ecb_encrypt(struct skcipher_request *req) static int sahara_aes_ecb_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - 
req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, 0); @@ -695,21 +692,19 @@ static int sahara_aes_ecb_decrypt(struct skcipher_request *req) static int sahara_aes_cbc_encrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_encrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_encrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); @@ -717,21 +712,19 @@ static int sahara_aes_cbc_encrypt(struct skcipher_request *req) static int sahara_aes_cbc_decrypt(struct skcipher_request *req) { + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); struct sahara_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); - - skcipher_request_set_sync_tfm(subreq, ctx->fallback); - skcipher_request_set_callback(subreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - err = crypto_skcipher_decrypt(subreq); - skcipher_request_zero(subreq); - return err; + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + return crypto_skcipher_decrypt(&rctx->fallback_req); } return sahara_aes_crypt(req, FLAGS_CBC); @@ -742,14 +735,15 @@ static int sahara_aes_init_tfm(struct crypto_skcipher *tfm) const char *name = crypto_tfm_alg_name(&tfm->base); struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->fallback = crypto_alloc_sync_skcipher(name, 0, + ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } - crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) + + crypto_skcipher_reqsize(ctx->fallback)); return 0; } @@ -758,7 +752,7 @@ static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm) { struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } static u32 sahara_sha_init_hdr(struct sahara_dev *dev, 
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c6db7f698c4..7c547352a862 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2264,7 +2264,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha1-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2285,7 +2286,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha1-"
 						   "cbc-aes-talitos-hsna",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2306,7 +2308,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha1-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2330,7 +2333,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha1-"
 						   "cbc-3des-talitos-hsna",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2352,7 +2356,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha224-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2373,7 +2378,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha224-"
 						   "cbc-aes-talitos-hsna",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2394,7 +2400,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha224-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2418,7 +2425,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha224-"
 						   "cbc-3des-talitos-hsna",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2440,7 +2448,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha256-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2461,7 +2470,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha256-"
 						   "cbc-aes-talitos-hsna",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2482,7 +2492,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha256-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2506,7 +2517,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha256-"
 						   "cbc-3des-talitos-hsna",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2528,7 +2540,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha384-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2549,7 +2562,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha384-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2571,7 +2585,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha512-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2592,7 +2607,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-sha512-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2614,7 +2630,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-md5-"
 						   "cbc-aes-talitos",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
@@ -2635,7 +2652,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-md5-"
 						   "cbc-aes-talitos-hsna",
 				.cra_blocksize = AES_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
@@ -2655,7 +2673,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-md5-"
 						   "cbc-3des-talitos",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
@@ -2678,7 +2697,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_driver_name = "authenc-hmac-md5-"
 						   "cbc-3des-talitos-hsna",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
@@ -2699,7 +2719,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "ecb(aes)",
 		.base.cra_driver_name = "ecb-aes-talitos",
 		.base.cra_blocksize = AES_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.setkey = skcipher_aes_setkey,
@@ -2712,7 +2733,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "cbc(aes)",
 		.base.cra_driver_name = "cbc-aes-talitos",
 		.base.cra_blocksize = AES_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.ivsize = AES_BLOCK_SIZE,
@@ -2727,7 +2749,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "ctr(aes)",
 		.base.cra_driver_name = "ctr-aes-talitos",
 		.base.cra_blocksize = 1,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.ivsize = AES_BLOCK_SIZE,
@@ -2742,7 +2765,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "ecb(des)",
 		.base.cra_driver_name = "ecb-des-talitos",
 		.base.cra_blocksize = DES_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = DES_KEY_SIZE,
 		.max_keysize = DES_KEY_SIZE,
 		.setkey = skcipher_des_setkey,
@@ -2755,7 +2779,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "cbc(des)",
 		.base.cra_driver_name = "cbc-des-talitos",
 		.base.cra_blocksize = DES_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = DES_KEY_SIZE,
 		.max_keysize = DES_KEY_SIZE,
 		.ivsize = DES_BLOCK_SIZE,
@@ -2770,7 +2795,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "ecb(des3_ede)",
 		.base.cra_driver_name = "ecb-3des-talitos",
 		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = DES3_EDE_KEY_SIZE,
 		.max_keysize = DES3_EDE_KEY_SIZE,
 		.setkey = skcipher_des3_setkey,
@@ -2784,7 +2810,8 @@ static struct talitos_alg_template driver_algs[] = {
 		.base.cra_name = "cbc(des3_ede)",
 		.base.cra_driver_name = "cbc-3des-talitos",
 		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_flags = CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_ALLOCATES_MEMORY,
 		.min_keysize = DES3_EDE_KEY_SIZE,
 		.max_keysize = DES3_EDE_KEY_SIZE,
 		.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2804,7 +2831,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "md5",
 				.cra_driver_name = "md5-talitos",
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2819,7 +2847,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-talitos",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2834,7 +2863,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "sha224",
 				.cra_driver_name = "sha224-talitos",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2849,7 +2879,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "sha256",
 				.cra_driver_name = "sha256-talitos",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2864,7 +2895,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "sha384",
 				.cra_driver_name = "sha384-talitos",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2879,7 +2911,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "sha512",
 				.cra_driver_name = "sha512-talitos",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2894,7 +2927,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "hmac-md5-talitos",
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2909,7 +2943,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "hmac-sha1-talitos",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2924,7 +2959,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "hmac-sha224-talitos",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2939,7 +2975,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "hmac-sha256-talitos",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2954,7 +2991,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(sha384)",
 				.cra_driver_name = "hmac-sha384-talitos",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2969,7 +3007,8 @@ static struct talitos_alg_template driver_algs[] = {
 				.cra_name = "hmac(sha512)",
 				.cra_driver_name = "hmac-sha512-talitos",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
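Every talitos template above gains CRYPTO_ALG_ALLOCATES_MEMORY alongside CRYPTO_ALG_ASYNC. This is the driver-side half of the new API flag from this merge: an implementation that may allocate memory while processing a request must advertise the fact, so that storage users, which cannot tolerate allocation failures on their writeout path, can steer around it. A minimal consumer-side sketch, not part of this diff (the algorithm name and calling context are illustrative only): passing the flag in the lookup mask while leaving it clear in the type selects only implementations that never allocate per request.

#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_nonallocating_cbc_aes(void)
{
	/*
	 * type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: the lookup only
	 * matches implementations with the flag clear, so drivers such
	 * as talitos are skipped after this patch.
	 */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}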
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c24f2db8d5e8..a5ee8c2fb4e0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -545,7 +545,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
  *
  * Initialize structures.
  */
-static int hash_init(struct ahash_request *req)
+static int ux500_hash_init(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1359,7 +1359,7 @@ static int ahash_sha1_init(struct ahash_request *req)
 	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
 	ctx->digestsize = SHA1_DIGEST_SIZE;
 
-	return hash_init(req);
+	return ux500_hash_init(req);
 }
 
 static int ahash_sha256_init(struct ahash_request *req)
@@ -1372,7 +1372,7 @@ static int ahash_sha256_init(struct ahash_request *req)
 	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
 	ctx->digestsize = SHA256_DIGEST_SIZE;
 
-	return hash_init(req);
+	return ux500_hash_init(req);
 }
 
 static int ahash_sha1_digest(struct ahash_request *req)
@@ -1425,7 +1425,7 @@ static int hmac_sha1_init(struct ahash_request *req)
 	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
 	ctx->digestsize = SHA1_DIGEST_SIZE;
 
-	return hash_init(req);
+	return ux500_hash_init(req);
 }
 
 static int hmac_sha256_init(struct ahash_request *req)
@@ -1438,7 +1438,7 @@ static int hmac_sha256_init(struct ahash_request *req)
 	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
 	ctx->digestsize = SHA256_DIGEST_SIZE;
 
-	return hash_init(req);
+	return ux500_hash_init(req);
 }
 
 static int hmac_sha1_digest(struct ahash_request *req)
@@ -1515,7 +1515,7 @@ static struct hash_algo_template hash_algs[] = {
 		.conf.algorithm = HASH_ALGO_SHA1,
 		.conf.oper_mode = HASH_OPER_MODE_HASH,
 		.hash = {
-			.init = hash_init,
+			.init = ux500_hash_init,
 			.update = ahash_update,
 			.final = ahash_final,
 			.digest = ahash_sha1_digest,
@@ -1538,7 +1538,7 @@ static struct hash_algo_template hash_algs[] = {
 		.conf.algorithm = HASH_ALGO_SHA256,
 		.conf.oper_mode = HASH_OPER_MODE_HASH,
 		.hash = {
-			.init = hash_init,
+			.init = ux500_hash_init,
 			.update = ahash_update,
 			.final = ahash_final,
 			.digest = ahash_sha256_digest,
@@ -1561,7 +1561,7 @@ static struct hash_algo_template hash_algs[] = {
 		.conf.algorithm = HASH_ALGO_SHA1,
 		.conf.oper_mode = HASH_OPER_MODE_HMAC,
 		.hash = {
-			.init = hash_init,
+			.init = ux500_hash_init,
 			.update = ahash_update,
 			.final = ahash_final,
 			.digest = hmac_sha1_digest,
@@ -1585,7 +1585,7 @@ static struct hash_algo_template hash_algs[] = {
 		.conf.algorithm = HASH_ALGO_SHA256,
 		.conf.oper_mode = HASH_OPER_MODE_HMAC,
 		.hash = {
-			.init = hash_init,
+			.init = ux500_hash_init,
 			.update = ahash_update,
 			.final = ahash_final,
 			.digest = hmac_sha256_digest,
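The ux500 hunks are a pure rename, hash_init() to ux500_hash_init(), applied at the definition, at every call site, and in the .init hooks of hash_algs[]. The point is namespacing: a driver-local symbol named hash_init is one unlucky include away from colliding with identically named symbols elsewhere in the tree, for instance the hash_init() macro in <linux/hashtable.h>. A hedged illustration of the kind of clash the prefix avoids; the include is hypothetical for this file, and the colliding definition is kept inside a comment so the snippet itself stays valid:

#include <linux/hashtable.h>	/* provides: #define hash_init(table) ... */
#include <crypto/hash.h>

/*
 * With hashtable.h anywhere in the include chain, a local definition
 * such as
 *
 *	static int hash_init(struct ahash_request *req) { ... }
 *
 * no longer parses, because the preprocessor expands hash_init(...)
 * first. A driver prefix keeps the symbol unambiguous:
 */
static int ux500_hash_init(struct ahash_request *req);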
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index cb8a6ea2a4bc..b2601958282e 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -597,7 +597,8 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
 	.base.cra_name = "cbc(aes)",
 	.base.cra_driver_name = "virtio_crypto_aes_cbc",
 	.base.cra_priority = 150,
-	.base.cra_flags = CRYPTO_ALG_ASYNC,
+	.base.cra_flags = CRYPTO_ALG_ASYNC |
+		CRYPTO_ALG_ALLOCATES_MEMORY,
 	.base.cra_blocksize = AES_BLOCK_SIZE,
 	.base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
 	.base.cra_module = THIS_MODULE,
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c8a962c62663..77e744eaedd0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -498,11 +498,11 @@ free_vqs:
 }
 #endif
 
-static unsigned int features[] = {
+static const unsigned int features[] = {
 	/* none */
 };
 
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
 	{ 0 },
 };
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index cd11558893cd..27079354dbe9 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -364,6 +364,7 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
 		.cra_priority = 200,
 		.cra_flags = CRYPTO_ALG_TYPE_AEAD |
 			     CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_ALLOCATES_MEMORY |
 			     CRYPTO_ALG_KERN_DRIVER_ONLY |
 			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = ZYNQMP_AES_BLK_SIZE,
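The virtio hunks apply the same CRYPTO_ALG_ALLOCATES_MEMORY treatment to the driver's lone skcipher template and, separately, const-qualify two tables that are only ever read after registration, which lets the compiler place them in read-only data. zynqmp-aes-gcm likewise adds the flag to its AEAD. Const-qualifying the ID table stays compatible with the usual device-table export; a short sketch of the pattern these tables feed (registration details omitted):

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);	/* accepts a const table */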