path: root/drivers/crypto
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-08-31 17:38:39 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-08-31 17:38:39 -0700
commit    d4c90396ed7ef9b4e4d221e008e54be8bea8307f (patch)
tree      5611f1f27eec16edfeb6a3fd73a8ef7dbfd037b4 /drivers/crypto
parent    f36fc04e4cdda9e4c72ee504e7dc638f9a168863 (diff)
parent    bf433416e67597ba105ece55b3136557874945db (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.3:

  API:
   - the AEAD interface transition is now complete.
   - add top-level skcipher interface.

  Drivers:
   - x86-64 acceleration for chacha20/poly1305.
   - add sunxi-ss Allwinner Security System crypto accelerator.
   - add RSA algorithm to qat driver.
   - add SRIOV support to qat driver.
   - add LS1021A support to caam.
   - add i.MX6 support to caam"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
  crypto: algif_aead - fix for multiple operations on AF_ALG sockets
  crypto: qat - enable legacy VFs
  MPI: Fix mpi_read_buffer
  crypto: qat - silence a static checker warning
  crypto: vmx - Fixing opcode issue
  crypto: caam - Use the preferred style for memory allocations
  crypto: caam - Propagate the real error code in caam_probe
  crypto: caam - Fix the error handling in caam_probe
  crypto: caam - fix writing to JQCR_MS when using service interface
  crypto: hash - Add AHASH_REQUEST_ON_STACK
  crypto: testmgr - Use new skcipher interface
  crypto: skcipher - Add top-level skcipher interface
  crypto: cmac - allow usage in FIPS mode
  crypto: sahara - Use dmam_alloc_coherent
  crypto: caam - Add support for LS1021A
  crypto: qat - Don't move data inside output buffer
  crypto: vmx - Fixing GHASH Key issue on little endian
  crypto: vmx - Fixing AES-CTR counter bug
  crypto: null - Add missing Kconfig tristate for NULL2
  crypto: nx - Add forward declaration for struct crypto_aead
  ...
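Editor's note: the top-level skcipher interface called out above is a plain C API in include/crypto/skcipher.h. As a rough illustration of what it looks like to a caller, here is a minimal sketch, not taken from this merge; the function name, buffer parameters, and the all-zero demo key are hypothetical, and a real asynchronous user would pass a completion callback rather than assuming a synchronous algorithm as this sketch does.

/*
 * Illustrative sketch only (not part of this commit): one-shot in-place
 * CBC-AES encryption through the new top-level skcipher interface.
 * Assumes a synchronous implementation; error handling is abbreviated.
 */
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int skcipher_sketch(u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	static const u8 key[16];	/* demo key, all zeros */
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	{
		/* request lives on the stack: no per-call allocation */
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe key material in the request */
	}
out:
	crypto_free_skcipher(tfm);
	return err;
}

The on-stack request mirrors the AHASH_REQUEST_ON_STACK helper also added in this cycle: both avoid a heap allocation on the synchronous fast path.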
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 17
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/caam/Kconfig | 10
-rw-r--r--  drivers/crypto/caam/caamalg.c | 2701
-rw-r--r--  drivers/crypto/caam/caamhash.c | 69
-rw-r--r--  drivers/crypto/caam/caamrng.c | 26
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/ctrl.c | 154
-rw-r--r--  drivers/crypto/caam/desc.h | 23
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 5
-rw-r--r--  drivers/crypto/caam/jr.c | 30
-rw-r--r--  drivers/crypto/caam/regs.h | 64
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 25
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 2
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 312
-rw-r--r--  drivers/crypto/marvell/cesa.c | 1
-rw-r--r--  drivers/crypto/nx/Kconfig | 17
-rw-r--r--  drivers/crypto/nx/Makefile | 8
-rw-r--r--  drivers/crypto/nx/nx-842-crypto.c | 580
-rw-r--r--  drivers/crypto/nx/nx-842-platform.c | 84
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 42
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 139
-rw-r--r--  drivers/crypto/nx/nx-842.c | 554
-rw-r--r--  drivers/crypto/nx/nx-842.h | 65
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 151
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 21
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 64
-rw-r--r--  drivers/crypto/nx/nx.c | 30
-rw-r--r--  drivers/crypto/nx/nx.h | 9
-rw-r--r--  drivers/crypto/omap-aes.c | 86
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 677
-rw-r--r--  drivers/crypto/qat/Kconfig | 15
-rw-r--r--  drivers/crypto/qat/Makefile | 1
-rw-r--r--  drivers/crypto/qat/qat_common/.gitignore | 1
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 46
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_engine.c | 42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 290
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 5
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 9
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_common.h | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 53
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 286
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c) | 37
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 104
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 438
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 146
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 309
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 5
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 112
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 338
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 652
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 26
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 14
-rw-r--r--  drivers/crypto/qat/qat_common/qat_rsakey.asn1 | 5
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 27
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/Makefile | 5
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 145
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 38
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 12
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 97
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 9
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 139
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/Makefile | 5
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 172
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 68
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 393
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.h (renamed from drivers/crypto/qat/qat_dh895xcc/qat_admin.c) | 70
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | 258
-rw-r--r--  drivers/crypto/sahara.c | 46
-rw-r--r--  drivers/crypto/sunxi-ss/Makefile | 2
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 542
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 425
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 492
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 201
-rw-r--r--  drivers/crypto/talitos.c | 618
-rw-r--r--  drivers/crypto/talitos.h | 8
-rw-r--r--  drivers/crypto/vmx/aes.c | 3
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 3
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 11
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 34
-rw-r--r--  drivers/crypto/vmx/ghash.c | 4
-rw-r--r--  drivers/crypto/vmx/ghashp8-ppc.pl | 6
-rw-r--r--  drivers/crypto/vmx/ppc-xlate.pl | 1
91 files changed, 8753 insertions, 4023 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4044125fb5d5..07bc7aa6b224 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
hashing algorithms.
+config CRYPTO_DEV_SUN4I_SS
+ tristate "Support for Allwinner Security System cryptographic accelerator"
+ depends on ARCH_SUNXI
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_BLKCIPHER
+ help
+	  Some Allwinner SoCs have a crypto accelerator named
+	  Security System. Select this if you want to use it.
+	  The Security System handles AES/DES/3DES ciphers in CBC mode
+	  and the SHA1 and MD5 hash algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i-ss.
+
endif # CRYPTO_HW
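Editor's note: the hashes this accelerator exposes (md5, sha1) are reached through the kernel's regular ahash API, and the same update adds the AHASH_REQUEST_ON_STACK helper used below. A minimal sketch under stated assumptions: md5_sketch and its parameters are hypothetical names, CRYPTO_ALG_ASYNC is masked off so a synchronous implementation is selected, and whether sun4i-ss actually services the "md5" request depends on algorithm priorities at runtime.

/*
 * Illustrative sketch only (not from this patch): one-shot MD5 digest
 * of a linear buffer via the ahash API.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int md5_sketch(const u8 *data, unsigned int len, u8 *out /* 16 bytes */)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	int err;

	/* mask CRYPTO_ALG_ASYNC: ask for a synchronous implementation */
	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* request lives on the stack: no per-call allocation */
		AHASH_REQUEST_ON_STACK(req, tfm);

		ahash_request_set_tfm(req, tfm);
		ahash_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, data, len);
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_ahash_digest(req);	/* init + update + final in one call */
		ahash_request_zero(req);
	}

	crypto_free_ahash(tfm);
	return err;
}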
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index e35c07a8da85..c3ced6fbd1b8 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 3b28e8c3de28..192a8fa325c1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (core_dev->dev->ce_base == 0)
+ if (!core_dev->dev->ce_base)
return 0;
writel(PPC4XX_INTERRUPT_CLR,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e286e285aa8a..5652a53415dc 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_MXC
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+config CRYPTO_DEV_FSL_CAAM_IMX
+ def_bool SOC_IMX6 || SOC_IMX7D
+ depends on CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_LE
+ def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
+ depends on CRYPTO_DEV_FSL_CAAM
+
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index daca933a82ec..ba79d638f782 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -68,27 +68,29 @@
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 5)
/* length of descriptors text */
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
@@ -111,6 +113,20 @@
#endif
static struct list_head alg_list;
+struct caam_alg_entry {
+ int class1_alg_type;
+ int class2_alg_type;
+ int alg_op;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
@@ -145,18 +161,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
}
/*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
-{
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ivoffset << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
-}
-
-/*
* For ablkcipher encrypt and decrypt, read from req->src and
* write to req->dst
*/
@@ -170,13 +174,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG 1
-#define GIV_DST_CONTIG (1 << 1)
-
-/*
* per-session context
*/
struct caam_ctx {
@@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
- unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
@@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_encrypt shared descriptor */
+ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
KEY_DEST_MDHA_SPLIT | KEY_ENC);
set_jump_tgt_here(desc, key_jump_cmd);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /*
- * NULL encryption; IV is zero
- * assoclen = (assoclen + cryptlen) - cryptlen
- */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Prepare to read and write cryptlen bytes */
+ /* Prepare to read and write cryptlen + assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
- /* old_aead_decrypt shared descriptor */
+ /* aead_decrypt shared descriptor */
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
@@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + ivsize);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
+ /* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Prepare to read and write cryptlen bytes */
+ /* Prepare to read and write cryptlen + assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
- const char *alg_name = crypto_tfm_alg_name(ctfm);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline;
u32 geniv, moveiv;
@@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
u32 *desc;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
-
- if (!ctx->authsize)
- return 0;
+ const bool is_rfc3686 = alg->caam.rfc3686;
/* NULL encryption / decryption */
if (!ctx->enckeylen)
@@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (is_rfc3686)
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ if (alg->caam.geniv)
+ goto skip_enc;
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_encrypt shared descriptor */
+ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
@@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* assoclen + cryptlen = seqinlen - ivsize */
- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
- aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
+ FIFOLDST_VLF);
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
/* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
/* Write ICV */
@@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_decrypt shared descriptor */
+ /* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
/* Note: Context registers are saved. */
@@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + ivsize);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
-
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_dec_op1(desc, ctx->class1_alg_type);
/* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
/* Load ICV */
@@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
@@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+ if (is_rfc3686)
+ goto copy_iv;
+
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
@@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
(ivsize << MOVE_LEN_SHIFT));
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+copy_iv:
/* Copy IV to class 1 context */
append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
(ctx1_iv_off << MOVE_OFFSET_SHIFT) |
@@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* ivsize + cryptlen = seqoutlen - authsize */
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
- /* assoclen = seqinlen - (ivsize + cryptlen) */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
@@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
@@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+skip_givenc:
return 0;
}
@@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
/* Write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Store payload data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void old_aead_unmap(struct device *dev,
- struct aead_edesc *edesc,
- struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
-
- dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
- DMA_TO_DEVICE, edesc->assoc_chained);
-
- caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->src_chained, edesc->dst_nents,
- edesc->dst_chained, edesc->iv_dma, ivsize,
- edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
-}
-
static void ablkcipher_unmap(struct device *dev,
struct ablkcipher_edesc *edesc,
struct ablkcipher_request *req)
@@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
-#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
-
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
- edesc = (struct aead_edesc *)((char *)desc -
- offsetof(struct aead_edesc, hw_desc));
-
- if (err)
- caam_jr_strstatus(jrdev, err);
-
- old_aead_unmap(jrdev, edesc, req);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
- edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->cryptlen +
- ctx->authsize + 4, 1);
-#endif
-
- kfree(edesc);
-
- aead_request_complete(req, err);
-}
-
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
-#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
-
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
- edesc = (struct aead_edesc *)((char *)desc -
- offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen - ctx->authsize, 1);
-#endif
-
- if (err)
- caam_jr_strstatus(jrdev, err);
-
- old_aead_unmap(jrdev, edesc, req);
-
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
- err = -EBADMSG;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + req->assoclen +
- ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
- ctx->authsize + 36, 1);
- if (!err && edesc->sec4_sg_bytes) {
- struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
- print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
- sg->length + ctx->authsize + 16, 1);
- }
-#endif
-
- kfree(edesc);
-
- aead_request_complete(req, err);
-}
-
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -2035,91 +1921,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
/*
* Fill in aead job descriptor
*/
-static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
- struct aead_edesc *edesc,
- struct aead_request *req,
- bool all_contig, bool encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
- int authsize = ctx->authsize;
- u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
- bool is_gcm = false;
-
-#ifdef DEBUG
- debug("assoclen %d cryptlen %d authsize %d\n",
- req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
- desc_bytes(sh_desc), 1);
-#endif
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (all_contig) {
- if (is_gcm)
- src_dma = edesc->iv_dma;
- else
- src_dma = sg_dma_address(req->assoc);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
- (edesc->src_nents ? : 1);
- in_options = LDST_SGF;
- }
-
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
- in_options);
-
- if (likely(req->src == req->dst)) {
- if (all_contig) {
- dst_dma = sg_dma_address(req->src);
- } else {
- dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- ((edesc->assoc_nents ? : 1) + 1);
- out_options = LDST_SGF;
- }
- } else {
- if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- }
- if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
- out_options);
- else
- append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
- out_options);
-}
-
-/*
- * Fill in aead job descriptor
- */
static void init_aead_job(struct aead_request *req,
struct aead_edesc *edesc,
bool all_contig, bool encrypt)
@@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req,
/* End of blank commands */
}
-/*
- * Fill in aead givencrypt job descriptor
- */
-static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct aead_edesc *edesc,
- struct aead_request *req,
- int contig)
+static void init_authenc_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ struct caam_aead_alg, aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
- int authsize = ctx->authsize;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
- bool is_gcm = false;
-
-#ifdef DEBUG
- debug("assoclen %d cryptlen %d authsize %d\n",
- req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
- desc_bytes(sh_desc), 1);
-#endif
+ u32 ivoffset = 0;
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ivoffset = 16;
- if (contig & GIV_SRC_CONTIG) {
- if (is_gcm)
- src_dma = edesc->iv_dma;
- else
- src_dma = sg_dma_address(req->assoc);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
- in_options);
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686)
+ ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
- if (contig & GIV_DST_CONTIG) {
- dst_dma = edesc->iv_dma;
- } else {
- if (likely(req->src == req->dst)) {
- dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- (edesc->assoc_nents +
- (is_gcm ? 1 + edesc->src_nents : 0));
- out_options = LDST_SGF;
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- }
+ init_aead_job(req, edesc, all_contig, encrypt);
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
- out_options);
+ if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ append_load_as_imm(desc, req->iv, ivsize,
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ivoffset << LDST_OFFSET_SHIFT));
}
/*
@@ -2392,150 +2156,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
/*
* allocate and map the aead extended descriptor
*/
-static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
- int desc_bytes,
- bool *all_contig_ptr,
- bool encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0;
- struct aead_edesc *edesc;
- dma_addr_t iv_dma = 0;
- int sgc;
- bool all_contig = true;
- bool assoc_chained = false, src_chained = false, dst_chained = false;
- int ivsize = crypto_aead_ivsize(aead);
- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- unsigned int authsize = ctx->authsize;
- bool is_gcm = false;
-
- assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-
- if (unlikely(req->dst != req->src)) {
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- dst_nents = sg_count(req->dst,
- req->cryptlen +
- (encrypt ? authsize : (-authsize)),
- &dst_chained);
- } else {
- src_nents = sg_count(req->src,
- req->cryptlen +
- (encrypt ? authsize : 0),
- &src_chained);
- }
-
- sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
- if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
- } else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
- }
-
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- return ERR_PTR(-ENOMEM);
- }
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- /*
- * Check if data are contiguous.
- * GCM expected input sequence: IV, AAD, text
- * All other - expected input sequence: AAD, IV, text
- */
- if (is_gcm)
- all_contig = (!assoc_nents &&
- iv_dma + ivsize == sg_dma_address(req->assoc) &&
- !src_nents && sg_dma_address(req->assoc) +
- req->assoclen == sg_dma_address(req->src));
- else
- all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
- req->assoclen == iv_dma && !src_nents &&
- iv_dma + ivsize == sg_dma_address(req->src));
- if (!all_contig) {
- assoc_nents = assoc_nents ? : 1;
- src_nents = src_nents ? : 1;
- sec4_sg_len = assoc_nents + 1 + src_nents;
- }
-
- sec4_sg_len += dst_nents;
-
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->assoc_nents = assoc_nents;
- edesc->assoc_chained = assoc_chained;
- edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
- edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
- edesc->iv_dma = iv_dma;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- *all_contig_ptr = all_contig;
-
- sec4_sg_index = 0;
- if (!all_contig) {
- if (!is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
-
- if (is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- sg_to_sec4_sg_last(req->src,
- src_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += src_nents;
- }
- if (dst_nents) {
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return ERR_PTR(-ENOMEM);
- }
-
- return edesc;
-}
-
-/*
- * allocate and map the aead extended descriptor
- */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int desc_bytes, bool *all_contig_ptr,
bool encrypt)
@@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
}
-static int old_aead_encrypt(struct aead_request *req)
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_encrypt(req);
+}
+
+static int aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, true);
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+ &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
- all_contig, true);
+ init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- old_aead_unmap(jrdev, edesc, req);
+ aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req)
return ret;
}
-static int old_aead_decrypt(struct aead_request *req)
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, false);
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+ &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->cryptlen, 1);
+ req->assoclen + req->cryptlen, 1);
#endif
/* Create and submit job descriptor*/
- old_init_aead_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+ init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,232 +2423,29 @@ static int old_aead_decrypt(struct aead_request *req)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- old_aead_unmap(jrdev, edesc, req);
+ aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the aead extended descriptor for aead givencrypt
- */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
- *greq, int desc_bytes,
- u32 *contig_ptr)
-{
- struct aead_request *req = &greq->areq;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0;
- struct aead_edesc *edesc;
- dma_addr_t iv_dma = 0;
- int sgc;
- u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
- int ivsize = crypto_aead_ivsize(aead);
- bool assoc_chained = false, src_chained = false, dst_chained = false;
- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- bool is_gcm = false;
-
- assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
- &dst_chained);
-
- sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
- if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
- } else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
- }
-
- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- return ERR_PTR(-ENOMEM);
- }
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- /*
- * Check if data are contiguous.
- * GCM expected input sequence: IV, AAD, text
- * All other - expected input sequence: AAD, IV, text
- */
-
- if (is_gcm) {
- if (assoc_nents || iv_dma + ivsize !=
- sg_dma_address(req->assoc) || src_nents ||
- sg_dma_address(req->assoc) + req->assoclen !=
- sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
- } else {
- if (assoc_nents ||
- sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
- src_nents || iv_dma + ivsize != sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
- }
-
- if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
- contig &= ~GIV_DST_CONTIG;
-
- if (!(contig & GIV_SRC_CONTIG)) {
- assoc_nents = assoc_nents ? : 1;
- src_nents = src_nents ? : 1;
- sec4_sg_len += assoc_nents + 1 + src_nents;
- if (req->src == req->dst &&
- (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
- contig &= ~GIV_DST_CONTIG;
- }
-
- /*
- * Add new sg entries for GCM output sequence.
- * Expected output sequence: IV, encrypted text.
- */
- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
- sec4_sg_len += 1 + src_nents;
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = dst_nents ? : 1;
- sec4_sg_len += 1 + dst_nents;
- }
-
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->assoc_nents = assoc_nents;
- edesc->assoc_chained = assoc_chained;
- edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
- edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
- edesc->iv_dma = iv_dma;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- *contig_ptr = contig;
-
- sec4_sg_index = 0;
- if (!(contig & GIV_SRC_CONTIG)) {
- if (!is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
-
- if (is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += src_nents;
- }
-
- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
-
- if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return ERR_PTR(-ENOMEM);
- }
-
- return edesc;
-}
-
-static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
+static int aead_givdecrypt(struct aead_request *req)
{
- struct aead_request *req = &areq->areq;
- struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 contig;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &contig);
-
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->cryptlen, 1);
-#endif
+ unsigned int ivsize = crypto_aead_ivsize(aead);
- /* Create and submit job descriptor*/
- init_aead_giv_job(ctx->sh_desc_givenc,
- ctx->sh_desc_givenc_dma, edesc, req, contig);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- old_aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ req->cryptlen -= ivsize;
+ req->assoclen += ivsize;
- return ret;
-}
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
- return old_aead_encrypt(&areq->areq);
+ return aead_decrypt(req);
}
/*
@@ -3072,8 +2503,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(*edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3347,7 +2778,6 @@ struct caam_alg_template {
u32 type;
union {
struct ablkcipher_alg ablkcipher;
- struct old_aead_alg aead;
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
@@ -3355,753 +2785,1426 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ }
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
/* single-pass ipsec_esp descriptor */
{
- .name = "authenc(hmac(md5),ecb(cipher_null))",
- .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha1),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha224),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha256),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha384),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha512),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(aes))",
- .driver_name = "authenc-hmac-md5-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha1),cbc(aes))",
- .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(aes))",
- .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(aes))",
- .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(aes))",
- .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
-
{
- .name = "authenc(hmac(sha512),cbc(aes))",
- .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(des3_ede))",
- .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ }
},
{
- .name = "authenc(hmac(sha1),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(des))",
- .driver_name = "authenc-hmac-md5-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha1),cbc(des))",
- .driver_name = "authenc-hmac-sha1-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(des))",
- .driver_name = "authenc-hmac-sha224-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(des))",
- .driver_name = "authenc-hmac-sha256-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(des))",
- .driver_name = "authenc-hmac-sha384-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),cbc(des))",
- .driver_name = "authenc-hmac-sha512-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
{
- .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
{
- .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
- /* ablkcipher descriptor */
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- },
- {
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- }
-};
-
-struct caam_alg_entry {
- int class1_alg_type;
- int class2_alg_type;
- int alg_op;
-};
-
-struct caam_aead_alg {
- struct aead_alg aead;
- struct caam_alg_entry caam;
- bool registered;
-};
-
-static struct caam_aead_alg driver_aeads[] = {
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
{
.aead = {
.base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aes-caam",
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = rfc4106_setkey,
- .setauthsize = rfc4106_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
},
},
{
.aead = {
.base = {
- .cra_name = "rfc4543(gcm(aes))",
- .cra_driver_name = "rfc4543-gcm-aes-caam",
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = rfc4543_setkey,
- .setauthsize = rfc4543_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
},
},
- /* Galois Counter Mode */
{
.aead = {
.base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-caam",
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = gcm_setkey,
- .setauthsize = gcm_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 12,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
},
},
};
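Once caam_algapi_init() (in the hunks below) registers these entries, each AEAD is reachable through the generic crypto API by its cra_name. A minimal in-kernel usage sketch, assuming the "gcm(aes)" entry above registered successfully; the function name is hypothetical and error handling is trimmed:

	#include <linux/err.h>
	#include <crypto/aead.h>

	/* Hypothetical consumer: allocate and release one of the AEADs above */
	static int caam_aead_smoke_test(void)
	{
		struct crypto_aead *tfm;

		/* Resolves to "gcm-aes-caam" when the CAAM driver wins on priority */
		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_free_aead(tfm);
		return 0;
	}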
@@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
struct caam_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
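The kzalloc(sizeof(*t_alg), ...) form adopted here (and throughout the caamhash.c hunks below) is the kernel's preferred allocation idiom: sizing from the pointer keeps the expression correct if the pointed-to type is ever renamed. A stand-alone illustration, with calloc standing in for kzalloc:

	#include <stdlib.h>

	struct t_alg_like {
		int class1_alg_type;
		int class2_alg_type;
	};

	int main(void)
	{
		struct t_alg_like *t_alg;

		/* sizeof(*t_alg) tracks the pointer's type automatically */
		t_alg = calloc(1, sizeof(*t_alg));	/* zeroed, like kzalloc */
		if (!t_alg)
			return 1;
		free(t_alg);
		return 0;
	}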
@@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
break;
- case CRYPTO_ALG_TYPE_AEAD:
- alg->cra_type = &crypto_aead_type;
- alg->cra_aead = template->template_aead;
- break;
}
t_alg->caam.class1_alg_type = template->class1_alg_type;
@@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
+ struct caam_drv_private *priv;
int i = 0, err = 0;
+ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void)
INIT_LIST_HEAD(&alg_list);
- /* register crypto algorithms the device supports */
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/* If MD is present but is the low-power LP256 variant, limit digests to SHA-256 */
+ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ md_limit = SHA256_DIGEST_SIZE;
+
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ struct caam_alg_template *alg = driver_algs + i;
+ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
- t_alg = caam_alg_alloc(&driver_algs[i]);
+ t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
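The new gating above reads two CHA registers, cha_id_ls (block versions) and cha_num_ls (instance counts), then isolates each block's field with a mask and shift. A reduced, compilable model of that decoding; the SAMPLE_* layout is a made-up placeholder, not the real CHA_ID_LS_* constants:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder layout: pretend DES occupies bits 3:0, AES bits 7:4 */
	#define SAMPLE_DES_MASK		0x0000000fU
	#define SAMPLE_DES_SHIFT	0
	#define SAMPLE_AES_MASK		0x000000f0U
	#define SAMPLE_AES_SHIFT	4

	int main(void)
	{
		uint32_t cha_inst = 0x00000010;	/* pretend: one AES block, no DES */
		unsigned int des_inst = (cha_inst & SAMPLE_DES_MASK) >> SAMPLE_DES_SHIFT;
		unsigned int aes_inst = (cha_inst & SAMPLE_AES_MASK) >> SAMPLE_AES_SHIFT;

		if (!des_inst)
			printf("skipping DES/3DES templates\n");
		if (aes_inst)
			printf("registering AES templates\n");
		return 0;
	}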
@@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if (alg_aai == OP_ALG_AAI_GCM)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+ if (c2_alg_sel &&
+ (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+ continue;
caam_aead_alg_init(t_alg);
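The AEAD loop above applies the same DES/AES presence tests plus a digest-size limit before registering anything. Its decision logic reduces to a predicate like the sketch below (the LP-device GCM exclusion is omitted for brevity; names are shortened stand-ins):

	#include <stdbool.h>

	/* Mirror of the skip conditions in the loop above, as one function */
	static bool caam_aead_usable(bool des_inst, bool aes_inst, bool md_inst,
				     bool needs_des, bool needs_aes, bool needs_md,
				     unsigned int maxauthsize, unsigned int md_limit)
	{
		if (needs_des && !des_inst)
			return false;
		if (needs_aes && !aes_inst)
			return false;
		if (needs_md && (!md_inst || maxauthsize > md_limit))
			return false;
		return true;
	}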
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f9c78751989e..94433b9fc200 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -127,7 +127,7 @@ struct caam_hash_state {
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_1;
- u8 caam_ctx[MAX_CTX_LEN];
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
buf, state->buf_dma,
- *buflen, last_buflen);
+ *next_buflen, *buflen);
if (src_nents) {
src_map_to_sec4_sg(jrdev, req->src, src_nents,
@@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req)
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
- DESC_JOB_IO_LEN, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template,
struct ahash_alg *halg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
@@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
int i = 0, err = 0;
+ struct caam_drv_private *priv;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ u32 cha_inst, cha_vid;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void)
if (!priv)
return -ENODEV;
+ /*
+ * Register crypto algorithms the device supports. First, identify
+ * presence and attributes of MD block.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+ return -ENODEV;
+
+	/* LP256 MD blocks only support digests up to SHA-256 */
+ if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
- /* TODO: check if h/w supports alg */
struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* If MD size is not supported by device, skip registration */
+ if (alg->template_ahash.halg.digestsize > md_limit)
+ continue;
/* register hmac version */
- t_alg = caam_hash_alloc(&driver_hash[i], true);
+ t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
@@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(&driver_hash[i], false);
+ t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
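
The caamhash hunk above gates registration on the MD block advertised by the
CHA ID registers. A minimal sketch of that decode, reusing the CHA_ID_LS_*
macros added to regs.h later in this patch (caam_md_limit() is a hypothetical
helper, not part of the series):

	static inline int caam_md_limit(u32 cha_inst, u32 cha_vid,
					unsigned int *md_limit)
	{
		/* No MD block instantiated: the device cannot hash at all */
		if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
			return -ENODEV;

		/* LP256 MD blocks only implement digests up to SHA-256 */
		*md_limit = (cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256 ?
			    SHA256_DIGEST_SIZE : SHA512_DIGEST_SIZE;
		return 0;
	}
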
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 5095337205b8..9b92af2c7241 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
atomic_set(&bd->empty, BUF_NOT_EMPTY);
complete(&bd->filled);
+
+ /* Buffer refilled, invalidate cache */
+ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
@@ -311,7 +315,7 @@ static int __init caam_rng_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
+ struct caam_drv_private *priv;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -338,20 +342,32 @@ static int __init caam_rng_init(void)
if (!priv)
return -ENODEV;
+ /* Check for an instantiated RNG before registration */
+ if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+ return -ENODEV;
+
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
- if (!rng_ctx)
- return -ENOMEM;
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ if (!rng_ctx) {
+ err = -ENOMEM;
+ goto free_caam_alloc;
+ }
err = caam_init_rng(rng_ctx, dev);
if (err)
- return err;
+ goto free_rng_ctx;
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+ kfree(rng_ctx);
+free_caam_alloc:
+ caam_jr_free(dev);
+ return err;
}
module_init(caam_rng_init);
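
The rng_done() hunk adds a dma_sync_single_for_cpu() call because the RNG
buffer is streaming-DMA mapped: ownership must be handed back to the CPU, and
stale cache lines invalidated, before the freshly filled data is read. A
minimal sketch of the ownership dance, assuming a DMA_FROM_DEVICE mapping
(consume() is a placeholder):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device writes into the buffer ... */

	/* hand the buffer back before the CPU reads it */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	consume(buf);

	/* re-arm for the next device write without remapping */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
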
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f57f395db33f..b6955ecdfb3f 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+#include <linux/clk.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index efacab7539ef..8abb4bc548cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,6 +16,24 @@
#include "error.h"
/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+ char *clk_name)
+{
+ return devm_clk_get(dev, clk_name);
+}
+#else
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+ char *clk_name)
+{
+ return NULL;
+}
+#endif
+
+/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
@@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- wr_reg32(&deco->jr_ctl_hi, flags);
+ setbits32(&deco->jr_ctl_hi, flags);
timeout = 10000000;
do {
@@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
- u32 *desc, status, rdsta_val;
+ u32 *desc, status = 0, rdsta_val;
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
@@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* CAAM eras), then try again.
*/
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
- if (status || !(rdsta_val & (1 << sh_idx)))
+ if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+ !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
break;
@@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev)
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
- int ring, ret = 0;
+ int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
@@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(ctrl);
- return ret;
+ /* shut clocks off before finalizing shutdown */
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+
+ return 0;
}
/*
@@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
int caam_get_era(void)
{
struct device_node *caam_node;
- for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
- const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
- "fsl,sec-era",
- NULL);
- return prop ? *prop : -ENOTSUPP;
- }
+ int ret;
+ u32 prop;
- return -ENOTSUPP;
+ caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+ of_node_put(caam_node);
+
+ return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
@@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
+ struct clk *clk;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev)
int pg_size;
int BLOCK_OFFSET = 0;
- ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
- GFP_KERNEL);
+ ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
@@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
+ /* Enable clocking */
+ clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM ipg clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_ipg = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "mem");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_mem = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM aclk clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_aclk = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
+
+ ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+ ret);
+ goto disable_caam_ipg;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+ goto disable_caam_mem;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
+
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
if (ctrl == NULL) {
dev_err(dev, "caam: of_iomap() failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto disable_caam_emi_slow;
}
/* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- setbits32(&ctrl->mcr, MCFGR_WDENABLE |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+ MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
- sizeof(struct platform_device *) * rspec,
- GFP_KERNEL);
+ ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+ sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
- iounmap(ctrl);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto iounmap_ctrl;
}
ring = 0;
@@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
- caam_remove(pdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto caam_remove;
}
cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
- caam_remove(pdev);
- return ret;
+ goto caam_remove;
}
/*
* Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
&ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
+
+caam_remove:
+ caam_remove(pdev);
+iounmap_ctrl:
+ iounmap(ctrl);
+disable_caam_emi_slow:
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
+ return ret;
}
static struct of_device_id caam_match[] = {
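
caam_probe() now brings four clocks up one by one and tears them down in
exact reverse order on every failure path. The same discipline, expressed
compactly as a loop (a sketch only; caam_enable_clocks() is hypothetical and
the patch deliberately spells each clock out):

	static int caam_enable_clocks(struct device *dev, struct clk **clks)
	{
		static const char * const names[] = {
			"ipg", "mem", "aclk", "emi_slow"
		};
		int i, ret;

		for (i = 0; i < ARRAY_SIZE(names); i++) {
			clks[i] = devm_clk_get(dev, names[i]);
			if (IS_ERR(clks[i]))
				return PTR_ERR(clks[i]);
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)
			clk_disable_unprepare(clks[i]);
		return ret;
	}
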
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index d397ff9d56fd..983d663ef671 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,12 +8,29 @@
#ifndef DESC_H
#define DESC_H
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL compile time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK 0x00001fff
+
struct sec4_sg_entry {
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ u32 rsvd1;
+ dma_addr_t ptr;
+#else
u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
- u8 reserved;
+ u8 rsvd2;
u8 buf_pool_id;
u16 offset;
};
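
The new SEC4_SG_* constants pack flags and length into one 32-bit word of the
scatter/gather entry. A minimal sketch of reading and writing that word with
the masks above (both helpers are hypothetical):

	static inline u32 sec4_sg_len(u32 len_word)
	{
		return len_word & SEC4_SG_LEN_MASK;	/* strip EXT/FIN */
	}

	static inline u32 sec4_sg_final(u32 len)
	{
		return (len & SEC4_SG_LEN_MASK) | SEC4_SG_LEN_FIN;
	}
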
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 9f79fd7bd4d7..98d07de24fc4 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -367,7 +367,7 @@ do { \
if (upper) \
append_u64(desc, data); \
else \
- append_u32(desc, data); \
+ append_u32(desc, lower_32_bits(data)); \
} while (0)
#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
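
The one-line change above makes the u64-to-u32 truncation explicit instead of
implicit. For illustration (values are examples, not from the driver):

	u64 data = 0x1122334455667788ULL;
	u32 lo = lower_32_bits(data);	/* 0x55667788 */
	u32 hi = upper_32_bits(data);	/* 0x11223344 */
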
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 89b94cc9e7a2..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -91,6 +91,11 @@ struct caam_drv_private {
Handles of the RNG4 block are initialized
by this driver */
+ struct clk *caam_ipg;
+ struct clk *caam_mem;
+ struct clk *caam_aclk;
+ struct clk *caam_emi_slow;
+
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b8b5d47acd7a..f7e0d8d4c3da 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg)
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
+ /*
+ * Make sure all information from the job has been obtained
+ * before telling CAAM that the job has been removed from the
+ * output ring.
+ */
+ mb();
+
/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
@@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ /*
+ * Guarantee that the descriptor's DMA address has been written to
+ * the next slot in the ring before the write index is updated, since
+ * other cores may update this index independently.
+ */
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+ /*
+ * Ensure that all job information has been written before
+ * notifying CAAM that a new job was added to the input ring.
+ */
+ wmb();
+
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_bh(&jrp->inplock);
@@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev)
goto out_free_irq;
error = -ENOMEM;
- jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- &inpbusaddr, GFP_KERNEL);
+ jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+ JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
if (!jrp->inpring)
goto out_free_irq;
- jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+ jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
if (!jrp->outring)
goto out_free_inpring;
- jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
- GFP_KERNEL);
+ jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
if (!jrp->entinfo)
goto out_free_outring;
@@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
- GFP_KERNEL);
+ jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
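
The two barrier hunks in jr.c pair each ring update with the ordering it
needs. Condensed to the producer side (names are illustrative, not the
driver's):

	ring[idx] = desc_dma;			/* 1: publish the slot */
	smp_wmb();				/* 2: order vs. other CPUs */
	idx = (idx + 1) & (DEPTH - 1);		/* 3: advance the index */
	wmb();					/* 4: order vs. the device */
	wr_reg32(doorbell, 1);			/* 5: ring the doorbell */
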
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 672c97489505..a8a79975682f 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -65,9 +65,31 @@
*
*/
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARM */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+
+#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v) out_arch(l, le32, a, v)
+#define in_le32(a) in_arch(l, le32, a)
+
+#define out_be32(a, v) out_arch(l, be32, a, v)
+#define in_be32(a) in_arch(l, be32, a)
+
+#define clrsetbits(type, addr, clear, set) \
+ out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+#endif
+
#ifdef __BIG_ENDIAN
#define wr_reg32(reg, data) out_be32(reg, data)
#define rd_reg32(reg) in_be32(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) out_be64(reg, data)
#define rd_reg64(reg) in_be64(reg)
@@ -76,6 +98,7 @@
#ifdef __LITTLE_ENDIAN
#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
@@ -85,20 +108,31 @@
/*
* The only users of these wr/rd_reg64 functions is the Job Ring (JR).
- * The DMA address registers in the JR are a pair of 32-bit registers.
- * The layout is:
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
*
* base + 0x0000 : most-significant 32 bits
* base + 0x0004 : least-significant 32 bits
*
* The 32-bit version of this core therefore has to write to base + 0x0004
- * to set the 32-bit wide DMA address. This seems to be independent of the
- * endianness of the written/read data.
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ * base + 0x0000 : least-significant 32 bits
+ * base + 0x0004 : most-significant 32 bits
*/
#ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+ defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
#define REG64_MS32(reg) ((u32 __iomem *)(reg))
#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
@@ -133,18 +167,28 @@ struct jr_outentry {
#define CHA_NUM_MS_DECONUM_SHIFT 24
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
#define CHA_ID_LS_AES_SHIFT 0
-#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
-#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
#define CHA_ID_LS_ARC4_SHIFT 8
#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -395,10 +439,16 @@ struct caam_ctrl {
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF (0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH (0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL (0x4 << MCFGR_ARCACHE_SHIFT)
/* AXI write cache control */
#define MCFGR_AWCACHE_SHIFT 8
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF (0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH (0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL (0x8 << MCFGR_AWCACHE_SHIFT)
/* AXI pipeline depth */
#define MCFGR_AXIPIPE_SHIFT 4
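
clrsetbits_32() performs a single read-modify-write that clears a whole field
before setting new bits, which is exactly how the caam_probe() hunk above
rewrites MCFGR. Usage sketch (mirrors that hunk):

	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
		      MCFGR_AWCACHE_CACH | MCFGR_WDENABLE);
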
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b68b74cc7b77..18cd6d1f5870 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
{
sec4_sg_ptr->ptr = dma;
sec4_sg_ptr->len = len;
- sec4_sg_ptr->reserved = 0;
sec4_sg_ptr->buf_pool_id = 0;
sec4_sg_ptr->offset = offset;
#ifdef DEBUG
@@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
{
if (unlikely(chained)) {
int i;
+ struct scatterlist *tsg = sg;
+
+ /*
+ * Use a local copy of the sg pointer to avoid moving the
+ * head of the list pointed to by sg as we walk the list.
+ */
for (i = 0; i < nents; i++) {
- dma_unmap_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
+ dma_unmap_sg(dev, tsg, 1, dir);
+ tsg = sg_next(tsg);
}
} else if (nents) {
dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
struct device *dev, struct scatterlist *sg, unsigned int nents,
enum dma_data_direction dir, bool chained)
{
- struct scatterlist *first = sg;
-
if (unlikely(chained)) {
int i;
+ struct scatterlist *tsg = sg;
+
+ /*
+ * Use a local copy of the sg pointer to avoid moving the
+ * head of the list pointed to by sg as we walk the list.
+ */
for (i = 0; i < nents; i++) {
- if (!dma_map_sg(dev, sg, 1, dir)) {
- dma_unmap_sg_chained(dev, first, i, dir,
+ if (!dma_map_sg(dev, tsg, 1, dir)) {
+ dma_unmap_sg_chained(dev, sg, i, dir,
chained);
nents = 0;
break;
}
- sg = sg_next(sg);
+ tsg = sg_next(tsg);
}
} else
nents = dma_map_sg(dev, sg, nents, dir);
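
Both sg_sw_sec4.h hunks walk the chained list through a local cursor so the
caller's head pointer survives; the map path still needs it for the
error-path unmap. The pattern in isolation (a sketch; loop body elided):

	struct scatterlist *cursor = sg;	/* sg itself stays untouched */
	int i;

	for (i = 0; i < nents; i++) {
		/* operate on exactly one entry here */
		cursor = sg_next(cursor);
	}
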
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index f2e6de361fd1..bb241c3ab6b9 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
{ "AMDI0C00", 0 },
{ },
};
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#endif
#ifdef CONFIG_OF
@@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a" },
{ },
};
+MODULE_DEVICE_TABLE(of, ccp_of_match);
#endif
static struct platform_driver ccp_platform_driver = {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index ad47d0d61098..68e8aa90fe01 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
if (!hdev->dma_lch) {
- dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+ dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
return -EBUSY;
}
dma_conf.direction = DMA_MEM_TO_DEV;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 402631a19a11..8f2790353281 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -156,7 +156,8 @@ struct ablk_ctx {
};
struct aead_ctx {
- struct buffer_desc *buffer;
+ struct buffer_desc *src;
+ struct buffer_desc *dst;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
@@ -198,6 +199,15 @@ struct ixp_alg {
int registered;
};
+struct ixp_aead_alg {
+ struct aead_alg crypto;
+ const struct ix_hash_algo *hash;
+ u32 cfg_enc;
+ u32 cfg_dec;
+
+ int registered;
+};
+
static const struct ix_hash_algo hash_alg_md5 = {
.cfgword = 0xAA010004,
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int authsize = crypto_aead_authsize(tfm);
- int decryptlen = req->cryptlen - authsize;
+ int decryptlen = req->assoclen + req->cryptlen - authsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->src, decryptlen, authsize, 1);
+ req->dst, decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
@@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
- free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
@@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
return init_tfm(tfm);
}
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
{
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct aead_ctx));
- return init_tfm(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+ return init_tfm(crypto_aead_tfm(tfm));
}
static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
free_sa_dir(&ctx->decrypt);
}
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+ exit_tfm(crypto_aead_tfm(tfm));
+}
+
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
@@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
return ret;
}
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
- unsigned int nbytes)
-{
- int offset = 0;
-
- if (!nbytes)
- return 0;
-
- for (;;) {
- if (start < offset + sg->length)
- break;
-
- offset += sg->length;
- sg = sg_next(sg);
- }
- return (start + nbytes > offset + sg->length);
-}
-
static int aead_perform(struct aead_request *req, int encrypt,
int cryptoffset, int eff_cryptlen, u8 *iv)
{
@@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
+ enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+ unsigned int lastlen;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
@@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
crypt->crypt_len = eff_cryptlen;
crypt->auth_offs = 0;
- crypt->auth_len = req->assoclen + ivsize + cryptlen;
+ crypt->auth_len = req->assoclen + cryptlen;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ req_ctx->dst = NULL;
+
if (req->src != req->dst) {
- BUG(); /* -ENOTSUP because of my laziness */
+ struct buffer_desc dst_hook;
+
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ src_direction = DMA_TO_DEVICE;
+
+ buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+ &dst_hook, flags, DMA_FROM_DEVICE);
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
+
+ if (!buf)
+ goto free_buf_dst;
+
+ if (encrypt) {
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+ }
}
- /* ASSOC data */
- buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
- flags, DMA_TO_DEVICE);
- req_ctx->buffer = src_hook.next;
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
if (!buf)
- goto out;
- /* IV */
- sg_init_table(&req_ctx->ivlist, 1);
- sg_set_buf(&req_ctx->ivlist, iv, ivsize);
- buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
- DMA_BIDIRECTIONAL);
- if (!buf)
- goto free_chain;
- if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+ goto free_buf_src;
+
+ if (!encrypt || !req_ctx->dst) {
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+ }
+
+ if (unlikely(lastlen < authsize)) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_chain;
+ goto free_buf_src;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
} else {
req_ctx->hmac_virt = NULL;
}
- /* Crypt */
- buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
- DMA_BIDIRECTIONAL);
- if (!buf)
- goto free_hmac_virt;
- if (!req_ctx->hmac_virt) {
- crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
- }
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_hmac_virt:
- if (req_ctx->hmac_virt) {
- dma_pool_free(buffer_pool, req_ctx->hmac_virt,
- crypt->icv_rev_aes);
- }
-free_chain:
- free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
@@ -1173,40 +1181,12 @@ badkey:
static int aead_encrypt(struct aead_request *req)
{
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- return aead_perform(req, 1, req->assoclen + ivsize,
- req->cryptlen, req->iv);
+ return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}
static int aead_decrypt(struct aead_request *req)
{
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- return aead_perform(req, 0, req->assoclen + ivsize,
- req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
- struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned len, ivsize = crypto_aead_ivsize(tfm);
- __be64 seq;
-
- /* copied from eseqiv.c */
- if (!ctx->salted) {
- get_random_bytes(ctx->salt, ivsize);
- ctx->salted = 1;
- }
- memcpy(req->areq.iv, ctx->salt, ivsize);
- len = ivsize;
- if (ivsize > sizeof(u64)) {
- memset(req->giv, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + ivsize - len, &seq, len);
- return aead_perform(&req->areq, 1, req->areq.assoclen,
- req->areq.cryptlen +ivsize, req->giv);
+ return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}
static struct ixp_alg ixp4xx_algos[] = {
@@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des))",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des))",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
@@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void)
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
continue;
}
- if (!ixp4xx_algos[i].hash) {
- /* block ciphers */
- cra->cra_type = &crypto_ablkcipher_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- if (!cra->cra_ablkcipher.setkey)
- cra->cra_ablkcipher.setkey = ablk_setkey;
- if (!cra->cra_ablkcipher.encrypt)
- cra->cra_ablkcipher.encrypt = ablk_encrypt;
- if (!cra->cra_ablkcipher.decrypt)
- cra->cra_ablkcipher.decrypt = ablk_decrypt;
- cra->cra_init = init_tfm_ablk;
- } else {
- /* authenc */
- cra->cra_type = &crypto_aead_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- cra->cra_aead.setkey = aead_setkey;
- cra->cra_aead.setauthsize = aead_setauthsize;
- cra->cra_aead.encrypt = aead_encrypt;
- cra->cra_aead.decrypt = aead_decrypt;
- cra->cra_aead.givencrypt = aead_givencrypt;
- cra->cra_init = init_tfm_aead;
- }
+
+ /* block ciphers */
+ cra->cra_type = &crypto_ablkcipher_type;
+ cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ if (!cra->cra_ablkcipher.setkey)
+ cra->cra_ablkcipher.setkey = ablk_setkey;
+ if (!cra->cra_ablkcipher.encrypt)
+ cra->cra_ablkcipher.encrypt = ablk_encrypt;
+ if (!cra->cra_ablkcipher.decrypt)
+ cra->cra_ablkcipher.decrypt = ablk_decrypt;
+ cra->cra_init = init_tfm_ablk;
+
cra->cra_ctxsize = sizeof(struct ixp_ctx);
cra->cra_module = THIS_MODULE;
cra->cra_alignmask = 3;
@@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void)
else
ixp4xx_algos[i].registered = 1;
}
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ continue;
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+ continue;
+
+ /* authenc */
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ cra->setkey = aead_setkey;
+ cra->setauthsize = aead_setauthsize;
+ cra->encrypt = aead_encrypt;
+ cra->decrypt = aead_decrypt;
+ cra->init = init_tfm_aead;
+ cra->exit = exit_tfm_aead;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+
+ if (crypto_register_aead(cra))
+ printk(KERN_ERR "Failed to register '%s'\n",
+ cra->base.cra_driver_name);
+ else
+ ixp4xx_aeads[i].registered = 1;
+ }
return 0;
}
@@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void)
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ if (ixp4xx_aeads[i].registered)
+ crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+ }
+
for (i=0; i< num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
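
The ixp4xx conversion moves the authenc templates from the old cra_u.aead
union onto the new struct aead_alg, where the callbacks live on the alg
itself and registration goes through crypto_register_aead(). A minimal sketch
of the new-style declaration (field values are illustrative):

	static struct aead_alg example_aead = {
		.base = {
			.cra_name	 = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "example-authenc",
			.cra_blocksize	 = AES_BLOCK_SIZE,
			.cra_ctxsize	 = sizeof(struct ixp_ctx),
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_module	 = THIS_MODULE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= aead_setkey,
		.setauthsize	= aead_setauthsize,
		.encrypt	= aead_encrypt,
		.decrypt	= aead_decrypt,
		.init		= init_tfm_aead,
		.exit		= exit_tfm_aead,
	};
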
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 1c6f98dd88f4..0643e3366e33 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
},
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index e421c96c763a..ad7552a6998c 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
config CRYPTO_DEV_NX_COMPRESS
tristate "Compression acceleration support"
default y
+ select CRYPTO_ALGAPI
+ select 842_DECOMPRESS
help
Support for PowerPC Nest (NX) compression acceleration. This
module supports acceleration for compressing memory with the 842
- algorithm. One of the platform drivers must be selected also.
- If you choose 'M' here, this module will be called nx_compress.
+ algorithm using the cryptographic API. One of the platform
+ drivers must be selected also. If you choose 'M' here, this
+ module will be called nx_compress.
if CRYPTO_DEV_NX_COMPRESS
@@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
algorithm. This supports NX hardware on the PowerNV platform.
If you choose 'M' here, this module will be called nx_compress_powernv.
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
- tristate "Compression acceleration cryptographic interface"
- select CRYPTO_ALGAPI
- select 842_DECOMPRESS
- default y
- help
- Support for PowerPC Nest (NX) accelerators using the cryptographic
- API. If you choose 'M' here, this module will be called
- nx_compress_crypto.
-
endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index e1684f5adb11..b727821c8ed4 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
nx-compress-pseries-objs := nx-842-pseries.o
nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
deleted file mode 100644
index d53a1dcd7b4e..000000000000
--- a/drivers/crypto/nx/nx-842-crypto.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- * Seth Jennings <sjenning@linux.vnet.ibm.com>
- *
- * Rewrite: Dan Streetman <ddstreet@ieee.org>
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors. Most of the complexity of this driver is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths. Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid. Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library. Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware. Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/sw842.h>
-#include <linux/ratelimit.h>
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer. That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC (0xf842)
-#define NX842_CRYPTO_GROUP_MAX (0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g) \
- (sizeof(struct nx842_crypto_header) + \
- sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE \
- NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER (2)
-#define BOUNCE_BUFFER_SIZE \
- ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fall back to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT (250) /* ms */
-#define DECOMP_BUSY_TIMEOUT (50) /* ms */
-
-struct nx842_crypto_header_group {
- __be16 padding; /* unused bytes at start of group */
- __be32 compressed_length; /* compressed bytes in group */
- __be32 uncompressed_length; /* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
- __be16 magic; /* NX842_CRYPTO_MAGIC */
- __be16 ignore; /* decompressed end bytes to ignore */
- u8 groups; /* total groups in this header */
- struct nx842_crypto_header_group group[];
-} __packed;
-
-struct nx842_crypto_param {
- u8 *in;
- unsigned int iremain;
- u8 *out;
- unsigned int oremain;
- unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
- unsigned int slen, unsigned int dlen)
-{
- if (p->iremain < slen)
- return -EOVERFLOW;
- if (p->oremain < dlen)
- return -ENOSPC;
-
- p->in += slen;
- p->iremain -= slen;
- p->out += dlen;
- p->oremain -= dlen;
- p->ototal += dlen;
-
- return 0;
-}
-
-struct nx842_crypto_ctx {
- u8 *wmem;
- u8 *sbounce, *dbounce;
-
- struct nx842_crypto_header header;
- struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
- ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
- ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
- if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
- int ret;
-
- ret = nx842_constraints(c);
- if (ret) {
- pr_err_ratelimited("could not get nx842 constraints : %d\n",
- ret);
- return ret;
- }
-
- /* limit maximum, to always have enough bounce buffer to decompress */
- if (c->maximum > BOUNCE_BUFFER_SIZE) {
- c->maximum = BOUNCE_BUFFER_SIZE;
- pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
- }
-
- return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
- int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
- /* compress should have added space for header */
- if (s > be16_to_cpu(hdr->group[0].padding)) {
- pr_err("Internal error: no space for header\n");
- return -EINVAL;
- }
-
- memcpy(buf, hdr, s);
-
- print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
- return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
- struct nx842_crypto_param *p,
- struct nx842_crypto_header_group *g,
- struct nx842_constraints *c,
- u16 *ignore,
- unsigned int hdrsize)
-{
- unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
- unsigned int adj_slen = slen;
- u8 *src = p->in, *dst = p->out;
- int ret, dskip = 0;
- ktime_t timeout;
-
- if (p->iremain == 0)
- return -EOVERFLOW;
-
- if (p->oremain == 0 || hdrsize + c->minimum > dlen)
- return -ENOSPC;
-
- if (slen % c->multiple)
- adj_slen = round_up(slen, c->multiple);
- if (slen < c->minimum)
- adj_slen = c->minimum;
- if (slen > c->maximum)
- adj_slen = slen = c->maximum;
- if (adj_slen > slen || (u64)src % c->alignment) {
- adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
- slen = min(slen, BOUNCE_BUFFER_SIZE);
- if (adj_slen > slen)
- memset(ctx->sbounce + slen, 0, adj_slen - slen);
- memcpy(ctx->sbounce, src, slen);
- src = ctx->sbounce;
- slen = adj_slen;
- pr_debug("using comp sbounce buffer, len %x\n", slen);
- }
-
- dst += hdrsize;
- dlen -= hdrsize;
-
- if ((u64)dst % c->alignment) {
- dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
- dst += dskip;
- dlen -= dskip;
- }
- if (dlen % c->multiple)
- dlen = round_down(dlen, c->multiple);
- if (dlen < c->minimum) {
-nospc:
- dst = ctx->dbounce;
- dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
- dlen = round_down(dlen, c->multiple);
- dskip = 0;
- pr_debug("using comp dbounce buffer, len %x\n", dlen);
- }
- if (dlen > c->maximum)
- dlen = c->maximum;
-
- tmplen = dlen;
- timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
- do {
- dlen = tmplen; /* reset dlen, if we're retrying */
- ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
- /* possibly we should reduce the slen here, instead of
- * retrying with the dbounce buffer?
- */
- if (ret == -ENOSPC && dst != ctx->dbounce)
- goto nospc;
- } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
- if (ret)
- return ret;
-
- dskip += hdrsize;
-
- if (dst == ctx->dbounce)
- memcpy(p->out + dskip, dst, dlen);
-
- g->padding = cpu_to_be16(dskip);
- g->compressed_length = cpu_to_be32(dlen);
- g->uncompressed_length = cpu_to_be32(slen);
-
- if (p->iremain < slen) {
- *ignore = slen - p->iremain;
- slen = p->iremain;
- }
-
- pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
- slen, *ignore, dlen, dskip);
-
- return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr = &ctx->header;
- struct nx842_crypto_param p;
- struct nx842_constraints c;
- unsigned int groups, hdrsize, h;
- int ret, n;
- bool add_header;
- u16 ignore = 0;
-
- p.in = (u8 *)src;
- p.iremain = slen;
- p.out = dst;
- p.oremain = *dlen;
- p.ototal = 0;
-
- *dlen = 0;
-
- ret = read_constraints(&c);
- if (ret)
- return ret;
-
- groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
- DIV_ROUND_UP(p.iremain, c.maximum));
- hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
- /* skip adding header if the buffers meet all constraints */
- add_header = (p.iremain % c.multiple ||
- p.iremain < c.minimum ||
- p.iremain > c.maximum ||
- (u64)p.in % c.alignment ||
- p.oremain % c.multiple ||
- p.oremain < c.minimum ||
- p.oremain > c.maximum ||
- (u64)p.out % c.alignment);
-
- hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
- hdr->groups = 0;
- hdr->ignore = 0;
-
- while (p.iremain > 0) {
- n = hdr->groups++;
- if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
- return -ENOSPC;
-
- /* header goes before first group */
- h = !n && add_header ? hdrsize : 0;
-
- if (ignore)
- pr_warn("internal error, ignore is set %x\n", ignore);
-
- ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
- if (ret)
- return ret;
- }
-
- if (!add_header && hdr->groups > 1) {
- pr_err("Internal error: No header but multiple groups\n");
- return -EINVAL;
- }
-
- /* ignore indicates the input stream needed to be padded */
- hdr->ignore = cpu_to_be16(ignore);
- if (ignore)
- pr_debug("marked %d bytes as ignore\n", ignore);
-
- if (add_header)
- ret = nx842_crypto_add_header(hdr, dst);
- if (ret)
- return ret;
-
- *dlen = p.ototal;
-
- pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
- return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
- struct nx842_crypto_param *p,
- struct nx842_crypto_header_group *g,
- struct nx842_constraints *c,
- u16 ignore,
- bool usehw)
-{
- unsigned int slen = be32_to_cpu(g->compressed_length);
- unsigned int required_len = be32_to_cpu(g->uncompressed_length);
- unsigned int dlen = p->oremain, tmplen;
- unsigned int adj_slen = slen;
- u8 *src = p->in, *dst = p->out;
- u16 padding = be16_to_cpu(g->padding);
- int ret, spadding = 0, dpadding = 0;
- ktime_t timeout;
-
- if (!slen || !required_len)
- return -EINVAL;
-
- if (p->iremain <= 0 || padding + slen > p->iremain)
- return -EOVERFLOW;
-
- if (p->oremain <= 0 || required_len - ignore > p->oremain)
- return -ENOSPC;
-
- src += padding;
-
- if (!usehw)
- goto usesw;
-
- if (slen % c->multiple)
- adj_slen = round_up(slen, c->multiple);
- if (slen < c->minimum)
- adj_slen = c->minimum;
- if (slen > c->maximum)
- goto usesw;
- if (slen < adj_slen || (u64)src % c->alignment) {
- /* we can append padding bytes because the 842 format defines
- * an "end" template (see lib/842/842_decompress.c) and will
- * ignore any bytes following it.
- */
- if (slen < adj_slen)
- memset(ctx->sbounce + slen, 0, adj_slen - slen);
- memcpy(ctx->sbounce, src, slen);
- src = ctx->sbounce;
- spadding = adj_slen - slen;
- slen = adj_slen;
- pr_debug("using decomp sbounce buffer, len %x\n", slen);
- }
-
- if (dlen % c->multiple)
- dlen = round_down(dlen, c->multiple);
- if (dlen < required_len || (u64)dst % c->alignment) {
- dst = ctx->dbounce;
- dlen = min(required_len, BOUNCE_BUFFER_SIZE);
- pr_debug("using decomp dbounce buffer, len %x\n", dlen);
- }
- if (dlen < c->minimum)
- goto usesw;
- if (dlen > c->maximum)
- dlen = c->maximum;
-
- tmplen = dlen;
- timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
- do {
- dlen = tmplen; /* reset dlen, if we're retrying */
- ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
- } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
- if (ret) {
-usesw:
- /* reset everything, sw doesn't have constraints */
- src = p->in + padding;
- slen = be32_to_cpu(g->compressed_length);
- spadding = 0;
- dst = p->out;
- dlen = p->oremain;
- dpadding = 0;
- if (dlen < required_len) { /* have ignore bytes */
- dst = ctx->dbounce;
- dlen = BOUNCE_BUFFER_SIZE;
- }
- pr_info_ratelimited("using software 842 decompression\n");
- ret = sw842_decompress(src, slen, dst, &dlen);
- }
- if (ret)
- return ret;
-
- slen -= spadding;
-
- dlen -= ignore;
- if (ignore)
- pr_debug("ignoring last %x bytes\n", ignore);
-
- if (dst == ctx->dbounce)
- memcpy(p->out, dst, dlen);
-
- pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
- slen, padding, dlen, ignore);
-
- return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr;
- struct nx842_crypto_param p;
- struct nx842_constraints c;
- int n, ret, hdr_len;
- u16 ignore = 0;
- bool usehw = true;
-
- p.in = (u8 *)src;
- p.iremain = slen;
- p.out = dst;
- p.oremain = *dlen;
- p.ototal = 0;
-
- *dlen = 0;
-
- if (read_constraints(&c))
- usehw = false;
-
- hdr = (struct nx842_crypto_header *)src;
-
- /* If it doesn't start with our header magic number, assume it's a raw
- * 842 compressed buffer and pass it directly to the hardware driver
- */
- if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
- struct nx842_crypto_header_group g = {
- .padding = 0,
- .compressed_length = cpu_to_be32(p.iremain),
- .uncompressed_length = cpu_to_be32(p.oremain),
- };
-
- ret = decompress(ctx, &p, &g, &c, 0, usehw);
- if (ret)
- return ret;
-
- *dlen = p.ototal;
-
- return 0;
- }
-
- if (!hdr->groups) {
- pr_err("header has no groups\n");
- return -EINVAL;
- }
- if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
- pr_err("header has too many groups %x, max %x\n",
- hdr->groups, NX842_CRYPTO_GROUP_MAX);
- return -EINVAL;
- }
-
- hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
- if (hdr_len > slen)
- return -EOVERFLOW;
-
- memcpy(&ctx->header, src, hdr_len);
- hdr = &ctx->header;
-
- for (n = 0; n < hdr->groups; n++) {
- /* ignore applies to last group */
- if (n + 1 == hdr->groups)
- ignore = be16_to_cpu(hdr->ignore);
-
- ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
- if (ret)
- return ret;
- }
-
- *dlen = p.ototal;
-
- pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
- return 0;
-}
-
-static struct crypto_alg alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
- return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
- crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
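
The file is removed, not dropped: per the diffstat, the generic crypto
wrapper moves into nx-842.c and is instantiated per platform driver (see the
powernv hunk below). For reference, the header sizing it uses is plain
arithmetic over the two packed structs:

	sizeof(struct nx842_crypto_header)       = 2 + 2 + 1 =  5 bytes
	sizeof(struct nx842_crypto_header_group) = 2 + 4 + 4 = 10 bytes
	NX842_CRYPTO_HEADER_SIZE(3)              = 5 + 3 * 10 = 35 bytes
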
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c
deleted file mode 100644
index 664f13dd06ed..000000000000
--- a/drivers/crypto/nx/nx-842-platform.c
+++ /dev/null
@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load. So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
- return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
- bool ret = false;
-
- spin_lock(&driver_lock);
-
- if (!driver) {
- driver = _driver;
- ret = true;
- } else
- WARN(1, "can't set platform driver, already set to %s\n",
- driver->name);
-
- spin_unlock(&driver_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
- spin_lock(&driver_lock);
-
- if (driver == _driver)
- driver = NULL;
- else if (driver)
- WARN(1, "can't unset platform driver %s, currently set to %s\n",
- _driver->name, driver->name);
- else
- WARN(1, "can't unset platform driver, already unset\n");
-
- spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
-
-bool nx842_platform_driver_get(void)
-{
- bool ret = false;
-
- spin_lock(&driver_lock);
-
- if (driver)
- ret = try_module_get(driver->owner);
-
- spin_unlock(&driver_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
- spin_lock(&driver_lock);
-
- if (driver)
- module_put(driver->owner);
-
- spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 33b3b0abf4ae..3750e13d8721 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
@@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
}
/* successful completion */
- pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+ pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+ be32_to_cpu(csb->count),
(unsigned long)ktime_us_delta(now, start));
return 0;
@@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_powernv_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
static __init int nx842_powernv_init(void)
{
struct device_node *dn;
+ int ret;
/* verify workmem size/align restrictions */
BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
- pr_info("loading\n");
-
for_each_compatible_node(dn, NULL, "ibm,power-nx")
nx842_powernv_probe(dn);
- if (!nx842_ct) {
- pr_err("no coprocessors found\n");
+ if (!nx842_ct)
return -ENODEV;
- }
- if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
struct nx842_coproc *coproc, *n;
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
kfree(coproc);
}
- return -EEXIST;
+ return ret;
}
- pr_info("loaded\n");
-
return 0;
}
module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
{
struct nx842_coproc *coproc, *n;
- nx842_platform_driver_unset(&nx842_powernv_driver);
+ crypto_unregister_alg(&nx842_powernv_alg);
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
list_del(&coproc->list);
kfree(coproc);
}
-
- pr_info("unloaded\n");
}
module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 3040a6091bf2..f4cbde03c6ad 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -29,6 +29,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
static struct nx842_constraints nx842_pseries_constraints = {
.alignment = DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@ struct nx842_workmem {
#define NX842_HW_PAGE_SIZE (4096)
#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
-enum nx842_status {
- UNAVAILABLE,
- AVAILABLE
-};
-
struct ibm_nx842_counters {
atomic64_t comp_complete;
atomic64_t comp_failed;
@@ -121,7 +118,6 @@ static struct nx842_devdata {
unsigned int max_sg_len;
unsigned int max_sync_size;
unsigned int max_sync_sg;
- enum nx842_status status;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);
@@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
switch (csb->completion_code) {
case 0: /* Completed without error */
break;
- case 64: /* Target bytes > Source bytes during compression */
+ case 64: /* Compression ok, but output larger than input */
+ dev_dbg(dev, "%s: output size larger than input size\n",
+ __func__);
+ break;
case 13: /* Output buffer too small */
- dev_dbg(dev, "%s: Compression output larger than input\n",
+ dev_dbg(dev, "%s: Out of space in output buffer\n",
__func__);
return -ENOSPC;
case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
devdata->max_sync_size = 0;
devdata->max_sync_sg = 0;
devdata->max_sg_len = 0;
- devdata->status = UNAVAILABLE;
return 0;
} else
return -ENOENT;
}
/**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
*
 * The status property indicates whether the accelerator is enabled. If the
 * device is in the OF tree, the hardware is present; the device is enabled
 * when the status is 'okay', otherwise the driver is disabled.
*
- * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the status string
*
* Returns:
* 0 - Device is available
- * -EINVAL - Device is not available
+ * -ENODEV - Device is not available
*/
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
- struct property *prop) {
- int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
const char *status = (const char *)prop->value;
- if (!strncmp(status, "okay", (size_t)prop->length)) {
- devdata->status = AVAILABLE;
- } else {
- dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
- __func__, status);
- devdata->status = UNAVAILABLE;
- }
+ if (!strncmp(status, "okay", (size_t)prop->length))
+ return 0;
+ if (!strncmp(status, "disabled", (size_t)prop->length))
+ return -ENODEV;
+ dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
- return ret;
+ return -EINVAL;
}
/**
@@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
int ret = 0;
unsigned long flags;
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop)
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
spin_unlock_irqrestore(&devdata_mutex, flags);
+ kfree(new_devdata);
return -ENODEV;
}
- new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
- if (!new_devdata) {
- dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
- ret = -ENOMEM;
- goto error_out;
- }
-
memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
new_devdata->counters = old_devdata->counters;
@@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop)
goto out;
/* Perform property updates */
- ret = nx842_OF_upd_status(new_devdata, status);
+ ret = nx842_OF_upd_status(status);
if (ret)
goto error_out;
@@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = {
.decompress = nx842_pseries_decompress,
};
-static int __init nx842_probe(struct vio_dev *viodev,
- const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_pseries_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+ const struct vio_device_id *id)
{
struct nx842_devdata *old_devdata, *new_devdata = NULL;
unsigned long flags;
int ret = 0;
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
+ new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+ GFP_NOFS);
+ if (!new_devdata->counters) {
+ kfree(new_devdata);
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev,
dev_set_drvdata(&viodev->dev, NULL);
- new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
- if (!new_devdata) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
- new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
- GFP_NOFS);
- if (!new_devdata->counters) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
new_devdata->vdev = viodev;
new_devdata->dev = &viodev->dev;
nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@ static int __init nx842_probe(struct vio_dev *viodev,
of_reconfig_notifier_register(&nx842_of_nb);
ret = nx842_OF_upd(NULL);
- if (ret && ret != -ENODEV) {
- dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
- ret = -1;
+ if (ret)
+ goto error;
+
+ ret = crypto_register_alg(&nx842_pseries_alg);
+ if (ret) {
+ dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
goto error;
}
@@ -1043,7 +1053,7 @@ error:
return ret;
}
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
{
struct nx842_devdata *old_devdata;
unsigned long flags;
@@ -1051,6 +1061,8 @@ static int __exit nx842_remove(struct vio_dev *viodev)
pr_info("Removing IBM Power 842 compression device\n");
sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+ crypto_unregister_alg(&nx842_pseries_alg);
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
static struct vio_driver nx842_vio_driver = {
.name = KBUILD_MODNAME,
.probe = nx842_probe,
- .remove = __exit_p(nx842_remove),
+ .remove = nx842_remove,
.get_desired_dma = nx842_get_desired_dma,
.id_table = nx842_vio_driver_ids,
};
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
{
struct nx842_devdata *new_devdata;
int ret;
- pr_info("Registering IBM Power 842 compression driver\n");
-
if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
return -ENODEV;
@@ -1095,7 +1105,6 @@ static int __init nx842_init(void)
pr_err("Could not allocate memory for device data\n");
return -ENOMEM;
}
- new_devdata->status = UNAVAILABLE;
RCU_INIT_POINTER(devdata, new_devdata);
ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@ static int __init nx842_init(void)
return ret;
}
- if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
- vio_unregister_driver(&nx842_vio_driver);
- kfree(new_devdata);
- return -EEXIST;
- }
-
return 0;
}
-module_init(nx842_init);
+module_init(nx842_pseries_init);
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
{
struct nx842_devdata *old_devdata;
unsigned long flags;
- pr_info("Exiting IBM Power 842 compression driver\n");
- nx842_platform_driver_unset(&nx842_pseries_driver);
+ crypto_unregister_alg(&nx842_pseries_alg);
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@ static void __exit nx842_exit(void)
vio_unregister_driver(&nx842_vio_driver);
}
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6e5e0d60d0c8..046c1c45411b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1,10 +1,5 @@
/*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- * Bulent Abali <abali@us.ibm.com>
+ * Cryptographic API for the NX-842 hardware compression.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,89 +10,522 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ * Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors. Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths. Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned input or output buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid. Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library. Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware. Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
*/
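
A minimal userspace sketch of the dispatch described above, assuming only the big-endian magic introduced in this patch (is_headered_buffer() is an illustrative helper, not a driver function):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NX842_CRYPTO_MAGIC 0xf842

/* A buffer is "headered" when it starts with the big-endian magic that
 * struct nx842_crypto_header begins with; anything else is treated as a
 * raw 842 stream and handed straight to the hardware. */
static int is_headered_buffer(const uint8_t *src, size_t slen)
{
	if (slen < 2)
		return 0;
	return (((uint16_t)src[0] << 8) | src[1]) == NX842_CRYPTO_MAGIC;
}

int main(void)
{
	const uint8_t headered[2] = { 0xf8, 0x42 };
	const uint8_t raw[2] = { 0x00, 0x12 };

	printf("headered=%d raw=%d\n",
	       is_headered_buffer(headered, 2), is_headered_buffer(raw, 2));
	return 0;
}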
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "nx-842.h"
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+#include "nx-842.h"
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints. Different nx842 implementations
- * may have varying requirements. The constraints are:
- * @alignment: All buffers should be aligned to this
- * @multiple: All buffer lengths should be a multiple of this
- * @minimum: Buffer lengths must not be less than this amount
- * @maximum: Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint. It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer. That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
*/
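
A quick worked check of that claim (illustrative, not part of the patch): the magic is stored big-endian, so its first wire byte is 0xf8, whose top five bits are 11111b, i.e. 0x1f.

#include <assert.h>

int main(void)
{
	/* top 5 bits of the 16-bit magic, and of its first wire byte */
	assert((0xf842 >> 11) == 0x1f);
	assert((0xf8 >> 3) == 0x1f);
	return 0;
}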
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC (0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g) \
+ (sizeof(struct nx842_crypto_header) + \
+ sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE \
+ NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER (2)
+#define BOUNCE_BUFFER_SIZE \
+ ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fall back to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT (250) /* ms */
+#define DECOMP_BUSY_TIMEOUT (50) /* ms */
+
+struct nx842_crypto_param {
+ u8 *in;
+ unsigned int iremain;
+ u8 *out;
+ unsigned int oremain;
+ unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+ unsigned int slen, unsigned int dlen)
{
- memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+ if (p->iremain < slen)
+ return -EOVERFLOW;
+ if (p->oremain < dlen)
+ return -ENOSPC;
+
+ p->in += slen;
+ p->iremain -= slen;
+ p->out += dlen;
+ p->oremain -= dlen;
+ p->ototal += dlen;
+
return 0;
}
-EXPORT_SYMBOL_GPL(nx842_constraints);
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */
-size_t nx842_workmem_size(void)
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
{
- return nx842_platform_driver()->workmem_size;
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spin_lock_init(&ctx->lock);
+ ctx->driver = driver;
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+ kfree(ctx->wmem);
+ free_page((unsigned long)ctx->sbounce);
+ free_page((unsigned long)ctx->dbounce);
+ return -ENOMEM;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(nx842_workmem_size);
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
-int nx842_compress(const unsigned char *in, unsigned int ilen,
- unsigned char *out, unsigned int *olen, void *wmem)
+void nx842_crypto_exit(struct crypto_tfm *tfm)
{
- return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ kfree(ctx->wmem);
+ free_page((unsigned long)ctx->sbounce);
+ free_page((unsigned long)ctx->dbounce);
}
-EXPORT_SYMBOL_GPL(nx842_compress);
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
-int nx842_decompress(const unsigned char *in, unsigned int ilen,
- unsigned char *out, unsigned int *olen, void *wmem)
+static void check_constraints(struct nx842_constraints *c)
{
- return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
+ /* limit maximum, to always have enough bounce buffer to decompress */
+ if (c->maximum > BOUNCE_BUFFER_SIZE)
+ c->maximum = BOUNCE_BUFFER_SIZE;
}
-EXPORT_SYMBOL_GPL(nx842_decompress);
-static __init int nx842_init(void)
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
{
- request_module("nx-compress-powernv");
- request_module("nx-compress-pseries");
+ int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
- /* we prevent loading if there's no platform driver, and we get the
- * module that set it so it won't unload, so we don't need to check
- * if it's set in any of the above functions
- */
- if (!nx842_platform_driver_get()) {
- pr_err("no nx842 driver found.\n");
- return -ENODEV;
+ /* compress should have added space for header */
+ if (s > be16_to_cpu(hdr->group[0].padding)) {
+ pr_err("Internal error: no space for header\n");
+ return -EINVAL;
}
+ memcpy(buf, hdr, s);
+
+ print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
return 0;
}
-module_init(nx842_init);
-static void __exit nx842_exit(void)
+static int compress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 *ignore,
+ unsigned int hdrsize)
+{
+ unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ int ret, dskip = 0;
+ ktime_t timeout;
+
+ if (p->iremain == 0)
+ return -EOVERFLOW;
+
+ if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+ return -ENOSPC;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ adj_slen = slen = c->maximum;
+ if (adj_slen > slen || (u64)src % c->alignment) {
+ adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+ slen = min(slen, BOUNCE_BUFFER_SIZE);
+ if (adj_slen > slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ slen = adj_slen;
+ pr_debug("using comp sbounce buffer, len %x\n", slen);
+ }
+
+ dst += hdrsize;
+ dlen -= hdrsize;
+
+ if ((u64)dst % c->alignment) {
+ dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+ dst += dskip;
+ dlen -= dskip;
+ }
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < c->minimum) {
+nospc:
+ dst = ctx->dbounce;
+ dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+ dlen = round_down(dlen, c->multiple);
+ dskip = 0;
+ pr_debug("using comp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+ /* possibly we should reduce the slen here, instead of
+ * retrying with the dbounce buffer?
+ */
+ if (ret == -ENOSPC && dst != ctx->dbounce)
+ goto nospc;
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret)
+ return ret;
+
+ dskip += hdrsize;
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out + dskip, dst, dlen);
+
+ g->padding = cpu_to_be16(dskip);
+ g->compressed_length = cpu_to_be32(dlen);
+ g->uncompressed_length = cpu_to_be32(slen);
+
+ if (p->iremain < slen) {
+ *ignore = slen - p->iremain;
+ slen = p->iremain;
+ }
+
+ pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+ slen, *ignore, dlen, dskip);
+
+ return update_param(p, slen, dskip + dlen);
+}
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ unsigned int groups, hdrsize, h;
+ int ret, n;
+ bool add_header;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+ DIV_ROUND_UP(p.iremain, c.maximum));
+ hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+ spin_lock_bh(&ctx->lock);
+
+ /* skip adding header if the buffers meet all constraints */
+ add_header = (p.iremain % c.multiple ||
+ p.iremain < c.minimum ||
+ p.iremain > c.maximum ||
+ (u64)p.in % c.alignment ||
+ p.oremain % c.multiple ||
+ p.oremain < c.minimum ||
+ p.oremain > c.maximum ||
+ (u64)p.out % c.alignment);
+
+ hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+ hdr->groups = 0;
+ hdr->ignore = 0;
+
+ while (p.iremain > 0) {
+ n = hdr->groups++;
+ ret = -ENOSPC;
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+ goto unlock;
+
+ /* header goes before first group */
+ h = !n && add_header ? hdrsize : 0;
+
+ if (ignore)
+ pr_warn("interal error, ignore is set %x\n", ignore);
+
+ ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!add_header && hdr->groups > 1) {
+ pr_err("Internal error: No header but multiple groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* ignore indicates the input stream needed to be padded */
+ hdr->ignore = cpu_to_be16(ignore);
+ if (ignore)
+ pr_debug("marked %d bytes as ignore\n", ignore);
+
+ if (add_header)
+ ret = nx842_crypto_add_header(hdr, dst);
+ if (ret)
+ goto unlock;
+
+ *dlen = p.ototal;
+
+ pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 ignore)
{
- nx842_platform_driver_put();
+ unsigned int slen = be32_to_cpu(g->compressed_length);
+ unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+ unsigned int dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ u16 padding = be16_to_cpu(g->padding);
+ int ret, spadding = 0, dpadding = 0;
+ ktime_t timeout;
+
+ if (!slen || !required_len)
+ return -EINVAL;
+
+ if (p->iremain <= 0 || padding + slen > p->iremain)
+ return -EOVERFLOW;
+
+ if (p->oremain <= 0 || required_len - ignore > p->oremain)
+ return -ENOSPC;
+
+ src += padding;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ goto usesw;
+ if (slen < adj_slen || (u64)src % c->alignment) {
+ /* we can append padding bytes because the 842 format defines
+ * an "end" template (see lib/842/842_decompress.c) and will
+ * ignore any bytes following it.
+ */
+ if (slen < adj_slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ spadding = adj_slen - slen;
+ slen = adj_slen;
+ pr_debug("using decomp sbounce buffer, len %x\n", slen);
+ }
+
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < required_len || (u64)dst % c->alignment) {
+ dst = ctx->dbounce;
+ dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+ pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen < c->minimum)
+ goto usesw;
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret) {
+usesw:
+ /* reset everything, sw doesn't have constraints */
+ src = p->in + padding;
+ slen = be32_to_cpu(g->compressed_length);
+ spadding = 0;
+ dst = p->out;
+ dlen = p->oremain;
+ dpadding = 0;
+ if (dlen < required_len) { /* have ignore bytes */
+ dst = ctx->dbounce;
+ dlen = BOUNCE_BUFFER_SIZE;
+ }
+ pr_info_ratelimited("using software 842 decompression\n");
+ ret = sw842_decompress(src, slen, dst, &dlen);
+ }
+ if (ret)
+ return ret;
+
+ slen -= spadding;
+
+ dlen -= ignore;
+ if (ignore)
+ pr_debug("ignoring last %x bytes\n", ignore);
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out, dst, dlen);
+
+ pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+ slen, padding, dlen, ignore);
+
+ return update_param(p, slen + padding, dlen);
}
-module_exit(nx842_exit);
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ int n, ret, hdr_len;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ hdr = (struct nx842_crypto_header *)src;
+
+ spin_lock_bh(&ctx->lock);
+
+ /* If it doesn't start with our header magic number, assume it's a raw
+ * 842 compressed buffer and pass it directly to the hardware driver
+ */
+ if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+ struct nx842_crypto_header_group g = {
+ .padding = 0,
+ .compressed_length = cpu_to_be32(p.iremain),
+ .uncompressed_length = cpu_to_be32(p.oremain),
+ };
+
+ ret = decompress(ctx, &p, &g, &c, 0);
+ if (ret)
+ goto unlock;
+
+ goto success;
+ }
+
+ if (!hdr->groups) {
+ pr_err("header has no groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+ pr_err("header has too many groups %x, max %x\n",
+ hdr->groups, NX842_CRYPTO_GROUP_MAX);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+ if (hdr_len > slen) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
+
+ memcpy(&ctx->header, src, hdr_len);
+ hdr = &ctx->header;
+
+ for (n = 0; n < hdr->groups; n++) {
+ /* ignore applies to last group */
+ if (n + 1 == hdr->groups)
+ ignore = be16_to_cpu(hdr->ignore);
+
+ ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+ if (ret)
+ goto unlock;
+ }
+
+success:
+ *dlen = p.ototal;
+
+ pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+ ret = 0;
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index ac0ea79d0f8b..a4eee3bba937 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,8 +3,9 @@
#define __NX_842_H__
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/sw842.h>
+#include <linux/crypto.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr)
#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m))
#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
+/**
+ * This provides the driver's constraints. Different nx842 implementations
+ * may have varying requirements. The constraints are:
+ * @alignment: All buffers should be aligned to this
+ * @multiple: All buffer lengths should be a multiple of this
+ * @minimum: Buffer lengths must not be less than this amount
+ * @maximum: Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint. It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
struct nx842_constraints {
int alignment;
int multiple;
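
These fields drive the length adjustments performed by compress() earlier in this patch. A minimal userspace sketch of that adjustment, under illustrative constraint values (the real ones come from the platform driver):

#include <stdio.h>

struct constraints { int alignment, multiple, minimum, maximum; };

/* Mirror of the adjustments compress() applies before deciding whether
 * a bounce buffer is needed: round up to the multiple, then clamp to
 * the minimum and maximum. */
static unsigned int adjust_slen(unsigned int slen, const struct constraints *c)
{
	unsigned int adj = slen;

	if (slen % c->multiple)
		adj = (slen + c->multiple - 1) / c->multiple * c->multiple;
	if (slen < (unsigned int)c->minimum)
		adj = c->minimum;
	if (slen > (unsigned int)c->maximum)
		adj = c->maximum;
	return adj;
}

int main(void)
{
	struct constraints c = { 128, 8, 16, 65536 };	/* example values */

	printf("13 -> %u\n", adjust_slen(13, &c));	/* 13 -> 16 */
	printf("1001 -> %u\n", adjust_slen(1001, &c));	/* 1001 -> 1008 */
	return 0;
}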
@@ -126,19 +146,40 @@ struct nx842_driver {
void *wrkmem);
};
-struct nx842_driver *nx842_platform_driver(void);
-bool nx842_platform_driver_set(struct nx842_driver *driver);
-void nx842_platform_driver_unset(struct nx842_driver *driver);
-bool nx842_platform_driver_get(void);
-void nx842_platform_driver_put(void);
+struct nx842_crypto_header_group {
+ __be16 padding; /* unused bytes at start of group */
+ __be32 compressed_length; /* compressed bytes in group */
+ __be32 uncompressed_length; /* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ struct nx842_crypto_header_group group[];
+} __packed;
-size_t nx842_workmem_size(void);
+#define NX842_CRYPTO_GROUP_MAX (0x20)
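
Given the packed layouts above, NX842_CRYPTO_HEADER_SIZE(g) works out to 5 + 10 * g bytes: a 5-byte fixed part (2 + 2 + 1) plus 10 bytes (2 + 4 + 4) per group, so at most 5 + 10 * 0x20 = 325 bytes. A small userspace check (illustrative structs mirroring the ones above):

#include <assert.h>
#include <stdint.h>

struct grp { uint16_t padding; uint32_t clen; uint32_t ulen; } __attribute__((packed));
struct hdr { uint16_t magic; uint16_t ignore; uint8_t groups;
	     struct grp group[]; } __attribute__((packed));

#define HEADER_SIZE(g) (sizeof(struct hdr) + sizeof(struct grp) * (g))

int main(void)
{
	assert(sizeof(struct hdr) == 5 && sizeof(struct grp) == 10);
	assert(HEADER_SIZE(0x20) == 325);	/* NX842_CRYPTO_GROUP_MAX */
	return 0;
}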
-int nx842_constraints(struct nx842_constraints *constraints);
+struct nx842_crypto_ctx {
+ spinlock_t lock;
+
+ u8 *wmem;
+ u8 *sbounce, *dbounce;
+
+ struct nx842_crypto_header header;
+ struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+ struct nx842_driver *driver;
+};
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
#endif /* __NX_842_H__ */
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index e4311ce0cd78..73ef49922788 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
return -EINVAL;
}
- crypto_aead_crt(tfm)->authsize = authsize;
-
return 0;
}
@@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
return -EINVAL;
}
- crypto_aead_crt(tfm)->authsize = authsize;
-
return 0;
}
@@ -174,6 +170,7 @@ static int generate_pat(u8 *iv,
struct nx_crypto_ctx *nx_ctx,
unsigned int authsize,
unsigned int nbytes,
+ unsigned int assoclen,
u8 *out)
{
struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -200,16 +197,16 @@ static int generate_pat(u8 *iv,
* greater than 2^32.
*/
- if (!req->assoclen) {
+ if (!assoclen) {
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
- } else if (req->assoclen <= 14) {
+ } else if (assoclen <= 14) {
/* if associated data is 14 bytes or less, we do 1 GCM
* operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
* which is fed in through the source buffers here */
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
b1 = nx_ctx->priv.ccm.iauth_tag;
- iauth_len = req->assoclen;
- } else if (req->assoclen <= 65280) {
+ iauth_len = assoclen;
+ } else if (assoclen <= 65280) {
/* if associated data is less than (2^16 - 2^8), we construct
* B1 differently and feed in the associated data to a CCA
* operation */
@@ -223,7 +220,7 @@ static int generate_pat(u8 *iv,
}
/* generate B0 */
- rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+ rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
if (rc)
return rc;
@@ -233,22 +230,22 @@ static int generate_pat(u8 *iv,
*/
if (b1) {
memset(b1, 0, 16);
- if (req->assoclen <= 65280) {
- *(u16 *)b1 = (u16)req->assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+ if (assoclen <= 65280) {
+ *(u16 *)b1 = assoclen;
+ scatterwalk_map_and_copy(b1 + 2, req->src, 0,
iauth_len, SCATTERWALK_FROM_SG);
} else {
*(u16 *)b1 = (u16)(0xfffe);
- *(u32 *)&b1[2] = (u32)req->assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+ *(u32 *)&b1[2] = assoclen;
+ scatterwalk_map_and_copy(b1 + 6, req->src, 0,
iauth_len, SCATTERWALK_FROM_SG);
}
}
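
The B1 construction above is the RFC 3610 encoding of the associated-data length: a 2-byte short form for lengths below 2^16 - 2^8 (the driver's check admits 65280 itself), and the 0xff 0xfe marker plus a 4-byte length otherwise. A userspace sketch that writes big-endian explicitly, where the driver stores native-endian values:

#include <stddef.h>
#include <stdint.h>

/* Returns the number of length octets written at the start of B1. */
static size_t encode_aad_len(uint8_t *b1, uint32_t assoclen)
{
	if (assoclen < 65280) {			/* 2^16 - 2^8, short form */
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xff;
		return 2;
	}
	b1[0] = 0xff;				/* long-form marker 0xfffe */
	b1[1] = 0xfe;
	b1[2] = assoclen >> 24;
	b1[3] = (assoclen >> 16) & 0xff;
	b1[4] = (assoclen >> 8) & 0xff;
	b1[5] = assoclen & 0xff;
	return 6;
}

int main(void)
{
	uint8_t b1[16] = { 0 };

	return (encode_aad_len(b1, 14) == 2 &&
		encode_aad_len(b1, 70000) == 6) ? 0 : 1;
}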
/* now copy any remaining AAD to scatterlist and call nx... */
- if (!req->assoclen) {
+ if (!assoclen) {
return rc;
- } else if (req->assoclen <= 14) {
+ } else if (assoclen <= 14) {
unsigned int len = 16;
nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
@@ -280,7 +277,7 @@ static int generate_pat(u8 *iv,
return rc;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
} else {
unsigned int processed = 0, to_process;
@@ -294,15 +291,15 @@ static int generate_pat(u8 *iv,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
- to_process = min_t(u32, req->assoclen - processed,
+ to_process = min_t(u32, assoclen - processed,
nx_ctx->ap->databytelen);
nx_insg = nx_walk_and_build(nx_ctx->in_sg,
nx_ctx->ap->sglen,
- req->assoc, processed,
+ req->src, processed,
&to_process);
- if ((to_process + processed) < req->assoclen) {
+ if ((to_process + processed) < assoclen) {
NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
NX_FDM_INTERMEDIATE;
} else {
@@ -328,11 +325,10 @@ static int generate_pat(u8 *iv,
NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen,
- &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
processed += to_process;
- } while (processed < req->assoclen);
+ } while (processed < assoclen);
result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
}
@@ -343,7 +339,8 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc)
+ struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* copy out the auth tag to compare with later */
scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes, authsize,
+ req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- &to_process, processed,
- csbcpb->cpb.aes_ccm.iv_or_ctr);
+ &to_process, processed + req->assoclen,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -420,7 +417,8 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc)
+ struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- &to_process, processed,
+ &to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* copy out the auth tag */
scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes, authsize,
+ req->dst, nbytes + req->assoclen, authsize,
SCATTERWALK_TO_SG);
out:
@@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
memcpy(iv + 4, req->iv, 8);
desc.info = iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
- return ccm_nx_encrypt(req, &desc);
+ return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
@@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
int rc;
desc.info = req->iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
rc = crypto_ccm_check_iv(desc.info);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc);
+ return ccm_nx_encrypt(req, &desc, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
@@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
memcpy(iv + 4, req->iv, 8);
desc.info = iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
- return ccm_nx_decrypt(req, &desc);
+ return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
@@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
int rc;
desc.info = req->iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
rc = crypto_ccm_check_iv(desc.info);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc);
+ return ccm_nx_decrypt(req, &desc, req->assoclen);
}
/* tell the block cipher walk routines that this is a stream cipher by
@@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
* during encrypt/decrypt doesn't solve this problem, because it calls
* blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
* but instead uses this tfm->blocksize. */
-struct crypto_alg nx_ccm_aes_alg = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "ccm-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ccm_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = ccm_aes_nx_set_key,
- .setauthsize = ccm_aes_nx_setauthsize,
- .encrypt = ccm_aes_nx_encrypt,
- .decrypt = ccm_aes_nx_decrypt,
- }
+struct aead_alg nx_ccm_aes_alg = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_aes_nx_set_key,
+ .setauthsize = ccm_aes_nx_setauthsize,
+ .encrypt = ccm_aes_nx_encrypt,
+ .decrypt = ccm_aes_nx_decrypt,
};
-struct crypto_alg nx_ccm4309_aes_alg = {
- .cra_name = "rfc4309(ccm(aes))",
- .cra_driver_name = "rfc4309-ccm-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_nivaead_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ccm_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_aead = {
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = ccm4309_aes_nx_set_key,
- .setauthsize = ccm4309_aes_nx_setauthsize,
- .encrypt = ccm4309_aes_nx_encrypt,
- .decrypt = ccm4309_aes_nx_decrypt,
- .geniv = "seqiv",
- }
+struct aead_alg nx_ccm4309_aes_alg = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm4309_aes_nx_set_key,
+ .setauthsize = ccm4309_aes_nx_setauthsize,
+ .encrypt = ccm4309_aes_nx_encrypt,
+ .decrypt = ccm4309_aes_nx_decrypt,
};
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dd7e9f3f5b6b..898c0a280511 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
-struct crypto_alg nx_ctr_aes_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_aes_nx_set_key,
- .encrypt = ctr_aes_nx_crypt,
- .decrypt = ctr_aes_nx_crypt,
- }
-};
-
struct crypto_alg nx_ctr3686_aes_alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-nx",
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 92c993f08213..eee624f589b6 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,11 +21,9 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
@@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
@@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
char *nonce = nx_ctx->priv.gcm.nonce;
int rc;
@@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct aead_request *req,
- u8 *out)
+ u8 *out,
+ unsigned int assoclen)
{
int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
- unsigned int nbytes = req->assoclen;
+ unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
@@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
processed += to_process;
} while (processed < nbytes);
@@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
int rc;
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *nx_sg;
- unsigned int nbytes = req->assoclen;
+ unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
@@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
processed += to_process;
} while (processed < nbytes);
@@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
int enc)
{
int rc;
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
char out[AES_BLOCK_SIZE];
struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,11 @@ out:
return rc;
}
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+ unsigned int assoclen)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
@@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
if (nbytes == 0) {
- if (req->assoclen == 0)
+ if (assoclen == 0)
rc = gcm_empty(req, &desc, enc);
else
- rc = gmac(req, &desc);
+ rc = gmac(req, &desc, assoclen);
if (rc)
goto out;
else
@@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
}
/* Process associated data */
- csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
- if (req->assoclen) {
- rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+ csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+ if (assoclen) {
+ rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ assoclen);
if (rc)
goto out;
}
@@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
req->src, &to_process,
processed + req->assoclen,
@@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
memcpy(iv, req->iv, 12);
- return gcm_aes_nx_crypt(req, 1);
+ return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
@@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
memcpy(iv, req->iv, 12);
- return gcm_aes_nx_crypt(req, 0);
+ return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
@@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
- return gcm_aes_nx_crypt(req, 1);
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
@@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
- return gcm_aes_nx_crypt(req, 0);
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
/* tell the block cipher walk routines that this is a stream cipher by
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 436971343ff7..0794f1cc0018 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -596,13 +596,9 @@ static int nx_register_algs(void)
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
- if (rc)
- goto out_unreg_cbc;
-
rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
if (rc)
- goto out_unreg_ctr;
+ goto out_unreg_cbc;
rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
if (rc)
@@ -612,11 +608,11 @@ static int nx_register_algs(void)
if (rc)
goto out_unreg_gcm;
- rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
if (rc)
goto out_unreg_gcm4106;
- rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
if (rc)
goto out_unreg_ccm;
@@ -644,17 +640,15 @@ out_unreg_s256:
nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
NX_PROPS_SHA256);
out_unreg_ccm4309:
- nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
- nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-out_unreg_ctr:
- nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
@@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
}
/* entry points from the crypto tfm initializers */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct nx_ccm_rctx));
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+ return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM);
}
@@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev)
NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
nx_unregister_shash(&nx_shash_sha256_alg,
NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
- nx_unregister_alg(&nx_ccm4309_aes_alg,
- NX_FC_AES, NX_MODE_AES_CCM);
- nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm4309_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
nx_unregister_aead(&nx_gcm4106_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_alg(&nx_ctr3686_aes_alg,
NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index cdff03a42ae7..9347878d4f30 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -149,8 +149,10 @@ struct nx_crypto_ctx {
} priv;
};
+struct crypto_aead;
+
/* prototypes */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
@@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg;
extern struct crypto_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr_aes_alg;
extern struct crypto_alg nx_ctr3686_aes_alg;
-extern struct crypto_alg nx_ccm_aes_alg;
-extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
extern struct shash_alg nx_shash_sha512_alg;
extern struct shash_alg nx_shash_sha256_alg;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9a28b7e07c71..eba23147c0ee 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -52,29 +52,30 @@
#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
-#define AES_REG_CTRL_CTR (1 << 6)
-#define AES_REG_CTRL_CBC (1 << 5)
-#define AES_REG_CTRL_KEY_SIZE (3 << 3)
-#define AES_REG_CTRL_DIRECTION (1 << 2)
-#define AES_REG_CTRL_INPUT_READY (1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
+#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 0
+#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
+#define AES_REG_CTRL_CTR BIT(6)
+#define AES_REG_CTRL_CBC BIT(5)
+#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION BIT(2)
+#define AES_REG_CTRL_INPUT_READY BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY BIT(0)
+#define AES_REG_CTRL_MASK GENMASK(24, 2)
#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE (1 << 6)
-#define AES_REG_MASK_START (1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
-#define AES_REG_MASK_DMA_IN_EN (1 << 2)
-#define AES_REG_MASK_SOFTRESET (1 << 1)
-#define AES_REG_AUTOIDLE (1 << 0)
+#define AES_REG_MASK_SIDLE BIT(6)
+#define AES_REG_MASK_START BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN BIT(3)
+#define AES_REG_MASK_DMA_IN_EN BIT(2)
+#define AES_REG_MASK_SOFTRESET BIT(1)
+#define AES_REG_AUTOIDLE BIT(0)
#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
- u32 val, mask = 0;
+ u32 val;
err = omap_aes_hw_init(dd);
if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
- if (dd->flags & FLAGS_CTR) {
+ if (dd->flags & FLAGS_CTR)
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
- mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
- }
+
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
- mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
- AES_REG_CTRL_KEY_SIZE;
-
- omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
return 0;
}
@@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
int len = 0;
+ if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+ return -EINVAL;
+
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
return -1;
@@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
- int pages;
+ int pages, total;
- pages = get_order(dd->total);
+ total = ALIGN(dd->total, AES_BLOCK_SIZE);
+ pages = get_order(total);
buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
sg_init_table(&dd->in_sgl, 1);
- sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+ sg_set_buf(&dd->in_sgl, buf_in, total);
dd->in_sg = &dd->in_sgl;
sg_init_table(&dd->out_sgl, 1);
- sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+ sg_set_buf(&dd->out_sgl, buf_out, total);
dd->out_sg = &dd->out_sgl;
return 0;
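Worth noting for the hunk above: ALIGN(x, a) (include/linux/kernel.h) rounds x up to the next multiple of a, where a must be a power of two, so the bounce buffers are now sized for whole AES blocks even when dd->total is not a block multiple. A tiny illustration, not driver code:

	#include <linux/kernel.h>
	#include <crypto/aes.h>

	/* 45 bytes of payload get a 48-byte (three-block) bounce buffer. */
	size_t padded = ALIGN(45, AES_BLOCK_SIZE);	/* 48 */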
@@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
unsigned long flags;
- int err, ret = 0;
+ int err, ret = 0, len;
spin_lock_irqsave(&dd->lock, flags);
if (req)
@@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->sgs_copied = 0;
}
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+ len = ALIGN(dd->total, AES_BLOCK_SIZE);
+ dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+ dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
rctx = ablkcipher_request_ctx(req);
@@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
void *buf_in, *buf_out;
- int pages;
+ int pages, len;
pr_debug("enter done_task\n");
@@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data)
sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
- pages = get_order(dd->total_save);
+ len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+ pages = get_order(len);
free_pages((unsigned long)buf_in, pages);
free_pages((unsigned long)buf_out, pages);
}
@@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
- }
-
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
@@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = {
{
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
}
}
- dd->total -= AES_BLOCK_SIZE;
-
- BUG_ON(dd->total < 0);
+ dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
/* Clear IRQ status */
status &= ~AES_REG_IRQ_DATA_OUT;
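The register-macro churn at the top of this file is behaviour-preserving: BIT(n) expands to (1UL << (n)) and GENMASK(h, l) sets bits h down to l, so GENMASK(8, 7) is the same 0x180 mask as the old (3 << 7). A small compile-time sanity check, assuming the usual definitions from include/linux/bitops.h:

	#include <linux/bitops.h>
	#include <linux/bug.h>

	static void example_mask_check(void)
	{
		BUILD_BUG_ON(GENMASK(8, 7) != (3 << 7));	/* 0x180 */
		BUILD_BUG_ON(BIT(5) != (1 << 5));		/* 0x20 */
	}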
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4f56f3681abd..da36de26a4dc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -99,11 +99,16 @@ struct spacc_req {
dma_addr_t src_addr, dst_addr;
struct spacc_ddt *src_ddt, *dst_ddt;
void (*complete)(struct spacc_req *req);
+};
- /* AEAD specific bits. */
- u8 *giv;
- size_t giv_len;
- dma_addr_t giv_pa;
+struct spacc_aead {
+ unsigned long ctrl_default;
+ unsigned long type;
+ struct aead_alg alg;
+ struct spacc_engine *engine;
+ struct list_head entry;
+ int key_offs;
+ int iv_offs;
};
struct spacc_engine {
@@ -121,6 +126,9 @@ struct spacc_engine {
struct spacc_alg *algs;
unsigned num_algs;
struct list_head registered_algs;
+ struct spacc_aead *aeads;
+ unsigned num_aeads;
+ struct list_head registered_aeads;
size_t cipher_pg_sz;
size_t hash_pg_sz;
const char *name;
@@ -174,8 +182,6 @@ struct spacc_aead_ctx {
u8 cipher_key_len;
u8 hash_key_len;
struct crypto_aead *sw_cipher;
- size_t auth_size;
- u8 salt[AES_BLOCK_SIZE];
};
static int spacc_ablk_submit(struct spacc_req *req);
@@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+ return container_of(alg, struct spacc_aead, alg);
+}
+
static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
@@ -310,120 +321,117 @@ out:
return NULL;
}
-static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+static int spacc_aead_make_ddts(struct aead_request *areq)
{
- struct aead_request *areq = container_of(req->req, struct aead_request,
- base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct spacc_req *req = aead_request_ctx(areq);
struct spacc_engine *engine = req->engine;
struct spacc_ddt *src_ddt, *dst_ddt;
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
- unsigned nents = sg_count(areq->src, areq->cryptlen);
unsigned total;
- dma_addr_t iv_addr;
+ unsigned int src_nents, dst_nents;
struct scatterlist *cur;
- int i, dst_ents, src_ents, assoc_ents;
- u8 *iv = giv ? giv : areq->iv;
+ int i, dst_ents, src_ents;
+
+ total = areq->assoclen + areq->cryptlen;
+ if (req->is_encrypt)
+ total += crypto_aead_authsize(aead);
+
+ src_nents = sg_count(areq->src, total);
+ if (src_nents + 1 > MAX_DDT_LEN)
+ return -E2BIG;
+
+ dst_nents = 0;
+ if (areq->src != areq->dst) {
+ dst_nents = sg_count(areq->dst, total);
+		if (dst_nents + 1 > MAX_DDT_LEN)
+ return -E2BIG;
+ }
src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
if (!src_ddt)
- return -ENOMEM;
+ goto err;
dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
- if (!dst_ddt) {
- dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
- return -ENOMEM;
- }
+ if (!dst_ddt)
+ goto err_free_src;
req->src_ddt = src_ddt;
req->dst_ddt = dst_ddt;
- assoc_ents = dma_map_sg(engine->dev, areq->assoc,
- sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
- if (areq->src != areq->dst) {
- src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ if (dst_nents) {
+ src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
DMA_TO_DEVICE);
- dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+ if (!src_ents)
+ goto err_free_dst;
+
+ dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
DMA_FROM_DEVICE);
+
+ if (!dst_ents) {
+ dma_unmap_sg(engine->dev, areq->src, src_nents,
+ DMA_TO_DEVICE);
+ goto err_free_dst;
+ }
} else {
- src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
DMA_BIDIRECTIONAL);
- dst_ents = 0;
+ if (!src_ents)
+ goto err_free_dst;
+ dst_ents = src_ents;
}
/*
- * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
- * formed by the crypto block and sent as the ESP IV for IPSEC.
+ * Now map in the payload for the source and destination and terminate
+ * with the NULL pointers.
*/
- iv_addr = dma_map_single(engine->dev, iv, ivsize,
- giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
- req->giv_pa = iv_addr;
+ for_each_sg(areq->src, cur, src_ents, i)
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
- /*
- * Map the associated data. For decryption we don't copy the
- * associated data.
- */
- total = areq->assoclen;
- for_each_sg(areq->assoc, cur, assoc_ents, i) {
+ /* For decryption we need to skip the associated data. */
+ total = req->is_encrypt ? 0 : areq->assoclen;
+ for_each_sg(areq->dst, cur, dst_ents, i) {
unsigned len = sg_dma_len(cur);
- if (len > total)
- len = total;
-
- total -= len;
+ if (len <= total) {
+ total -= len;
+ continue;
+ }
- ddt_set(src_ddt++, sg_dma_address(cur), len);
- if (req->is_encrypt)
- ddt_set(dst_ddt++, sg_dma_address(cur), len);
+ ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
}
- ddt_set(src_ddt++, iv_addr, ivsize);
-
- if (giv || req->is_encrypt)
- ddt_set(dst_ddt++, iv_addr, ivsize);
-
- /*
- * Now map in the payload for the source and destination and terminate
- * with the NULL pointers.
- */
- for_each_sg(areq->src, cur, src_ents, i) {
- ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
- if (areq->src == areq->dst)
- ddt_set(dst_ddt++, sg_dma_address(cur),
- sg_dma_len(cur));
- }
-
- for_each_sg(areq->dst, cur, dst_ents, i)
- ddt_set(dst_ddt++, sg_dma_address(cur),
- sg_dma_len(cur));
ddt_set(src_ddt, 0, 0);
ddt_set(dst_ddt, 0, 0);
return 0;
+
+err_free_dst:
+ dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+ dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+ return -ENOMEM;
}
static void spacc_aead_free_ddts(struct spacc_req *req)
{
struct aead_request *areq = container_of(req->req, struct aead_request,
base);
- struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
- struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ unsigned total = areq->assoclen + areq->cryptlen +
+ (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+ struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
struct spacc_engine *engine = aead_ctx->generic.engine;
- unsigned ivsize = alg->alg.cra_aead.ivsize;
- unsigned nents = sg_count(areq->src, areq->cryptlen);
+ unsigned nents = sg_count(areq->src, total);
if (areq->src != areq->dst) {
dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
dma_unmap_sg(engine->dev, areq->dst,
- sg_count(areq->dst, areq->cryptlen),
+ sg_count(areq->dst, total),
DMA_FROM_DEVICE);
} else
dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
- dma_unmap_sg(engine->dev, areq->assoc,
- sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-
- dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
-
dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}
@@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}
-/*
- * Set key for a DES operation in an AEAD cipher. This also performs weak key
- * checking if required.
- */
-static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 tmp[DES_EXPKEY_WORDS];
-
- if (unlikely(!des_ekey(tmp, key)) &&
- (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
-
- memcpy(ctx->cipher_key, key, len);
- ctx->cipher_key_len = len;
-
- return 0;
-}
-
-/* Set the key for the AES block cipher component of the AEAD transform. */
-static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
- /*
- * IPSec engine only supports 128 and 256 bit AES keys. If we get a
- * request for any other size (192 bits) then we need to do a software
- * fallback.
- */
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
- /*
- * Set the fallback transform to use the same request flags as
- * the hardware transform.
- */
- ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->sw_cipher->base.crt_flags |=
- tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- return crypto_aead_setkey(ctx->sw_cipher, key, len);
- }
-
- memcpy(ctx->cipher_key, key, len);
- ctx->cipher_key_len = len;
-
- return 0;
-}
-
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
struct crypto_authenc_keys keys;
- int err = -EINVAL;
+ int err;
+
+ crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
@@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (keys.authkeylen > sizeof(ctx->hash_ctx))
goto badkey;
- if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES)
- err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
- else
- err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
-
- if (err)
- goto badkey;
+ memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+ ctx->cipher_key_len = keys.enckeylen;
memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
ctx->hash_key_len = keys.authkeylen;
@@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
{
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
- ctx->auth_size = authsize;
-
- return 0;
+ return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
}
/*
@@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
* be completed in hardware because the hardware may not support certain key
* sizes. In these cases we need to complete the request in software.
*/
-static int spacc_aead_need_fallback(struct spacc_req *req)
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
{
- struct aead_request *aead_req;
- struct crypto_tfm *tfm = req->req->tfm;
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
- aead_req = container_of(req->req, struct aead_request, base);
/*
* If we have a non-supported key-length, then we need to do a
* software fallback.
@@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
{
struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
- int err;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (ctx->sw_cipher) {
- /*
- * Change the request to use the software fallback transform,
- * and once the ciphering has completed, put the old transform
- * back into the request.
- */
- aead_request_set_tfm(req, ctx->sw_cipher);
- err = is_encrypt ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
- aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
- } else
- err = -EINVAL;
+ aead_request_set_tfm(subreq, ctx->sw_cipher);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
- return err;
+ return is_encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
}
static void spacc_aead_complete(struct spacc_req *req)
@@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req)
static int spacc_aead_submit(struct spacc_req *req)
{
- struct crypto_tfm *tfm = req->req->tfm;
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl, proc_len, assoc_len;
struct aead_request *aead_req =
container_of(req->req, struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl, proc_len, assoc_len;
req->result = -EINPROGRESS;
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
- ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+ ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
ctx->hash_ctx, ctx->hash_key_len);
/* Set the source and destination DDT pointers. */
@@ -617,25 +568,15 @@ static int spacc_aead_submit(struct spacc_req *req)
proc_len = aead_req->cryptlen + assoc_len;
/*
- * If we aren't generating an IV, then we need to include the IV in the
- * associated data so that it is included in the hash.
- */
- if (!req->giv) {
- assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
- proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
- } else
- proc_len += req->giv_len;
-
- /*
* If we are decrypting, we need to take the length of the ICV out of
* the processing length.
*/
if (!req->is_encrypt)
- proc_len -= ctx->auth_size;
+ proc_len -= authsize;
writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
- writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+ writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
@@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine)
/*
* Setup an AEAD request for processing. This will configure the engine, load
* the context and then start the packet processing.
- *
- * @giv Pointer to destination address for a generated IV. If the
- * request does not need to generate an IV then this should be set to NULL.
*/
-static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+static int spacc_aead_setup(struct aead_request *req,
unsigned alg_type, bool is_encrypt)
{
- struct crypto_alg *alg = req->base.tfm->__crt_alg;
- struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_engine *engine = to_spacc_aead(alg)->engine;
struct spacc_req *dev_req = aead_request_ctx(req);
- int err = -EINPROGRESS;
+ int err;
unsigned long flags;
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- dev_req->giv = giv;
- dev_req->giv_len = ivsize;
dev_req->req = &req->base;
dev_req->is_encrypt = is_encrypt;
dev_req->result = -EBUSY;
dev_req->engine = engine;
dev_req->complete = spacc_aead_complete;
- if (unlikely(spacc_aead_need_fallback(dev_req)))
+ if (unlikely(spacc_aead_need_fallback(req) ||
+ ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
return spacc_aead_do_fallback(req, alg_type, is_encrypt);
- spacc_aead_make_ddts(dev_req, dev_req->giv);
+ if (err)
+ goto out;
err = -EINPROGRESS;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -728,70 +666,44 @@ out:
static int spacc_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
- return spacc_aead_setup(req, NULL, alg->type, 1);
-}
-
-static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- size_t ivsize = crypto_aead_ivsize(tfm);
- struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
- unsigned len;
- __be64 seq;
-
- memcpy(req->areq.iv, ctx->salt, ivsize);
- len = ivsize;
- if (ivsize > sizeof(u64)) {
- memset(req->giv, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + ivsize - len, &seq, len);
-
- return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+ return spacc_aead_setup(req, alg->type, 1);
}
static int spacc_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
- return spacc_aead_setup(req, NULL, alg->type, 0);
+ return spacc_aead_setup(req, alg->type, 0);
}
/*
* Initialise a new AEAD context. This is responsible for allocating the
* fallback cipher and initialising the context.
*/
-static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
struct spacc_engine *engine = spacc_alg->engine;
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
- ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher)) {
- dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->cra_name);
- ctx->sw_cipher = NULL;
- }
+ if (IS_ERR(ctx->sw_cipher))
+ return PTR_ERR(ctx->sw_cipher);
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- get_random_bytes(ctx->salt, sizeof(ctx->salt));
-
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct spacc_req));
+ crypto_aead_set_reqsize(
+ tfm,
+ max(sizeof(struct spacc_req),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher)));
return 0;
}
@@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm)
* Destructor for an AEAD context. This is called when the transform is freed
* and must free the fallback cipher.
*/
-static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- if (ctx->sw_cipher)
- crypto_free_aead(ctx->sw_cipher);
- ctx->sw_cipher = NULL;
+ crypto_free_aead(ctx->sw_cipher);
}
/*
@@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_exit = spacc_ablk_cra_exit,
},
},
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
{
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA |
+ SPA_CTRL_HASH_MODE_HMAC,
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
SPA_CTRL_HASH_ALG_SHA256 |
SPA_CTRL_HASH_MODE_HMAC,
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
SPA_CTRL_HASH_ALG_SHA256 |
SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
};
@@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev)
engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
engine->algs = ipsec_engine_algs;
engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
+ engine->aeads = ipsec_engine_aeads;
+ engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
@@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev)
engine->algs[i].alg.cra_name);
}
+ INIT_LIST_HEAD(&engine->registered_aeads);
+ for (i = 0; i < engine->num_aeads; ++i) {
+ engine->aeads[i].engine = engine;
+ err = crypto_register_aead(&engine->aeads[i].alg);
+ if (!err) {
+ list_add_tail(&engine->aeads[i].entry,
+ &engine->registered_aeads);
+ ret = 0;
+ }
+ if (err)
+ dev_err(engine->dev, "failed to register alg \"%s\"\n",
+ engine->aeads[i].alg.base.cra_name);
+ else
+ dev_dbg(engine->dev, "registered alg \"%s\"\n",
+ engine->aeads[i].alg.base.cra_name);
+ }
+
return ret;
}
static int spacc_remove(struct platform_device *pdev)
{
+ struct spacc_aead *aead, *an;
struct spacc_alg *alg, *next;
struct spacc_engine *engine = platform_get_drvdata(pdev);
del_timer_sync(&engine->packet_timeout);
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+ list_del(&aead->entry);
+ crypto_unregister_aead(&aead->alg);
+ }
+
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
list_del(&alg->entry);
crypto_unregister_alg(&alg->alg);
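The reworked fallback in this file follows the now-canonical AEAD pattern: the request context is sized (in ->init) to hold a full sub-request for the software cipher, so falling back allocates nothing on the crypt path. A condensed sketch of the same idiom, with example_* placeholders standing in for the driver's types:

	#include <crypto/aead.h>

	struct example_ctx {
		struct crypto_aead *sw_cipher;	/* allocated at init time */
	};

	static int example_do_fallback(struct aead_request *req, bool enc)
	{
		struct crypto_aead *aead = crypto_aead_reqtfm(req);
		struct example_ctx *ctx = crypto_aead_ctx(aead);
		struct aead_request *subreq = aead_request_ctx(req);

		aead_request_set_tfm(subreq, ctx->sw_cipher);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);

		return enc ? crypto_aead_encrypt(subreq) :
			     crypto_aead_decrypt(subreq);
	}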
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6fdb9e8b22a7..eefccf7b8be7 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_AKCIPHER
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
+ select ASN1
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
@@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC
To compile this as a module, choose M here: the module
will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+ tristate "Support for Intel(R) DH895xCC Virtual Function"
+ depends on X86 && PCI
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+
+ help
+	  Support for Intel(R) DH895xCC with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xccvf.
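With this entry in place the VF driver builds like any other QAT module; assuming CRYPTO_DEV_QAT itself stays a promptless, select-only symbol, a config enabling both PF and VF support would just set:

	CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
	CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m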
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index d11481be225e..a3ce0b70e32f 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644
index 000000000000..ee328374dba8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/.gitignore
@@ -0,0 +1 @@
+*-asn1.[ch]
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index e0424dc382fe..df20a9de1c58 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,3 +1,6 @@
+$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
+clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_ctl_drv.o \
@@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \
adf_accel_engine.o \
adf_aer.o \
adf_transport.o \
+ adf_admin.o \
+ adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
+ qat_rsakey-asn1.o \
+ qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
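The explicit dependency line leans on kbuild's built-in ASN.1 support: listing qat_rsakey-asn1.o among the objects makes kbuild run scripts/asn1_compiler over the (not shown) qat_rsakey.asn1 grammar, generating the .c/.h pair that the .gitignore entry above excludes from version control. The generated decoder is then consumed from C roughly as below; example_parse_key and its context argument are invented for illustration:

	#include <linux/asn1_decoder.h>
	#include "qat_rsakey-asn1.h"	/* declares qat_rsakey_decoder */

	static int example_parse_key(void *ctx, const void *key,
				     unsigned int keylen)
	{
		/* Walk the BER/DER input, firing the grammar's actions. */
		return asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
	}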
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5fe902967620..ca853d50b4b7 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -46,13 +46,17 @@
*/
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -79,6 +83,7 @@ struct adf_bar {
struct adf_accel_msix {
struct msix_entry *entries;
char **names;
+ u32 num_entries;
} __packed;
struct adf_accel_pci {
@@ -99,6 +104,7 @@ enum dev_sku_info {
DEV_SKU_2,
DEV_SKU_3,
DEV_SKU_4,
+ DEV_SKU_VF,
DEV_SKU_UNKNOWN,
};
@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
return "SKU3";
case DEV_SKU_4:
return "SKU4";
+ case DEV_SKU_VF:
+ return "SKUVF";
case DEV_SKU_UNKNOWN:
default:
break;
@@ -135,23 +143,29 @@ struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
uint32_t (*get_accel_mask)(uint32_t fuse);
uint32_t (*get_ae_mask)(uint32_t fuse);
+ uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+ uint32_t (*get_pf2vf_offset)(uint32_t i);
+ uint32_t (*get_vintmsk_offset)(uint32_t i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
- void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
- void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+ const uint32_t **cfg);
+ void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
const char *fw_name;
- uint32_t pci_dev_id;
+ const char *fw_mmp_name;
uint32_t fuses;
uint32_t accel_capabilities_mask;
uint16_t accel_mask;
@@ -163,6 +177,7 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
+ uint8_t min_iov_compat_ver;
} __packed;
/* CSR write macro */
@@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
struct icp_qat_fw_loader_handle *fw_loader;
const struct firmware *uof_fw;
+ const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+ struct adf_accel_dev *accel_dev;
+ struct tasklet_struct vf2pf_bh_tasklet;
+ struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct ratelimit_state vf2pf_ratelimit;
+ u32 vf_nr;
+ bool init;
};
struct adf_accel_dev {
@@ -199,6 +224,21 @@ struct adf_accel_dev {
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ union {
+ struct {
+			/* vf_info is non-NULL once SR-IOV has been enabled */
+ struct adf_accel_vf_info *vf_info;
+ } pf;
+ struct {
+ char *irq_name;
+ struct tasklet_struct pf2vf_bh_tasklet;
+ struct mutex vf2pf_lock; /* protect CSR access */
+ struct completion iov_msg_completion;
+ uint8_t compatible;
+ uint8_t pf_version;
+ } vf;
+ };
+ bool is_vf;
uint8_t accel_id;
} __packed;
#endif
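One detail worth calling out in this header change: PF-only and VF-only state now overlap in an anonymous union inside adf_accel_dev, discriminated by the new is_vf flag (and, on the PF side, by whether pf.vf_info has been populated). An illustrative accessor built on those fields, not a helper from the driver:

	#include "adf_accel_devices.h"

	static bool example_sriov_active(struct adf_accel_dev *accel_dev)
	{
		/* pf.vf_info is only non-NULL once SR-IOV has been enabled. */
		return !accel_dev->is_vf && accel_dev->pf.vf_info;
	}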
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index fdda8e7ae302..20b08bdcb146 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- void *uof_addr;
- uint32_t uof_size;
+ void *uof_addr, *mmp_addr;
+ u32 uof_size, mmp_size;
+ if (!hw_device->fw_name)
+ return 0;
+
+ if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+ hw_device->fw_mmp_name);
+ return -EFAULT;
+ }
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
- dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
hw_device->fw_name);
- return -EFAULT;
+ goto out_err;
}
uof_size = loader_data->uof_fw->size;
uof_addr = (void *)loader_data->uof_fw->data;
+ mmp_size = loader_data->mmp_fw->size;
+ mmp_addr = (void *)loader_data->mmp_fw->data;
+ qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
- dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
goto out_err;
}
return 0;
@@ -85,11 +97,17 @@ out_err:
void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return;
qat_uclo_del_uof_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
release_firmware(loader_data->uof_fw);
+ release_firmware(loader_data->mmp_fw);
loader_data->uof_fw = NULL;
+ loader_data->mmp_fw = NULL;
loader_data->fw_loader = NULL;
}
@@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
int adf_ae_init(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
if (!loader_data)
@@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
qat_hal_deinit(loader_data->fw_loader);
kfree(accel_dev->fw_loader);
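The recurring !fw_name test added throughout this file is what lets the virtual-function device share the PF code paths: a VF carries no firmware of its own, so each adf_ae_* helper succeeds without touching the loader. The guard that now heads every entry point is simply:

	if (!hw_device->fw_name)
		return 0;	/* VF: no firmware to load, start or stop */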
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 000000000000..147d755fed97
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,290 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+ dma_addr_t phy_addr;
+ dma_addr_t const_tbl_addr;
+ void *virt_addr;
+ void __iomem *mailbox_addr;
+ struct mutex lock; /* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+ void *in, void *out)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+ int offset = ae * ADF_ADMINMSG_LEN * 2;
+ void __iomem *mailbox = admin->mailbox_addr;
+ int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+ int times, received;
+
+ mutex_lock(&admin->lock);
+
+ if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+ mutex_unlock(&admin->lock);
+ return -EAGAIN;
+ }
+
+ memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+ ADF_CSR_WR(mailbox, mb_offset, 1);
+ received = 0;
+ for (times = 0; times < 50; times++) {
+ msleep(20);
+ if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+ received = 1;
+ break;
+ }
+ }
+ if (received)
+ memcpy(out, admin->virt_addr + offset +
+ ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+ else
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg to accelerator\n");
+
+ mutex_unlock(&admin->lock);
+ return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ int i;
+
+ memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+ req.init_admin_cmd_id = cmd;
+
+ if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+ req.init_cfg_sz = 1024;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+ }
+ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+ memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+ if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+ resp.init_resp_hdr.status)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+ int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+ if (ret)
+ return ret;
+ return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *csr = pmisc->virt_addr;
+ void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+ u64 reg_val;
+
+ admin = kzalloc_node(sizeof(*admin), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!admin)
+ return -ENOMEM;
+ admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
+ if (!admin->virt_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+ kfree(admin);
+ return -ENOMEM;
+ }
+
+ admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+ (void *) const_tab, 1024,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ admin->const_tbl_addr))) {
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+ kfree(admin);
+ return -ENOMEM;
+ }
+ reg_val = (u64)admin->phy_addr;
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+ mutex_init(&admin->lock);
+ admin->mailbox_addr = mailbox;
+ accel_dev->admin = admin;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+
+ if (!admin)
+ return;
+
+ if (admin->virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+
+ dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+ DMA_TO_DEVICE);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+ accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
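The three exports above form the admin channel's whole lifecycle: map the mailbox and DMA buffer, send the firmware init commands, tear it all down. A minimal caller sketch, where example_setup_admin() is a hypothetical helper for a device-specific probe path, not part of this patch:

static int example_setup_admin(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Map the mailbox CSRs and allocate the DMA message page */
	ret = adf_init_admin_comms(accel_dev);
	if (ret)
		return ret;

	/* Send ICP_QAT_FW_INIT_ME, then the constants table, to each AE */
	ret = adf_send_admin_init(accel_dev);
	if (ret)
		adf_exit_admin_comms(accel_dev);
	return ret;
}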
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 2dbc733b8ab2..a57b4194de28 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
+ if (!parent)
+ parent = pdev;
+
if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
@@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = {
* QAT acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
{
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index ab65bc274561..d0879790561f 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
* The table stores device specific config values.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
{
@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+ if (!dev_cfg_data)
+ return;
+
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
* in the given acceleration device
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
@@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
* will be stored.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
{
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 88b82187ac35..c697fb1cdfb5 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -60,7 +60,7 @@
#define ADF_CFG_NO_DEVICE 0xFF
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
#define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
enum adf_cfg_val_type {
ADF_DEC,
@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
enum adf_device_type {
DEV_UNKNOWN = 0,
DEV_DH895XCC,
+ DEV_DH895XCCVF,
};
struct adf_dev_status_info {
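The cap jumps from 32 ids to 1024. The 32 * 32 factorisation is presumably one block of 32 VF ids per physical device, matching the 32-bit vf_mask used throughout this series; a hypothetical breakdown, not spelled out in the patch:

/* Assumed sizing: 32 physical accelerators x up to 32 VFs each
 * = 1024 device ids.
 */
#define ADF_MAX_DEVICES (32 * 32)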
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 27e16c09230b..7836dffc3d47 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -54,8 +54,8 @@
#include "icp_qat_hal.h"
#define ADF_MAJOR_VERSION 0
-#define ADF_MINOR_VERSION 1
-#define ADF_BUILD_VERSION 3
+#define ADF_MINOR_VERSION 2
+#define ADF_BUILD_VERSION 0
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
__stringify(ADF_MINOR_VERSION) "." \
__stringify(ADF_BUILD_VERSION)
@@ -91,9 +91,13 @@ struct service_hndl {
unsigned long start_status;
char *name;
struct list_head list;
- int admin;
};
+static inline int get_current_node(void)
+{
+ return topology_physical_package_id(smp_processor_id());
+}
+
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
@@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void);
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -141,10 +162,13 @@ int qat_crypto_unregister(void);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
int qat_algs_init(void);
void qat_algs_exit(void);
int qat_algs_register(void);
int qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
#endif
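The #else branch above is the standard kernel idiom for compiling a feature out: callers invoke the same symbols unconditionally, and the static inline stubs vanish when CONFIG_PCI_IOV is off. The same pattern with a made-up feature name, for illustration only:

#ifdef CONFIG_MY_FEATURE
int my_feature_configure(struct pci_dev *pdev, int arg);	/* real implementation */
#else
static inline int my_feature_configure(struct pci_dev *pdev, int arg)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif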
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e056b9e9bf8a..cd8a12af8ec5 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
}
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
- if (!accel_dev) {
- pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+ if (!accel_dev)
return -ENODEV;
- }
+
hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_aer();
qat_crypto_unregister();
qat_algs_exit();
+ adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 3f0ff9e7d840..8dfdb8f90797 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -50,21 +50,125 @@
#include "adf_common_drv.h"
static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
+struct vf_id_map {
+ u32 bdf;
+ u32 id;
+ u32 fake_id;
+ bool attached;
+ struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+ return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+ PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+ (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+ return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+
+ if (ptr->bdf == bdf)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->fake_id == fake)
+ return ptr->id;
+ }
+ return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ * for VFs only or for both VFs and PFs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+ struct vf_id_map *map;
+ struct list_head *ptr, *tmp;
+
+ mutex_lock(&table_lock);
+ list_for_each_safe(ptr, tmp, &vfs_table) {
+ map = list_entry(ptr, struct vf_id_map, list);
+ if (map->bdf != -1)
+ num_devices--;
+
+ if (vf && map->bdf == -1)
+ continue;
+
+ list_del(ptr);
+ kfree(map);
+ }
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data: Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+ struct adf_hw_device_class *class = hw_data->dev_class;
+ struct list_head *itr;
+ int i = 0;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->hw_device->dev_class == class)
+ ptr->hw_device->instance_id = i++;
+
+ if (i == class->instances)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
struct list_head *itr;
+ int ret = 0;
if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
}
mutex_lock(&table_lock);
- list_for_each(itr, &accel_table) {
- struct adf_accel_dev *ptr =
+ atomic_set(&accel_dev->ref_count, 0);
+
+ /* PF on host or VF on guest */
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ struct vf_id_map *map;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
- if (ptr == accel_dev) {
- mutex_unlock(&table_lock);
- return -EEXIST;
+ if (ptr == accel_dev) {
+ ret = -EEXIST;
+ goto unlock;
+ }
}
+
+ list_add_tail(&accel_dev->list, &accel_table);
+ accel_dev->accel_id = num_devices++;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ map->bdf = ~0;
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
+ } else if (accel_dev->is_vf && pf) {
+ /* VF on host */
+ struct adf_accel_vf_info *vf_info;
+ struct vf_id_map *map;
+
+ vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (map) {
+ struct vf_id_map *next;
+
+ accel_dev->accel_id = map->id;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->fake_id++;
+ map->attached = true;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id++;
+ next = list_next_entry(next, list);
+ }
+
+ ret = 0;
+ goto unlock;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ accel_dev->accel_id = num_devices++;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->bdf = adf_get_vf_num(accel_dev);
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
}
- atomic_set(&accel_dev->ref_count, 0);
- list_add_tail(&accel_dev->list, &accel_table);
- accel_dev->accel_id = num_devices++;
+unlock:
mutex_unlock(&table_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
/**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: void
*/
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
mutex_lock(&table_lock);
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ num_devices--;
+ } else if (accel_dev->is_vf && pf) {
+ struct vf_id_map *map, *next;
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (!map) {
+ dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+ goto unlock;
+ }
+ map->fake_id--;
+ map->attached = false;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id--;
+ next = list_next_entry(next, list);
+ }
+ }
+unlock:
list_del(&accel_dev->list);
- num_devices--;
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
struct list_head *itr;
+ int real_id;
mutex_lock(&table_lock);
+ real_id = adf_get_vf_real_id(id);
+ if (real_id < 0)
+ goto unlock;
+
+ id = real_id;
+
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
-
if (ptr->accel_id == id) {
mutex_unlock(&table_lock);
return ptr;
}
}
+unlock:
mutex_unlock(&table_lock);
return NULL;
}
@@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
return -ENODEV;
}
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_detached_vfs(void)
{
struct list_head *itr;
+ int vfs = 0;
- *num = 0;
- list_for_each(itr, &accel_table) {
- (*num)++;
+ mutex_lock(&table_lock);
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->bdf != ~0 && !ptr->attached)
+ vfs++;
}
+ mutex_unlock(&table_lock);
+ return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+ *num = num_devices - adf_get_num_detached_vfs();
}
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
return atomic_read(&accel_dev->ref_count) != 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount. On the first reference, also
+ * increment the refcount of the module that owns the device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
+ */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
return -EFAULT;
return 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount. On the last reference, also
+ * decrement the refcount of the module that owns the device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
module_put(accel_dev->owner);
}
+EXPORT_SYMBOL_GPL(adf_dev_put);
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise.
+ */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_dev_started);
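The fake_id bookkeeping above keeps the id space user space sees dense while host VFs come and go. A worked example with illustrative values, one PF and two VFs registered in that order:

/*
 *   entry   bdf   id   fake_id   attached
 *   PF       ~0    0         0   true
 *   VF A      a    1         1   true
 *   VF B      b    2         2   true
 *
 * adf_devmgr_rm_dev() on VF A marks it detached and decrements
 * fake_id for A and every later entry:
 *
 *   VF A      a    1         0   false
 *   VF B      b    2         1   true
 *
 * adf_devmgr_get_dev_by_id(1) then resolves fake id 1 to real id 2
 * through adf_get_vf_real_id(), and adf_devmgr_get_num_dev() reports
 * num_devices minus the detached VFs.
 */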
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 1864bdb36f8f..6849422e04bb 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -44,9 +44,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
#define ADF_ARB_REQ_RING_NUM 8
@@ -58,7 +57,6 @@
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
@@ -89,10 +87,11 @@
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
- uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
- uint32_t arb, i;
- const uint32_t *thd_2_arb_cfg;
+ u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+ u32 arb, i;
+ const u32 *thd_2_arb_cfg;
/* Service arb configured for 32 bytes responses and
* ring flow control check enabled. */
@@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
/* Setup worker queue registers */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
/* Map worker threads to service arbiters */
- adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+ hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
if (!thd_2_arb_cfg)
return -EFAULT;
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
return 0;
}
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for/from arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring->bank->bank_number,
ring->bank->ring_mask & 0xFF);
}
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr;
unsigned int i;
@@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
/* Shutdown work queue */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, 0);
/* Unmap worker threads to service arbiters */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 245f43237a2d..ac37a89965ac 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
* Function adds the acceleration service to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_register(struct service_hndl *service)
{
@@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
 * Function removes the acceleration service from the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_unregister(struct service_hndl *service)
{
@@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
* Initialize the ring data structures and the admin comms and arbitration
* services.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_init(struct adf_accel_dev *accel_dev)
{
@@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
*/
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to initialise service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise service %s\n",
@@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
+ hw_data->enable_vf2pf_comms(accel_dev);
return 0;
}
@@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
* is ready to be used.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_start(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
@@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_START)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to start service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->start_status);
+ if (hw_data->send_admin_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+ return -EFAULT;
}
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
"Failed to start service %s\n",
@@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_register()) {
+ if (!list_empty(&accel_dev->crypto_list) &&
+ (qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
 * is shutting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
@@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_unregister())
+ if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
dev_err(&GET_DEV(accel_dev),
"Failed to unregister crypto algs\n");
+ if (!list_empty(&accel_dev->crypto_list))
+ qat_asym_algs_unregister();
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->start_status))
continue;
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->start_status);
}
}
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->start_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_STOP))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->start_status);
- }
if (wait)
msleep(100);
@@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->init_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
+ hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
@@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 000000000000..5fdbad809343
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,438 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET 0x3A000
+#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+ reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+ reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ * @vf_mask: Mask of VF numbers whose VF to PF interrupts are disabled.
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+ ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+ ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ u32 val, pf2vf_offset, count = 0;
+ u32 local_in_use_mask, local_in_use_pattern;
+ u32 remote_in_use_mask, remote_in_use_pattern;
+ struct mutex *lock; /* lock preventing concurrent access to the CSR */
+ u32 int_bit;
+ int ret = 0;
+
+ if (accel_dev->is_vf) {
+ pf2vf_offset = hw_data->get_pf2vf_offset(0);
+ lock = &accel_dev->vf.vf2pf_lock;
+ local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ int_bit = ADF_VF2PF_INT;
+ } else {
+ pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+ lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+ local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ int_bit = ADF_PF2VF_INT;
+ }
+
+ mutex_lock(lock);
+
+ /* Check if PF2VF CSR is in use by remote function */
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote function\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Attempt to get ownership of PF2VF CSR */
+ msg &= ~local_in_use_mask;
+ msg |= local_in_use_pattern;
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+ /* Wait in case remote func also attempting to get ownership */
+ msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & local_in_use_mask) != local_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote - collision detected\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * This function now owns the PF2VF CSR. The IN_USE_BY pattern must
+ * remain in the PF2VF CSR for all writes including ACK from remote
+ * until this local function relinquishes the CSR. Send the message
+ * by interrupting the remote.
+ */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+ /* Wait for confirmation from remote func it received the message */
+ do {
+ msleep(ADF_IOV_MSG_ACK_DELAY);
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+ if (val & int_bit) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ val &= ~int_bit;
+ ret = -EIO;
+ }
+
+ /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+ mutex_unlock(lock);
+ return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Message to send
+ * @vf_nr: VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF (or, on a VF, to the PF)
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ u32 count = 0;
+ int ret;
+
+ do {
+ ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+ if (ret)
+ msleep(ADF_IOV_MSG_RETRY_DELAY);
+ } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+ /* Read message from the VF */
+ msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+ /* To ACK, clear the VF2PFINT bit */
+ msg &= ~ADF_VF2PF_INT;
+ ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+ if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) VF2PF messages */
+ goto err;
+
+ switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Compatibility Version Request from VF%d vers=%u\n",
+ vf_nr + 1, vf_compat_ver);
+
+ if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) incompatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) compat with PF (vers %d) unkn.\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else {
+ dev_dbg(&GET_DEV(accel_dev),
+ "VF (vers %d) compatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ }
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ /* Set legacy major and minor version num */
+ resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+ 1 << ADF_PF2VF_MINORVERSION_SHIFT;
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = false;
+ }
+ break;
+ default:
+ goto err;
+ }
+
+ if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+ dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+ return;
+err:
+ dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+ vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 msg = 0;
+ int ret;
+
+ msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+ msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+ msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+ BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+ /* Send request from VF to PF */
+ ret = adf_iov_putmsg(accel_dev, msg, 0);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ /* Wait for response */
+ if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+ timeout)) {
+ dev_err(&GET_DEV(accel_dev),
+ "IOV request/response message timeout expired\n");
+ return -EIO;
+ }
+
+ /* Response from PF received, check compatibility */
+ switch (accel_dev->vf.compatible) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF and decides whether it is compatible */
+ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+ break;
+ /* fall through */
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ accel_dev->vf.pf_version,
+ ADF_PFVF_COMPATIBILITY_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
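The same putmsg path carries the VF side of the handshake. A sketch of how a VF could compose and send its INIT notification using the adf_pf2vf_msg.h layout; example_vf2pf_notify_init() is a hypothetical helper, not part of this patch:

static int example_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));

	/* vf_nr is unused on the VF side: a VF owns exactly one PF2VF
	 * register (see __adf_iov_putmsg above). */
	return adf_iov_putmsg(accel_dev, msg, 0);
}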
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 000000000000..5acd531a11ff
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionally is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages.
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT 2
+#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
+#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
+#define ADF_PF2VF_MINORVERSION_SHIFT 6
+#define ADF_PF2VF_MAJORVERSION_SHIFT 10
+#define ADF_PF2VF_VF_COMPATIBLE 1
+#define ADF_PF2VF_VF_INCOMPATIBLE 2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
+#define ADF_VF2PF_INT BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT 18
+#define ADF_VF2PF_MSGTYPE_INIT 0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_IOV_MSG_ACK_DELAY 2
+#define ADF_IOV_MSG_ACK_MAX_RETRY 100
+#define ADF_IOV_MSG_RETRY_DELAY 5
+#define ADF_IOV_MSG_MAX_RETRIES 3
+#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
+ ADF_IOV_MSG_ACK_MAX_RETRY + \
+ ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
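For orientation, the retry constants above combine into a concrete bound, and a received word is decoded with the masks in the usual way (worked numbers and an illustrative extraction):

/*
 * ADF_IOV_MSG_RESP_TIMEOUT = ADF_IOV_MSG_ACK_DELAY *
 *                            ADF_IOV_MSG_ACK_MAX_RETRY +
 *                            ADF_IOV_MSG_COLLISION_DETECT_DELAY
 *                          = 2 * 100 + 10 = 210 ms
 *
 * so a VF waits for the PF's reply exactly as long as one full
 * __adf_iov_putmsg() attempt can take. Field extraction, e.g. on
 * the PF side:
 *
 *   type = (msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT;
 *   vers = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;   (COMPAT_VER_REQ only)
 */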
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 000000000000..2f77a4a8cecb
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,309 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS 96
+
+#define ME2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS 12
+
+#define ME2FUNCTION_MAP_REG_SIZE 4
+#define ME2FUNCTION_MAP_VALID BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+ struct work_struct pf2vf_resp_work;
+ struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+ struct adf_pf2vf_resp *pf2vf_resp =
+ container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+ adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+ kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+ struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+ struct adf_pf2vf_resp *pf2vf_resp;
+
+ pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+ if (!pf2vf_resp)
+ return;
+
+ pf2vf_resp->vf_info = vf_info;
+ INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+ queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ struct adf_accel_vf_info *vf_info;
+ int i;
+ u32 reg;
+
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ if (!pf2vf_resp_wq)
+ return -ENOMEM;
+
+ for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ i++, vf_info++) {
+		/* This ptr will be populated when VFs are created */
+ vf_info->accel_dev = accel_dev;
+ vf_info->vf_nr = i;
+
+ tasklet_init(&vf_info->vf2pf_bh_tasklet,
+ (void *)adf_vf2pf_bh_handler,
+ (unsigned long)vf_info);
+ mutex_init(&vf_info->pf2vf_lock);
+ ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ /* Enable VF to PF interrupts for all VFs */
+ adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+ /*
+ * Due to the hardware design, when SR-IOV and the ring arbiter
+ * are enabled all the VFs supported in hardware must be enabled in
+ * order for all the hardware resources (i.e. bundles) to be usable.
+ * When SR-IOV is enabled, each of the VFs will own one bundle.
+ */
+ return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev: Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+ u32 reg;
+ int i;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ adf_pf2vf_notify_restarting(accel_dev);
+
+ pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+ /* Disable VF to PF interrupts */
+ adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+ tasklet_disable(&vf->vf2pf_bh_tasklet);
+ tasklet_kill(&vf->vf2pf_bh_tasklet);
+ mutex_destroy(&vf->pf2vf_lock);
+ }
+
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ unsigned long val;
+ int ret;
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -EFAULT;
+ }
+
+ if (!iommu_present(&pci_bus_type)) {
+ dev_err(&pdev->dev,
+ "IOMMU must be enabled for SR-IOV to work\n");
+ return -EINVAL;
+ }
+
+ if (accel_dev->pf.vf_info) {
+ dev_info(&pdev->dev, "Already enabled for this device\n");
+ return -EINVAL;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev) ||
+ adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Device busy\n");
+ return -EBUSY;
+ }
+
+ if (adf_dev_stop(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to stop qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ adf_dev_shutdown(accel_dev);
+ }
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ return -EFAULT;
+ val = 0;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ return -EFAULT;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ /* Allocate memory for VF info structs */
+ accel_dev->pf.vf_info = kcalloc(totalvfs,
+ sizeof(struct adf_accel_vf_info),
+ GFP_KERNEL);
+ if (!accel_dev->pf.vf_info)
+ return -ENOMEM;
+
+ if (adf_dev_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ if (adf_dev_start(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ ret = adf_enable_sriov(accel_dev);
+ if (ret)
+ return ret;
+
+ return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
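
The PF-to-VF response path in the new adf_sriov.c follows the standard two-stage bottom-half pattern: the VF2PF tasklet runs in atomic context, so it only allocates and queues a work item (GFP_ATOMIC), while the workqueue callback does the real, possibly sleeping, message handling. A minimal sketch of that pattern, using hypothetical names (deferred_resp, resp_wq, handle_request) rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_resp {
	struct work_struct work;
	void *ctx;			/* per-VF state in the real driver */
};

static struct workqueue_struct *resp_wq;	/* assumed created at init */

static void resp_work_fn(struct work_struct *work)
{
	struct deferred_resp *resp =
		container_of(work, struct deferred_resp, work);

	/* handle_request(resp->ctx): process the message, may sleep */
	kfree(resp);
}

/* Registered with tasklet_init(&t, bh_tasklet_fn, (unsigned long)vf) */
static void bh_tasklet_fn(unsigned long data)
{
	/* Atomic context: only non-sleeping allocations allowed */
	struct deferred_resp *resp = kzalloc(sizeof(*resp), GFP_ATOMIC);

	if (!resp)
		return;		/* drop the event; recovery is left to the VF side */

	resp->ctx = (void *)data;
	INIT_WORK(&resp->work, resp_work_fn);
	queue_work(resp_wq, &resp->work);
}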
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index db2926bff8a5..3865ae8d96d9 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
+ if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+ dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+ return -EFAULT;
+ }
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
@@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
goto err;
/* Enable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_enable(ring);
+ adf_update_ring_arb(ring);
if (adf_ring_debugfs_add(ring, ring_name)) {
dev_err(&GET_DEV(accel_dev),
@@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
err:
adf_cleanup_ring(ring);
adf_unreserve_ring(bank, ring_num);
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
- struct adf_accel_dev *accel_dev = bank->accel_dev;
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
@@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
adf_cleanup_ring(ring);
}
@@ -463,7 +466,7 @@ err:
* acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 160c9a36c919..6ad7e4e1edca 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -97,8 +97,9 @@
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 /* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
- ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+ ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
SIZE) & ~0x4)
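
The ADF_RING_SIZE_BYTES_MIN change above is a units fix: ADF_RING_SIZE_4K is an encoded size constant, not a byte count. Assuming ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) expands to ((1 << (SIZE - 1)) << 7) as defined elsewhere in this header, so that ADF_RING_SIZE_4K (0x6) maps to 4096 bytes, the behaviour for a 1024-byte request changes as follows:

/*
 * old: (1024 < 0x6) is false, so ADF_RING_SIZE_BYTES_MIN(1024) -> 1024,
 *      i.e. a byte count was compared against the raw encoding 0x6 and
 *      allocations below the 4K minimum slipped through.
 * new: (1024 < 4096) is true, so ADF_RING_SIZE_BYTES_MIN(1024) -> 4096.
 */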
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index f1e30e24a419..46747f01b1d1 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp {
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 000000000000..0d7a9b51ce9f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ u64 content_desc_addr;
+ u32 content_desc_resrvd;
+ u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 service_type;
+ u8 hdr_flags;
+ u16 comn_req_flags;
+ u16 resrvd4;
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ u8 output_param_count;
+ u8 input_param_count;
+ u16 resrvd1;
+ u32 resrvd2;
+ u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 response_type;
+ u8 hdr_flags;
+ u16 comn_resp_flags;
+ u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+ QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+ ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+ QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
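
For reference, this is how the two field macros at the bottom of the new header are meant to be consumed (the same pattern appears in qat_asym_algs.c later in this series). A hedged sketch, assuming the request/response structures come from the caller and that icp_qat_fw.h provides ICP_QAT_FW_COMN_REQ_FLAG_SET and ICP_QAT_FW_COMN_STATUS_FLAG_OK:

static int pke_hdr_usage(struct icp_qat_fw_pke_request *msg,
			 struct icp_qat_fw_pke_resp *resp)
{
	/* Mark the request header valid before handing it to firmware */
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	/* Decode the PKE status bit from the common response flags */
	return ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags) ==
	       ICP_QAT_FW_COMN_STATUS_FLAG_OK ? 0 : -EINVAL;
}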
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index df427c0e9e7b..2bd913aceaeb 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -53,7 +53,6 @@
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -113,9 +112,6 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
- uint8_t salt[AES_BLOCK_SIZE];
- spinlock_t lock; /* protects qat_alg_aead_ctx struct */
};
struct qat_alg_ablkcipher_ctx {
@@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx {
spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
-static int get_current_node(void)
-{
- return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
switch (qat_hash_alg) {
@@ -278,12 +269,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
struct icp_qat_hw_auth_algo_blk *hash =
@@ -358,12 +349,12 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
return 0;
}
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -515,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg)
return 0;
}
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
const uint8_t *key, unsigned int keylen)
{
struct crypto_authenc_keys keys;
int alg;
- if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
- return -EFAULT;
-
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
if (qat_alg_validate_key(keys.enckeylen, &alg))
goto bad_key;
- if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
goto error;
- if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
goto error;
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
error:
return -EFAULT;
@@ -567,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev;
- spin_lock(&ctx->lock);
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
@@ -581,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(node);
if (!inst) {
- spin_unlock(&ctx->lock);
return -EINVAL;
}
@@ -591,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
- spin_unlock(&ctx->lock);
return -ENOMEM;
}
ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
- spin_unlock(&ctx->lock);
goto out_free_enc;
}
}
- spin_unlock(&ctx->lock);
- if (qat_alg_aead_init_sessions(ctx, key, keylen))
+ if (qat_alg_aead_init_sessions(tfm, key, keylen))
goto out_free_all;
return 0;
@@ -654,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- struct scatterlist *assoc, int assoclen,
struct scatterlist *sgl,
- struct scatterlist *sglout, uint8_t *iv,
- uint8_t ivlen,
+ struct scatterlist *sglout,
struct qat_crypto_request *qat_req)
{
struct device *dev = &GET_DEV(inst->accel_dev);
- int i, bufs = 0, sg_nctr = 0;
- int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ int i, sg_nctr = 0;
+ int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
if (unlikely(!n))
return -EINVAL;
@@ -683,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, blp)))
goto err;
- for_each_sg(assoc, sg, assoc_n, i) {
- if (!sg->length)
- continue;
-
- if (!(assoclen > 0))
- break;
-
- bufl->bufers[bufs].addr =
- dma_map_single(dev, sg_virt(sg),
- min_t(int, assoclen, sg->length),
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- assoclen -= sg->length;
- }
-
- if (ivlen) {
- bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = ivlen;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- }
-
for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -724,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
sg_nctr++;
}
- bufl->num_bufs = sg_nctr + bufs;
+ bufl->num_bufs = sg_nctr;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -734,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
n = sg_nents(sglout);
sz_out = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
sg_nctr = 0;
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -744,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err;
bufers = buflout->bufers;
- /* For out of place operation dma map only data and
- * reuse assoc mapping and iv */
- for (i = 0; i < bufs; i++) {
- bufers[i].len = bufl->bufers[i].len;
- bufers[i].addr = bufl->bufers[i].addr;
- }
for_each_sg(sglout, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -764,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].len = sg->length;
sg_nctr++;
}
- buflout->num_bufs = sg_nctr + bufs;
+ buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
@@ -778,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
err:
dev_err(dev, "Failed to map buf for dma\n");
sg_nctr = 0;
- for (i = 0; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
@@ -789,7 +737,7 @@ err:
kfree(bufl);
if (sgl != sglout && buflout) {
n = sg_nents(sglout);
- for (i = bufs; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
@@ -849,12 +797,10 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+ int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
- areq->src, areq->dst, areq->iv,
- AES_BLOCK_SIZE, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -868,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen +
- cipher_param->cipher_length + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -885,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
- int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -895,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
+ uint8_t *iv = areq->iv;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
- areq->src, areq->dst, iv, AES_BLOCK_SIZE,
- qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -914,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- if (enc_iv) {
- cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
- cipher_param->cipher_offset = areq->assoclen;
- } else {
- memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
- cipher_param->cipher_length = areq->cryptlen;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
- }
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+ cipher_param->cipher_length = areq->cryptlen;
+ cipher_param->cipher_offset = areq->assoclen;
+
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -936,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
return -EINPROGRESS;
}
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
- return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- __be64 seq;
-
- memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
- &seq, sizeof(uint64_t));
- return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const uint8_t *key,
unsigned int keylen)
@@ -1026,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1064,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1092,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
- return -EFAULT;
- spin_lock_init(&ctx->lock);
+ return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request));
- ctx->tfm = tfm;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ sizeof(struct qat_crypto_request));
return 0;
}
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- if (!IS_ERR(ctx->hash_tfm))
- crypto_free_shash(ctx->hash_tfm);
+ crypto_free_shash(ctx->hash_tfm);
if (!inst)
return;
@@ -1189,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
qat_crypto_put_instance(inst);
}
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha1_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+
+static struct aead_alg qat_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
+ .init = qat_alg_aead_sha1_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
}, {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha256_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
+ .init = qat_alg_aead_sha256_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
}, {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha512_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- },
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
-}, {
+ .init = qat_alg_aead_sha512_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct crypto_alg qat_algs[] = { {
.cra_name = "cbc(aes)",
.cra_driver_name = "qat_aes_cbc",
.cra_priority = 4001,
@@ -1281,42 +1183,54 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- int ret = 0;
+ int ret = 0, i;
mutex_lock(&algs_lock);
- if (++active_devs == 1) {
- int i;
+ if (++active_devs != 1)
+ goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags =
- (qat_algs[i].cra_type == &crypto_aead_type) ?
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+ qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
- }
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ if (ret)
+ goto unlock;
+
+ for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+ qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+ ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ if (ret)
+ goto unreg_algs;
+
+unlock:
mutex_unlock(&algs_lock);
return ret;
+
+unreg_algs:
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ goto unlock;
}
int qat_algs_unregister(void)
{
- int ret = 0;
-
mutex_lock(&algs_lock);
- if (--active_devs == 0)
- ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ if (--active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
mutex_unlock(&algs_lock);
- return ret;
+ return 0;
}
int qat_algs_init(void)
{
- crypto_get_default_rng();
return 0;
}
void qat_algs_exit(void)
{
- crypto_put_default_rng();
}
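
The restructured qat_algs_register() above now registers two independent sets (the ablkcipher crypto_algs, then the new aead_algs) behind a single device refcount, unwinding the first set if the second fails. Reduced to its control-flow skeleton, with stand-in names for the registration helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(reg_lock);
static unsigned int active_devs;

/* stand-ins for crypto_register_algs()/crypto_register_aeads() etc. */
static int register_set_a(void) { return 0; }
static int register_set_b(void) { return 0; }
static void unregister_set_a(void) { }

int example_register(void)
{
	int ret = 0;

	mutex_lock(&reg_lock);
	if (++active_devs != 1)
		goto unlock;	/* another device already registered them */

	ret = register_set_a();
	if (ret)
		goto unlock;

	ret = register_set_b();
	if (ret)
		goto unreg_a;	/* unwind set A before reporting failure */

unlock:
	mutex_unlock(&reg_lock);
	return ret;

unreg_a:
	unregister_set_a();
	goto unlock;
}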
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 000000000000..e87f51023ba4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,652 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include "qat_rsakey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+ union {
+ struct {
+ dma_addr_t m;
+ dma_addr_t e;
+ dma_addr_t n;
+ } enc;
+ struct {
+ dma_addr_t c;
+ dma_addr_t d;
+ dma_addr_t n;
+ } dec;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+ union {
+ struct {
+ dma_addr_t c;
+ } enc;
+ struct {
+ dma_addr_t m;
+ } dec;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+ char *n;
+ char *e;
+ char *d;
+ dma_addr_t dma_n;
+ dma_addr_t dma_e;
+ dma_addr_t dma_d;
+ unsigned int key_sz;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+ struct qat_rsa_input_params in;
+ struct qat_rsa_output_params out;
+ dma_addr_t phy_in;
+ dma_addr_t phy_out;
+ char *src_align;
+ struct icp_qat_fw_pke_request req;
+ struct qat_rsa_ctx *ctx;
+ int err;
+} __aligned(64);
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+ struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+ char *ptr = areq->dst;
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+ req->in.enc.m);
+ else
+ dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx->key_sz;
+	/* Need to set the correct length of the output */
+	while (areq->dst_len && !(*ptr)) {
+ areq->dst_len--;
+ ptr++;
+ }
+
+ if (areq->dst_len != req->ctx->key_sz)
+ memmove(areq->dst, ptr, areq->dst_len);
+
+ akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+ struct icp_qat_fw_pke_resp *resp = _resp;
+
+ qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_EP_512;
+ case 1024:
+ return PKE_RSA_EP_1024;
+ case 1536:
+ return PKE_RSA_EP_1536;
+ case 2048:
+ return PKE_RSA_EP_2048;
+ case 3072:
+ return PKE_RSA_EP_3072;
+ case 4096:
+ return PKE_RSA_EP_4096;
+ default:
+ return 0;
+	}
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP1_512;
+ case 1024:
+ return PKE_RSA_DP1_1024;
+ case 1536:
+ return PKE_RSA_DP1_1536;
+ case 2048:
+ return PKE_RSA_DP1_2048;
+ case 3072:
+ return PKE_RSA_DP1_3072;
+ case 4096:
+ return PKE_RSA_DP1_4096;
+ default:
+ return 0;
+	}
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->e))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.enc.e = ctx->dma_e;
+ qat_req->in.enc.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+	 * src can be any size in the valid range, but the HW expects it to
+	 * be the same size as the modulus n, so if it is smaller we need to
+	 * allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer.
+ */
+ if (req->src_len < ctx->key_sz) {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.enc.m,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ memcpy(qat_req->src_align + shift, req->src, req->src_len);
+ } else {
+ qat_req->src_align = NULL;
+ qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
+ DMA_TO_DEVICE);
+ }
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
+ DMA_FROM_DEVICE);
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ if (unlikely((!qat_req->src_align &&
+ dma_mapping_error(dev, qat_req->in.enc.m)) ||
+ dma_mapping_error(dev, qat_req->out.enc.c) ||
+ dma_mapping_error(dev, qat_req->phy_in) ||
+ dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.enc.c))
+ dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->d))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.dec.d = ctx->dma_d;
+ qat_req->in.dec.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+	 * src can be any size in the valid range, but the HW expects it to
+	 * be the same size as the modulus n, so if it is smaller we need to
+	 * allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer.
+ */
+ if (req->src_len < ctx->key_sz) {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.dec.c,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ memcpy(qat_req->src_align + shift, req->src, req->src_len);
+ } else {
+ qat_req->src_align = NULL;
+ qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
+ DMA_TO_DEVICE);
+ }
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
+ DMA_FROM_DEVICE);
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ if (unlikely((!qat_req->src_align &&
+ dma_mapping_error(dev, qat_req->in.dec.c)) ||
+ dma_mapping_error(dev, qat_req->out.dec.m) ||
+ dma_mapping_error(dev, qat_req->phy_in) ||
+ dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.dec.m))
+ dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+	while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ ctx->key_sz = vlen;
+ ret = -EINVAL;
+ /* In FIPS mode only allow key size 2K & 3K */
+	/* In FIPS mode only allow 2048 and 3072 bit keys */
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+ /* invalid key size provided */
+ if (!qat_rsa_enc_fn_id(ctx->key_sz))
+ goto err;
+
+ ret = -ENOMEM;
+ ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ if (!ctx->n)
+ goto err;
+
+ memcpy(ctx->n, ptr, ctx->key_sz);
+ return 0;
+err:
+ ctx->key_sz = 0;
+ ctx->n = NULL;
+ return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+
+	while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->e = NULL;
+ return -EINVAL;
+ }
+
+ ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	if (!ctx->e)
+		return -ENOMEM;
+ memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+}
+
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+	while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ ret = -EINVAL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ goto err;
+
+	/* In FIPS mode only allow 2048 and 3072 bit keys */
+ if (fips_enabled && (vlen != 256 && vlen != 384)) {
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+
+ ret = -ENOMEM;
+ ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	if (!ctx->d)
+ goto err;
+
+ memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+err:
+ ctx->d = NULL;
+ return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ int ret;
+
+ /* Free the old key if any */
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+ ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ if (!ctx->n || !ctx->e) {
+ /* invalid key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+free:
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ ctx->d = NULL;
+ }
+ if (ctx->e) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ ctx->e = NULL;
+ }
+ if (ctx->n) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ ctx->n = NULL;
+ ctx->key_sz = 0;
+ }
+ return ret;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->key_sz = 0;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+ qat_crypto_put_instance(ctx->inst);
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+ .encrypt = qat_rsa_enc,
+ .decrypt = qat_rsa_dec,
+ .sign = qat_rsa_dec,
+ .verify = qat_rsa_enc,
+ .setkey = qat_rsa_setkey,
+ .init = qat_rsa_init_tfm,
+ .exit = qat_rsa_exit_tfm,
+ .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "qat-rsa",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+ },
+};
+
+int qat_asym_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ }
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ crypto_unregister_akcipher(&rsa);
+ mutex_unlock(&algs_lock);
+}
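
qat_asym_algs.c handles RSA operands as fixed-width big-endian integers throughout: inputs shorter than the modulus are right-aligned into a key_sz buffer (the shift/memcpy in qat_rsa_enc() and qat_rsa_dec()), and leading zero bytes are stripped back off the firmware output in qat_rsa_cb(). The two halves of that convention as a standalone sketch (pad_to_key_sz and strip_leading_zeros are hypothetical helpers, not driver symbols):

#include <linux/string.h>
#include <linux/types.h>

/* Right-align a src_len-byte big-endian value into a key_sz buffer */
static void pad_to_key_sz(u8 *dst, size_t key_sz,
			  const u8 *src, size_t src_len)
{
	memset(dst, 0, key_sz - src_len);
	memcpy(dst + (key_sz - src_len), src, src_len);
}

/* Trim leading zero bytes off a result; returns the trimmed length */
static size_t strip_leading_zeros(const u8 **buf, size_t len)
{
	while (len && !**buf) {
		(*buf)++;
		len--;
	}
	return len;
}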
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3bd705ca5973..07c2f9f9d1fc 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
if (inst->pke_rx)
adf_remove_ring(inst->pke_rx);
- if (inst->rnd_tx)
- adf_remove_ring(inst->rnd_tx);
-
- if (inst->rnd_rx)
- adf_remove_ring(inst->rnd_rx);
-
list_del(list_ptr);
kfree(inst);
}
@@ -109,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
+
if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
dev_to_node(&GET_DEV(accel_dev)) < 0) &&
- adf_dev_started(accel_dev))
+ adf_dev_started(accel_dev) &&
+ !list_empty(&accel_dev->crypto_list))
break;
accel_dev = NULL;
}
@@ -158,7 +154,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
INIT_LIST_HEAD(&accel_dev->crypto_list);
strlcpy(key, ADF_NUM_CY, sizeof(key));
-
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
return -EFAULT;
@@ -187,7 +182,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
if (kstrtoul(val, 10, &num_msg_sym))
goto err;
+
num_msg_sym = num_msg_sym >> 1;
+
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
goto err;
@@ -202,11 +199,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
msg_size, key, NULL, 0, &inst->sym_tx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, NULL, 0, &inst->rnd_tx))
- goto err;
-
msg_size = msg_size >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
@@ -220,15 +212,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
&inst->sym_rx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
- &inst->rnd_rx))
- goto err;
-
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
+ msg_size, key, qat_alg_asym_callback, 0,
&inst->pke_rx))
goto err;
}
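
With the rnd rings gone, each crypto instance keeps only a sym and a pke ring pair, and asymmetric responses are now routed to qat_alg_asym_callback(). Both callbacks rely on the same opaque-field round trip: the request pointer rides through firmware inside the message and comes back in the response, so completion needs no lookup table. A sketch of that recovery step, assuming the icp_qat_fw_pke.h definitions above:

#include <crypto/internal/akcipher.h>

static void example_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct akcipher_request *areq =
		(void *)(__force long)resp->opaque;

	/* unmap DMA buffers and fix up areq->dst_len here, then: */
	akcipher_request_complete(areq, 0);
}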
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index d503007b49e6..dc0273fe3620 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -57,8 +57,6 @@ struct qat_crypto_instance {
struct adf_etr_ring_data *sym_rx;
struct adf_etr_ring_data *pke_tx;
struct adf_etr_ring_data *pke_rx;
- struct adf_etr_ring_data *rnd_tx;
- struct adf_etr_ring_data *rnd_rx;
struct adf_accel_dev *accel_dev;
struct list_head list;
unsigned long state;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 274ff7e9de6e..8e711d1c3084 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET 0x800
#define ICP_DH895XCC_EP_OFFSET 0x3a000
-#define ICP_DH895XCC_PMISC_BAR 1
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
unsigned char ae;
@@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_loader_handle *handle;
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *bar =
+ struct adf_bar *misc_bar =
&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+ struct adf_bar *sram_bar =
+ &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_CAP_OFFSET;
- handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_AE_OFFSET;
- handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+ handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
+ ICP_DH895XCC_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
-
+ handle->hal_sram_addr_v = sram_bar->virt_addr;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
new file mode 100644
index 000000000000..97b0e02b600a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+ n INTEGER ({ qat_rsa_get_n }),
+ e INTEGER ({ qat_rsa_get_e }),
+ d INTEGER ({ qat_rsa_get_d })
+}
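
The grammar above is compiled by the kernel's build-time ASN.1 compiler into a decoder that invokes the named action for each parsed INTEGER. Those actions follow the kernel's asn1_action_t signature; a sketch of one, shown standalone with a dummy buffer (the real qat_rsa_get_n is implemented elsewhere in the qat_common code, outside this hunk):

    #include <stddef.h>
    #include <stdio.h>

    /* asn1_action_t shape: called with the raw contents of the matched field */
    int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                      const void *value, size_t vlen)
    {
        printf("modulus n: %zu-byte INTEGER\n", vlen);
        return 0;
    }

    int main(void)
    {
        unsigned char n[] = { 0x00, 0xc3, 0x5a };    /* dummy payload */

        return qat_rsa_get_n(NULL, 0, 0x02 /* INTEGER */, n, sizeof(n));
    }
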
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 1e27f9f7fddf..c48f181e8941 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
- unsigned int i;
- struct icp_qat_uof_memvar_attr *mem_val_attr;
-
- mem_val_attr =
- (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
- sizeof(struct icp_qat_uof_initmem));
-
switch (init_mem->region) {
- case ICP_QAT_UOF_SRAM_REGION:
- if ((init_mem->addr + init_mem->num_in_bytes) >
- ICP_DH895XCC_PESRAM_BAR_SIZE) {
- pr_err("QAT: initmem on SRAM is out of range");
- return -EINVAL;
- }
- for (i = 0; i < init_mem->val_attr_num; i++) {
- qat_uclo_wr_sram_by_words(handle,
- init_mem->addr +
- mem_val_attr->offset_in_byte,
- &mem_val_attr->value, 4);
- mem_val_attr++;
- }
- break;
case ICP_QAT_UOF_LMEM_REGION:
if (qat_uclo_init_lmem_seg(handle, init_mem))
return -EINVAL;
@@ -990,6 +969,12 @@ out_err:
return -EFAULT;
}
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+}
+
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
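
The new qat_uclo_wr_mimage() rounds the image size up to a whole number of 32-bit words before writing, since qat_uclo_wr_sram_by_words() moves data in 4-byte units. ALIGN() here is the kernel's usual power-of-two round-up; a standalone check of the arithmetic:

    #include <stdio.h>

    /* equivalent to the kernel's ALIGN() for power-of-two alignments */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        printf("%d %d %d\n", ALIGN(5, 4), ALIGN(8, 4), ALIGN(9, 4));    /* 8 8 12 */
        return 0;
    }
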
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 25171c557043..8c79c543740f 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o \
adf_isr.o \
- adf_dh895xcc_hw_data.o \
- adf_hw_arbiter.o \
- qat_admin.o \
- adf_admin.o
+ adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
deleted file mode 100644
index e4666065c399..000000000000
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <adf_accel_devices.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
-
-#define ADF_ADMINMSG_LEN 32
-
-struct adf_admin_comms {
- dma_addr_t phy_addr;
- void *virt_addr;
- void __iomem *mailbox_addr;
- struct mutex lock; /* protects adf_admin_comms struct */
-};
-
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
- int offset = ae * ADF_ADMINMSG_LEN * 2;
- void __iomem *mailbox = admin->mailbox_addr;
- int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
- int times, received;
-
- mutex_lock(&admin->lock);
-
- if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
- mutex_unlock(&admin->lock);
- return -EAGAIN;
- }
-
- memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
- ADF_CSR_WR(mailbox, mb_offset, 1);
- received = 0;
- for (times = 0; times < 50; times++) {
- msleep(20);
- if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
- received = 1;
- break;
- }
- }
- if (received)
- memcpy(out, admin->virt_addr + offset +
- ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
- else
- dev_err(&GET_DEV(accel_dev),
- "Failed to send admin msg to accelerator\n");
-
- mutex_unlock(&admin->lock);
- return received ? 0 : -EFAULT;
-}
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin;
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
- void __iomem *csr = pmisc->virt_addr;
- void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
- uint64_t reg_val;
-
- admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- dev_to_node(&GET_DEV(accel_dev)));
- if (!admin)
- return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
- if (!admin->virt_addr) {
- dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
- kfree(admin);
- return -ENOMEM;
- }
- reg_val = (uint64_t)admin->phy_addr;
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
- mutex_init(&admin->lock);
- admin->mailbox_addr = mailbox;
- accel_dev->admin = admin;
- return 0;
-}
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
-
- if (!admin)
- return;
-
- if (admin->virt_addr)
- dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- admin->virt_addr, admin->phy_addr);
-
- mutex_destroy(&admin->lock);
- kfree(admin);
- accel_dev->admin = NULL;
-}
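
The deleted file implemented the admin-message doorbell: copy the 32-byte request into a shared DMA page at the AE's slot, write 1 to that AE's mailbox CSR, then poll up to 50 times at 20 ms (one second total) for firmware to clear it, with the response landing in the adjacent 32 bytes. The hooks assigned later in this patch (hw_data->init_admin_comms and friends) keep the same names, so the implementation evidently moved into qat_common rather than going away. A user-space model of just the polling half, with a stub in place of the CSR read:

    #include <stdio.h>
    #include <unistd.h>

    static int reads;

    static int csr_read_mailbox(void)
    {
        return ++reads < 3;    /* stub: firmware "acks" on the third read */
    }

    static int put_admin_msg_sync(void)
    {
        int times;

        for (times = 0; times < 50; times++) {
            usleep(20 * 1000);    /* msleep(20) */
            if (csr_read_mailbox() == 0)
                return 0;    /* response is ready */
        }
        return -1;    /* the driver returns -EFAULT here */
    }

    int main(void)
    {
        printf("admin msg %s\n", put_admin_msg_sync() ? "timed out" : "acked");
        return 0;
    }
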
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index b1386922d7a2..ff54257eced4 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -45,8 +45,9 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
-#include "adf_common_drv.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
return ADF_DH895XCC_ETR_BAR;
}
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_SRAM_BAR;
+}
+
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
@@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
}
}
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+ return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+ return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- ADF_DH895XCC_SMIA0_MASK);
+ accel_dev->pf.vf_info ? 0 :
+ GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
ADF_DH895XCC_SMIA1_MASK);
}
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcc_class;
hw_data->instance_id = dh895xcc_class.instances++;
hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
- hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
@@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
- hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
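
adf_enable_ints() now derives the bundle-interrupt mask from the bank count rather than a fixed SMIA0 constant, and masks all bundles off when SR-IOV is active (pf.vf_info set), since the VFs then own the bundle interrupts. GENMASK_ULL(h, l) sets bits l..h; a quick standalone expansion, assuming the usual kernel definition and the DH895xCC's 32 banks:

    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        printf("0x%llx\n", GENMASK_ULL(32 - 1, 0));    /* 0xffffffff */
        return 0;
    }
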
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 25269a9f24a2..88dffb297346 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,7 @@
#define ADF_DH895x_HW_DATA_H_
/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
@@ -79,10 +80,11 @@
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-/* Admin Messages Registers */
-#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
-#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
-#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
-#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C)
+#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8)
+#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_mmp.bin"
#endif
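
The admin mailbox registers give way to the SR-IOV doorbell block: one PF2VF CSR and one VINTMSK CSR per VF, laid out as 4-byte strides inside the 0x3A000 endpoint window. Expanding the two macros above for the first few VFs:

    #include <stdio.h>

    #define PF2VF_OFFSET(i)     (0x3A000 + 0x280 + ((i) * 0x04))
    #define VINTMSK_OFFSET(i)   (0x3A000 + 0x200 + ((i) * 0x04))

    int main(void)
    {
        int i;

        for (i = 0; i < 3; i++)
            printf("vf%d: pf2vf 0x%05x vintmsk 0x%05x\n",
                   i, PF2VF_OFFSET(i), VINTMSK_OFFSET(i));
        return 0;
    }
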
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 1bde45b7a3c5..f8dd14f232c8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = adf_driver_name,
.probe = adf_probe,
- .remove = adf_remove
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
};
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
- adf_dev_shutdown(accel_dev);
-
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
}
if (accel_dev->hw_device) {
- switch (accel_dev->hw_device->pci_dev_id) {
+ switch (accel_pci_dev->pci_dev->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
break;
@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
break;
}
kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
- adf_devmgr_rm_dev(accel_dev);
- pci_release_regions(accel_pci_dev->pci_dev);
- pci_disable_device(accel_pci_dev->pci_dev);
- kfree(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
@@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 4;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
@@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 12;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
@@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret;
+ int ret, bar_mask;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev)) {
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
@@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
return -ENODEV;
}
- accel_pci_dev = &accel_dev->accel_pci_dev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
&hw_data->fuses);
@@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- accel_pci_dev->pci_dev = pdev;
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
@@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, hw_data->instance_id);
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir\n");
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
@@ -313,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
@@ -324,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_request_regions(pdev, adf_driver_name)) {
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
}
/* Read accelerator capabilities mask */
@@ -332,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
- for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
- struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
- bar_nr = i * 2;
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
- dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
}
pci_set_master(pdev);
@@ -352,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_err_free_reg;
}
ret = adf_dev_configure(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
- if (ret) {
- adf_dev_stop(accel_dev);
- goto out_err;
- }
+ if (ret)
+ goto out_err_dev_stop;
- return 0;
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
return ret;
}
@@ -391,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev)
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
- if (qat_admin_register())
- return -EFAULT;
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
@@ -411,7 +417,6 @@ static int __init adfdrv_init(void)
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
- qat_admin_unregister();
}
module_init(adfdrv_init);
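
The probe path replaces the single catch-all out_err label with an ordered unwind ladder, so each failure point releases exactly what was acquired before it (stop, then shutdown, then release regions, then disable the device), and freeing the accel_dev moves out of adf_cleanup_accel() to the caller. The control flow, reduced to a standalone sketch with stubbed steps:

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("%s\n", name);
        return fail ? -1 : 0;
    }

    static int probe(int fail_at_start)
    {
        int ret;

        ret = step("configure", 0);
        if (ret)
            goto out_err_free_reg;
        ret = step("init", 0);
        if (ret)
            goto out_err_dev_shutdown;
        ret = step("start", fail_at_start);
        if (ret)
            goto out_err_dev_stop;
        return 0;

    out_err_dev_stop:
        step("stop", 0);
    out_err_dev_shutdown:
        step("shutdown", 0);
    out_err_free_reg:
        step("release regions, disable device", 0);
        return ret;
    }

    int main(void)
    {
        return probe(1) ? 1 : 0;
    }
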
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
index a2fbb6ce75cd..85ff245bd1d8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
uint32_t const **arb_map_config);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out);
-int qat_admin_register(void);
-int qat_admin_unregister(void);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 0d03c109c2d3..5570f78795c1 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -59,21 +59,30 @@
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
- int i;
-
- for (i = 0; i < msix_num_entries; i++)
- pci_dev_info->msix_entries.entries[i].entry = i;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled, add entries for each bank */
+ if (!accel_dev->pf.vf_info) {
+ int i;
+
+ msix_num_entries += hw_data->num_banks;
+ for (i = 0; i < msix_num_entries; i++)
+ pci_dev_info->msix_entries.entries[i].entry = i;
+ } else {
+ pci_dev_info->msix_entries.entries[0].entry =
+ hw_data->num_banks;
+ }
if (pci_enable_msix_exact(pci_dev_info->pci_dev,
pci_dev_info->msix_entries.entries,
msix_num_entries)) {
- dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
return -EFAULT;
}
return 0;
@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
- dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
- accel_dev->accel_id);
- return IRQ_HANDLED;
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info) {
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+ u32 vf_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+ 0x0000FFFF) << 16) |
+ ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+ 0x01FFFE00) >> 9);
+
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ bool irq_handled = false;
+ int i;
+
+ /* Disable VF2PF interrupts for VFs with pending ints */
+ adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+ /*
+ * Schedule tasklets to handle VF2PF interrupt BHs
+ * unless the VF is malicious and is attempting to
+ * flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, (const unsigned long *)&vf_mask,
+ (sizeof(vf_mask) * BITS_PER_BYTE)) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr + 1);
+ continue;
+ }
+
+ /* Tasklet will re-enable ints from this VF */
+ tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+ irq_handled = true;
+ }
+
+ if (irq_handled)
+ return IRQ_HANDLED;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+ accel_dev->accel_id);
+
+ return IRQ_NONE;
}
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int ret, i;
+ int ret, i = 0;
char *name;
- /* Request msix irq for all banks */
- for (i = 0; i < hw_data->num_banks; i++) {
- struct adf_etr_bank_data *bank = &etr_data->banks[i];
- unsigned int cpu, cpus = num_online_cpus();
-
- name = *(pci_dev_info->msix_entries.names + i);
- snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
- "qat%d-bundle%d", accel_dev->accel_id, i);
- ret = request_irq(msixe[i].vector,
- adf_msix_isr_bundle, 0, name, bank);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d for %s\n",
- msixe[i].vector, name);
- return ret;
+ /* Request msix irq for all banks unless SR-IOV enabled */
+ if (!accel_dev->pf.vf_info) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ struct adf_etr_bank_data *bank = &etr_data->banks[i];
+ unsigned int cpu, cpus = num_online_cpus();
+
+ name = *(pci_dev_info->msix_entries.names + i);
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-bundle%d", accel_dev->accel_id, i);
+ ret = request_irq(msixe[i].vector,
+ adf_msix_isr_bundle, 0, name, bank);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to enable irq %d for %s\n",
+ msixe[i].vector, name);
+ return ret;
+ }
+
+ cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+ i) % cpus;
+ irq_set_affinity_hint(msixe[i].vector,
+ get_cpu_mask(cpu));
}
-
- cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
- irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
}
/* Request msix irq for AE */
@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int i;
+ int i = 0;
- for (i = 0; i < hw_data->num_banks; i++) {
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, &etr_data->banks[i]);
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ irq_set_affinity_hint(msixe[i].vector, NULL);
+ free_irq(msixe[i].vector, &etr_data->banks[i]);
+ }
}
irq_set_affinity_hint(msixe[i].vector, NULL);
free_irq(msixe[i].vector, accel_dev);
@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
char **names;
struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+ if (!accel_dev->pf.vf_info)
+ msix_num_entries += hw_data->num_banks;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
if (!(*(names + i)))
goto err;
}
+ accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
accel_dev->accel_pci_dev.msix_entries.entries = entries;
accel_dev->accel_pci_dev.msix_entries.names = names;
return 0;
@@ -198,13 +267,11 @@ err:
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
char **names = accel_dev->accel_pci_dev.msix_entries.names;
int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < msix_num_entries; i++)
+ for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
kfree(*(names + i));
kfree(names);
}
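
The rewritten AE interrupt handler demultiplexes VF-to-PF doorbells from two error-source CSRs: ERRSOU3 carries VFs 0-15 in bits 9-24 and ERRSOU5 carries VFs 16-31 in bits 0-15, spliced into one 32-bit per-VF mask before the per-VF tasklets are scheduled. A standalone check of that splice with sample register values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t errsou3 = 1u << 9;    /* VF0 pending */
        uint32_t errsou5 = 1u << 0;    /* VF16 pending */
        uint32_t vf_mask;

        vf_mask = ((errsou5 & 0x0000FFFF) << 16) |
                  ((errsou3 & 0x01FFFE00) >> 9);
        printf("vf_mask = 0x%08x\n", vf_mask);    /* 0x00010001 */
        return 0;
    }
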
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 000000000000..85399fcbbad4
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o \
+ adf_isr.o \
+ adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 000000000000..a9a27eff41fb
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,172 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+ .name = ADF_DH895XCCVF_DEVICE_NAME,
+ .type = DEV_DH895XCCVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcciov_class;
+ hw_data->instance_id = dh895xcciov_class.instances++;
+ hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+ hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 000000000000..8f6babfef629
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,68 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
+#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0)
+
+#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
+#define ADF_DH895XCC_VINTSOU_BUN BIT(0)
+#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1)
+
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
+#endif
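
Unlike the PF, which indexes these doorbell CSRs per VF, each VF sees a single fixed set at 0x200/0x204/0x208 in its own PMISC BAR, and one MSI vector covers both interrupt sources. The VINTSOU bits defined here drive the dispatch in the ISR added below: bit 0 for the ring bundle, bit 1 for a PF2VF message. A minimal model of that dispatch:

    #include <stdio.h>

    #define VINTSOU_BUN     (1u << 0)    /* ADF_DH895XCC_VINTSOU_BUN */
    #define VINTSOU_PF2VF   (1u << 1)    /* ADF_DH895XCC_VINTSOU_PF2VF */

    static const char *classify(unsigned int v_int)
    {
        if (v_int & VINTSOU_PF2VF)
            return "pf2vf message";
        if (v_int & VINTSOU_BUN)
            return "bundle (ring) interrupt";
        return "spurious";    /* the ISR returns IRQ_NONE */
    }

    int main(void)
    {
        printf("%s\n", classify(VINTSOU_PF2VF));
        printf("%s\n", classify(VINTSOU_BUN));
        printf("%s\n", classify(0));
        return 0;
    }
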
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 000000000000..789426f21882
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,393 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = adf_driver_name,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_cfg_dev_remove(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ unsigned long val, bank = 0;
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ goto err;
+ if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&bank, ADF_DEC))
+ goto err;
+
+ val = bank;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
+
+ val = 128;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ (int)bank);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 1;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
+ return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+ int ret, bar_mask;
+
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+ if (!accel_dev->debugfs_dir) {
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ ret = -EFAULT;
+ goto out_err_disable;
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ }
+
+ if (pci_request_regions(pdev, adf_driver_name)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.iov_msg_completion);
+
+ ret = adf_dev_configure(accel_dev);
+ if (ret)
+ goto out_err_free_reg;
+
+ ret = adf_dev_init(accel_dev);
+ if (ret)
+ goto out_err_dev_shutdown;
+
+ ret = adf_dev_start(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ if (adf_dev_stop(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
index 55b7a8e48bad..e270e4a63d14 100644
--- a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
@@ -44,64 +44,14 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <icp_qat_fw_init_admin.h>
+#ifndef ADF_DH895xVF_DRV_H_
+#define ADF_DH895xVF_DRV_H_
#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include "adf_drv.h"
-
-static struct service_hndl qat_admin;
-
-static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct icp_qat_fw_init_admin_req req;
- struct icp_qat_fw_init_admin_resp resp;
- int i;
-
- memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
- req.init_admin_cmd_id = cmd;
- for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
- memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
- if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
- resp.init_resp_hdr.status)
- return -EFAULT;
- }
- return 0;
-}
-
-static int qat_admin_start(struct adf_accel_dev *accel_dev)
-{
- return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
-}
-
-static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
- enum adf_event event)
-{
- int ret;
-
- switch (event) {
- case ADF_EVENT_START:
- ret = qat_admin_start(accel_dev);
- break;
- case ADF_EVENT_STOP:
- case ADF_EVENT_INIT:
- case ADF_EVENT_SHUTDOWN:
- default:
- ret = 0;
- }
- return ret;
-}
-
-int qat_admin_register(void)
-{
- memset(&qat_admin, 0, sizeof(struct service_hndl));
- qat_admin.event_hld = qat_admin_event_handler;
- qat_admin.name = "qat_admin";
- qat_admin.admin = 1;
- return adf_service_register(&qat_admin);
-}
-
-int qat_admin_unregister(void)
-{
- return adf_service_unregister(&qat_admin);
-}
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
new file mode 100644
index 000000000000..87c5d8adb125
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
@@ -0,0 +1,258 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_drv.h"
+#include "adf_dh895xccvf_hw_data.h"
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+ if (stat) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to enable MSI interrupts\n");
+ return stat;
+ }
+
+ accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+ if (!accel_dev->vf.irq_name)
+ return -ENOMEM;
+
+ return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ kfree(accel_dev->vf.irq_name);
+ pci_disable_msi(pdev);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 msg;
+
+ /* Read the message from PF */
+ msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+
+ if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) PF2VF messages */
+ goto err;
+
+ switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting msg received from PF 0x%x\n", msg);
+ adf_dev_stop(accel_dev);
+ break;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Version resp received from PF 0x%x\n", msg);
+ accel_dev->vf.pf_version =
+ (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+ accel_dev->vf.compatible =
+ (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ complete(&accel_dev->vf.iov_msg_completion);
+ break;
+ default:
+ goto err;
+ }
+
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
+ ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return;
+err:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+ msg);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+ (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+ mutex_init(&accel_dev->vf.vf2pf_lock);
+ return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+ tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+ mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+ struct adf_accel_dev *accel_dev = privdata;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 v_int;
+
+ /* Read VF INT source CSR to determine the source of VF interrupt */
+ v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+
+ /* Check for PF2VF interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+ /* Disable PF to VF interrupt */
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ /* Schedule tasklet to handle interrupt BH */
+ tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+ return IRQ_HANDLED;
+ }
+
+ /* Check bundle interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+ /* Disable Flag and Coalesce Ring Interrupts */
+ WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+ 0);
+ tasklet_hi_schedule(&bank->resp_handler);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ unsigned int cpu;
+ int ret;
+
+ snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+ (void *)accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+ accel_dev->vf.irq_name);
+ return ret;
+ }
+ cpu = accel_dev->accel_id % num_online_cpus();
+ irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+ return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+ (unsigned long)priv_data->banks);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_disable(&priv_data->banks[0].resp_handler);
+ tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, (void *)accel_dev);
+ adf_cleanup_bh(accel_dev);
+ adf_cleanup_pf2vf_bh(accel_dev);
+ adf_disable_msi(accel_dev);
+}
+
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ if (adf_enable_msi(accel_dev))
+ goto err_out;
+
+ if (adf_setup_pf2vf_bh(accel_dev))
+ goto err_out;
+
+ if (adf_setup_bh(accel_dev))
+ goto err_out;
+
+ if (adf_request_msi_irq(accel_dev))
+ goto err_out;
+
+ return 0;
+err_out:
+ adf_vf_isr_resource_free(accel_dev);
+ return -EFAULT;
+}
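+
+/*
+ * Editorial note (illustrative, not part of the patch): the allocation
+ * order above is MSI -> PF2VF tasklet -> response tasklet -> IRQ, and
+ * adf_vf_isr_resource_free() releases the same resources in reverse
+ * order, which is why every failure above can branch to the single
+ * common err_out label instead of needing per-step unwind labels.
+ */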
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 397a500b3d8a..1c19e44c3146 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1516,7 +1516,7 @@ static int sahara_probe(struct platform_device *pdev)
}
/* Allocate HW descriptors */
- dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+ dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
&dev->hw_phys_desc[0], GFP_KERNEL);
if (!dev->hw_desc[0]) {
@@ -1528,34 +1528,31 @@ static int sahara_probe(struct platform_device *pdev)
sizeof(struct sahara_hw_desc);
/* Allocate space for iv and key */
- dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+ dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
&dev->key_phys_base, GFP_KERNEL);
if (!dev->key_base) {
dev_err(&pdev->dev, "Could not allocate memory for key\n");
- err = -ENOMEM;
- goto err_key;
+ return -ENOMEM;
}
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
/* Allocate space for context: largest digest + message length field */
- dev->context_base = dma_alloc_coherent(&pdev->dev,
+ dev->context_base = dmam_alloc_coherent(&pdev->dev,
SHA256_DIGEST_SIZE + 4,
&dev->context_phys_base, GFP_KERNEL);
if (!dev->context_base) {
dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
- err = -ENOMEM;
- goto err_key;
+ return -ENOMEM;
}
/* Allocate space for HW links */
- dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+ dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
&dev->hw_phys_link[0], GFP_KERNEL);
if (!dev->hw_link[0]) {
dev_err(&pdev->dev, "Could not allocate hw links\n");
- err = -ENOMEM;
- goto err_link;
+ return -ENOMEM;
}
for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
@@ -1572,15 +1569,14 @@ static int sahara_probe(struct platform_device *pdev)
dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
if (IS_ERR(dev->kthread)) {
- err = PTR_ERR(dev->kthread);
- goto err_link;
+ return PTR_ERR(dev->kthread);
}
init_completion(&dev->dma_completion);
err = clk_prepare_enable(dev->clk_ipg);
if (err)
- goto err_link;
+ return err;
err = clk_prepare_enable(dev->clk_ahb);
if (err)
goto clk_ipg_disable;
@@ -1620,25 +1616,11 @@ static int sahara_probe(struct platform_device *pdev)
return 0;
err_algs:
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
- dev->hw_link[0], dev->hw_phys_link[0]);
kthread_stop(dev->kthread);
dev_ptr = NULL;
clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
clk_disable_unprepare(dev->clk_ipg);
-err_link:
- dma_free_coherent(&pdev->dev,
- 2 * AES_KEYSIZE_128,
- dev->key_base, dev->key_phys_base);
- dma_free_coherent(&pdev->dev,
- SHA256_DIGEST_SIZE,
- dev->context_base, dev->context_phys_base);
-err_key:
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
- dev->hw_desc[0], dev->hw_phys_desc[0]);
return err;
}
@@ -1647,16 +1629,6 @@ static int sahara_remove(struct platform_device *pdev)
{
struct sahara_dev *dev = platform_get_drvdata(pdev);
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
- dev->hw_link[0], dev->hw_phys_link[0]);
- dma_free_coherent(&pdev->dev,
- 2 * AES_KEYSIZE_128,
- dev->key_base, dev->key_phys_base);
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
- dev->hw_desc[0], dev->hw_phys_desc[0]);
-
kthread_stop(dev->kthread);
sahara_unregister_algs(dev);
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
new file mode 100644
index 000000000000..8f4c7a273141
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
+sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
new file mode 100644
index 000000000000..e070c316e8b7
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -0,0 +1,542 @@
+/*
+ * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128-, 192- and 256-bit
+ * keysizes in CBC and ECB mode.
+ * It also adds support for DES and 3DES in CBC and ECB mode.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+
+static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+ u32 mode = ctx->mode;
+ /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ u32 rx_cnt = SS_RX_DEFAULT;
+ u32 tx_cnt = 0;
+ u32 spaces;
+ u32 v;
+ int i, err = 0;
+ unsigned int ileft = areq->nbytes;
+ unsigned int oleft = areq->nbytes;
+ unsigned int todo;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ if (!areq->info) {
+ dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+ return -EINVAL;
+ }
+
+ if (!areq->src || !areq->dst) {
+ dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ss->slock);
+
+ for (i = 0; i < op->keylen; i += 4)
+ writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->info + i * 4);
+ writel(v, ss->base + SS_IV0 + i * 4);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ sg_miter_next(&mo);
+ if (!mi.addr || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+
+ ileft = areq->nbytes / 4;
+ oleft = areq->nbytes / 4;
+ oi = 0;
+ oo = 0;
+ do {
+ todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ if (todo > 0) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+ oi += todo * 4;
+ }
+ if (oi == mi.length) {
+ sg_miter_next(&mi);
+ oi = 0;
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+ todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ if (todo > 0) {
+ oleft -= todo;
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oo += todo * 4;
+ }
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (mo.length > 0);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = readl(ss->base + SS_IV0 + i * 4);
+ *(u32 *)(areq->info + i * 4) = v;
+ }
+ }
+
+release_ss:
+ sg_miter_stop(&mi);
+ sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
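+
+/*
+ * Editorial worked example for the min3() accounting above (assuming a
+ * single 16-byte SG on each side): ileft = oleft = 4 words and rx_cnt
+ * starts at SS_RX_DEFAULT (32), so the first pass pushes
+ * min3(32, 4, 4) = 4 words into SS_RXFIFO; once SS_FCSR reports TX
+ * words available, the same min3() rule drains them back into the
+ * destination SG.
+ */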
+
+/* Generic function that supports SGs whose sizes are not a multiple of 4 */
+static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ int no_chunk = 1;
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+ u32 mode = ctx->mode;
+ /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ u32 rx_cnt = SS_RX_DEFAULT;
+ u32 tx_cnt = 0;
+ u32 v;
+ u32 spaces;
+ int i, err = 0;
+ unsigned int ileft = areq->nbytes;
+ unsigned int oleft = areq->nbytes;
+ unsigned int todo;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+ char buf[4 * SS_RX_MAX]; /* buffer for linearizing the src SG */
+ char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the dst SG */
+ unsigned int ob = 0; /* offset in buf */
+ unsigned int obo = 0; /* offset in bufo */
+ unsigned int obl = 0; /* length of data in bufo */
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ if (!areq->info) {
+ dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+ return -EINVAL;
+ }
+
+ if (!areq->src || !areq->dst) {
+ dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+ return -EINVAL;
+ }
+
+ /*
+ * if every SG has a size that is a multiple of 4,
+ * we can use the optimized SS function
+ */
+ while (in_sg && no_chunk == 1) {
+ if ((in_sg->length % 4) != 0)
+ no_chunk = 0;
+ in_sg = sg_next(in_sg);
+ }
+ while (out_sg && no_chunk == 1) {
+ if ((out_sg->length % 4) != 0)
+ no_chunk = 0;
+ out_sg = sg_next(out_sg);
+ }
+
+ if (no_chunk == 1)
+ return sun4i_ss_opti_poll(areq);
+
+ spin_lock_bh(&ss->slock);
+
+ for (i = 0; i < op->keylen; i += 4)
+ writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->info + i * 4);
+ writel(v, ss->base + SS_IV0 + i * 4);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ sg_miter_next(&mo);
+ if (!mi.addr || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+ ileft = areq->nbytes;
+ oleft = areq->nbytes;
+ oi = 0;
+ oo = 0;
+
+ while (oleft > 0) {
+ if (ileft > 0) {
+ /*
+ * todo is the number of consecutive 4-byte words that
+ * we can read from the current SG
+ */
+ todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ if (todo > 0 && ob == 0) {
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+ todo);
+ ileft -= todo * 4;
+ oi += todo * 4;
+ } else {
+ /*
+ * Not enough consecutive bytes, so we need to
+ * linearize them in buf; here todo is in bytes.
+ * After that copy, once we have a multiple of 4,
+ * we must be able to write all of buf in one
+ * pass, which is why we min() with rx_cnt.
+ */
+ todo = min3(rx_cnt * 4 - ob, ileft,
+ mi.length - oi);
+ memcpy(buf + ob, mi.addr + oi, todo);
+ ileft -= todo;
+ oi += todo;
+ ob += todo;
+ if (ob % 4 == 0) {
+ writesl(ss->base + SS_RXFIFO, buf,
+ ob / 4);
+ ob = 0;
+ }
+ }
+ if (oi == mi.length) {
+ sg_miter_next(&mi);
+ oi = 0;
+ }
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+ dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+ mode,
+ oi, mi.length, ileft, areq->nbytes, rx_cnt,
+ oo, mo.length, oleft, areq->nbytes, tx_cnt,
+ todo, ob);
+
+ if (tx_cnt == 0)
+ continue;
+ /* todo in 4bytes word */
+ todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ if (todo > 0) {
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oleft -= todo * 4;
+ oo += todo * 4;
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } else {
+ /*
+ * Read obl bytes into bufo; we read as much as
+ * possible in order to empty the device.
+ */
+ readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+ obl = tx_cnt * 4;
+ obo = 0;
+ do {
+ /*
+ * How many bytes can we copy? No more than the
+ * remaining SG size and no more than the remaining
+ * buffer; there is no need to test against oleft.
+ */
+ todo = min(mo.length - oo, obl - obo);
+ memcpy(mo.addr + oo, bufo + obo, todo);
+ oleft -= todo;
+ obo += todo;
+ oo += todo;
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (obo < obl);
+ /* bufo must be fully used here */
+ }
+ }
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = readl(ss->base + SS_IV0 + i * 4);
+ *(u32 *)(areq->info + i * 4) = v;
+ }
+ }
+
+release_ss:
+ sg_miter_stop(&mi);
+ sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+
+ return err;
+}
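+
+/*
+ * Editorial worked example of the linearization above: with a 6-byte SG
+ * followed by a 10-byte SG, the first SG contributes one direct word to
+ * the FIFO plus 2 leftover bytes copied into buf (ob = 2); the second
+ * SG's 10 bytes join them (ob = 12), and since 12 is a multiple of 4
+ * the three buffered words are flushed with a single writesl().
+ */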
+
+/* CBC AES */
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB AES */
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC DES */
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB DES */
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC 3DES */
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB 3DES */
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct sun4i_ss_alg_template *algt;
+
+ memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ op->ss = algt->ss;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
+
+ return 0;
+}
+
+/* check and set the AES key, prepare the mode to be used */
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ op->keymode = SS_AES_128BITS;
+ break;
+ case 192 / 8:
+ op->keymode = SS_AES_192BITS;
+ break;
+ case 256 / 8:
+ op->keymode = SS_AES_256BITS;
+ break;
+ default:
+ dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
+
+/* check and set the DES key, prepare the mode to be used */
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ u32 flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (unlikely(keylen != DES_KEY_SIZE)) {
+ dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ flags = crypto_ablkcipher_get_flags(tfm);
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ dev_dbg(ss->dev, "Weak key %u\n", keylen);
+ return -EINVAL;
+ }
+
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
+
+/* check and set the 3DES key, prepare the mode to be used */
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
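+
+/*
+ * Editorial usage sketch (not part of the driver): with the 4.3-era
+ * ablkcipher API these entry points are reached roughly as follows.
+ *
+ *	struct crypto_ablkcipher *tfm =
+ *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *	struct ablkcipher_request *req;
+ *
+ *	crypto_ablkcipher_setkey(tfm, key, 16);     // sun4i_ss_aes_setkey()
+ *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *	ablkcipher_request_set_crypt(req, src, dst, len, iv);
+ *	crypto_ablkcipher_encrypt(req);             // sun4i_ss_cbc_aes_encrypt()
+ */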
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
new file mode 100644
index 000000000000..eab6fe227fa0
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -0,0 +1,425 @@
+/*
+ * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SS.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
+static struct sun4i_ss_alg_template ss_algs[] = {
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .mode = SS_OP_MD5,
+ .alg.hash = {
+ .init = sun4i_hash_init,
+ .update = sun4i_hash_update,
+ .final = sun4i_hash_final,
+ .finup = sun4i_hash_finup,
+ .digest = sun4i_hash_digest,
+ .export = sun4i_hash_export_md5,
+ .import = sun4i_hash_import_md5,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun4i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = sun4i_hash_crainit
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .mode = SS_OP_SHA1,
+ .alg.hash = {
+ .init = sun4i_hash_init,
+ .update = sun4i_hash_update,
+ .final = sun4i_hash_final,
+ .finup = sun4i_hash_finup,
+ .digest = sun4i_hash_digest,
+ .export = sun4i_hash_export_sha1,
+ .import = sun4i_hash_import_sha1,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun4i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = sun4i_hash_crainit
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun4i_ss_aes_setkey,
+ .encrypt = sun4i_ss_cbc_aes_encrypt,
+ .decrypt = sun4i_ss_cbc_aes_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun4i_ss_aes_setkey,
+ .encrypt = sun4i_ss_ecb_aes_encrypt,
+ .decrypt = sun4i_ss_ecb_aes_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = sun4i_ss_des_setkey,
+ .encrypt = sun4i_ss_cbc_des_encrypt,
+ .decrypt = sun4i_ss_cbc_des_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sun4i_ss_des_setkey,
+ .encrypt = sun4i_ss_ecb_des_encrypt,
+ .decrypt = sun4i_ss_ecb_des_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_cbc_des3_encrypt,
+ .decrypt = sun4i_ss_cbc_des3_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_ecb_des3_encrypt,
+ .decrypt = sun4i_ss_ecb_des3_decrypt,
+ }
+ }
+},
+};
+
+static int sun4i_ss_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 v;
+ int err, i;
+ unsigned long cr;
+ const unsigned long cr_ahb = 24 * 1000 * 1000;
+ const unsigned long cr_mod = 150 * 1000 * 1000;
+ struct sun4i_ss_ctx *ss;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ss->base)) {
+ dev_err(&pdev->dev, "Cannot request MMIO\n");
+ return PTR_ERR(ss->base);
+ }
+
+ ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(ss->ssclk)) {
+ err = PTR_ERR(ss->ssclk);
+ dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
+ return err;
+ }
+ dev_dbg(&pdev->dev, "clock ss acquired\n");
+
+ ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(ss->busclk)) {
+ err = PTR_ERR(ss->busclk);
+ dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
+ return err;
+ }
+ dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
+
+ ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_info(&pdev->dev, "no reset control found\n");
+ ss->reset = NULL;
+ }
+
+ /* Enable both clocks */
+ err = clk_prepare_enable(ss->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+ err = clk_prepare_enable(ss->ssclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+ goto error_ssclk;
+ }
+
+ /*
+ * Check that the clocks have the rates given in the datasheet,
+ * and try to set the SS clock to the maximum allowed.
+ */
+ err = clk_set_rate(ss->ssclk, cr_mod);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+ goto error_clk;
+ }
+
+ /* Deassert reset if we have a reset control */
+ if (ss->reset) {
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot deassert reset control\n");
+ goto error_clk;
+ }
+ }
+
+ /*
+ * The only impact of a clock below the requirement is bad
+ * performance, so do not print "errors"; only warn about
+ * overclocked clocks.
+ */
+ cr = clk_get_rate(ss->busclk);
+ if (cr >= cr_ahb)
+ dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+ cr, cr / 1000000, cr_ahb);
+ else
+ dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+ cr, cr / 1000000, cr_ahb);
+
+ cr = clk_get_rate(ss->ssclk);
+ if (cr < cr_mod)
+ dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+ else if (cr == cr_mod)
+ dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+ else
+ dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+
+ /*
+ * The datasheet names it "Die Bonding ID"; I expect it to be a
+ * sort of Security System revision number.
+ * Since the A80 seems to have another version of the SS,
+ * this info could be useful.
+ */
+ writel(SS_ENABLED, ss->base + SS_CTL);
+ v = readl(ss->base + SS_CTL);
+ v >>= 16;
+ v &= 0x07;
+ dev_info(&pdev->dev, "Die ID %d\n", v);
+ writel(0, ss->base + SS_CTL);
+
+ ss->dev = &pdev->dev;
+
+ spin_lock_init(&ss->slock);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ ss_algs[i].ss = ss;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ err = crypto_register_alg(&ss_algs[i].alg.crypto);
+ if (err != 0) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.crypto.cra_name);
+ goto error_alg;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ if (err != 0) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ goto error_alg;
+ }
+ break;
+ }
+ }
+ platform_set_drvdata(pdev, ss);
+ return 0;
+error_alg:
+ i--;
+ for (; i >= 0; i--) {
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto_unregister_alg(&ss_algs[i].alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
+ }
+ }
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+error_clk:
+ clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+ clk_disable_unprepare(ss->busclk);
+ return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+ int i;
+ struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto_unregister_alg(&ss_algs[i].alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
+ }
+ }
+
+ writel(0, ss->base + SS_CTL);
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+ clk_disable_unprepare(ss->busclk);
+ clk_disable_unprepare(ss->ssclk);
+ return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun4i-a10-crypto" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+ .probe = sun4i_ss_probe,
+ .remove = sun4i_ss_remove,
+ .driver = {
+ .name = "sun4i-ss",
+ .of_match_table = a20ss_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644
index 000000000000..ff8031498809
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -0,0 +1,492 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun4i_req_ctx));
+ return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun4i_ss_alg_template *algt;
+
+ memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ op->ss = algt->ss;
+ op->mode = algt->mode;
+
+ return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct md5_state *octx = out;
+ int i;
+
+ octx->byte_count = op->byte_count + op->len;
+
+ memcpy(octx->block, op->buf, op->len);
+
+ if (op->byte_count > 0) {
+ for (i = 0; i < 4; i++)
+ octx->hash[i] = op->hash[i];
+ } else {
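+ /*
+ * Editorial note: MD5's initial state words (0x67452301,
+ * 0xefcdab89, 0x98badcfe, 0x10325476) are numerically
+ * identical to SHA1's H0..H3, so the SHA1 constants are
+ * reused here for a fresh MD5 state.
+ */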
+ octx->hash[0] = SHA1_H0;
+ octx->hash[1] = SHA1_H1;
+ octx->hash[2] = SHA1_H2;
+ octx->hash[3] = SHA1_H3;
+ }
+
+ return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ const struct md5_state *ictx = in;
+ int i;
+
+ sun4i_hash_init(areq);
+
+ op->byte_count = ictx->byte_count & ~0x3F;
+ op->len = ictx->byte_count & 0x3F;
+
+ memcpy(op->buf, ictx->block, op->len);
+
+ for (i = 0; i < 4; i++)
+ op->hash[i] = ictx->hash[i];
+
+ return 0;
+}
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sha1_state *octx = out;
+ int i;
+
+ octx->count = op->byte_count + op->len;
+
+ memcpy(octx->buffer, op->buf, op->len);
+
+ if (op->byte_count > 0) {
+ for (i = 0; i < 5; i++)
+ octx->state[i] = op->hash[i];
+ } else {
+ octx->state[0] = SHA1_H0;
+ octx->state[1] = SHA1_H1;
+ octx->state[2] = SHA1_H2;
+ octx->state[3] = SHA1_H3;
+ octx->state[4] = SHA1_H4;
+ }
+
+ return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ const struct sha1_state *ictx = in;
+ int i;
+
+ sun4i_hash_init(areq);
+
+ op->byte_count = ictx->count & ~0x3F;
+ op->len = ictx->count & 0x3F;
+
+ memcpy(op->buf, ictx->buffer, op->len);
+
+ for (i = 0; i < 5; i++)
+ op->hash[i] = ictx->state[i];
+
+ return 0;
+}
+
+/*
+ * sun4i_hash_update: update hash engine
+ *
+ * Can be used for both SHA1 and MD5.
+ * Data are written in 32-bit steps and pushed into the SS.
+ *
+ * Since we cannot leave partial data and hash state in the engine,
+ * we need to get the hash state at the end of this function.
+ * The hash state can only be retrieved at 64-byte boundaries.
+ *
+ * So the first task is to round the number of bytes to write to the SS
+ * down to a multiple of 64; the extra bytes go to a temporary wait
+ * buffer, op->buf, which holds op->len bytes.
+ *
+ * So at the beginning of update():
+ * if op->len + areq->nbytes < 64
+ * => all data will be written to the wait buffer (op->buf) and end=0
+ * if not, write all data from op->buf to the device and set end so the
+ * total written completes to a multiple of 64 bytes
+ *
+ * example 1:
+ * update1 60 bytes => op->len=60
+ * update2 60 bytes => we need one more word to have 64 bytes
+ * end=4
+ * so write all data from op->buf and one word from the SGs,
+ * then store the remaining data in op->buf
+ * final state op->len=56
+ */
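+/*
+ * example 2 (editorial):
+ * update1 10 bytes => op->len=10
+ * update2 100 bytes => op->len + areq->nbytes = 110, so end = 64 - 10 = 54
+ * the 10 buffered bytes plus 54 bytes from the SGs form one 64-byte
+ * block for the device, and the remaining 46 bytes are stored back
+ * in op->buf
+ * final state op->len=46
+ */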
+int sun4i_hash_update(struct ahash_request *areq)
+{
+ u32 v, ivmode = 0;
+ unsigned int i = 0;
+ /*
+ * i is the total number of bytes read from the SGs, to be compared
+ * to areq->nbytes; i matters because we cannot rely on the SG
+ * lengths, since the sum of SG->length could be greater than
+ * areq->nbytes
+ */
+
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sun4i_ss_ctx *ss = op->ss;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ unsigned int in_i = 0; /* advancement in the current SG */
+ unsigned int end;
+ /*
+ * end is the position at which we must stop writing to the
+ * device, to be compared to i
+ */
+ int in_r, err = 0;
+ unsigned int todo;
+ u32 spaces, rx_cnt = SS_RX_DEFAULT;
+ size_t copied = 0;
+ struct sg_mapping_iter mi;
+
+ dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+ __func__, crypto_tfm_alg_name(areq->base.tfm),
+ op->byte_count, areq->nbytes, op->mode,
+ op->len, op->hash[0]);
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ /* protect against overflow */
+ if (areq->nbytes > UINT_MAX - op->len) {
+ dev_err(ss->dev, "Cannot process too large request\n");
+ return -EINVAL;
+ }
+
+ if (op->len + areq->nbytes < 64) {
+ /* linearize data to op->buf */
+ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ op->buf + op->len, areq->nbytes, 0);
+ op->len += copied;
+ return 0;
+ }
+
+ end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+ if (end > areq->nbytes || areq->nbytes - end > 63) {
+ dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+ end, areq->nbytes);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ss->slock);
+
+ /*
+ * if some data have been processed before,
+ * we need to restore the partial hash state
+ */
+ if (op->byte_count > 0) {
+ ivmode = SS_IV_ARBITRARY;
+ for (i = 0; i < 5; i++)
+ writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+ }
+ /* Enable the device */
+ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+ i = 0;
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ in_i = 0;
+
+ do {
+ /*
+ * we need to linearize in two cases:
+ * - the buffer is already in use
+ * - the SG does not have enough bytes remaining (< 4)
+ */
+ if (op->len > 0 || (mi.length - in_i) < 4) {
+ /*
+ * if we have entered here, there are two reasons to stop:
+ * - the buffer is full
+ * - we have reached the end
+ */
+ while (op->len < 64 && i < end) {
+ /* how many bytes we can read from the current SG */
+ in_r = min3(mi.length - in_i, end - i,
+ 64 - op->len);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+ in_i += in_r;
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ if (op->len > 3 && (op->len % 4) == 0) {
+ /* write buf to the device */
+ writesl(ss->base + SS_RXFIFO, op->buf,
+ op->len / 4);
+ op->byte_count += op->len;
+ op->len = 0;
+ }
+ }
+ if (mi.length - in_i > 3 && i < end) {
+ /* how many bytes we can read from the current SG */
+ in_r = min3(mi.length - in_i, areq->nbytes - i,
+ ((mi.length - in_i) / 4) * 4);
+ /* how many bytes we can write to the device */
+ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+ writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+ op->byte_count += todo * 4;
+ i += todo * 4;
+ in_i += todo * 4;
+ rx_cnt -= todo;
+ if (rx_cnt == 0) {
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ }
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ } while (i < end);
+ /* final linearization: buffer the remaining tail (< 64 bytes) for a later update/final */
+ if ((areq->nbytes - i) < 64) {
+ while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+ /* how many bytes we can read from the current SG */
+ in_r = min3(mi.length - in_i, areq->nbytes - i,
+ 64 - op->len);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+ in_i += in_r;
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ }
+
+ sg_miter_stop(&mi);
+
+ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+ i = 0;
+ do {
+ v = readl(ss->base + SS_CTL);
+ i++;
+ } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+ if (i >= SS_TIMEOUT) {
+ dev_err_ratelimited(ss->dev,
+ "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+ i, SS_TIMEOUT, v, areq->nbytes);
+ err = -EIO;
+ goto release_ss;
+ }
+
+ /* get the partial hash only if something was written */
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+ op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+release_ss:
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
+
+/*
+ * sun4i_hash_final: finalize the hashing operation
+ *
+ * If we have some remaining bytes, we write them,
+ * then ask the SS to finalize the hashing operation.
+ *
+ * I do not check the RX FIFO size in this function since the free space
+ * is 32 words after each enable and this function never writes more
+ * than 32 words.
+ */
+int sun4i_hash_final(struct ahash_request *areq)
+{
+ u32 v, ivmode = 0;
+ unsigned int i;
+ unsigned int j = 0;
+ int zeros, err = 0;
+ unsigned int index, padlen;
+ __be64 bits;
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sun4i_ss_ctx *ss = op->ss;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ u32 bf[32];
+ u32 wb = 0;
+ unsigned int nwait, nbw = 0;
+
+ dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
+ __func__, op->byte_count, areq->nbytes, op->mode,
+ op->len, op->hash[0]);
+
+ spin_lock_bh(&ss->slock);
+
+ /*
+ * if we have already written something,
+ * restore the partial hash state
+ */
+ if (op->byte_count > 0) {
+ ivmode = SS_IV_ARBITRARY;
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+ writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+ }
+ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+ /* write the remaining words of the wait buffer */
+ if (op->len > 0) {
+ nwait = op->len / 4;
+ if (nwait > 0) {
+ writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+ op->byte_count += 4 * nwait;
+ }
+ nbw = op->len - 4 * nwait;
+ wb = *(u32 *)(op->buf + nwait * 4);
+ wb &= (0xFFFFFFFF >> (4 - nbw) * 8);
+ }
+
+ /* append the padding "1" bit after any remaining partial-word bytes */
+ if (nbw > 0) {
+ wb |= ((1 << 7) << (nbw * 8));
+ bf[j++] = wb;
+ } else {
+ bf[j++] = 1 << 7;
+ }
+
+ /*
+ * Compute the number of padding words needed to reach 64 bytes,
+ * minus 8 (the size field) and minus 4 (the final "1" word).
+ * The computations are taken from other MD5/SHA1 implementations.
+ */
+
+ /* we have already sent 4 more bytes, of which nbw are data */
+ if (op->mode == SS_OP_MD5) {
+ index = (op->byte_count + 4) & 0x3f;
+ op->byte_count += nbw;
+ if (index > 56)
+ zeros = (120 - index) / 4;
+ else
+ zeros = (56 - index) / 4;
+ } else {
+ op->byte_count += nbw;
+ index = op->byte_count & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ zeros = (padlen - 1) / 4;
+ }
+
+ memset(bf + j, 0, 4 * zeros);
+ j += zeros;
+
+ /* write the length of data */
+ if (op->mode == SS_OP_SHA1) {
+ bits = cpu_to_be64(op->byte_count << 3);
+ bf[j++] = bits & 0xffffffff;
+ bf[j++] = (bits >> 32) & 0xffffffff;
+ } else {
+ bf[j++] = (op->byte_count << 3) & 0xffffffff;
+ bf[j++] = (op->byte_count >> 29) & 0xffffffff;
+ }
+ writesl(ss->base + SS_RXFIFO, bf, j);
+
+ /* Tell the SS to stop the hashing */
+ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+ /*
+ * Wait for the SS to finish the hash.
+ * The timeout can only happen in case of bad overclocking
+ * or a driver bug.
+ */
+ i = 0;
+ do {
+ v = readl(ss->base + SS_CTL);
+ i++;
+ } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+ if (i >= SS_TIMEOUT) {
+ dev_err_ratelimited(ss->dev,
+ "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+ i, SS_TIMEOUT, v, areq->nbytes);
+ err = -EIO;
+ goto release_ss;
+ }
+
+ /* Get the hash from the device */
+ if (op->mode == SS_OP_SHA1) {
+ for (i = 0; i < 5; i++) {
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ memcpy(areq->result + i * 4, &v, 4);
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ v = readl(ss->base + SS_MD0 + i * 4);
+ memcpy(areq->result + i * 4, &v, 4);
+ }
+ }
+
+release_ss:
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
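+
+/*
+ * Editorial walk-through of the padding math above, for an empty MD5
+ * message (byte_count = 0, len = 0): bf[0] is the lone "1" word (0x80),
+ * index = (0 + 4) & 0x3f = 4, zeros = (56 - 4) / 4 = 13, and two length
+ * words follow, so j = 16 words (64 bytes) reach the FIFO, i.e. exactly
+ * one fully padded MD5 block.
+ */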
+
+/* sun4i_hash_finup: finalize hashing operation after an update */
+int sun4i_hash_finup(struct ahash_request *areq)
+{
+ int err;
+
+ err = sun4i_hash_update(areq);
+ if (err != 0)
+ return err;
+
+ return sun4i_hash_final(areq);
+}
+
+/* combo of init/update/final functions */
+int sun4i_hash_digest(struct ahash_request *areq)
+{
+ int err;
+
+ err = sun4i_hash_init(areq);
+ if (err != 0)
+ return err;
+
+ err = sun4i_hash_update(areq);
+ if (err != 0)
+ return err;
+
+ return sun4i_hash_final(areq);
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
new file mode 100644
index 000000000000..8e9c05f6e4d4
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -0,0 +1,201 @@
+/*
+ * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Supports the AES cipher with 128-, 192- and 256-bit keysizes.
+ * Supports the MD5 and SHA1 hash algorithms.
+ * Supports DES and 3DES.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+
+#define SS_CTL 0x00
+#define SS_KEY0 0x04
+#define SS_KEY1 0x08
+#define SS_KEY2 0x0C
+#define SS_KEY3 0x10
+#define SS_KEY4 0x14
+#define SS_KEY5 0x18
+#define SS_KEY6 0x1C
+#define SS_KEY7 0x20
+
+#define SS_IV0 0x24
+#define SS_IV1 0x28
+#define SS_IV2 0x2C
+#define SS_IV3 0x30
+
+#define SS_FCSR 0x44
+
+#define SS_MD0 0x4C
+#define SS_MD1 0x50
+#define SS_MD2 0x54
+#define SS_MD3 0x58
+#define SS_MD4 0x5C
+
+#define SS_RXFIFO 0x200
+#define SS_TXFIFO 0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT (0 << 15)
+#define SS_PRNG_CONTINUE (1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY (1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB (0 << 12)
+#define SS_CBC (1 << 12)
+#define SS_CTS (3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS (0 << 10)
+#define SS_CNT_32BITS (1 << 10)
+#define SS_CNT_64BITS (2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS (0 << 8)
+#define SS_AES_192BITS (1 << 8)
+#define SS_AES_256BITS (2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION (0 << 7)
+#define SS_DECRYPTION (1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES (0 << 4)
+#define SS_OP_DES (1 << 4)
+#define SS_OP_3DES (2 << 4)
+#define SS_OP_SHA1 (3 << 4)
+#define SS_OP_MD5 (4 << 4)
+#define SS_OP_PRNG (5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END (1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START (1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED (0 << 0)
+#define SS_ENABLED (1 << 0)
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE (1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE (1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
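+
+/*
+ * Editorial example: for SS_FCSR = 0x20200000, SS_RXFIFO_SPACES() yields
+ * (0x20200000 >> 24) & 0x3f = 32 free RX words and SS_TXFIFO_SPACES()
+ * yields (0x20200000 >> 16) & 0x3f = 32 available TX words.
+ */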
+
+#define SS_RX_MAX 32
+#define SS_RX_DEFAULT SS_RX_MAX
+#define SS_TX_MAX 33
+
+#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
+
+struct sun4i_ss_ctx {
+ void __iomem *base;
+ int irq;
+ struct clk *busclk;
+ struct clk *ssclk;
+ struct reset_control *reset;
+ struct device *dev;
+ struct resource *res;
+ spinlock_t slock; /* control the use of the device */
+};
+
+struct sun4i_ss_alg_template {
+ u32 type;
+ u32 mode;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+ struct sun4i_ss_ctx *ss;
+};
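+
+/*
+ * Editorial note: sun4i_ss_cipher_init() and sun4i_hash_init() recover
+ * this template from the crypto_alg/ahash_alg embedded in it via
+ * container_of(), which is how each transform finds its sun4i_ss_ctx.
+ */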
+
+struct sun4i_tfm_ctx {
+ u32 key[AES_MAX_KEY_SIZE / 4]; /* divided by sizeof(u32) */
+ u32 keylen;
+ u32 keymode;
+ struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+ u32 mode;
+};
+
+struct sun4i_req_ctx {
+ u32 mode;
+ u64 byte_count; /* number of bytes "uploaded" to the device */
+ u32 hash[5]; /* for storing SS_IVx register */
+ char buf[64];
+ unsigned int len;
+ struct sun4i_ss_ctx *ss;
+};
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out);
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in);
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out);
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in);
+
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 83aca95a95bc..cd774534d987 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -766,6 +766,7 @@ static int talitos_rng_init(struct hwrng *rng)
static int talitos_register_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
+ int err;
priv->rng.name = dev_driver_string(dev),
priv->rng.init = talitos_rng_init,
@@ -773,14 +774,22 @@ static int talitos_register_rng(struct device *dev)
priv->rng.data_read = talitos_rng_data_read,
priv->rng.priv = (unsigned long)dev;
- return hwrng_register(&priv->rng);
+ err = hwrng_register(&priv->rng);
+ if (!err)
+ priv->rng_registered = true;
+
+ return err;
}
static void talitos_unregister_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
+ if (!priv->rng_registered)
+ return;
+
hwrng_unregister(&priv->rng);
+ priv->rng_registered = false;
}
/*
@@ -799,7 +808,6 @@ struct talitos_ctx {
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
- unsigned int authsize;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -819,16 +827,6 @@ struct talitos_ahash_req_ctx {
struct scatterlist *psrc;
};
-static int aead_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
-{
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-
- ctx->authsize = authsize;
-
- return 0;
-}
-
static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
@@ -857,12 +855,11 @@ badkey:
/*
* talitos_edesc - s/w-extended descriptor
- * @assoc_nents: number of segments in associated data scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @assoc_chained: whether assoc is chained or not
* @src_chained: whether src is chained or not
* @dst_chained: whether dst is chained or not
+ * @icv_ool: whether ICV is out-of-line
* @iv_dma: dma address of iv for checking continuity and link table
* @dma_len: length of dma mapped link_tbl space
* @dma_link_tbl: bus physical address of link_tbl/buf
@@ -875,12 +872,11 @@ badkey:
* of link_tbl data
*/
struct talitos_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- bool assoc_chained;
bool src_chained;
bool dst_chained;
+ bool icv_ool;
dma_addr_t iv_dma;
int dma_len;
dma_addr_t dma_link_tbl;
@@ -952,14 +948,6 @@ static void ipsec_esp_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- if (edesc->assoc_chained)
- talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
- else if (areq->assoclen)
- /* assoc_nents counts also for IV in non-contiguous cases */
- dma_unmap_sg(dev, areq->assoc,
- edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
- DMA_TO_DEVICE);
-
talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
if (edesc->dma_len)
@@ -976,7 +964,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
{
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
@@ -986,13 +974,12 @@ static void ipsec_esp_encrypt_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
- if (edesc->dst_nents) {
+ if (edesc->icv_ool) {
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
- icvdata, ctx->authsize);
+ memcpy((char *)sg_virt(sg) + sg->length - authsize,
+ icvdata, authsize);
}
kfree(edesc);
@@ -1006,10 +993,10 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
{
struct aead_request *req = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
- void *icvdata;
+ char *oicv, *icv;
edesc = container_of(desc, struct talitos_edesc, desc);
@@ -1017,16 +1004,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
if (!err) {
/* auth check */
- if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
- else
- icvdata = &edesc->link_tbl[0];
-
sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
- ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
+ icv = (char *)sg_virt(sg) + sg->length - authsize;
+
+ if (edesc->dma_len) {
+ oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
+ if (edesc->icv_ool)
+ icv = oicv + authsize;
+ } else
+ oicv = (char *)&edesc->link_tbl[0];
+
+ err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
}
kfree(edesc);
@@ -1059,53 +1048,69 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
* convert scatterlist to SEC h/w link table format
* stop at cryptlen bytes
*/
-static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- int cryptlen, struct talitos_ptr *link_tbl_ptr)
+static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ unsigned int offset, int cryptlen,
+ struct talitos_ptr *link_tbl_ptr)
{
int n_sg = sg_count;
+ int count = 0;
- while (sg && n_sg--) {
- to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
- link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
- link_tbl_ptr->j_extent = 0;
- link_tbl_ptr++;
- cryptlen -= sg_dma_len(sg);
- sg = sg_next(sg);
- }
+ while (cryptlen && sg && n_sg--) {
+ unsigned int len = sg_dma_len(sg);
+
+ if (offset >= len) {
+ offset -= len;
+ goto next;
+ }
+
+ len -= offset;
+
+ if (len > cryptlen)
+ len = cryptlen;
- /* adjust (decrease) last one (or two) entry's len to cryptlen */
- link_tbl_ptr--;
- while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
- /* Empty this entry, and move to previous one */
- cryptlen += be16_to_cpu(link_tbl_ptr->len);
- link_tbl_ptr->len = 0;
- sg_count--;
- link_tbl_ptr--;
+ to_talitos_ptr(link_tbl_ptr + count,
+ sg_dma_address(sg) + offset, 0);
+ link_tbl_ptr[count].len = cpu_to_be16(len);
+ link_tbl_ptr[count].j_extent = 0;
+ count++;
+ cryptlen -= len;
+ offset = 0;
+
+next:
+ sg = sg_next(sg);
}
- link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
- + cryptlen);
/* tag end of link table */
- link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+ if (count > 0)
+ link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
- return sg_count;
+ return count;
+}
+
+static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+ int cryptlen,
+ struct talitos_ptr *link_tbl_ptr)
+{
+ return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
+ link_tbl_ptr);
}
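sg_to_link_tbl_offset() generalizes the old converter: it can start `offset` bytes into the scatterlist (used below to skip the associated data, which in the new AEAD layout lives at the front of req->src) and stops after `cryptlen` bytes. A toy model of the same walk, with plain (addr, len) segments standing in for scatterlist entries, illustration only:

struct seg { unsigned long addr; unsigned int len; };

static int to_link_tbl_offset(const struct seg *sg, int n_sg,
			      unsigned int offset, int cryptlen,
			      struct seg *out)
{
	int count = 0;

	while (cryptlen && n_sg--) {
		unsigned int len = sg->len;

		if (offset >= len) {		/* segment entirely skipped */
			offset -= len;
		} else {
			len -= offset;
			if (len > cryptlen)	/* clamp the final entry */
				len = cryptlen;
			out[count].addr = sg->addr + offset;
			out[count].len = len;
			count++;
			cryptlen -= len;
			offset = 0;		/* only the first entry is offset */
		}
		sg++;
	}
	return count;				/* number of entries emitted */
}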
/*
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- u64 seq, void (*callback) (struct device *dev,
- struct talitos_desc *desc,
- void *context, int error))
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ unsigned int authsize = crypto_aead_authsize(aead);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
- unsigned int authsize = ctx->authsize;
unsigned int ivsize = crypto_aead_ivsize(aead);
+ int tbl_off = 0;
int sg_count, ret;
int sg_link_tbl_len;
@@ -1113,36 +1118,27 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
DMA_TO_DEVICE);
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_chained);
+
/* hmac data */
- desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
- if (edesc->assoc_nents) {
- int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ areq->assoclen,
+ &edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
- /* assoc_nents - 1 entries for assoc, 1 for IV */
- sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
- areq->assoclen, tbl_ptr);
-
- /* add IV to link table */
- tbl_ptr += sg_count - 1;
- tbl_ptr->j_extent = 0;
- tbl_ptr++;
- to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
- tbl_ptr->len = cpu_to_be16(ivsize);
- tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
- if (areq->assoclen)
- to_talitos_ptr(&desc->ptr[1],
- sg_dma_address(areq->assoc), 0);
- else
- to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
}
@@ -1150,8 +1146,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
desc->ptr[2].len = cpu_to_be16(ivsize);
desc->ptr[2].j_extent = 0;
- /* Sync needed for the aead_givencrypt case */
- dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@@ -1167,33 +1161,24 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
- sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- : DMA_TO_DEVICE,
- edesc->src_chained);
-
- if (sg_count == 1) {
+ sg_link_tbl_len = cryptlen;
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
+
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+ sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ tbl_off *
+ sizeof(struct talitos_ptr), 0);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
- } else {
- sg_link_tbl_len = cryptlen;
-
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len = cryptlen + authsize;
-
- sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
- &edesc->link_tbl[0]);
- if (sg_count > 1) {
- desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- } else {
- /* Only one segment now, so no link tbl needed */
- to_talitos_ptr(&desc->ptr[4],
- sg_dma_address(areq->src), 0);
- }
- }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1204,16 +1189,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_nents ? : 1,
DMA_FROM_DEVICE, edesc->dst_chained);
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
- } else {
- int tbl_off = edesc->src_nents + 1;
+ edesc->icv_ool = false;
+
+ if (sg_count > 1 &&
+ (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ areq->assoclen, cryptlen,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
tbl_off * sizeof(struct talitos_ptr), 0);
- sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
- tbl_ptr);
/* Add an entry to the link table for ICV data */
tbl_ptr += sg_count - 1;
@@ -1224,13 +1210,16 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
- (tbl_off + edesc->dst_nents + 1 +
- edesc->assoc_nents) *
- sizeof(struct talitos_ptr), 0);
+ (edesc->src_nents + edesc->dst_nents +
+ 2) * sizeof(struct talitos_ptr) +
+ authsize, 0);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
- }
+
+ edesc->icv_ool = true;
+ } else
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1268,7 +1257,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
- struct scatterlist *assoc,
struct scatterlist *src,
struct scatterlist *dst,
u8 *iv,
@@ -1281,8 +1269,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
bool encrypt)
{
struct talitos_edesc *edesc;
- int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
- bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int src_nents, dst_nents, alloc_len, dma_len;
+ bool src_chained = false, dst_chained = false;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1298,48 +1286,35 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- if (assoclen) {
- /*
- * Currently it is assumed that iv is provided whenever assoc
- * is.
- */
- BUG_ON(!iv);
-
- assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
- talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
- assoc_chained);
- assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
-
- if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
- assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
- }
-
if (!dst || dst == src) {
- src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+ src_nents = sg_count(src, assoclen + cryptlen + authsize,
+ &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
} else { /* dst && dst != src*/
- src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+ src_nents = sg_count(src, assoclen + cryptlen +
+ (encrypt ? 0 : authsize),
&src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+ dst_nents = sg_count(dst, assoclen + cryptlen +
+ (encrypt ? authsize : 0),
&dst_chained);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
* allocate space for base edesc plus the link tables,
- * allowing for two separate entries for ICV and generated ICV (+ 2),
- * and the ICV data itself
+ * allowing for two separate entries for AD and generated ICV (+ 2),
+ * and space for two sets of ICVs (stashed and generated)
*/
alloc_len = sizeof(struct talitos_edesc);
- if (assoc_nents || src_nents || dst_nents) {
+ if (src_nents || dst_nents) {
if (is_sec1)
dma_len = (src_nents ? cryptlen : 0) +
(dst_nents ? cryptlen : 0);
else
- dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
- sizeof(struct talitos_ptr) + authsize;
+ dma_len = (src_nents + dst_nents + 2) *
+ sizeof(struct talitos_ptr) + authsize * 2;
alloc_len += dma_len;
} else {
dma_len = 0;
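For the non-SEC1 case, the allocation above reserves a link-table area plus two authsize-sized slots after the edesc itself: per the updated comment, one slot holds the ICV stashed from the input and the other the ICV the hardware generates out-of-line. A sketch of the layout implied by alloc_len/dma_len above (illustration, not a literal struct):

/*
 * edesc buffer layout (non-SEC1), as implied by the sizes above:
 *
 *   struct talitos_edesc                  header
 *   link_tbl[src_nents + dst_nents + 2]   h/w link tables
 *                                         (+2: separate entries for
 *                                          AD and the generated ICV)
 *   u8 icv_stash[authsize]                ICV copied from the request
 *   u8 icv_hw[authsize]                   ICV written by the engine
 */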
@@ -1348,13 +1323,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- if (assoc_chained)
- talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
- else if (assoclen)
- dma_unmap_sg(dev, assoc,
- assoc_nents ? assoc_nents - 1 : 1,
- DMA_TO_DEVICE);
-
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1362,10 +1330,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
}
- edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->assoc_chained = assoc_chained;
edesc->src_chained = src_chained;
edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
@@ -1382,12 +1348,13 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
int icv_stashing, bool encrypt)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
- ctx->authsize, ivsize, icv_stashing,
+ authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1405,14 +1372,14 @@ static int aead_encrypt(struct aead_request *req)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- unsigned int authsize = ctx->authsize;
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
struct scatterlist *sg;
@@ -1437,7 +1404,7 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@@ -1445,40 +1412,16 @@ static int aead_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
+ icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
sg = sg_last(req->src, edesc->src_nents ? : 1);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
- ctx->authsize);
+ memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
- return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct aead_request *areq = &req->areq;
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct talitos_edesc *edesc;
-
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, req->giv, 0, true);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* set encrypt */
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
-
- memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
- /* avoid consecutive packets going out with same IV */
- *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
-
- return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
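With givencrypt gone (IV generation now belongs to the generic geniv wrappers of the new AEAD API, so the driver no longer seeds its own first IV), the remaining decrypt-side subtlety is the software authentication path. Schematically (a sketch of the flow above, not new code):

/*
 * aead_decrypt(), software auth check (sketch):
 *
 *   1. copy ("stash") the received ICV from the tail of req->src
 *      into the slot that follows the link tables;
 *   2. submit the descriptor; the engine decrypts and writes the
 *      ICV it computed out-of-line;
 *   3. ipsec_esp_decrypt_swauth_done() memcmp()s the stashed and
 *      computed ICVs and returns -EBADMSG on mismatch.
 */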
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1710,7 +1653,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
- return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
areq->base.flags, encrypt);
}
@@ -1895,7 +1838,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
+ return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags, false);
}
@@ -2161,6 +2104,7 @@ struct talitos_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
__be32 desc_hdr_template;
};
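Each AEAD template below is then converted from the legacy cra_aead block to a struct aead_alg with an embedded base crypto_alg; ivsize and maxauthsize move out of the base, and the explicit CRYPTO_ALG_TYPE_AEAD flag becomes implied by registering through the AEAD API. The shape every converted entry follows (sketch using the first entry's values):

{	.type = CRYPTO_ALG_TYPE_AEAD,
	.alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,	/* TYPE_AEAD now implied */
		},
		.ivsize = AES_BLOCK_SIZE,		/* moved out of cra_aead */
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	/* .desc_hdr_template unchanged */
},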
@@ -2168,15 +2112,16 @@ struct talitos_alg_template {
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2187,15 +2132,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2207,15 +2154,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha224),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2226,15 +2174,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2246,15 +2196,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2265,15 +2216,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2285,15 +2238,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha384),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2304,15 +2258,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2324,15 +2280,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2343,15 +2300,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2363,15 +2322,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2382,15 +2342,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2658,15 +2619,9 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-
- talitos_cra_init(tfm);
-
- /* random first IV */
- get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
-
+ talitos_cra_init(crypto_aead_tfm(tfm));
return 0;
}
@@ -2713,9 +2668,11 @@ static int talitos_remove(struct platform_device *ofdev)
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_alg(&t_alg->algt.alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&t_alg->algt.alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&t_alg->algt.alg.hash);
break;
@@ -2727,7 +2682,7 @@ static int talitos_remove(struct platform_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- for (i = 0; i < priv->num_channels; i++)
+ for (i = 0; priv->chan && i < priv->num_channels; i++)
kfree(priv->chan[i].fifo);
kfree(priv->chan);
@@ -2774,15 +2729,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
- alg = &t_alg->algt.alg.crypto;
- alg->cra_init = talitos_cra_init_aead;
- alg->cra_type = &crypto_aead_type;
- alg->cra_aead.setkey = aead_setkey;
- alg->cra_aead.setauthsize = aead_setauthsize;
- alg->cra_aead.encrypt = aead_encrypt;
- alg->cra_aead.decrypt = aead_decrypt;
- alg->cra_aead.givencrypt = aead_givencrypt;
- alg->cra_aead.geniv = "<built-in>";
+ alg = &t_alg->algt.alg.aead.base;
+ t_alg->algt.alg.aead.init = talitos_cra_init_aead;
+ t_alg->algt.alg.aead.setkey = aead_setkey;
+ t_alg->algt.alg.aead.encrypt = aead_encrypt;
+ t_alg->algt.alg.aead.decrypt = aead_decrypt;
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
@@ -3041,7 +2992,7 @@ static int talitos_probe(struct platform_device *ofdev)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
- char *name = NULL;
+ struct crypto_alg *alg = NULL;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -3053,21 +3004,26 @@ static int talitos_probe(struct platform_device *ofdev)
switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- case CRYPTO_ALG_TYPE_AEAD:
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
- name = t_alg->algt.alg.crypto.cra_driver_name;
+ alg = &t_alg->algt.alg.crypto;
break;
+
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = crypto_register_aead(
+ &t_alg->algt.alg.aead);
+ alg = &t_alg->algt.alg.aead.base;
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
&t_alg->algt.alg.hash);
- name =
- t_alg->algt.alg.hash.halg.base.cra_driver_name;
+ alg = &t_alg->algt.alg.hash.halg.base;
break;
}
if (err) {
dev_err(dev, "%s alg registration failed\n",
- name);
+ alg->cra_driver_name);
kfree(t_alg);
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
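(Not in the add_after list by oversight if absent; see the registration dispatch above.) 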
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 314daf55e7f7..0090f3211d68 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,12 +52,7 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
-static const struct talitos_ptr zero_entry = {
- .len = 0,
- .j_extent = 0,
- .eptr = 0,
- .ptr = 0
-};
+static const struct talitos_ptr zero_entry;
/* descriptor */
struct talitos_desc {
@@ -154,6 +149,7 @@ struct talitos_private {
/* hwrng device */
struct hwrng rng;
+ bool rng_registered;
};
extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index e79e567e43aa..263af709e536 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(src, dst, &ctx->enc_key);
pagefault_enable();
preempt_enable();
@@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_decrypt(src, dst, &ctx->dec_key);
pagefault_enable();
preempt_enable();
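The aesp8-ppc.pl assembly uses VSX loads/stores in addition to VMX/AltiVec instructions, so every call site must enable both facilities; the hunks in this file and in aes_cbc.c, aes_ctr.c, and ghash.c below all add the same line. The pattern, restating the code above (sketch):

preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();	/* the P8 asm also touches VSX registers */
/* ... aes_p8_set_encrypt_key() / aes_p8_encrypt() / etc. ... */
pagefault_enable();
preempt_enable();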
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 7299995c78ec..0b8fe2ec5315 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
@@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 7adae42a7b79..ee1306cd8f59 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
pagefault_enable();
@@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
pagefault_enable();
@@ -113,6 +115,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *src, unsigned int nbytes)
{
int ret;
+ u64 inc;
struct blkcipher_walk walk;
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
@@ -131,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
walk.dst.virt.addr,
(nbytes &
@@ -140,7 +144,12 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
walk.iv);
pagefault_enable();
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ /* advance the IV by the number of blocks the asm just consumed */
+ inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
+ if (inc > 0)
+ while (inc--)
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
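The counter bug fix: aes_p8_ctr32_encrypt_blocks() consumes every full block in the walk chunk, but the old code advanced the software copy of the IV by a single block per chunk, desynchronizing the counter whenever a chunk held more than one block. The bookkeeping now matches what the assembly consumed (restating the hunk above):

/* blocks actually processed by the assembly in this chunk */
u64 inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;

while (inc--)			/* advance the IV once per block */
	crypto_inc(walk.iv, AES_BLOCK_SIZE);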
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 6c5c20c6108e..228053921b3f 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1437,28 +1437,28 @@ Load_ctr32_enc_key:
?vperm v31,v31,$out0,$keyperm
lvx v25,$x10,$key_ # pre-load round[2]
- vadduwm $two,$one,$one
+ vadduqm $two,$one,$one
subi $inp,$inp,15 # undo "caller"
$SHL $len,$len,4
- vadduwm $out1,$ivec,$one # counter values ...
- vadduwm $out2,$ivec,$two
+ vadduqm $out1,$ivec,$one # counter values ...
+ vadduqm $out2,$ivec,$two
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
le?li $idx,8
- vadduwm $out3,$out1,$two
+ vadduqm $out3,$out1,$two
vxor $out1,$out1,$rndkey0
le?lvsl $inpperm,0,$idx
- vadduwm $out4,$out2,$two
+ vadduqm $out4,$out2,$two
vxor $out2,$out2,$rndkey0
le?vspltisb $tmp,0x0f
- vadduwm $out5,$out3,$two
+ vadduqm $out5,$out3,$two
vxor $out3,$out3,$rndkey0
le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- vadduwm $out6,$out4,$two
+ vadduqm $out6,$out4,$two
vxor $out4,$out4,$rndkey0
- vadduwm $out7,$out5,$two
+ vadduqm $out7,$out5,$two
vxor $out5,$out5,$rndkey0
- vadduwm $ivec,$out6,$two # next counter value
+ vadduqm $ivec,$out6,$two # next counter value
vxor $out6,$out6,$rndkey0
vxor $out7,$out7,$rndkey0
@@ -1594,27 +1594,27 @@ Loop_ctr32_enc8x_middle:
vcipherlast $in0,$out0,$in0
vcipherlast $in1,$out1,$in1
- vadduwm $out1,$ivec,$one # counter values ...
+ vadduqm $out1,$ivec,$one # counter values ...
vcipherlast $in2,$out2,$in2
- vadduwm $out2,$ivec,$two
+ vadduqm $out2,$ivec,$two
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
vcipherlast $in3,$out3,$in3
- vadduwm $out3,$out1,$two
+ vadduqm $out3,$out1,$two
vxor $out1,$out1,$rndkey0
vcipherlast $in4,$out4,$in4
- vadduwm $out4,$out2,$two
+ vadduqm $out4,$out2,$two
vxor $out2,$out2,$rndkey0
vcipherlast $in5,$out5,$in5
- vadduwm $out5,$out3,$two
+ vadduqm $out5,$out3,$two
vxor $out3,$out3,$rndkey0
vcipherlast $in6,$out6,$in6
- vadduwm $out6,$out4,$two
+ vadduqm $out6,$out4,$two
vxor $out4,$out4,$rndkey0
vcipherlast $in7,$out7,$in7
- vadduwm $out7,$out5,$two
+ vadduqm $out7,$out5,$two
vxor $out5,$out5,$rndkey0
le?vperm $in0,$in0,$in0,$inpperm
- vadduwm $ivec,$out6,$two # next counter value
+ vadduqm $ivec,$out6,$two # next counter value
vxor $out6,$out6,$rndkey0
le?vperm $in1,$in1,$in1,$inpperm
vxor $out7,$out7,$rndkey0
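vadduwm is a per-32-bit-word modular add, so carries never cross word lanes; a CTR counter block is one 128-bit integer, and with vadduwm the increment goes wrong as soon as the low word wraps. vadduqm adds the full quadword, hence the new opcode mapping added to ppc-xlate.pl below. A C model of the 128-bit add, illustration only:

#include <stdint.h>

/* ctr[0] = high 64 bits, ctr[1] = low 64 bits of the counter block */
static void ctr128_add(uint64_t ctr[2], uint64_t n)
{
	uint64_t lo = ctr[1] + n;

	if (lo < ctr[1])	/* carry out of the low half... */
		ctr[0]++;	/* ...must propagate, unlike with vadduwm */
	ctr[1] = lo;
}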
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index b5e29002b666..2183a2e77641 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_init_p8(ctx->htable, (const u64 *) key);
pagefault_enable();
@@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable,
dctx->buffer, GHASH_DIGEST_SIZE);
@@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
pagefault_enable();
@@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable,
dctx->buffer, GHASH_DIGEST_SIZE);
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index 0a6f899839dd..d8429cb71f02 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -61,6 +61,12 @@ $code=<<___;
mtspr 256,r0
li r10,0x30
lvx_u $H,0,r4 # load H
+ le?xor r7,r7,r7
+ le?addi r7,r7,0x8 # need a vperm start with 08
+ le?lvsr 5,0,r7
+ le?vspltisb 6,0x0f
+ le?vxor 5,5,6 # set a b-endian mask
+ le?vperm $H,$H,$H,5
vspltisb $xC2,-16 # 0xf0
vspltisb $t0,1 # one
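The GHASH key fix: on little-endian, lvx_u loads the 16 bytes of H in a layout the big-endian-oriented table-setup code does not expect, so the derived tables were wrong on LE. The lvsr/vxor pair above builds a permute mask (start index 0x08, xored with a 0x0f splat) that vperm applies to H before the tables are built; reading the mask, it appears to reverse the byte order within each 64-bit half of H. A rough C model under that reading, illustration only:

#include <stdint.h>

/* swap byte order inside each 8-byte half of H (assumed mask effect) */
static void h_le_fixup(uint8_t h[16])
{
	for (int half = 0; half < 16; half += 8)
		for (int i = 0; i < 4; i++) {
			uint8_t t = h[half + i];
			h[half + i] = h[half + 7 - i];
			h[half + 7 - i] = t;
		}
}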
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index a59188494af8..b9997335f193 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -169,6 +169,7 @@ my $vpmsumd = sub { vcrypto_op(@_, 1224); };
my $vpmsubh = sub { vcrypto_op(@_, 1096); };
my $vpmsumw = sub { vcrypto_op(@_, 1160); };
my $vaddudm = sub { vcrypto_op(@_, 192); };
+my $vadduqm = sub { vcrypto_op(@_, 256); };
my $mtsle = sub {
my ($f, $arg) = @_;