author     Linus Torvalds <torvalds@linux-foundation.org>    2019-11-25 19:49:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-11-25 19:49:58 -0800
commit     642356cb5f4a8c82b5ca5ebac288c327d10df236 (patch)
tree       85bdf911a1307d33838449cb8209b828dcfef1c7 /drivers/crypto
parent     f838767555d40f29bc4771c5c8cc63193094b7cc (diff)
parent     4ee812f6143d78d8ba1399671d78c8d78bf2817c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Add library interfaces of certain crypto algorithms for WireGuard
   - Remove the obsolete ablkcipher and blkcipher interfaces
   - Move add_early_randomness() out of rng_mutex

  Algorithms:
   - Add blake2b shash algorithm
   - Add blake2s shash algorithm
   - Add curve25519 kpp algorithm
   - Implement 4 way interleave in arm64/gcm-ce
   - Implement ciphertext stealing in powerpc/spe-xts
   - Add Eric Biggers's scalar accelerated ChaCha code for ARM
   - Add accelerated 32r2 code from Zinc for MIPS
   - Add OpenSSL/CRYPTOGRAMS poly1305 implementation for ARM and MIPS

  Drivers:
   - Fix entropy reading failures in ks-sa
   - Add support for sam9x60 in atmel
   - Add crypto accelerator for amlogic GXL
   - Add sun8i-ce Crypto Engine
   - Add sun8i-ss cryptographic offloader
   - Add a host of algorithms to inside-secure
   - Add NPCM RNG driver
   - add HiSilicon HPRE accelerator
   - Add HiSilicon TRNG driver"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (285 commits)
  crypto: vmx - Avoid weird build failures
  crypto: lib/chacha20poly1305 - use chacha20_crypt()
  crypto: x86/chacha - only unregister algorithms if registered
  crypto: chacha_generic - remove unnecessary setkey() functions
  crypto: amlogic - enable working on big endian kernel
  crypto: sun8i-ce - enable working on big endian
  crypto: mips/chacha - select CRYPTO_SKCIPHER, not CRYPTO_BLKCIPHER
  hwrng: ks-sa - Enable COMPILE_TEST
  crypto: essiv - remove redundant null pointer check before kfree
  crypto: atmel-aes - Change data type for "lastc" buffer
  crypto: atmel-tdes - Set the IV after {en,de}crypt
  crypto: sun4i-ss - fix big endian issues
  crypto: sun4i-ss - hide the Invalid keylen message
  crypto: sun4i-ss - use crypto_ahash_digestsize
  crypto: sun4i-ss - remove dependency on not 64BIT
  crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c
  MAINTAINERS: Add maintainer for HiSilicon SEC V2 driver
  crypto: hisilicon - add DebugFS for HiSilicon SEC
  Documentation: add DebugFS doc for HiSilicon SEC
  crypto: hisilicon - add SRIOV for HiSilicon SEC
  ...
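Most of the driver churn below is the tail end of the ablkcipher/blkcipher removal: Kconfig entries now select CRYPTO_SKCIPHER instead of CRYPTO_BLKCIPHER and the converted drivers register skcipher algorithms. For orientation, a minimal sketch of how kernel code drives such an skcipher follows; it is not part of this merge, and the demo_* name, flat buffer and abbreviated error handling are illustrative assumptions only.

/*
 * Sketch only: encrypt a contiguous buffer in place with whatever
 * "cbc(aes)" provider currently has the highest priority (hw or sw).
 */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Waits for completion, so async providers (crypto_engine) work too. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}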
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 92
-rw-r--r--  drivers/crypto/Makefile | 3
-rw-r--r--  drivers/crypto/allwinner/Kconfig | 87
-rw-r--r--  drivers/crypto/allwinner/Makefile | 3
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/Makefile (renamed from drivers/crypto/sunxi-ss/Makefile) | 0
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-cipher.c) | 34
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-core.c) | 139
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-hash.c) | 47
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-prng.c) | 9
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h (renamed from drivers/crypto/sunxi-ss/sun4i-ss.h) | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/Makefile | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 438
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 676
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 254
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/Makefile | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 436
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 642
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 218
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 6
-rw-r--r--  drivers/crypto/amlogic/Kconfig | 24
-rw-r--r--  drivers/crypto/amlogic/Makefile | 2
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 382
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 332
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl.h | 161
-rw-r--r--  drivers/crypto/atmel-aes.c | 590
-rw-r--r--  drivers/crypto/atmel-authenc.h | 2
-rw-r--r--  drivers/crypto/atmel-sha.c | 4
-rw-r--r--  drivers/crypto/atmel-tdes.c | 469
-rw-r--r--  drivers/crypto/bcm/cipher.c | 373
-rw-r--r--  drivers/crypto/bcm/cipher.h | 10
-rw-r--r--  drivers/crypto/bcm/spu2.c | 6
-rw-r--r--  drivers/crypto/caam/Kconfig | 6
-rw-r--r--  drivers/crypto/caam/caampkc.c | 72
-rw-r--r--  drivers/crypto/caam/caampkc.h | 8
-rw-r--r--  drivers/crypto/caam/ctrl.c | 222
-rw-r--r--  drivers/crypto/caam/intern.h | 4
-rw-r--r--  drivers/crypto/caam/qi.c | 8
-rw-r--r--  drivers/crypto/caam/qi.h | 1
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 292
-rw-r--r--  drivers/crypto/cavium/nitrox/Kconfig | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 39
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_dev.h | 15
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 9
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_mbx.c | 8
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_req.h | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 134
-rw-r--r--  drivers/crypto/ccp/Kconfig | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 94
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 169
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 100
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 14
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 13
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 14
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 15
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 3
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 59
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 1
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 3
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 4
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 334
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 16
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c | 5
-rw-r--r--  drivers/crypto/geode-aes.c | 433
-rw-r--r--  drivers/crypto/geode-aes.h | 15
-rw-r--r--  drivers/crypto/hifn_795x.c | 183
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 45
-rw-r--r--  drivers/crypto/hisilicon/Makefile | 6
-rw-r--r--  drivers/crypto/hisilicon/hpre/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h | 83
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 1137
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 1052
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 142
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 17
-rw-r--r--  drivers/crypto/hisilicon/sec2/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 156
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 889
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 198
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 1095
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 184
-rw-r--r--  drivers/crypto/hisilicon/sgl.h | 24
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 1
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 46
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 294
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 329
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 131
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 1506
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 1475
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 5
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 228
-rw-r--r--  drivers/crypto/marvell/cesa.h | 6
-rw-r--r--  drivers/crypto/marvell/cipher.c | 14
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 250
-rw-r--r--  drivers/crypto/mxs-dcp.c | 140
-rw-r--r--  drivers/crypto/n2_core.c | 194
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c | 81
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 45
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 87
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c | 76
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 29
-rw-r--r--  drivers/crypto/nx/nx.c | 64
-rw-r--r--  drivers/crypto/nx/nx.h | 19
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c | 18
-rw-r--r--  drivers/crypto/omap-aes.c | 209
-rw-r--r--  drivers/crypto/omap-aes.h | 4
-rw-r--r--  drivers/crypto/omap-des.c | 232
-rw-r--r--  drivers/crypto/padlock-aes.c | 157
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 386
-rw-r--r--  drivers/crypto/qat/Kconfig | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 304
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 4
-rw-r--r--  drivers/crypto/qce/Makefile | 2
-rw-r--r--  drivers/crypto/qce/cipher.h | 8
-rw-r--r--  drivers/crypto/qce/common.c | 12
-rw-r--r--  drivers/crypto/qce/common.h | 3
-rw-r--r--  drivers/crypto/qce/core.c | 2
-rw-r--r--  drivers/crypto/qce/dma.c | 4
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c (renamed from drivers/crypto/qce/ablkcipher.c) | 172
-rw-r--r--  drivers/crypto/rockchip/Makefile | 2
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 8
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h | 3
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 556
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 538
-rw-r--r--  drivers/crypto/s5p-sss.c | 187
-rw-r--r--  drivers/crypto/sahara.c | 156
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 338
-rw-r--r--  drivers/crypto/talitos.c | 314
-rw-r--r--  drivers/crypto/ux500/Kconfig | 2
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 371
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 3
-rw-r--r--  drivers/crypto/virtio/Kconfig | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 192
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 2
-rw-r--r--  drivers/crypto/vmx/Makefile | 6
137 files changed, 16546 insertions, 5463 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1fb622f2a87d..43ed1b621718 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -11,6 +11,8 @@ menuconfig CRYPTO_HW
if CRYPTO_HW
+source "drivers/crypto/allwinner/Kconfig"
+
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
@@ -26,7 +28,7 @@ config CRYPTO_DEV_PADLOCK
config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
help
Use VIA PadLock for AES algorithm.
@@ -54,7 +56,7 @@ config CRYPTO_DEV_GEODE
tristate "Support for the Geode LX AES engine"
depends on X86_32 && PCI
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Say 'Y' here to use the AMD Geode LX processor on-board AES
engine for the CryptoAPI AES algorithm.
@@ -107,7 +109,7 @@ config CRYPTO_PAES_S390
depends on ZCRYPT
depends on PKEY
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -169,7 +171,7 @@ config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This is the s390 hardware accelerated implementation of the
@@ -182,7 +184,7 @@ config CRYPTO_AES_S390
tristate "AES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197).
@@ -236,7 +238,7 @@ config CRYPTO_DEV_MARVELL_CESA
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
help
@@ -248,7 +250,7 @@ config CRYPTO_DEV_MARVELL_CESA
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
@@ -265,7 +267,7 @@ config CRYPTO_DEV_NIAGARA2
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
depends on !ARCH_DMA_ADDR_T_64BIT
@@ -285,7 +287,7 @@ config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select HW_RANDOM
depends on FSL_SOC
@@ -323,7 +325,7 @@ config CRYPTO_DEV_IXP4XX
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
@@ -332,11 +334,12 @@ config CRYPTO_DEV_PPC4XX
depends on PPC && 4xx
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AES
select CRYPTO_LIB_AES
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -373,7 +376,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_CBC
select CRYPTO_ECB
@@ -387,7 +390,7 @@ config CRYPTO_DEV_OMAP_DES
tristate "Support for OMAP DES/3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
help
OMAP processors have DES/3DES module accelerator. Select this if you
@@ -403,7 +406,7 @@ config CRYPTO_DEV_PICOXCELL
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_CBC
select CRYPTO_ECB
@@ -418,7 +421,7 @@ config CRYPTO_DEV_PICOXCELL
config CRYPTO_DEV_SAHARA
tristate "Support for SAHARA crypto accelerator"
depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
help
@@ -445,7 +448,7 @@ config CRYPTO_DEV_S5P
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for S5P crypto acceleration.
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
@@ -489,11 +492,9 @@ if CRYPTO_DEV_UX500
endif # if CRYPTO_DEV_UX500
config CRYPTO_DEV_ATMEL_AUTHENC
- tristate "Support for Atmel IPSEC/SSL hw accelerator"
+ bool "Support for Atmel IPSEC/SSL hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
- select CRYPTO_AUTHENC
- select CRYPTO_DEV_ATMEL_AES
- select CRYPTO_DEV_ATMEL_SHA
+ depends on CRYPTO_DEV_ATMEL_AES
help
Some Atmel processors can combine the AES and SHA hw accelerators
to enhance support of IPSEC/SSL.
@@ -505,7 +506,9 @@ config CRYPTO_DEV_ATMEL_AES
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AUTHENC if CRYPTO_DEV_ATMEL_AUTHENC
+ select CRYPTO_DEV_ATMEL_SHA if CRYPTO_DEV_ATMEL_AUTHENC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
@@ -518,7 +521,7 @@ config CRYPTO_DEV_ATMEL_TDES
tristate "Support for Atmel DES/TDES hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Some Atmel processors have DES/TDES hw accelerator.
Select this if you want to use the Atmel module for
@@ -590,7 +593,7 @@ config CRYPTO_DEV_MXS_DCP
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB
@@ -620,7 +623,7 @@ config CRYPTO_DEV_QCE
select CRYPTO_CBC
select CRYPTO_XTS
select CRYPTO_CTR
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver supports Qualcomm crypto engine accelerator
hardware. To compile this driver as a module, choose M here. The
@@ -657,31 +660,6 @@ config CRYPTO_DEV_IMGTEC_HASH
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
hashing algorithms.
-config CRYPTO_DEV_SUN4I_SS
- tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI && !64BIT
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_AES
- select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
- help
- Some Allwinner SoC have a crypto accelerator named
- Security System. Select this if you want to use it.
- The Security System handle AES/DES/3DES ciphers in CBC mode
- and SHA1 and MD5 hash algorithms.
-
- To compile this driver as a module, choose M here: the module
- will be called sun4i-ss.
-
-config CRYPTO_DEV_SUN4I_SS_PRNG
- bool "Support for Allwinner Security System PRNG"
- depends on CRYPTO_DEV_SUN4I_SS
- select CRYPTO_RNG
- help
- Select this option if you want to provide kernel-side support for
- the Pseudo-Random Number Generator found in the Security System.
-
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -691,7 +669,7 @@ config CRYPTO_DEV_ROCKCHIP
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver interfaces with the hardware crypto accelerator.
@@ -702,7 +680,7 @@ config CRYPTO_DEV_MEDIATEK
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_SHA1
select CRYPTO_SHA256
@@ -730,7 +708,7 @@ config CRYPTO_DEV_BCM_SPU
select CRYPTO_SHA512
help
This driver provides support for Broadcom crypto acceleration using the
- Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
+ Secure Processing Unit (SPU). The SPU driver registers skcipher,
ahash, and aead algorithms with the kernel cryptographic API.
source "drivers/crypto/stm32/Kconfig"
@@ -740,7 +718,7 @@ config CRYPTO_DEV_SAFEXCEL
depends on OF || PCI || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_HASH
select CRYPTO_HMAC
@@ -748,6 +726,8 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_CHACHA20POLY1305
+ select CRYPTO_SHA3
help
This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic
engines designed by Inside Secure. It currently accelerates DES, 3DES and
@@ -762,7 +742,7 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_HASH
select CRYPTO_SHA1
@@ -779,7 +759,7 @@ config CRYPTO_DEV_CCREE
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
default n
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -805,4 +785,6 @@ config CRYPTO_DEV_CCREE
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/amlogic/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index afc4753b5d28..40229d499476 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
@@ -39,7 +40,6 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
-obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
@@ -48,3 +48,4 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += hisilicon/
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
new file mode 100644
index 000000000000..12e7c6a85a02
--- /dev/null
+++ b/drivers/crypto/allwinner/Kconfig
@@ -0,0 +1,87 @@
+config CRYPTO_DEV_ALLWINNER
+ bool "Support for Allwinner cryptographic offloader"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y if ARCH_SUNXI
+ help
+ Say Y here to get to see options for Allwinner hardware crypto devices
+
+config CRYPTO_DEV_SUN4I_SS
+ tristate "Support for Allwinner Security System cryptographic accelerator"
+ depends on ARCH_SUNXI
+ depends on PM
+ depends on CRYPTO_DEV_ALLWINNER
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ help
+ Some Allwinner SoC have a crypto accelerator named
+ Security System. Select this if you want to use it.
+ The Security System handle AES/DES/3DES ciphers in CBC mode
+ and SHA1 and MD5 hash algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i-ss.
+
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_SUN8I_CE
+ tristate "Support for Allwinner Crypto Engine cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the crypto Engine availlable on
+ Allwinner SoC H2+, H3, H5, H6, R40 and A64.
+ The Crypto Engine handle AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ce.
+
+config CRYPTO_DEV_SUN8I_CE_DEBUG
+ bool "Enable sun8i-ce stats"
+ depends on CRYPTO_DEV_SUN8I_CE
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ce debug stats.
+ This will create /sys/kernel/debug/sun8i-ce/stats for displaying
+ the number of requests per flow and per algorithm.
+
+config CRYPTO_DEV_SUN8I_SS
+ tristate "Support for Allwinner Security System cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the Security System available on
+ Allwinner SoC A80, A83T.
+ The Security System handle AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ss.
+
+config CRYPTO_DEV_SUN8I_SS_DEBUG
+ bool "Enable sun8i-ss stats"
+ depends on CRYPTO_DEV_SUN8I_SS
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ss debug stats.
+ This will create /sys/kernel/debug/sun8i-ss/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/allwinner/Makefile b/drivers/crypto/allwinner/Makefile
new file mode 100644
index 000000000000..6effe864d7ff
--- /dev/null
+++ b/drivers/crypto/allwinner/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss/
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/allwinner/sun4i-ss/Makefile
index c0a2797d3168..c0a2797d3168 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/allwinner/sun4i-ss/Makefile
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 6536fd4bee65..cb2b0874f68f 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -72,7 +72,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oi = 0;
oo = 0;
do {
- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
@@ -87,7 +88,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
oleft -= todo;
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
@@ -239,7 +241,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* todo is the number of consecutive 4byte word that we
* can read from current SG
*/
- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft / 4);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo && !ob) {
writesl(ss->base + SS_RXFIFO, mi.addr + oi,
todo);
@@ -253,8 +256,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we need to be able to write all buf in one
* pass, so it is why we min() with rx_cnt
*/
- todo = min3(rx_cnt * 4 - ob, ileft,
- mi.length - oi);
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
memcpy(buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
@@ -274,7 +277,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
+ dev_dbg(ss->dev,
+ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
@@ -282,7 +286,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (!tx_cnt)
continue;
/* todo in 4bytes word */
- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
@@ -308,7 +313,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* no more than remaining buffer
* no need to test against oleft
*/
- todo = min(mo.length - oo, obl - obo);
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
memcpy(mo.addr + oo, bufo + obo, todo);
oleft -= todo;
obo += todo;
@@ -480,6 +486,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
const char *name = crypto_tfm_alg_name(tfm);
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -497,13 +504,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ goto error_pm;
+
return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
}
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put(op->ss->dev);
}
/* check and set the AES key, prepare the mode to be used */
@@ -524,7 +540,7 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keymode = SS_AES_256BITS;
break;
default:
- dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 9aa6fe081a27..814cd12149a9 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -44,7 +44,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -70,7 +71,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -223,6 +225,82 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sun4i_ss_pm_suspend(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+
+ clk_disable_unprepare(ss->ssclk);
+ clk_disable_unprepare(ss->busclk);
+ return 0;
+}
+
+static int sun4i_ss_pm_resume(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+
+ int err;
+
+ err = clk_prepare_enable(ss->busclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable busclk\n");
+ goto err_enable;
+ }
+
+ err = clk_prepare_enable(ss->ssclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable ssclk\n");
+ goto err_enable;
+ }
+
+ if (ss->reset) {
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
+ }
+ }
+
+ return err;
+err_enable:
+ sun4i_ss_pm_suspend(dev);
+ return err;
+}
+
+const struct dev_pm_ops sun4i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
+};
+
+/*
+ * When power management is enabled, this function enables the PM and set the
+ * device as suspended
+ * When power management is disabled, this function just enables the device
+ */
+static int sun4i_ss_pm_init(struct sun4i_ss_ctx *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun4i_ss_pm_exit(struct sun4i_ss_ctx *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
static int sun4i_ss_probe(struct platform_device *pdev)
{
u32 v;
@@ -269,18 +347,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
ss->reset = NULL;
}
- /* Enable both clocks */
- err = clk_prepare_enable(ss->busclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
- return err;
- }
- err = clk_prepare_enable(ss->ssclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
- goto error_ssclk;
- }
-
/*
* Check that clock have the correct rates given in the datasheet
* Try to set the clock to the maximum allowed
@@ -288,16 +354,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
err = clk_set_rate(ss->ssclk, cr_mod);
if (err) {
dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
- goto error_clk;
- }
-
- /* Deassert reset if we have a reset control */
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(&pdev->dev, "Cannot deassert reset control\n");
- goto error_clk;
- }
+ return err;
}
/*
@@ -325,12 +382,26 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
cr, cr / 1000000, cr_mod);
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ spin_lock_init(&ss->slock);
+
+ err = sun4i_ss_pm_init(ss);
+ if (err)
+ return err;
+
/*
* Datasheet named it "Die Bonding ID"
* I expect to be a sort of Security System Revision number.
* Since the A80 seems to have an other version of SS
* this info could be useful
*/
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_pm;
+
writel(SS_ENABLED, ss->base + SS_CTL);
v = readl(ss->base + SS_CTL);
v >>= 16;
@@ -338,9 +409,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Die ID %d\n", v);
writel(0, ss->base + SS_CTL);
- ss->dev = &pdev->dev;
-
- spin_lock_init(&ss->slock);
+ pm_runtime_put_sync(ss->dev);
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
ss_algs[i].ss = ss;
@@ -370,7 +439,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
break;
}
}
- platform_set_drvdata(pdev, ss);
return 0;
error_alg:
i--;
@@ -387,12 +455,8 @@ error_alg:
break;
}
}
- if (ss->reset)
- reset_control_assert(ss->reset);
-error_clk:
- clk_disable_unprepare(ss->ssclk);
-error_ssclk:
- clk_disable_unprepare(ss->busclk);
+error_pm:
+ sun4i_ss_pm_exit(ss);
return err;
}
@@ -415,11 +479,7 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
}
- writel(0, ss->base + SS_CTL);
- if (ss->reset)
- reset_control_assert(ss->reset);
- clk_disable_unprepare(ss->busclk);
- clk_disable_unprepare(ss->ssclk);
+ sun4i_ss_pm_exit(ss);
return 0;
}
@@ -434,6 +494,7 @@ static struct platform_driver sun4i_ss_driver = {
.remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
+ .pm = &sun4i_ss_pm_ops,
.of_match_table = a20ss_crypto_of_match_table,
},
};
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fcffba5ef927..fdc0e6cdbb85 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -19,17 +19,29 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct sun4i_ss_alg_template *algt;
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ return err;
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sun4i_req_ctx));
return 0;
}
+void sun4i_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ pm_runtime_put(op->ss->dev);
+}
+
/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
@@ -175,7 +187,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
unsigned int in_i = 0;
- u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
+ u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -184,6 +196,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
+ __le32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -215,7 +228,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
if (op->byte_count) {
ivmode = SS_IV_ARBITRARY;
- for (i = 0; i < 5; i++)
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
writel(op->hash[i], ss->base + SS_IV0 + i * 4);
}
/* Enable the device */
@@ -272,8 +285,8 @@ static int sun4i_hash(struct ahash_request *areq)
*/
while (op->len < 64 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, end - i,
- 64 - op->len);
+ in_r = min(end - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -293,8 +306,8 @@ static int sun4i_hash(struct ahash_request *areq)
}
if (mi.length - in_i > 3 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- ((mi.length - in_i) / 4) * 4);
+ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
+ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
/* how many bytes we can write in the device*/
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
@@ -320,8 +333,8 @@ static int sun4i_hash(struct ahash_request *areq)
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- 64 - op->len);
+ in_r = min(areq->nbytes - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -395,7 +408,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
- wb = *(u32 *)(op->buf + nwait * 4);
+ wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -404,7 +417,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
- bf[j++] = wb;
+ bf[j++] = le32_to_cpu(wb);
/*
* number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
@@ -423,13 +436,13 @@ hash_final:
/* write the length of data */
if (op->mode == SS_OP_SHA1) {
- __be64 bits = cpu_to_be64(op->byte_count << 3);
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __be64 *bits = (__be64 *)&bf[j];
+ *bits = cpu_to_be64(op->byte_count << 3);
+ j += 2;
} else {
- __le64 bits = op->byte_count << 3;
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __le64 *bits = (__le64 *)&bf[j];
+ *bits = cpu_to_le64(op->byte_count << 3);
+ j += 2;
}
writesl(ss->base + SS_RXFIFO, bf, j);
@@ -471,7 +484,7 @@ hash_final:
}
} else {
for (i = 0; i < 4; i++) {
- v = readl(ss->base + SS_MD0 + i * 4);
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 63d636424161..729aafdbea84 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -17,7 +17,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
{
struct sun4i_ss_alg_template *algt;
struct rng_alg *alg = crypto_rng_alg(tfm);
- int i;
+ int i, err;
u32 v;
u32 *data = (u32 *)dst;
const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
@@ -28,6 +28,10 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ return err;
+
spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -52,5 +56,8 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
writel(0, ss->base + SS_CTL);
spin_unlock_bh(&ss->slock);
+
+ pm_runtime_put(ss->dev);
+
return 0;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 35a27a7145f8..60425ac75d90 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/sha.h>
@@ -177,6 +178,7 @@ struct sun4i_req_ctx {
};
int sun4i_hash_crainit(struct crypto_tfm *tfm);
+void sun4i_hash_craexit(struct crypto_tfm *tfm);
int sun4i_hash_init(struct ahash_request *areq);
int sun4i_hash_update(struct ahash_request *areq);
int sun4i_hash_final(struct ahash_request *areq);
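The sun4i-ss changes above tie runtime PM to transform lifetime: every cra_init takes a pm_runtime reference on the controller and every cra_exit drops it, so the engine is only powered while at least one transform exists (with the autosuspend delay set at probe). A condensed sketch of that pattern follows; the my_* names are illustrative assumptions, not the driver's own structures.

/*
 * Sketch only: keep the controller resumed for as long as a transform
 * is allocated.  my_tfm_ctx/my_cipher_* are hypothetical names.
 */
#include <linux/pm_runtime.h>
#include <linux/crypto.h>

struct my_tfm_ctx {
	struct device *dev;	/* controller device, found via the alg template */
};

static int my_cipher_init(struct crypto_tfm *tfm)
{
	struct my_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = pm_runtime_get_sync(ctx->dev);
	if (err < 0) {
		/* get_sync() raises the usage count even on failure */
		pm_runtime_put_noidle(ctx->dev);
		return err;
	}
	return 0;
}

static void my_cipher_exit(struct crypto_tfm *tfm)
{
	struct my_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Last user gone: autosuspend may now power the controller down. */
	pm_runtime_put(ctx->dev);
}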
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
new file mode 100644
index 000000000000..08b68c3c1ca9
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
+sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
new file mode 100644
index 000000000000..37d0b6c386a0
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-cipher.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file add support for AES cipher with 128,192,256 bits keysize in
+ * CBC and ECB mode.
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ce.h"
+
+static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct scatterlist *sg;
+
+ if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ return true;
+
+ if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
+ return true;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ dma_addr_t addr_iv = 0, addr_key = 0;
+ void *backup_iv = NULL;
+ u32 common, sym;
+ int flow, i;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+
+ dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+
+ flow = rctx->flow;
+
+ chan = &ce->chanlist[flow];
+
+ cet = chan->tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->alg_cipher[algt->ce_algo_id];
+ common |= rctx->op_dir | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+ /* CTS and recent CE (H6) need length in bytes, in word otherwise */
+ if (ce->variant->has_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(areq->cryptlen);
+ else
+ cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
+
+ sym = ce->variant->op_mode[algt->ce_blockmode];
+ len = op->keylen;
+ switch (len) {
+ case 128 / 8:
+ sym |= CE_AES_128BITS;
+ break;
+ case 192 / 8:
+ sym |= CE_AES_192BITS;
+ break;
+ case 256 / 8:
+ sym |= CE_AES_256BITS;
+ break;
+ }
+
+ cet->t_sym_ctl = cpu_to_le32(sym);
+ cet->t_asym_ctl = 0;
+
+ chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
+ chan->op_dir = rctx->op_dir;
+ chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
+ chan->keylen = op->keylen;
+
+ addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ cet->t_key = cpu_to_le32(addr_key);
+ if (dma_mapping_error(ce->dev, addr_key)) {
+ dev_err(ce->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ chan->ivlen = ivsize;
+ chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!chan->bounce_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & CE_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(chan->bounce_iv, areq->iv, ivsize);
+ addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ cet->t_iv = cpu_to_le32(addr_iv);
+ if (dma_mapping_error(ce->dev, addr_iv)) {
+ dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->dst, sg, nr_sgd, i) {
+ cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_dst[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ chan->timeout = areq->cryptlen;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (areq->iv && ivsize > 0) {
+ if (addr_iv)
+ dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(chan->bounce_iv);
+ }
+
+theend_key:
+ dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+ return err;
+}
+
+static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_DECRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_ENCRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ce_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ op->ce = algt->ce;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync_suspend(op->ce->dev);
+}
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = verify_skcipher_des3_key(tfm, key);
+ if (err)
+ return err;
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
new file mode 100644
index 000000000000..73a7649f915d
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-core.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine.
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ce.h"
+
+/*
+ * mod clock is lower on H3 than other SoC due to some DMA timeout occurring
+ * with high value.
+ * If you want to tune mod clock, loading driver and passing selftest is
+ * insufficient, you need to test with some LUKS test (mount and write to it)
+ */
+static const struct ce_variant ce_h3_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 50000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h5_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h6_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .has_t_dlen_in_bytes = true,
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ { "ram", 0, 400000000 },
+ }
+};
+
+static const struct ce_variant ce_a64_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_r40_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+/*
+ * sun8i_ce_get_engine_number() get the next channel slot
+ * This is a simple round-robin way of getting the next channel
+ */
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
+{
+ return atomic_inc_return(&ce->flow) % MAXFLOW;
+}
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
+{
+ u32 v;
+ int err = 0;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->chanlist[flow].stat_req++;
+#endif
+
+ mutex_lock(&ce->mlock);
+
+ v = readl(ce->base + CE_ICR);
+ v |= 1 << flow;
+ writel(v, ce->base + CE_ICR);
+
+ reinit_completion(&ce->chanlist[flow].complete);
+ writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);
+
+ ce->chanlist[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ writel(v, ce->base + CE_TLR);
+ mutex_unlock(&ce->mlock);
+
+ wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
+ msecs_to_jiffies(ce->chanlist[flow].timeout));
+
+ if (ce->chanlist[flow].status == 0) {
+ dev_err(ce->dev, "DMA timeout for %s\n", name);
+ err = -EFAULT;
+ }
+ /* No need to lock for this read, the channel is locked so
+ * nothing could modify the error value for this channel
+ */
+ v = readl(ce->base + CE_ESR);
+ if (v) {
+ v >>= (flow * 4);
+ v &= 0xFF;
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ if (v & CE_ERR_ADDR_INVALID)
+ dev_err(ce->dev, "CE ERROR: address invalid\n");
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ce->base + CE_ISR);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ce->base + CE_ISR);
+ ce->chanlist[flow].status = 1;
+ complete(&ce->chanlist[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ce_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ce_dev *ce = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ce_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ce_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
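+
+/*
+ * Editor's illustration: with CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG set,
+ * reading /sys/kernel/debug/sun8i-ce/stats prints one line per flow and
+ * one per registered algorithm (the numbers below are made up):
+ *
+ *	Channel 0: nreq 1024
+ *	Channel 1: nreq 1023
+ *	Channel 2: nreq 1023
+ *	Channel 3: nreq 1023
+ *	cbc-aes-sun8i-ce cbc(aes) 2048 0
+ *	ecb-aes-sun8i-ce ecb(aes) 2045 0
+ */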
+
+static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ce->chanlist[i].engine);
+ if (ce->chanlist[i].tl)
+ dma_free_coherent(ce->dev, sizeof(struct ce_task),
+ ce->chanlist[i].tl,
+ ce->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
+{
+ int i, err;
+
+ ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
+ sizeof(struct sun8i_ce_flow), GFP_KERNEL);
+ if (!ce->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ce->chanlist[i].complete);
+
+ ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true);
+ if (!ce->chanlist[i].engine) {
+ dev_err(ce->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ce->chanlist[i].engine);
+ if (err) {
+ dev_err(ce->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ ce->chanlist[i].tl = dma_alloc_coherent(ce->dev,
+ sizeof(struct ce_task),
+ &ce->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!ce->chanlist[i].tl) {
+ dev_err(ce->dev, "Cannot get DMA memory for task %d\n",
+ i);
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ce_free_chanlist(ce, i);
+ return err;
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sun8i_ce_pm_suspend(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ce->reset);
+ for (i = 0; i < CE_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ce->ceclks[i]);
+ return 0;
+}
+
+static int sun8i_ce_pm_resume(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ce->ceclks[i]);
+ if (err) {
+ dev_err(ce->dev, "Cannot prepare_enable %s\n",
+ ce->variant->ce_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ce->reset);
+ if (err) {
+ dev_err(ce->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ return 0;
+error:
+ sun8i_ce_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ce_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL)
+};
+
+static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ce->dev);
+ pm_runtime_set_autosuspend_delay(ce->dev, 2000);
+
+ err = pm_runtime_set_suspended(ce->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ce->dev);
+ return err;
+}
+
+static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
+{
+ pm_runtime_disable(ce->dev);
+}
+
+static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name);
+ if (IS_ERR(ce->ceclks[i])) {
+ err = PTR_ERR(ce->ceclks[i]);
+ dev_err(ce->dev, "Cannot get %s CE clock err=%d\n",
+ ce->variant->ce_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ce->ceclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ce->variant->ce_clks[i].freq > 0 &&
+ cr != ce->variant->ce_clks[i].freq) {
+			dev_info(ce->dev, "Set %s clock to %lu (%lu MHz) from %lu (%lu MHz)\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq,
+ ce->variant->ce_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq);
+ if (err)
+				dev_err(ce->dev, "Failed to set %s clock rate to %lu Hz\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq);
+ }
+ if (ce->variant->ce_clks[i].max_freq > 0 &&
+ cr > ce->variant->ce_clks[i].max_freq)
+			dev_warn(ce->dev, "Frequency for %s (%lu Hz) is higher than datasheet's recommendation (%lu Hz)\n",
+ ce->variant->ce_clks[i].name, cr,
+ ce->variant->ce_clks[i].max_freq);
+ }
+ return 0;
+}
+
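+/*
+ * Editor's note on sun8i_ce_get_clks() above: the names requested ("bus",
+ * "mod" and, on the H6 variant, "ram") are the ones a matching device tree
+ * node is expected to provide. A freq of 0 in ce_clks means "keep the
+ * current rate"; a non-zero max_freq only triggers the warning above and
+ * does not change the rate.
+ */
+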
+static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
+{
+ int ce_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ce = ce;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ce->variant->alg_cipher[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ id = ce_algs[i].ce_blockmode;
+ ce_method = ce->variant->op_mode[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+				dev_err(ce->dev, "ERROR: Failed to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ default:
+ ce_algs[i].ce = NULL;
+ dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ce_probe(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce;
+ int err, irq;
+ u32 v;
+
+ ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ ce->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ce);
+
+ ce->variant = of_device_get_match_data(&pdev->dev);
+ if (!ce->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+	ce->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ce->base))
+ return PTR_ERR(ce->base);
+
+ err = sun8i_ce_get_clks(ce);
+ if (err)
+ return err;
+
+ /* Get Non Secure IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
+ return irq;
+ }
+
+ ce->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ce->reset)) {
+ if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ce->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ce->reset);
+ }
+
+ mutex_init(&ce->mlock);
+
+ err = sun8i_ce_allocate_chanlist(ce);
+ if (err)
+ return err;
+
+ err = sun8i_ce_pm_init(ce);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
+ "sun8i-ce-ns", ce);
+ if (err) {
+ dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ce_register_algs(ce);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ce->base + CE_CTR);
+ v >>= CE_DIE_ID_SHIFT;
+ v &= CE_DIE_ID_MASK;
+ dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v);
+
+ pm_runtime_put_sync(ce->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ /* Ignore error of debugfs */
+ ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ ce->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ce->dbgfs_dir, ce,
+ &sun8i_ce_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ce_unregister_algs(ce);
+error_irq:
+ sun8i_ce_pm_exit(ce);
+error_pm:
+	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
+ return err;
+}
+
+static int sun8i_ce_remove(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
+
+ sun8i_ce_unregister_algs(ce);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ debugfs_remove_recursive(ce->dbgfs_dir);
+#endif
+
+	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
+
+ sun8i_ce_pm_exit(ce);
+ return 0;
+}
+
+static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-h3-crypto",
+ .data = &ce_h3_variant },
+ { .compatible = "allwinner,sun8i-r40-crypto",
+ .data = &ce_r40_variant },
+ { .compatible = "allwinner,sun50i-a64-crypto",
+ .data = &ce_a64_variant },
+ { .compatible = "allwinner,sun50i-h5-crypto",
+ .data = &ce_h5_variant },
+ { .compatible = "allwinner,sun50i-h6-crypto",
+ .data = &ce_h6_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
+
+static struct platform_driver sun8i_ce_driver = {
+ .probe = sun8i_ce_probe,
+ .remove = sun8i_ce_remove,
+ .driver = {
+ .name = "sun8i-ce",
+ .pm = &sun8i_ce_pm_ops,
+ .of_match_table = sun8i_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ce_driver);
+
+MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
new file mode 100644
index 000000000000..43db49ceafe4
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ce.h - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+/* CE Registers */
+#define CE_TDQ 0x00
+#define CE_CTR 0x04
+#define CE_ICR 0x08
+#define CE_ISR 0x0C
+#define CE_TLR 0x10
+#define CE_TSR 0x14
+#define CE_ESR 0x18
+#define CE_CSSGR 0x1C
+#define CE_CDSGR 0x20
+#define CE_CSAR 0x24
+#define CE_CDAR 0x28
+#define CE_TPR 0x2C
+
+/* Used in struct ce_task */
+/* ce_task common */
+#define CE_ENCRYPTION 0
+#define CE_DECRYPTION BIT(8)
+
+#define CE_COMM_INT BIT(31)
+
+/* ce_task symmetric */
+#define CE_AES_128BITS 0
+#define CE_AES_192BITS 1
+#define CE_AES_256BITS 2
+
+#define CE_OP_ECB 0
+#define CE_OP_CBC (1 << 8)
+
+#define CE_ALG_AES 0
+#define CE_ALG_DES 1
+#define CE_ALG_3DES 2
+
+/* Used in ce_variant */
+#define CE_ID_NOTSUPP 0xFF
+
+#define CE_ID_CIPHER_AES 0
+#define CE_ID_CIPHER_DES 1
+#define CE_ID_CIPHER_DES3 2
+#define CE_ID_CIPHER_MAX 3
+
+#define CE_ID_OP_ECB 0
+#define CE_ID_OP_CBC 1
+#define CE_ID_OP_MAX 2
+
+/* Used in CE registers */
+#define CE_ERR_ALGO_NOTSUP BIT(0)
+#define CE_ERR_DATALEN BIT(1)
+#define CE_ERR_KEYSRAM BIT(2)
+#define CE_ERR_ADDR_INVALID BIT(5)
+#define CE_ERR_KEYLADDER BIT(6)
+
+#define CE_DIE_ID_SHIFT 16
+#define CE_DIE_ID_MASK 0x07
+
+#define MAX_SG 8
+
+#define CE_MAX_CLOCKS 3
+
+#define MAXFLOW 4
+
+/*
+ * struct ce_clock - Describe clocks used by sun8i-ce
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock (generally given by the datasheet)
+ */
+struct ce_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ce_variant - Describe CE capability for each variant hardware
+ * @alg_cipher: list of supported ciphers. For each CE_ID_ this will give the
+ *              corresponding CE_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @has_t_dlen_in_bytes:	Is the request length for ciphers given in
+ *				bytes (true) or in 32-bit words (false)
+ * @ce_clks: list of clocks needed by this variant
+ */
+struct ce_variant {
+ char alg_cipher[CE_ID_CIPHER_MAX];
+ u32 op_mode[CE_ID_OP_MAX];
+ bool has_t_dlen_in_bytes;
+ struct ce_clock ce_clks[CE_MAX_CLOCKS];
+};
+
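+/*
+ * Editor's illustration (hypothetical SoC, not a real variant): each entry
+ * of alg_cipher and op_mode is indexed by a CE_ID_xxx constant and holds
+ * the matching CE_ALG_xxx (or CE_OP_xxx) value expected by the hardware,
+ * with CE_ID_NOTSUPP marking an unsupported algorithm:
+ *
+ *	static const struct ce_variant ce_example_variant = {
+ *		.alg_cipher = { CE_ALG_AES, CE_ID_NOTSUPP, CE_ALG_3DES, },
+ *		.op_mode = { CE_OP_ECB, CE_OP_CBC, },
+ *		.ce_clks = {
+ *			{ "bus", 0, 200000000 },
+ *			{ "mod", 300000000, 0 },
+ *		}
+ *	};
+ */
+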
+struct sginfo {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+/*
+ * struct ce_task - CE Task descriptor
+ * The structure of this descriptor can be found in the datasheet
+ */
+struct ce_task {
+ __le32 t_id;
+ __le32 t_common_ctl;
+ __le32 t_sym_ctl;
+ __le32 t_asym_ctl;
+ __le32 t_key;
+ __le32 t_iv;
+ __le32 t_ctr;
+ __le32 t_dlen;
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ __le32 next;
+ __le32 reserved[3];
+} __packed __aligned(8);
+
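+/*
+ * Editor's sketch of how a cipher job is typically described with this
+ * layout (field meanings only, not the driver's exact code; the real
+ * programming is done in sun8i-ce-cipher.c, and dma_key, dma_iv, ce_method,
+ * ce_op_mode, ce_keysize and todo are placeholder names):
+ *
+ *	cet->t_id = cpu_to_le32(flow);
+ *	cet->t_common_ctl = cpu_to_le32(ce_method | rctx->op_dir | CE_COMM_INT);
+ *	cet->t_sym_ctl = cpu_to_le32(ce_op_mode | ce_keysize);
+ *	cet->t_key = cpu_to_le32(dma_key);
+ *	cet->t_iv = cpu_to_le32(dma_iv);
+ *	cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
+ *	cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ *	cet->t_src[i].len = cpu_to_le32(todo / 4);
+ *
+ * t_dst is filled the same way; the /4 reflects lengths given in 32-bit
+ * words, which variants with has_t_dlen_in_bytes replace with a byte
+ * count for t_dlen.
+ */
+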
+/*
+ * struct sun8i_ce_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @bounce_iv: buffer which contains the IV
+ * @ivlen: size of bounce_iv
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @method: current method for flow
+ * @op_dir: direction (encrypt vs decrypt) of this flow
+ * @op_mode: op_mode for this flow
+ * @t_phy: Physical address of task
+ * @timeout: timeout (in milliseconds) to wait for the current task on this flow
+ * @tl: pointer to the current ce_task for this flow
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ce_flow {
+ struct crypto_engine *engine;
+ void *bounce_iv;
+ unsigned int ivlen;
+ unsigned int keylen;
+ struct completion complete;
+ int status;
+ u32 method;
+ u32 op_dir;
+ u32 op_mode;
+ dma_addr_t t_phy;
+ int timeout;
+ struct ce_task *tl;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ce_dev - main container for all this driver information
+ * @base: base address of CE
+ * @ceclks: clocks used by CE
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ce_dev {
+ void __iomem *base;
+ struct clk *ceclks[CE_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ce_flow *chanlist;
+ atomic_t flow;
+ const struct ce_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ce_dev *ce;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ce_algo_id: the CE_ID for this template
+ * @ce_blockmode: the CE_ID of the block mode used by this template
+ * @ce: pointer to the sun8i_ce_dev structure associated with
+ * this template
+ * @alg: union of algorithm definitions; only the member matching @type is used
+ * @stat_req: number of requests handled with this template
+ * @stat_fb: number of requests which used the software fallback
+ */
+struct sun8i_ce_alg_template {
+ u32 type;
+ u32 ce_algo_id;
+ u32 ce_blockmode;
+ struct sun8i_ce_dev *ce;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_skdecrypt(struct skcipher_request *areq);
+int sun8i_ce_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile
new file mode 100644
index 000000000000..add7b0543fd5
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o
+sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
new file mode 100644
index 000000000000..f222979a5623
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-cipher.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128, 192 and 256 bit
+ * keysize in CBC and ECB mode.
+ *
+ * A link to the datasheet can be found in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ss.h"
+
+static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+
+	/*
+	 * The SS requires the same number of SGs, with the same lengths,
+	 * for source and destination
+	 */
+ in_sg = areq->src;
+ out_sg = areq->dst;
+ while (in_sg && out_sg) {
+ if (in_sg->length != out_sg->length)
+ return true;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ }
+ if (in_sg || out_sg)
+ return true;
+ return false;
+}
+
+static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ss_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ void *backup_iv = NULL;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+ int i;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+
+ dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+#endif
+
+ rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
+ rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
+ rctx->keylen = op->keylen;
+
+ rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_key)) {
+ dev_err(ss->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+	if (areq->iv && ivsize > 0) {
+ rctx->ivlen = ivsize;
+ rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!rctx->biv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(rctx->biv, areq->iv, ivsize);
+ rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->src;
+ while (i < nr_sgs && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgs_next;
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgs_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+		dev_err(ss->dev, "remaining len %u\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->dst;
+ while (i < nr_sgd && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgd_next;
+ rctx->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_dst[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgd_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+		dev_err(ss->dev, "remaining len %u\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (rctx->p_iv)
+ dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->biv) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ memzero_explicit(backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->biv);
+ }
+ }
+
+theend_key:
+ dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+
+ return err;
+}
+
+static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ss_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ss_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_DECRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ss_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_ENCRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ op->ss = algt->ss;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ss->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0) {
+ dev_err(op->ss->dev, "pm error %d\n", err);
+ goto error_pm;
+ }
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync(op->ss->dev);
+}
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
new file mode 100644
index 000000000000..90997cc509b8
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-core.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SecuritySystem
+ *
+ * A link to the datasheet can be found in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ss.h"
+
+static const struct ss_variant ss_a80_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+static const struct ss_variant ss_a83t_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+/*
+ * sun8i_ss_get_engine_number() gets the next channel slot
+ * This is a simple round-robin way of getting the next channel
+ */
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss)
+{
+ return atomic_inc_return(&ss->flow) % MAXFLOW;
+}
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
+ const char *name)
+{
+ int flow = rctx->flow;
+ u32 v = 1;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[flow].stat_req++;
+#endif
+
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= rctx->op_mode;
+ v |= rctx->method;
+
+ if (rctx->op_dir)
+ v |= SS_DECRYPTION;
+
+ switch (rctx->keylen) {
+ case 128 / 8:
+ v |= SS_AES_128BITS << 7;
+ break;
+ case 192 / 8:
+ v |= SS_AES_192BITS << 7;
+ break;
+ case 256 / 8:
+ v |= SS_AES_256BITS << 7;
+ break;
+ }
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!rctx->t_dst[i].addr)
+ break;
+
+ mutex_lock(&ss->mlock);
+ writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
+
+ if (i == 0) {
+ if (rctx->p_iv)
+ writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
+ } else {
+ if (rctx->biv) {
+ if (rctx->op_dir == SS_ENCRYPTION)
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ else
+ writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ }
+ }
+
+ dev_dbg(ss->dev,
+ "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n",
+ i, flow, name, v,
+ rctx->t_src[i].len, rctx->t_dst[i].len,
+ rctx->method, rctx->op_mode,
+ rctx->op_dir, rctx->t_src[i].len);
+
+ writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ mutex_unlock(&ss->mlock);
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
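+/*
+ * Editor's note on the IV handling in sun8i_ss_run_task() above: the SS has
+ * no IV chaining across successive jobs, so for CBC the second and later
+ * chunks reuse the last ciphertext block of the previous chunk as IV, i.e.
+ * the IV register is pointed at (sginfo lengths are in 32-bit words):
+ *
+ *	t_dst[i - 1].addr + t_dst[i - 1].len * 4 - ivlen	(encryption)
+ *	t_src[i - 1].addr + t_src[i - 1].len * 4 - ivlen	(decryption)
+ */
+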
+static irqreturn_t ss_irq_handler(int irq, void *data)
+{
+ struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ss->base + SS_INT_STA_REG);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ss->base + SS_INT_STA_REG);
+ ss->flows[flow].status = 1;
+ complete(&ss->flows[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ss_alg_template ss_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ss_dev *ss = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ss_algs[i].alg.skcipher.base.cra_driver_name,
+ ss_algs[i].alg.skcipher.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ss_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ss_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ss->flows[i].engine);
+ i--;
+ }
+}
+
+/*
+ * Allocate the flow list structure
+ */
+static int allocate_flows(struct sun8i_ss_dev *ss)
+{
+ int i, err;
+
+ ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
+ GFP_KERNEL);
+ if (!ss->flows)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ss->flows[i].complete);
+
+ ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
+ if (!ss->flows[i].engine) {
+ dev_err(ss->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ss->flows[i].engine);
+ if (err) {
+ dev_err(ss->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ss_free_flows(ss, i);
+ return err;
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sun8i_ss_pm_suspend(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ss->reset);
+ for (i = 0; i < SS_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ss->ssclks[i]);
+ return 0;
+}
+
+static int sun8i_ss_pm_resume(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ss->ssclks[i]);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable %s\n",
+ ss->variant->ss_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ /* enable interrupts for all flows */
+ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
+
+ return 0;
+error:
+ sun8i_ss_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL)
+};
+
+static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
+static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
+{
+ int ss_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ ss_algs[i].ss = ss;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ss_algs[i].ss_algo_id;
+ ss_method = ss->variant->alg_cipher[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+				dev_dbg(ss->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ id = ss_algs[i].ss_blockmode;
+ ss_method = ss->variant->op_mode[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+				dev_dbg(ss->dev, "DEBUG: Blockmode of %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+			dev_info(ss->dev, "Register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+ if (err) {
+				dev_err(ss->dev, "Failed to register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ return err;
+ }
+ break;
+ default:
+ ss_algs[i].ss = NULL;
+ dev_err(ss->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name);
+ if (IS_ERR(ss->ssclks[i])) {
+ err = PTR_ERR(ss->ssclks[i]);
+ dev_err(ss->dev, "Cannot get %s SS clock err=%d\n",
+ ss->variant->ss_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ss->ssclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ss->variant->ss_clks[i].freq > 0 &&
+ cr != ss->variant->ss_clks[i].freq) {
+			dev_info(ss->dev, "Set %s clock to %lu (%lu MHz) from %lu (%lu MHz)\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq,
+ ss->variant->ss_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq);
+ if (err)
+				dev_err(ss->dev, "Failed to set %s clock rate to %lu Hz\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq);
+ }
+ if (ss->variant->ss_clks[i].max_freq > 0 &&
+ cr > ss->variant->ss_clks[i].max_freq)
+			dev_warn(ss->dev, "Frequency for %s (%lu Hz) is higher than datasheet's recommendation (%lu Hz)\n",
+ ss->variant->ss_clks[i].name, cr,
+ ss->variant->ss_clks[i].max_freq);
+ }
+ return 0;
+}
+
+static int sun8i_ss_probe(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss;
+ int err, irq;
+ u32 v;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ ss->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ss->base))
+ return PTR_ERR(ss->base);
+
+ err = sun8i_ss_get_clks(ss);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ return irq;
+ }
+
+ ss->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ss->reset);
+ }
+
+ mutex_init(&ss->mlock);
+
+ err = allocate_flows(ss);
+ if (err)
+ return err;
+
+ err = sun8i_ss_pm_init(ss);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss);
+ if (err) {
+ dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ss_register_algs(ss);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ss->base + SS_CTL_REG);
+ v >>= SS_DIE_ID_SHIFT;
+ v &= SS_DIE_ID_MASK;
+ dev_info(&pdev->dev, "Security System Die ID %x\n", v);
+
+ pm_runtime_put_sync(ss->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ /* Ignore error of debugfs */
+ ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ss->dbgfs_dir, ss,
+ &sun8i_ss_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ss_unregister_algs(ss);
+error_irq:
+ sun8i_ss_pm_exit(ss);
+error_pm:
+	sun8i_ss_free_flows(ss, MAXFLOW - 1);
+ return err;
+}
+
+static int sun8i_ss_remove(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss = platform_get_drvdata(pdev);
+
+ sun8i_ss_unregister_algs(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ debugfs_remove_recursive(ss->dbgfs_dir);
+#endif
+
+	sun8i_ss_free_flows(ss, MAXFLOW - 1);
+
+ sun8i_ss_pm_exit(ss);
+
+ return 0;
+}
+
+static const struct of_device_id sun8i_ss_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a83t-crypto",
+ .data = &ss_a83t_variant },
+ { .compatible = "allwinner,sun9i-a80-crypto",
+ .data = &ss_a80_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
+
+static struct platform_driver sun8i_ss_driver = {
+ .probe = sun8i_ss_probe,
+ .remove = sun8i_ss_remove,
+ .driver = {
+ .name = "sun8i-ss",
+ .pm = &sun8i_ss_pm_ops,
+ .of_match_table = sun8i_ss_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
new file mode 100644
index 000000000000..b5f855f3de10
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ss.h - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+#define SS_ENCRYPTION 0
+#define SS_DECRYPTION BIT(6)
+
+#define SS_ALG_AES 0
+#define SS_ALG_DES (1 << 2)
+#define SS_ALG_3DES (2 << 2)
+
+#define SS_CTL_REG 0x00
+#define SS_INT_CTL_REG 0x04
+#define SS_INT_STA_REG 0x08
+#define SS_KEY_ADR_REG 0x10
+#define SS_IV_ADR_REG 0x18
+#define SS_SRC_ADR_REG 0x20
+#define SS_DST_ADR_REG 0x28
+#define SS_LEN_ADR_REG 0x30
+
+#define SS_ID_NOTSUPP 0xFF
+
+#define SS_ID_CIPHER_AES 0
+#define SS_ID_CIPHER_DES 1
+#define SS_ID_CIPHER_DES3 2
+#define SS_ID_CIPHER_MAX 3
+
+#define SS_ID_OP_ECB 0
+#define SS_ID_OP_CBC 1
+#define SS_ID_OP_MAX 2
+
+#define SS_AES_128BITS 0
+#define SS_AES_192BITS 1
+#define SS_AES_256BITS 2
+
+#define SS_OP_ECB 0
+#define SS_OP_CBC (1 << 13)
+
+#define SS_FLOW0 BIT(30)
+#define SS_FLOW1 BIT(31)
+
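+/*
+ * Editor's sketch of the SS_CTL_REG value assembled in sun8i_ss_run_task()
+ * (bit positions come from the defines above):
+ *
+ *	v = 1;				start the task
+ *	v |= SS_FLOW0 or SS_FLOW1;	bit 30/31: flow selection
+ *	v |= rctx->method;		SS_ALG_AES/DES/3DES, bits 2-3
+ *	v |= SS_DECRYPTION;		bit 6, only when decrypting
+ *	v |= SS_AES_xxxBITS << 7;	AES key size, bits 7-8
+ *	v |= rctx->op_mode;		SS_OP_ECB or SS_OP_CBC, bit 13
+ */
+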
+#define MAX_SG 8
+
+#define MAXFLOW 2
+
+#define SS_MAX_CLOCKS 2
+
+#define SS_DIE_ID_SHIFT 20
+#define SS_DIE_ID_MASK 0x07
+
+/*
+ * struct ss_clock - Describe clocks used by sun8i-ss
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock
+ */
+struct ss_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ss_variant - Describe SS capability for each variant hardware
+ * @alg_cipher: list of supported ciphers. For each SS_ID_ this will give the
+ *              corresponding SS_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @ss_clks: list of clocks needed by this variant
+ */
+struct ss_variant {
+ char alg_cipher[SS_ID_CIPHER_MAX];
+ u32 op_mode[SS_ID_OP_MAX];
+ struct ss_clock ss_clks[SS_MAX_CLOCKS];
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+};
+
+/*
+ * struct sun8i_ss_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ss_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ss_dev - main container for all this driver information
+ * @base: base address of SS
+ * @ssclks: clocks used by SS
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @flows: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ss_dev {
+ void __iomem *base;
+ struct clk *ssclks[SS_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ss_flow *flows;
+ atomic_t flow;
+ const struct ss_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv: buffer which contains the IV
+ */
+struct sun8i_cipher_req_ctx {
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ u32 p_key;
+ u32 p_iv;
+ u32 method;
+ u32 op_mode;
+ u32 op_dir;
+ int flow;
+ unsigned int ivlen;
+ unsigned int keylen;
+ void *biv;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ss: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ss_dev *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ss_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ss_algo_id: the SS_ID for this template
+ * @ss_blockmode: the SS_ID of the block mode used by this template
+ * @ss: pointer to the sun8i_ss_dev structure associated with
+ * this template
+ * @alg: union of algorithm definitions; only the member matching @type is used
+ * @stat_req: number of requests handled with this template
+ * @stat_fb: number of requests which used the software fallback
+ */
+struct sun8i_ss_alg_template {
+ u32 type;
+ u32 ss_algo_id;
+ u32 ss_blockmode;
+ struct sun8i_ss_dev *ss;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_skdecrypt(struct skcipher_request *areq);
+int sun8i_ss_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index de5e9352e920..7d6b695c4ab3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
- if (!dev->scatter_buffer_va) {
- dma_free_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- dev->sdr, dev->sdr_pa);
+ if (!dev->scatter_buffer_va)
return -ENOMEM;
- }
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
new file mode 100644
index 000000000000..b90850d18965
--- /dev/null
+++ b/drivers/crypto/amlogic/Kconfig
@@ -0,0 +1,24 @@
+config CRYPTO_DEV_AMLOGIC_GXL
+ tristate "Support for amlogic cryptographic offloader"
+ default y if ARCH_MESON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ help
+ Select y here to have support for the cryptographic offloader
+	  available on the Amlogic GXL SoC.
+ This hardware handles AES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amlogic-gxl-crypto.
+
+config CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ bool "Enable amlogic stats"
+ depends on CRYPTO_DEV_AMLOGIC_GXL
+ depends on DEBUG_FS
+ help
+ Say y to enable amlogic-crypto debug stats.
+ This will create /sys/kernel/debug/gxl-crypto/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/amlogic/Makefile b/drivers/crypto/amlogic/Makefile
new file mode 100644
index 000000000000..39057e62c13e
--- /dev/null
+++ b/drivers/crypto/amlogic/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic-gxl-crypto.o
+amlogic-gxl-crypto-y := amlogic-gxl-core.o amlogic-gxl-cipher.o
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
new file mode 100644
index 000000000000..e589015aac1c
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * This file adds support for the AES cipher with 128, 192 and 256-bit key sizes
+ * in CBC and ECB mode.
+ */
+
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <crypto/internal/skcipher.h>
+#include "amlogic-gxl.h"
+
+static int get_engine_number(struct meson_dev *mc)
+{
+ return atomic_inc_return(&mc->flow) % MAXFLOW;
+}
+
+static bool meson_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if (sg_nents(src_sg) != sg_nents(dst_sg))
+ return true;
+
+ /* KEY/IV descriptors use 3 desc */
+ if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
+ return true;
+
+ while (src_sg && dst_sg) {
+ if ((src_sg->length % 16) != 0)
+ return true;
+ if ((dst_sg->length % 16) != 0)
+ return true;
+ if (src_sg->length != dst_sg->length)
+ return true;
+ if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
+ return true;
+ if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
+ return true;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ return false;
+}
+
+static int meson_cipher_do_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(req, op->fallback_tfm);
+ skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir == MESON_DECRYPT)
+ err = crypto_skcipher_decrypt(req);
+ else
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return err;
+}
+
+static int meson_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct meson_dev *mc = op->mc;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+ int flow = rctx->flow;
+ unsigned int todo, eat, len;
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+ struct meson_desc *desc;
+ int nr_sgs, nr_sgd;
+ int i, err = 0;
+ unsigned int keyivlen, ivsize, offset, tloffset;
+ dma_addr_t phykeyiv;
+ void *backup_iv = NULL, *bkeyiv;
+ __le32 v;
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+
+ dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, crypto_skcipher_ivsize(tfm),
+ op->keylen, flow);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt->stat_req++;
+ mc->chanlist[flow].stat_req++;
+#endif
+
+ /*
+ * The hardware expects a list of meson_desc structures.
+ * The first two structures store the key,
+ * the third stores the IV.
+ */
+ bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
+ if (!bkeyiv)
+ return -ENOMEM;
+
+ memcpy(bkeyiv, op->key, op->keylen);
+ keyivlen = op->keylen;
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && ivsize > 0) {
+ if (ivsize > areq->cryptlen) {
+ dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
+ err = -EINVAL;
+ goto theend;
+ }
+ memcpy(bkeyiv + 32, areq->iv, ivsize);
+ keyivlen = 48;
+ if (rctx->op_dir == MESON_DECRYPT) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ }
+ if (keyivlen == 24)
+ keyivlen = 32;
+
+ phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(mc->dev, phykeyiv);
+ if (err) {
+ dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
+ goto theend;
+ }
+
+ tloffset = 0;
+ eat = 0;
+ i = 0;
+ while (keyivlen > eat) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+ todo = min(keyivlen - eat, 16u);
+ desc->t_src = cpu_to_le32(phykeyiv + i * 16);
+ desc->t_dst = cpu_to_le32(i * 16);
+ v = (MODE_KEY << 20) | DESC_OWN | 16;
+ desc->t_status = cpu_to_le32(v);
+
+ eat += todo;
+ i++;
+ tloffset++;
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs < 0) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend;
+ }
+ }
+
+ src_sg = areq->src;
+ dst_sg = areq->dst;
+ len = areq->cryptlen;
+ while (src_sg) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+
+ desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
+ desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
+ todo = min(len, sg_dma_len(src_sg));
+ v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
+ if (rctx->op_dir)
+ v |= DESC_ENCRYPTION;
+ len -= todo;
+
+ if (!sg_next(src_sg))
+ v |= DESC_LAST;
+ desc->t_status = cpu_to_le32(v);
+ tloffset++;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ reinit_completion(&mc->chanlist[flow].complete);
+ mc->chanlist[flow].status = 0;
+ writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
+ wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
+ msecs_to_jiffies(500));
+ if (mc->chanlist[flow].status == 0) {
+ dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
+ err = -EINVAL;
+ }
+
+ dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->op_dir == MESON_DECRYPT) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst,
+ areq->cryptlen - ivsize,
+ ivsize, 0);
+ }
+ }
+theend:
+ kzfree(bkeyiv);
+ kzfree(backup_iv);
+
+ return err;
+}
+
+static int meson_handle_cipher_request(struct crypto_engine *engine,
+ void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = meson_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int meson_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_DECRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_ENCRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_cipher_init(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct meson_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+
+ memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ op->mc = algt->mc;
+
+ sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ op->enginectx.op.do_one_request = meson_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+void meson_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_dev *mc = op->mc;
+
+ switch (keylen) {
+ case 128 / 8:
+ op->keymode = MODE_AES_128;
+ break;
+ case 192 / 8:
+ op->keymode = MODE_AES_192;
+ break;
+ case 256 / 8:
+ op->keymode = MODE_AES_256;
+ break;
+ default:
+ dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
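As a quick reader's aid for the descriptor setup in meson_cipher() above, here is a minimal editorial sketch (not part of the patch; the example_* names are hypothetical) of how the t_status word of a meson_desc is assembled from the DESC_* bits and the mode/blockmode shifts used by the driver. The bit values mirror the defines in amlogic-gxl.h further down.

/* Editorial sketch, not driver code: composing a meson_desc t_status word. */
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_DESC_LAST		BIT(18)
#define EX_DESC_ENCRYPTION	BIT(28)
#define EX_DESC_OWN		BIT(31)

/*
 * mode is MODE_KEY for key/IV descriptors or MODE_AES_* for data descriptors,
 * blockmode is MESON_OPMODE_ECB/CBC, len is the byte count of this chunk.
 */
static inline __le32 example_build_t_status(u32 mode, u32 blockmode, u32 len,
					    bool encrypt, bool last)
{
	u32 v = (mode << 20) | (blockmode << 26) | EX_DESC_OWN | len;

	if (encrypt)
		v |= EX_DESC_ENCRYPTION;
	if (last)
		v |= EX_DESC_LAST;

	return cpu_to_le32(v);
}

In the driver itself the key/IV descriptors use MODE_KEY with a fixed 16-byte length, while the data descriptors use op->keymode and algt->blockmode, as seen in the two descriptor loops of meson_cipher().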
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
new file mode 100644
index 000000000000..fa05fce1c0de
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-core.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * Core file which registers crypto algorithms supported by the hardware.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+
+#include "amlogic-gxl.h"
+
+static irqreturn_t meson_irq_handler(int irq, void *data)
+{
+ struct meson_dev *mc = (struct meson_dev *)data;
+ int flow;
+ u32 p;
+
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (mc->irqs[flow] == irq) {
+ p = readl(mc->base + ((0x04 + flow) << 2));
+ if (p) {
+ writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
+ mc->chanlist[flow].status = 1;
+ complete(&mc->chanlist[flow].complete);
+ return IRQ_HANDLED;
+ }
+ dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow);
+ }
+ }
+
+ dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq);
+ return IRQ_HANDLED;
+}
+
+static struct meson_alg_template mc_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+static int meson_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct meson_dev *mc = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ mc_algs[i].alg.skcipher.base.cra_driver_name,
+ mc_algs[i].alg.skcipher.base.cra_name,
+ mc_algs[i].stat_req, mc_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int meson_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meson_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations meson_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = meson_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void meson_free_chanlist(struct meson_dev *mc, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(mc->chanlist[i].engine);
+ if (mc->chanlist[i].tl)
+ dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC,
+ mc->chanlist[i].tl,
+ mc->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int meson_allocate_chanlist(struct meson_dev *mc)
+{
+ int i, err;
+
+ mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW,
+ sizeof(struct meson_flow), GFP_KERNEL);
+ if (!mc->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&mc->chanlist[i].complete);
+
+ mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true);
+ if (!mc->chanlist[i].engine) {
+ dev_err(mc->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(mc->chanlist[i].engine);
+ if (err) {
+ dev_err(mc->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ mc->chanlist[i].tl = dma_alloc_coherent(mc->dev,
+ sizeof(struct meson_desc) * MAXDESC,
+ &mc->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!mc->chanlist[i].tl) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ meson_free_chanlist(mc, i);
+ return err;
+}
+
+static int meson_register_algs(struct meson_dev *mc)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ mc_algs[i].mc = mc;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(mc->dev, "Fail to register %s\n",
+ mc_algs[i].alg.skcipher.base.cra_name);
+ mc_algs[i].mc = NULL;
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void meson_unregister_algs(struct meson_dev *mc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ if (!mc_algs[i].mc)
+ continue;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int meson_crypto_probe(struct platform_device *pdev)
+{
+ struct meson_dev *mc;
+ int err, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ mc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mc);
+
+ mc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mc->base)) {
+ err = PTR_ERR(mc->base);
+ dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
+ return err;
+ }
+ mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
+ if (IS_ERR(mc->busclk)) {
+ err = PTR_ERR(mc->busclk);
+ dev_err(&pdev->dev, "Cannot get core clock err=%d\n", err);
+ return err;
+ }
+
+ mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
+ for (i = 0; i < MAXFLOW; i++) {
+ mc->irqs[i] = platform_get_irq(pdev, i);
+ if (mc->irqs[i] < 0) {
+ dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ return mc->irqs[i];
+ }
+
+ err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
+ "gxl-crypto", mc);
+ if (err < 0) {
+ dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(mc->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+
+ err = meson_allocate_chanlist(mc);
+ if (err)
+ goto error_flow;
+
+ err = meson_register_algs(mc);
+ if (err)
+ goto error_alg;
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+ debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ meson_unregister_algs(mc);
+error_flow:
+ meson_free_chanlist(mc, MAXFLOW);
+ clk_disable_unprepare(mc->busclk);
+ return err;
+}
+
+static int meson_crypto_remove(struct platform_device *pdev)
+{
+ struct meson_dev *mc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ debugfs_remove_recursive(mc->dbgfs_dir);
+#endif
+
+ meson_unregister_algs(mc);
+
+ meson_free_chanlist(mc, MAXFLOW);
+
+ clk_disable_unprepare(mc->busclk);
+ return 0;
+}
+
+static const struct of_device_id meson_crypto_of_match_table[] = {
+ { .compatible = "amlogic,gxl-crypto", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
+
+static struct platform_driver meson_crypto_driver = {
+ .probe = meson_crypto_probe,
+ .remove = meson_crypto_remove,
+ .driver = {
+ .name = "gxl-crypto",
+ .of_match_table = meson_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(meson_crypto_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
new file mode 100644
index 000000000000..b7f2de91ab76
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * amlogic-gxl.h - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
+#define MODE_KEY 1
+#define MODE_AES_128 0x8
+#define MODE_AES_192 0x9
+#define MODE_AES_256 0xa
+
+#define MESON_DECRYPT 0
+#define MESON_ENCRYPT 1
+
+#define MESON_OPMODE_ECB 0
+#define MESON_OPMODE_CBC 1
+
+#define MAXFLOW 2
+
+#define MAXDESC 64
+
+#define DESC_LAST BIT(18)
+#define DESC_ENCRYPTION BIT(28)
+#define DESC_OWN BIT(31)
+
+/*
+ * struct meson_desc - Descriptor for DMA operations
+ * Note that without a datasheet, some fields are unknown
+ * @t_status: Descriptor of the cipher operation (see description below)
+ * @t_src: Physical address of data to read
+ * @t_dst: Physical address of data to write
+ * t_status is segmented like this:
+ * @len: 0-16 length of the data to operate on
+ * @irq: 17 Ignored by hardware
+ * @eoc: 18 set when the descriptor is the last one
+ * @loop: 19 Unknown
+ * @mode: 20-23 Type of algorithm (AES, SHA)
+ * @begin: 24 Unknown
+ * @end: 25 Unknown
+ * @op_mode: 26-27 Blockmode (CBC, ECB)
+ * @enc: 28 0 means decryption, 1 is for encryption
+ * @block: 29 Unknown
+ * @error: 30 Unknown
+ * @owner: 31 owner of the descriptor, 1 means owned by HW
+ */
+struct meson_desc {
+ __le32 t_status;
+ __le32 t_src;
+ __le32 t_dst;
+};
+
+/*
+ * struct meson_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @t_phy: Physical address of task
+ * @tl: pointer to the descriptor list for this flow
+ * @stat_req: number of requests done by this flow
+ */
+struct meson_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+ unsigned int keylen;
+ dma_addr_t t_phy;
+ struct meson_desc *tl;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct meson_dev - main container for all this driver information
+ * @base: base address of amlogic-crypto
+ * @busclk: bus clock for amlogic-crypto
+ * @dev: the platform device
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @irqs: IRQ numbers for amlogic-crypto
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ */
+struct meson_dev {
+ void __iomem *base;
+ struct clk *busclk;
+ struct device *dev;
+ struct meson_flow *chanlist;
+ atomic_t flow;
+ int *irqs;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct dentry *dbgfs_dir;
+#endif
+};
+
+/*
+ * struct meson_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct meson_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct meson_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @keymode: the keymode (type and size of key) associated with this TFM
+ * @mc: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct meson_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ u32 keymode;
+ struct meson_dev *mc;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct meson_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @blockmode: the type of block operation
+ * @mc: pointer to the meson_dev structure associated with this template
+ * @alg: one of the sub-structs must be used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback on this template
+ */
+struct meson_alg_template {
+ u32 type;
+ u32 blockmode;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ struct meson_dev *mc;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int meson_enqueue(struct crypto_async_request *areq, u32 type);
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int meson_cipher_init(struct crypto_tfm *tfm);
+void meson_cipher_exit(struct crypto_tfm *tfm);
+int meson_skdecrypt(struct skcipher_request *areq);
+int meson_skencrypt(struct skcipher_request *areq);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 026f193556f9..91092504bc96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
@@ -117,7 +118,7 @@ struct atmel_aes_ctx {
struct atmel_aes_ctr_ctx {
struct atmel_aes_base_ctx base;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
@@ -129,13 +130,13 @@ struct atmel_aes_gcm_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
- u32 j0[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 j0[AES_BLOCK_SIZE / sizeof(u32)];
u32 tag[AES_BLOCK_SIZE / sizeof(u32)];
- u32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
size_t textlen;
- const u32 *ghash_in;
- u32 *ghash_out;
+ const __be32 *ghash_in;
+ __be32 *ghash_out;
atmel_aes_fn_t ghash_resume;
};
@@ -145,7 +146,7 @@ struct atmel_aes_xts_ctx {
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_ctx {
struct atmel_aes_base_ctx base;
struct atmel_sha_authenc_ctx *auth;
@@ -154,10 +155,10 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
- u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
+ u8 lastc[AES_BLOCK_SIZE];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_reqctx {
struct atmel_aes_reqctx base;
@@ -388,13 +389,13 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
}
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
- u32 *value)
+ void *value)
{
atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
- const u32 *value)
+ const void *value)
{
atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
@@ -486,13 +487,36 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
return (dd->flags & AES_FLAGS_ENCRYPT);
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif
+static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
#endif
@@ -500,26 +524,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher =
- crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-
- if (rctx->mode & AES_FLAGS_ENCRYPT) {
- scatterwalk_map_and_copy(req->info, req->dst,
- req->nbytes - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst) {
- memcpy(req->info, rctx->lastc, ivsize);
- } else {
- scatterwalk_map_and_copy(req->info, req->src,
- req->nbytes - ivsize, ivsize, 0);
- }
- }
- }
+ if (!dd->ctx->is_aead)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -530,7 +536,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
}
static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv, const u32 *key, int keylen)
+ const __be32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
@@ -561,7 +567,7 @@ static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
}
static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+ const __be32 *iv)
{
atmel_aes_write_ctrl_key(dd, use_dma, iv,
@@ -976,9 +982,9 @@ static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
dd->ctx->block_size != AES_BLOCK_SIZE);
int err;
@@ -988,12 +994,13 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- atmel_aes_write_ctrl(dd, use_dma, req->info);
+ atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
@@ -1006,7 +1013,7 @@ atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
@@ -1014,11 +1021,11 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Check for transfer completion. */
ctx->offset += dd->total;
- if (ctx->offset >= req->nbytes)
+ if (ctx->offset >= req->cryptlen)
return atmel_aes_transfer_complete(dd);
/* Compute data length. */
- datalen = req->nbytes - ctx->offset;
+ datalen = req->cryptlen - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
@@ -1071,8 +1078,8 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
int err;
atmel_aes_set_mode(dd, rctx);
@@ -1081,16 +1088,16 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
ctx->offset = 0;
dd->total = 0;
return atmel_aes_ctr_transfer(dd);
}
-static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
@@ -1121,28 +1128,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
if (!dd)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
- scatterwalk_map_and_copy(rctx->lastc, req->src,
- (req->nbytes - ivsize), ivsize, 0);
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
}
return atmel_aes_handle_queue(dd, &req->base);
}
-static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1152,297 +1161,279 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB);
}
-static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
-static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB);
}
-static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}
-static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}
-static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}
-static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}
-static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
-static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_start;
return 0;
}
-static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "atmel-ecb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ecb_encrypt,
- .decrypt = atmel_aes_ecb_decrypt,
- }
+static struct skcipher_alg aes_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "atmel-ecb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ecb_encrypt,
+ .decrypt = atmel_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "atmel-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cbc_encrypt,
- .decrypt = atmel_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "atmel-cbc-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cbc_encrypt,
+ .decrypt = atmel_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "atmel-ofb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ofb_encrypt,
- .decrypt = atmel_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "atmel-ofb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ofb_encrypt,
+ .decrypt = atmel_aes_ofb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "atmel-cfb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb_encrypt,
- .decrypt = atmel_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "atmel-cfb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb_encrypt,
+ .decrypt = atmel_aes_cfb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb32(aes)",
- .cra_driver_name = "atmel-cfb32-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb32_encrypt,
- .decrypt = atmel_aes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(aes)",
+ .base.cra_driver_name = "atmel-cfb32-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb32_encrypt,
+ .decrypt = atmel_aes_cfb32_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb16(aes)",
- .cra_driver_name = "atmel-cfb16-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb16_encrypt,
- .decrypt = atmel_aes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(aes)",
+ .base.cra_driver_name = "atmel-cfb16-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb16_encrypt,
+ .decrypt = atmel_aes_cfb16_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb8(aes)",
- .cra_driver_name = "atmel-cfb8-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb8_encrypt,
- .decrypt = atmel_aes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(aes)",
+ .base.cra_driver_name = "atmel-cfb8-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb8_encrypt,
+ .decrypt = atmel_aes_cfb8_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "atmel-ctr-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_ctr_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ctr_encrypt,
- .decrypt = atmel_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "atmel-ctr-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_ctr_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ctr_encrypt,
+ .decrypt = atmel_aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
};
-static struct crypto_alg aes_cfb64_alg = {
- .cra_name = "cfb64(aes)",
- .cra_driver_name = "atmel-cfb64-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb64_encrypt,
- .decrypt = atmel_aes_cfb64_decrypt,
- }
+static struct skcipher_alg aes_cfb64_alg = {
+ .base.cra_name = "cfb64(aes)",
+ .base.cra_driver_name = "atmel-cfb64-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB64_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb64_encrypt,
+ .decrypt = atmel_aes_cfb64_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
};
@@ -1450,7 +1441,7 @@ static struct crypto_alg aes_cfb64_alg = {
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
@@ -1471,7 +1462,7 @@ atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
@@ -1558,7 +1549,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
memcpy(data, iv, ivsize);
memset(data + ivsize, 0, padlen + sizeof(u64));
- ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
+ ((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
NULL, ctx->j0, atmel_aes_gcm_process);
@@ -1591,7 +1582,7 @@ static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u32 j0_lsw, *j0 = ctx->j0;
+ __be32 j0_lsw, *j0 = ctx->j0;
size_t padlen;
/* Write incr32(J0) into IV. */
@@ -1674,7 +1665,7 @@ static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u64 *data = dd->buf;
+ __be64 *data = dd->buf;
if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
@@ -1857,8 +1848,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
unsigned long flags;
int err;
@@ -1868,7 +1859,7 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- /* Compute the tweak value from req->info with ecb(aes). */
+ /* Compute the tweak value from req->iv with ecb(aes). */
flags = dd->flags;
dd->flags &= ~AES_FLAGS_MODE_MASK;
dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
@@ -1876,16 +1867,16 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
ctx->key2, ctx->base.keylen);
dd->flags = flags;
- atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
- static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
u8 *tweak_bytes = (u8 *)tweak;
int i;
@@ -1908,20 +1899,21 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
atmel_aes_write_block(dd, AES_TWR(0), tweak);
atmel_aes_write_block(dd, AES_ALPHAR(0), one);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
-static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
if (err)
return err;
@@ -1932,48 +1924,46 @@ static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS);
}
-static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_xts_start;
return 0;
}
-static struct crypto_alg aes_xts_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "atmel-xts-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_xts_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_xts_setkey,
- .encrypt = atmel_aes_xts_encrypt,
- .decrypt = atmel_aes_xts_decrypt,
- }
+static struct skcipher_alg aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "atmel-xts-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ .init = atmel_aes_xts_init_tfm,
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
@@ -2041,7 +2031,7 @@ static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
bool enc = atmel_aes_is_encrypt(dd);
struct scatterlist *src, *dst;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
u32 emr;
if (is_async)
@@ -2460,23 +2450,23 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc)
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
crypto_unregister_aead(&aes_authenc_algs[i]);
#endif
if (dd->caps.has_xts)
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
if (dd->caps.has_cfb64)
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -2484,13 +2474,13 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
- err = crypto_register_alg(&aes_cfb64_alg);
+ err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -2502,12 +2492,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
}
if (dd->caps.has_xts) {
- err = crypto_register_alg(&aes_xts_alg);
+ err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
err = crypto_register_aead(&aes_authenc_algs[i]);
@@ -2519,22 +2509,22 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
return 0;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
for (j = 0; j < i; j++)
crypto_unregister_aead(&aes_authenc_algs[j]);
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -2709,7 +2699,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
atmel_aes_get_cap(aes_dd);
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
goto iclk_unprepare;
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index cbd37a2edada..d6de810df44f 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -12,7 +12,7 @@
#ifndef __ATMEL_AUTHENC_H__
#define __ATMEL_AUTHENC_H__
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
#include <crypto/authenc.h>
#include <crypto/hash.h>
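[Editor's note] The repeated #ifdef -> IS_ENABLED() conversion in these files is about tristate options: #ifdef CONFIG_FOO is only true when the option is built in (=y), whereas IS_ENABLED(CONFIG_FOO) is also true for =m, where the preprocessor defines CONFIG_FOO_MODULE instead. A reduced illustration, with CONFIG_EXAMPLE as a made-up symbol:

#include <linux/kconfig.h>

#ifdef CONFIG_EXAMPLE
/* Reached only when CONFIG_EXAMPLE=y. */
#endif

#if IS_ENABLED(CONFIG_EXAMPLE)
/* Reached when CONFIG_EXAMPLE=y or CONFIG_EXAMPLE=m. */
#endif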
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 84cb8748a795..8ea0e4bcde0d 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -360,7 +360,7 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits[2];
+ __be64 bits[2];
u64 size[2];
size[0] = ctx->digcnt[0];
@@ -2212,7 +2212,7 @@ static struct ahash_alg sha_hmac_algs[] = {
},
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc functions */
static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
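[Editor's note] The bits[] type change in atmel_sha_fill_padding() is an endianness-annotation fix: the message bit length appended in SHA padding is stored big-endian, so declaring the buffer __be64 lets sparse flag any assignment that bypasses cpu_to_be64(). A reduced sketch of the idiom, not the driver's actual padding code:

#include <linux/types.h>
#include <asm/byteorder.h>

static void store_bit_length(__be64 bits[2], u64 hi, u64 lo)
{
	/* sparse would warn if a raw cpu-endian u64 were assigned here */
	bits[0] = cpu_to_be64(hi);
	bits[1] = cpu_to_be64(lo);
}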
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 1a6c86ae6148..0c1f79b30fc1 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -36,6 +36,7 @@
#include <crypto/internal/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
@@ -72,7 +73,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_dev *dd;
int keylen;
- u32 key[3*DES_KEY_SIZE / sizeof(u32)];
+ u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)];
unsigned long flags;
u16 block_size;
@@ -80,6 +81,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_reqctx {
unsigned long mode;
+ u8 lastc[DES_BLOCK_SIZE];
};
struct atmel_tdes_dma {
@@ -106,7 +108,7 @@ struct atmel_tdes_dev {
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
size_t total;
struct scatterlist *in_sg;
@@ -307,8 +309,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
dd->ctx->keylen >> 2);
if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
- atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+ (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
}
return 0;
@@ -502,8 +504,8 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -571,19 +573,45 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
return err;
}
+static void
+atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
+{
+ struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
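[Editor's note] The new helper relies on scatterwalk_map_and_copy(buf, sg, start, nbytes, out), whose last argument selects the direction: 0 copies from the scatterlist into the linear buffer, non-zero copies the buffer back into the scatterlist. Both calls above therefore read the trailing ivsize bytes of ciphertext into req->iv. The convention in isolation, as a sketch with a hypothetical scatterlist sg of total bytes:

#include <crypto/scatterwalk.h>
#include <crypto/des.h>

static void save_and_restore_tail(struct scatterlist *sg, unsigned int total)
{
	u8 tail[DES_BLOCK_SIZE];

	/* out == 0: copy the last block of the scatterlist into tail[] */
	scatterwalk_map_and_copy(tail, sg, total - DES_BLOCK_SIZE,
				 DES_BLOCK_SIZE, 0);

	/* out != 0: write tail[] back over that same region */
	scatterwalk_map_and_copy(tail, sg, total - DES_BLOCK_SIZE,
				 DES_BLOCK_SIZE, 1);
}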
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+
req->base.complete(&req->base, err);
}
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct crypto_async_request *async_req, *backlog;
struct atmel_tdes_ctx *ctx;
@@ -593,7 +621,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
+ ret = crypto_enqueue_request(&dd->queue, &req->base);
if (dd->flags & TDES_FLAGS_BUSY) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
@@ -610,18 +638,18 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
+ dd->total = req->cryptlen;
dd->in_offset = 0;
dd->in_sg = req->src;
dd->out_offset = 0;
dd->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= TDES_FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
dd->ctx = ctx;
@@ -665,32 +693,32 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
if (mode & TDES_FLAGS_CFB8) {
- if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
- if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
- if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
} else {
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -699,6 +727,15 @@ static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
+ if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+
return atmel_tdes_handle_queue(ctx->dd, req);
}
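[Editor's note] Saving rctx->lastc here, before the hardware runs, is what lets atmel_tdes_set_iv_as_last_ciphertext_block() honour the skcipher contract that req->iv holds the next chunk's IV once a request completes; for CBC that is the last ciphertext block, which an in-place decryption would otherwise have destroyed. A caller can then split a long CBC stream across requests and keep reusing one IV buffer. A hedged caller-side fragment, with tfm/req/sg/wait set up as in the full sketch shown after the atmel-aes XTS conversion above and chunk lengths assumed block-aligned:

	sg_init_one(&sg, chunk1, chunk1_len);
	skcipher_request_set_crypt(req, &sg, &sg, chunk1_len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		return err;

	/* iv now holds the last ciphertext block of chunk1 */
	sg_init_one(&sg, chunk2, chunk2_len);
	skcipher_request_set_crypt(req, &sg, &sg, chunk2_len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);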
@@ -770,13 +807,13 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
dma_release_channel(dd->dma_lch_out.chan);
}
-static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des_key(tfm, key);
+ err = verify_skcipher_des_key(tfm, key);
if (err)
return err;
@@ -786,13 +823,13 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -802,84 +839,84 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
}
-static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, 0);
}
-static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
}
-static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
-static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB32);
}
-static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
}
-static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
}
-static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
-static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct atmel_tdes_dev *dd;
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
dd = atmel_tdes_find_dev(ctx);
if (!dd)
@@ -888,204 +925,184 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg tdes_algs[] = {
+static struct skcipher_alg tdes_algs[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "atmel-ecb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "atmel-ecb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "atmel-cbc-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "atmel-cbc-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
},
{
- .cra_name = "cfb(des)",
- .cra_driver_name = "atmel-cfb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(des)",
+ .base.cra_driver_name = "atmel-cfb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb_encrypt,
+ .decrypt = atmel_tdes_cfb_decrypt,
},
{
- .cra_name = "cfb8(des)",
- .cra_driver_name = "atmel-cfb8-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(des)",
+ .base.cra_driver_name = "atmel-cfb8-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb8_encrypt,
+ .decrypt = atmel_tdes_cfb8_decrypt,
},
{
- .cra_name = "cfb16(des)",
- .cra_driver_name = "atmel-cfb16-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(des)",
+ .base.cra_driver_name = "atmel-cfb16-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x1,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb16_encrypt,
+ .decrypt = atmel_tdes_cfb16_decrypt,
},
{
- .cra_name = "cfb32(des)",
- .cra_driver_name = "atmel-cfb32-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(des)",
+ .base.cra_driver_name = "atmel-cfb32-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x3,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb32_encrypt,
+ .decrypt = atmel_tdes_cfb32_decrypt,
},
{
- .cra_name = "ofb(des)",
- .cra_driver_name = "atmel-ofb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "atmel-ofb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "atmel-ecb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "atmel-ecb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "atmel-cbc-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "atmel-cbc-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
},
{
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "atmel-ofb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "atmel-ofb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
},
};
@@ -1148,7 +1165,7 @@ static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
int i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
- crypto_unregister_alg(&tdes_algs[i]);
+ crypto_unregister_skcipher(&tdes_algs[i]);
}
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
@@ -1156,7 +1173,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
- err = crypto_register_alg(&tdes_algs[i]);
+ err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
}
@@ -1165,7 +1182,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
err_tdes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&tdes_algs[j]);
+ crypto_unregister_skcipher(&tdes_algs[j]);
return err;
}
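[Editor's note] Now that tdes_algs[] is an array of struct skcipher_alg, the registration and unregistration loops above could also be written with the skcipher API's batch helpers, which themselves unwind partial registrations on failure. The driver keeps its explicit loop; for reference, a sketch of the alternative (the _alt function names are made up):

static int atmel_tdes_register_algs_alt(void)
{
	return crypto_register_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
}

static void atmel_tdes_unregister_algs_alt(void)
{
	crypto_unregister_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
}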
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index f85356a48e7e..1564a6f8c9cb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -110,8 +110,8 @@ static u8 select_channel(void)
}
/**
- * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
- * receive a SPU response message for an ablkcipher request. Includes buffers to
+ * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an skcipher request. Includes buffers to
* catch SPU message headers and the response data.
* @mssg: mailbox message containing the receive sg
* @rctx: crypto request context
@@ -130,7 +130,7 @@ static u8 select_channel(void)
* < 0 if an error
*/
static int
-spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
+spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 rx_frag_num,
unsigned int chunksize, u32 stat_pad_len)
@@ -179,8 +179,8 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
}
/**
- * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
- * send a SPU request message for an ablkcipher request. Includes SPU message
+ * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
+ * send a SPU request message for an skcipher request. Includes SPU message
* headers and the request data.
* @mssg: mailbox message containing the transmit sg
* @rctx: crypto request context
@@ -198,7 +198,7 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
* < 0 if an error
*/
static int
-spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
+spu_skcipher_tx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
@@ -283,7 +283,7 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
}
/**
- * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
+ * handle_skcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
* data.
* @rctx: Crypto request context
@@ -300,12 +300,12 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
* asynchronously
* Any other value indicates an error
*/
-static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
+static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req =
- container_of(areq, struct ablkcipher_request, base);
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
int err = 0;
@@ -468,7 +468,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
rx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
+ err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
stat_pad_len);
if (err)
return err;
@@ -482,7 +482,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
tx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
+ err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
pad_len);
if (err)
return err;
@@ -495,16 +495,16 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
}
/**
- * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
+ * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
* total received count for the request and updates global stats.
* @rctx: Crypto request context
*/
-static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
+static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -1685,8 +1685,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
/* Process the SPU response message */
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- handle_ablkcipher_resp(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ handle_skcipher_resp(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
handle_ahash_resp(rctx);
@@ -1708,8 +1708,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
spu_chunk_cleanup(rctx);
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = handle_ablkcipher_req(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = handle_skcipher_req(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = handle_ahash_req(rctx);
@@ -1739,7 +1739,7 @@ cb_finish:
/* ==================== Kernel Cryptographic API ==================== */
/**
- * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
+ * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
* @req: Crypto API request
* @encrypt: true if encrypting; false if decrypting
*
@@ -1747,11 +1747,11 @@ cb_finish:
* asynchronously
* < 0 if an error
*/
-static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
+static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
- struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
+ struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
struct iproc_ctx_s *ctx =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int err;
flow_log("%s() enc:%u\n", __func__, encrypt);
@@ -1761,7 +1761,7 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
rctx->parent = &req->base;
rctx->is_encrypt = encrypt;
rctx->bd_suppress = false;
- rctx->total_todo = req->nbytes;
+ rctx->total_todo = req->cryptlen;
rctx->src_sent = 0;
rctx->total_sent = 0;
rctx->total_received = 0;
@@ -1782,15 +1782,15 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
ctx->cipher.mode == CIPHER_MODE_GCM ||
ctx->cipher.mode == CIPHER_MODE_CCM) {
rctx->iv_ctr_len =
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
+ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
+ memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
} else {
rctx->iv_ctr_len = 0;
}
/* Choose a SPU to process this request */
rctx->chan_idx = select_channel();
- err = handle_ablkcipher_req(rctx);
+ err = handle_skcipher_req(rctx);
if (err != -EINPROGRESS)
/* synchronous result */
spu_chunk_cleanup(rctx);
@@ -1798,13 +1798,13 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
return err;
}
-static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1812,13 +1812,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1826,10 +1826,10 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
if (ctx->cipher.mode == CIPHER_MODE_XTS)
/* XTS includes two keys of equal length */
@@ -1846,7 +1846,7 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -1854,10 +1854,10 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int i;
ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
@@ -1874,16 +1874,16 @@ static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
struct spu_cipher_parms cipher_parms;
u32 alloc_len = 0;
int err;
- flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
+ flow_log("skcipher_setkey() keylen: %d\n", keylen);
flow_dump(" key: ", key, keylen);
switch (ctx->cipher.alg) {
@@ -1926,7 +1926,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
cipher_parms.iv_buf = NULL;
- cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
+ cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
cipher_parms.alg = ctx->cipher.alg;
@@ -1950,17 +1950,17 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
+ flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
- return ablkcipher_enqueue(req, true);
+ return skcipher_enqueue(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
- return ablkcipher_enqueue(req, false);
+ flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
+ return skcipher_enqueue(req, false);
}
static int ahash_enqueue(struct ahash_request *req)
@@ -3585,18 +3585,16 @@ static struct iproc_alg_s driver_algs[] = {
.auth_first = 0,
},
-/* ABLKCIPHER algorithms. */
+/* SKCIPHER algorithms. */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-iproc",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(arc4)",
+ .base.cra_driver_name = "ecb-arc4-iproc",
+ .base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_RC4,
@@ -3608,16 +3606,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "ofb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "ofb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3629,16 +3625,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3650,16 +3644,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3671,16 +3663,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "ofb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "ofb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3692,16 +3682,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3713,16 +3701,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3734,16 +3720,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3755,16 +3739,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3776,16 +3758,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3797,16 +3777,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3818,16 +3796,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -4282,16 +4258,17 @@ static int generic_cra_init(struct crypto_tfm *tfm,
return 0;
}
-static int ablkcipher_cra_init(struct crypto_tfm *tfm)
+static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
struct iproc_alg_s *cipher_alg;
flow_log("%s()\n", __func__);
- tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
+ crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
- cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
+ cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
return generic_cra_init(tfm, cipher_alg);
}
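[Editor's note] skcipher_init_tfm() recovers the driver's wrapper structure from the generic descriptor with container_of(), which works because struct skcipher_alg is embedded in the alg union of struct iproc_alg_s (see the cipher.h hunk further down). The idiom in isolation, with a made-up wrapper type:

#include <linux/kernel.h>	/* container_of() */
#include <crypto/skcipher.h>	/* struct skcipher_alg */

struct example_alg {
	int driver_private;
	struct skcipher_alg skcipher;	/* embedded generic descriptor */
};

static struct example_alg *to_example_alg(struct skcipher_alg *alg)
{
	/* steps back from the member to the structure containing it */
	return container_of(alg, struct example_alg, skcipher);
}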
@@ -4363,6 +4340,11 @@ static void generic_cra_exit(struct crypto_tfm *tfm)
atomic_dec(&iproc_priv.session_count);
}
+static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ generic_cra_exit(crypto_skcipher_tfm(tfm));
+}
+
static void aead_cra_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -4524,10 +4506,10 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.bad_icv, 0);
}
-static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
+static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct crypto_alg *crypto = &driver_alg->alg.crypto;
+ struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
/* SPU2 does not support RC4 */
@@ -4535,26 +4517,23 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
(spu->spu_type == SPU_TYPE_SPU2))
return 0;
- crypto->cra_module = THIS_MODULE;
- crypto->cra_priority = cipher_pri;
- crypto->cra_alignmask = 0;
- crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
-
- crypto->cra_init = ablkcipher_cra_init;
- crypto->cra_exit = generic_cra_exit;
- crypto->cra_type = &crypto_ablkcipher_type;
- crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ crypto->base.cra_module = THIS_MODULE;
+ crypto->base.cra_priority = cipher_pri;
+ crypto->base.cra_alignmask = 0;
+ crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
- crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ crypto->init = skcipher_init_tfm;
+ crypto->exit = skcipher_exit_tfm;
+ crypto->setkey = skcipher_setkey;
+ crypto->encrypt = skcipher_encrypt;
+ crypto->decrypt = skcipher_decrypt;
- err = crypto_register_alg(crypto);
+ err = crypto_register_skcipher(crypto);
/* Mark alg as having been registered, if successful */
if (err == 0)
driver_alg->registered = true;
- pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
+ pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
return err;
}
@@ -4649,8 +4628,8 @@ static int spu_algs_register(struct device *dev)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = spu_register_ablkcipher(&driver_algs[i]);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = spu_register_skcipher(&driver_algs[i]);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = spu_register_ahash(&driver_algs[i]);
@@ -4680,8 +4659,8 @@ err_algs:
if (!driver_algs[j].registered)
continue;
switch (driver_algs[j].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[j].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
driver_algs[j].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -4837,10 +4816,10 @@ static int bcm_spu_remove(struct platform_device *pdev)
continue;
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[i].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
dev_dbg(dev, " unregistered cipher %s\n",
- driver_algs[i].alg.crypto.cra_driver_name);
+ driver_algs[i].alg.skcipher.base.cra_driver_name);
driver_algs[i].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 766452b24d0a..b6d83e3aa46c 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -1,3 +1,4 @@
+
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2016 Broadcom
@@ -11,6 +12,7 @@
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
@@ -102,7 +104,7 @@ struct auth_op {
struct iproc_alg_s {
u32 type;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -149,7 +151,7 @@ struct spu_msg_buf {
u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
union {
- /* Buffers only used for ablkcipher */
+ /* Buffers only used for skcipher */
struct {
/*
* Field used for either SUPDT when RC4 is used
@@ -214,7 +216,7 @@ struct iproc_ctx_s {
/*
* Buffer to hold SPU message header template. Template is created at
- * setkey time for ablkcipher requests, since most of the fields in the
+ * setkey time for skcipher requests, since most of the fields in the
* header are known at that time. At request time, just fill in a few
* missing pieces related to length of data in the request and IVs, etc.
*/
@@ -256,7 +258,7 @@ struct iproc_reqctx_s {
/* total todo, rx'd, and sent for this request */
unsigned int total_todo;
- unsigned int total_received; /* only valid for ablkcipher */
+ unsigned int total_received; /* only valid for skcipher */
unsigned int total_sent;
/*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2add51024575..59abb5ecefa4 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -542,7 +542,7 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
- * subsequent ablkcipher requests for this context.
+ * subsequent skcipher requests for this context.
* @spu2_cipher_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
@@ -1107,13 +1107,13 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an ablkcipher SPU2 request message header,
+ * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
*
* Called at setkey time to initialize a msg header that can be reused for all
- * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
+ * subsequent skcipher requests. Construct the message starting at spu_hdr.
* Caller should allocate this buffer in DMA-able memory at least
* SPU_HEADER_ALLOC_LEN bytes long.
*
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 137ed3df0c74..87053e46c788 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -97,7 +97,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
Selecting this will offload crypto for users of the
@@ -110,7 +110,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
default y
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_DES
help
Selecting this will use CAAM Queue Interface (QI) for sending
@@ -158,7 +158,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_DEV_FSL_CAAM_COMMON
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_AEAD
select CRYPTO_HASH
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 83f96d4f86e0..6619c512ef1a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
- int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
unsigned int diff_size = 0;
int lzeros;
@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (!diff_size && src_nents == 1)
+ mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ goto src_fail;
+ }
+
+ if (!diff_size && mapped_src_nents == 1)
sec4_sg_len = 0; /* no need for an input hw s/g table */
else
- sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_len = mapped_src_nents + !!diff_size;
sec4_sg_index = sec4_sg_len;
- if (dst_nents > 1)
- sec4_sg_len += pad_sg_nents(dst_nents);
+
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
else
sec4_sg_len = pad_sg_nents(sec4_sg_len);
@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc)
- return ERR_PTR(-ENOMEM);
-
- sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map source\n");
- goto src_fail;
- }
-
- sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map destination\n");
goto dst_fail;
- }
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
if (diff_size)
@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
edesc->sec4_sg + !!diff_size, 0);
- if (dst_nents > 1)
+ if (mapped_dst_nents > 1)
sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!sec4_sg_bytes)
return edesc;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
+
edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
return edesc;
sec4_sg_fail:
- dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(edesc);
dst_fail:
- dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
- kfree(edesc);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
return ERR_PTR(-ENOMEM);
}
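[Editor's note] The reshuffle above maps the source and destination scatterlists before allocating the extended descriptor and, accordingly, reorders the error labels so each one releases exactly what was acquired before the failing step, in reverse order of acquisition. The general shape of that idiom, detached from the driver; acquire_*/release_* are hypothetical stand-ins for dma_map_sg()/kzalloc() and their cleanups:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto undo_a;

	err = acquire_c();
	if (err)
		goto undo_b;

	return 0;	/* success: caller now owns a, b and c */

undo_b:
	release_b();
undo_a:
	release_a();
	return err;
}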
@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
+
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 2c488c9a3812..c68fb4c03ee6 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
/**
* rsa_edesc - s/w-extended rsa descriptor
- * @src_nents : number of segments in input scatterlist
- * @dst_nents : number of segments in output scatterlist
+ * @src_nents : number of segments in input s/w scatterlist
+ * @dst_nents : number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table
* @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table
@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
struct rsa_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
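
The kernel-doc above distinguishes segments in the s/w scatterlist from entries in the h/w link table: dma_map_sg() may coalesce adjacent segments (for instance under an IOMMU), so the mapped count can be smaller than the s/w count. It is the mapped count that must size and index the SEC4 link table, while the original count is still needed for unmapping. A minimal sketch of that pattern, assuming a hypothetical device pointer dev and scatterlist src rather than the driver's exact code:

static int example_map_src(struct device *dev, struct scatterlist *src,
			   unsigned int src_len)
{
	int src_nents, mapped_src_nents;

	src_nents = sg_nents_for_len(src, src_len);	/* s/w segment count */
	if (src_nents < 0)
		return src_nents;

	/* The DMA API may coalesce segments, so this can be smaller. */
	mapped_src_nents = dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents))
		return -ENOMEM;

	/* ... size and index the h/w link table by mapped_src_nents ... */

	dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); /* unmap by the s/w count */
	return 0;
}
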
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index db22777d59b4..d7c3c3805693 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -176,6 +176,73 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
}
/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+			 * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static void devm_deinstantiate_rng(void *data)
+{
+ struct device *ctrldev = data;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+}
+
+/*
* instantiate_rng - builds and executes a descriptor on DECO0,
* which initializes the RNG block.
* @ctrldev - pointer to device
@@ -247,99 +314,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
kfree(desc);
- return ret;
-}
-
-/*
- * deinstantiate_rng - builds and executes a descriptor on DECO0,
- * which deinitializes the RNG block.
- * @ctrldev - pointer to device
- * @state_handle_mask - bitmask containing the instantiation status
- * for the RNG4 state handles which exist in
- * the RNG4 block: 1 if it's been instantiated
- *
- * Return: - 0 if no error occurred
- * - -ENOMEM if there isn't enough memory to allocate the descriptor
- * - -ENODEV if DECO0 couldn't be acquired
- * - -EAGAIN if an error occurred when executing the descriptor
- */
-static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
-{
- u32 *desc, status;
- int sh_idx, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
- /*
- * If the corresponding bit is set, then it means the state
- * handle was initialized by us, and thus it needs to be
- * deinitialized as well
- */
- if ((1 << sh_idx) & state_handle_mask) {
- /*
-			 * Create the descriptor for deinstantiating this state
- * handle
- */
- build_deinstantiation_desc(desc, sh_idx);
-
- /* Try to run it through DECO0 */
- ret = run_descriptor_deco0(ctrldev, desc, &status);
-
- if (ret ||
- (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
- dev_err(ctrldev,
- "Failed to deinstantiate RNG4 SH%d\n",
- sh_idx);
- break;
- }
- dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
- }
- }
-
- kfree(desc);
+ if (!ret)
+ ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
+ ctrldev);
return ret;
}
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_ctrl __iomem *ctrl;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-
- /* Remove platform devices under the crypto node */
- of_platform_depopulate(ctrldev);
-
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(ctrldev);
-#endif
-
- /*
- * De-initialize RNG state handles initialized by this driver.
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
- if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
- deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(ctrl);
-
- return 0;
-}
-
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
@@ -568,6 +549,13 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}
+#ifdef CONFIG_DEBUG_FS
+static void caam_remove_debugfs(void *root)
+{
+ debugfs_remove_recursive(root);
+}
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -580,6 +568,7 @@ static int caam_probe(struct platform_device *pdev)
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
+ struct dentry *dfs_root;
#endif
u32 scfgr, comp_params;
u8 rng_vid;
@@ -611,10 +600,11 @@ static int caam_probe(struct platform_device *pdev)
/* Get configuration properties from device tree */
/* First, get register page */
- ctrl = of_iomap(nprop, 0);
- if (!ctrl) {
+ ctrl = devm_of_iomap(dev, nprop, 0, NULL);
+ ret = PTR_ERR_OR_ZERO(ctrl);
+ if (ret) {
dev_err(dev, "caam: of_iomap() failed\n");
- return -ENOMEM;
+ return ret;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
@@ -632,22 +622,18 @@ static int caam_probe(struct platform_device *pdev)
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
ret = qman_portals_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman portals probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
}
#endif
@@ -722,7 +708,7 @@ static int caam_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto iounmap_ctrl;
+ return ret;
}
ctrlpriv->era = caam_get_era(ctrl);
@@ -736,8 +722,12 @@ static int caam_probe(struct platform_device *pdev)
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+ dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
+ if (ret)
+ return ret;
+
+ ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
#endif
/* Check to see if (DPAA 1.x) QI present. If so, enable */
@@ -757,12 +747,6 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto shutdown_qi;
- }
-
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@@ -779,8 +763,7 @@ static int caam_probe(struct platform_device *pdev)
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
- ret = -ENOMEM;
- goto caam_remove;
+ return -ENOMEM;
}
if (ctrlpriv->era < 10)
@@ -843,7 +826,7 @@ static int caam_probe(struct platform_device *pdev)
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
- goto caam_remove;
+ return ret;
}
/*
* Set handles init'ed by this module as the complement of the
@@ -916,19 +899,11 @@ static int caam_probe(struct platform_device *pdev)
debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
- return 0;
-caam_remove:
- caam_remove(pdev);
- return ret;
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "JR platform devices creation error\n");
-shutdown_qi:
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(dev);
-#endif
-iounmap_ctrl:
- iounmap(ctrl);
return ret;
}
@@ -938,7 +913,6 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = caam_remove,
};
module_platform_driver(caam_driver);
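
The controller changes above drop caam_remove() entirely: the register iomap, the debugfs root, the RNG state handles and the JR platform devices are now released through device-managed resources and actions, which run in reverse order of registration on both probe failure and driver unbind, so no .remove callback is needed. A hedged sketch of the devm_add_action_or_reset() pattern, with hypothetical my_probe()/my_cleanup()/my_state names rather than the driver's code:

struct my_state {
	void __iomem *regs;		/* placeholder resource */
};

static void my_cleanup(void *data)
{
	struct my_state *st = data;

	/* runs automatically on probe failure and on driver unbind,
	 * in reverse order of registration */
	pr_info("releasing %p\n", st->regs);
}

static int my_probe(struct platform_device *pdev)
{
	struct my_state *st;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = devm_add_action_or_reset(&pdev->dev, my_cleanup, st);
	if (ret)
		return ret;	/* my_cleanup() has already run */

	return 0;		/* no .remove callback needed */
}
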
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 731b06becd9c..c7c10c90464b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -81,9 +81,6 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
-#ifdef CONFIG_CAAM_QI
- u8 qi_init; /* Nonzero if QI has been initialized */
-#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -102,7 +99,6 @@ struct caam_drv_private {
* variables at runtime.
*/
#ifdef CONFIG_DEBUG_FS
- struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 378f627e1d64..dacf2fa4aa8e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -500,9 +500,10 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-void caam_qi_shutdown(struct device *qidev)
+static void caam_qi_shutdown(void *data)
{
int i;
+ struct device *qidev = data;
struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
@@ -761,7 +762,10 @@ int caam_qi_init(struct platform_device *caam_pdev)
&times_congested, &caam_fops_u64_ro);
#endif
- ctrlpriv->qi_init = 1;
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ if (err)
+ return err;
+
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index db0549549e3b..848958951f68 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -147,7 +147,6 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 596ce28b957d..1ad66677d88e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -92,15 +92,15 @@ static inline void update_output_data(struct cpt_request_info *req_info,
}
}
-static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct fc_context *fctx = &rctx->fctx;
u64 *offset_control = &rctx->control_word;
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
u64 *ctrl_flags = NULL;
@@ -115,7 +115,7 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
else
req_info->req.opcode.s.minor = 3;
- req_info->req.param1 = req->nbytes; /* Encryption Data length */
+ req_info->req.param1 = req->cryptlen; /* Encryption Data length */
req_info->req.param2 = 0; /*Auth data length */
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
@@ -147,32 +147,32 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
return 0;
}
-static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
create_ctx_hdr(req, enc, &argcnt);
- update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_input_data(req_info, req->src, req->nbytes, &argcnt);
+ update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
-static inline void store_cb_info(struct ablkcipher_request *req,
+static inline void store_cb_info(struct skcipher_request *req,
struct cpt_request_info *req_info)
{
req_info->callback = (void *)cvm_callback;
req_info->callback_arg = (void *)&req->base;
}
-static inline void create_output_list(struct ablkcipher_request *req,
+static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -184,16 +184,16 @@ static inline void create_output_list(struct ablkcipher_request *req,
* [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ]
*/
/* Reading IV information */
- update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_output_data(req_info, req->dst, req->nbytes, &argcnt);
+ update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
req_info->outcnt = argcnt;
}
-static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
+static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct fc_context *fctx = &rctx->fctx;
struct cpt_request_info *req_info = &rctx->cpt_req;
void *cdev = NULL;
@@ -217,20 +217,20 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
return -EINPROGRESS;
}
-static int cvm_encrypt(struct ablkcipher_request *req)
+static int cvm_encrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, true);
}
-static int cvm_decrypt(struct ablkcipher_request *req)
+static int cvm_decrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, false);
}
-static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
const u8 *key1 = key;
@@ -284,10 +284,10 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
return -EINVAL;
}
-static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->cipher_type = cipher_type;
@@ -295,183 +295,159 @@ static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_ablkcipher_set_flags(cipher,
+ crypto_skcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
-static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CBC);
}
-static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
-static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CFB);
}
-static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_CBC);
}
-static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_ECB);
}
-static int cvm_enc_dec_init(struct crypto_tfm *tfm)
+static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));
+
return 0;
}
-static struct crypto_alg algs[] = { {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "xts(aes)",
- .cra_driver_name = "cavium-xts-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .setkey = cvm_xts_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+static struct skcipher_alg algs[] = { {
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cavium-xts-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = cvm_xts_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cavium-cbc-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cbc_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cavium-cbc-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cbc_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(aes)",
- .cra_driver_name = "cavium-ecb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_ecb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cavium-ecb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_ecb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cavium-cfb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cfb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cavium-cfb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cfb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cavium-cbc-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_cbc_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cavium-cbc-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_cbc_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "cavium-ecb-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_ecb_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cavium-ecb-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_ecb_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
} };
static inline int cav_register_algs(void)
{
int err = 0;
- err = crypto_register_algs(algs, ARRAY_SIZE(algs));
+ err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
if (err)
return err;
@@ -480,7 +456,7 @@ static inline int cav_register_algs(void)
static inline void cav_unregister_algs(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
int cvm_crypto_init(struct cpt_vf *cptvf)
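
The Cavium CPT conversion above is mechanical but worth spelling out: the obsolete ablkcipher request fields nbytes/info become cryptlen/iv on struct skcipher_request, the per-request context size moves from cra_init to the skcipher .init hook via crypto_skcipher_set_reqsize(), and registration switches to crypto_register_skciphers(). A minimal sketch of the converted shape, with hypothetical example_* names and a placeholder example_req_ctx:

struct example_req_ctx {
	u8 iv[AES_BLOCK_SIZE];		/* placeholder per-request state */
};

static int example_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	/* was req->nbytes / req->info in the removed ablkcipher API */
	pr_debug("len %u, ivsize %u\n", req->cryptlen,
		 crypto_skcipher_ivsize(tfm));
	return 0;
}

static int example_init(struct crypto_skcipher *tfm)
{
	/* replaces tfm->crt_ablkcipher.reqsize = ... from cra_init */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct example_req_ctx));
	return 0;
}
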
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig
index 7b1e751bb9cd..7dc008332a81 100644
--- a/drivers/crypto/cavium/nitrox/Kconfig
+++ b/drivers/crypto/cavium/nitrox/Kconfig
@@ -4,7 +4,7 @@
#
config CRYPTO_DEV_NITROX
tristate
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_LIB_DES
select FW_LOADER
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index e4841eb2a09f..6f80cc3b5c84 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -74,6 +74,25 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
+static int nitrox_aes_gcm_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
struct scatterlist *src, char *iv, int ivsize,
int buflen)
@@ -186,6 +205,14 @@ static void nitrox_aead_callback(void *arg, int err)
areq->base.complete(&areq->base, err);
}
+static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen)
+{
+ if (assoclen <= 512)
+ return true;
+
+ return false;
+}
+
static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
@@ -195,6 +222,9 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen;
@@ -226,6 +256,9 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen - aead->authsize;
@@ -492,13 +525,13 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.setkey = nitrox_aes_gcm_setkey,
- .setauthsize = nitrox_aead_setauthsize,
+ .setauthsize = nitrox_aes_gcm_setauthsize,
.encrypt = nitrox_aes_gcm_enc,
.decrypt = nitrox_aes_gcm_dec,
.init = nitrox_aes_gcm_init,
@@ -511,7 +544,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
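
Two details in the nitrox AEAD changes above are worth a note: GCM behaves as a stream mode, so cra_blocksize drops to 1, and only the tag sizes the GCM specification allows (4, 8 and 12 through 16 bytes) get past the new ->setauthsize() hook; associated data is additionally capped at 512 bytes before a request is accepted. A hedged caller-side sketch using the generic kernel AEAD API (example_gcm_setup() and the immediate free are illustrative only, not driver code):

static int example_gcm_setup(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* GCM permits 4-, 8- and 12..16-byte tags; anything else is
	 * rejected by ->setauthsize(), as above */
	err = crypto_aead_setauthsize(tfm, 16);
	if (!err)
		err = crypto_aead_setkey(tfm, key, keylen);

	crypto_free_aead(tfm);
	return err;
}
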
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 2217a2736c8e..c2d0c23fb81b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -109,6 +109,13 @@ struct nitrox_q_vector {
};
};
+enum mcode_type {
+ MCODE_TYPE_INVALID,
+ MCODE_TYPE_AE,
+ MCODE_TYPE_SE_SSL,
+ MCODE_TYPE_SE_IPSEC,
+};
+
/**
* mbox_msg - Mailbox message data
* @type: message type
@@ -128,6 +135,14 @@ union mbox_msg {
u64 chipid: 8;
u64 vfid: 8;
} id;
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 count: 4;
+ u64 info: 40;
+ u64 next_se_grp: 3;
+ u64 next_ae_grp: 3;
+ } mcode_info;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index bc924980e10c..c4632d84c9a1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
offset = UCD_UCODE_LOAD_BLOCK_NUM;
nitrox_write_csr(ndev, offset, block_num);
- code_size = ucode_size;
- code_size = roundup(code_size, 8);
+ code_size = roundup(ucode_size, 16);
while (code_size) {
data = ucode_data[i];
/* write 8 bytes at a time */
@@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev)
/* write block number and firmware length
* bit:<2:0> block number
- * bit:3 is set SE uses 32KB microcode
- * bit:3 is clear SE uses 64KB microcode
+ * bit:3 is set AE uses 32KB microcode
+ * bit:3 is clear AE uses 64KB microcode
*/
core_2_eid_val.value = 0ULL;
- core_2_eid_val.ucode_blk = 0;
+ core_2_eid_val.ucode_blk = 2;
if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
core_2_eid_val.ucode_len = 1;
else
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 02ee95064841..b51b0449b478 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -25,6 +25,7 @@ enum mbx_msg_opcode {
MSG_OP_VF_UP,
MSG_OP_VF_DOWN,
MSG_OP_CHIPID_VFID,
+ MSG_OP_MCODE_INFO = 11,
};
struct pf2vf_work {
@@ -73,6 +74,13 @@ static void pf2vf_send_response(struct nitrox_device *ndev,
vfdev->nr_queues = 0;
atomic_set(&vfdev->state, __NDEV_NOT_READY);
break;
+ case MSG_OP_MCODE_INFO:
+ msg.data = 0;
+ msg.mcode_info.count = 2;
+ msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
+ msg.mcode_info.next_se_grp = 1;
+ msg.mcode_info.next_ae_grp = 1;
+ break;
default:
msg.type = MBX_MSG_TYPE_NOP;
break;
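
The new MSG_OP_MCODE_INFO reply packs the loaded microcode descriptors into the 40-bit info field of the mcode_info mailbox message; from the PF encoding above (MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5)) each entry appears to occupy 5 bits. A hedged sketch of how a VF might unpack it — the per-entry width is an inference from that encoding, and example_decode_mcode_info() is illustrative rather than driver code:

static void example_decode_mcode_info(union mbox_msg msg)
{
	unsigned int i;

	for (i = 0; i < msg.mcode_info.count; i++) {
		/* assumed 5-bit slots within the 40-bit info field */
		u64 type = (msg.mcode_info.info >> (i * 5)) & 0x1f;

		pr_info("microcode slot %u: type %llu\n", i, type);
	}
}
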
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index f69ba02c4d25..12282c1b14f5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -10,6 +10,8 @@
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define PRIO 4001
+typedef void (*sereq_completion_t)(void *req, int err);
+
/**
* struct gphdr - General purpose Header
* @param0: first parameter.
@@ -203,12 +205,14 @@ struct nitrox_crypto_ctx {
struct flexi_crypto_context *fctx;
} u;
struct crypto_ctx_hdr *chdr;
+ sereq_completion_t callback;
};
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
u8 *src;
u8 *dst;
+ u8 *iv_out;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 3cdce1f0f257..97af4d50d003 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -6,6 +6,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
@@ -47,6 +48,63 @@ static enum flexi_cipher flexi_cipher_type(const char *name)
return cipher->value;
}
+static void free_src_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->dst);
+}
+
+static void nitrox_skcipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+
+ free_src_sglist(skreq);
+ free_dst_sglist(skreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ skcipher_request_complete(skreq, err);
+}
+
+static void nitrox_cbc_cipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (err) {
+ nitrox_skcipher_callback(arg, err);
+ return;
+ }
+
+ if (nkreq->creq.ctrl.s.arg == ENCRYPT) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->dst, start, ivsize,
+ 0);
+ } else {
+ if (skreq->src != skreq->dst) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->src, start,
+ ivsize, 0);
+ } else {
+ memcpy(skreq->iv, nkreq->iv_out, ivsize);
+ kfree(nkreq->iv_out);
+ }
+ }
+
+ nitrox_skcipher_callback(arg, err);
+}
+
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -63,6 +121,8 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
+
+ nctx->callback = nitrox_skcipher_callback;
nctx->chdr = chdr;
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
sizeof(struct ctx_hdr));
@@ -71,6 +131,19 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
+static int nitrox_cbc_init(struct crypto_skcipher *tfm)
+{
+ int err;
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+ err = nitrox_skcipher_init(tfm);
+ if (err)
+ return err;
+
+ nctx->callback = nitrox_cbc_cipher_callback;
+ return 0;
+}
+
static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -173,34 +246,6 @@ static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize)
return 0;
}
-static void free_src_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->src);
-}
-
-static void free_dst_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->dst);
-}
-
-static void nitrox_skcipher_callback(void *arg, int err)
-{
- struct skcipher_request *skreq = arg;
-
- free_src_sglist(skreq);
- free_dst_sglist(skreq);
- if (err) {
- pr_err_ratelimited("request failed status 0x%0x\n", err);
- err = -EINVAL;
- }
-
- skcipher_request_complete(skreq, err);
-}
-
static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
@@ -240,8 +285,28 @@ static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
}
/* send the crypto request */
- return nitrox_process_se_request(nctx->ndev, creq,
- nitrox_skcipher_callback, skreq);
+ return nitrox_process_se_request(nctx->ndev, creq, nctx->callback,
+ skreq);
+}
+
+static int nitrox_cbc_decrypt(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ gfp_t flags = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (skreq->src != skreq->dst)
+ return nitrox_skcipher_crypt(skreq, false);
+
+ nkreq->iv_out = kmalloc(ivsize, flags);
+ if (!nkreq->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(nkreq->iv_out, skreq->src, start, ivsize, 0);
+ return nitrox_skcipher_crypt(skreq, false);
}
static int nitrox_aes_encrypt(struct skcipher_request *skreq)
@@ -340,8 +405,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
@@ -428,7 +493,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
@@ -455,8 +519,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = nitrox_3des_setkey,
.encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 8fec733f567f..e0a8bd15aa74 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_CCP_CRYPTO
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_RSA
select CRYPTO_LIB_AES
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 94c1ad7eeddf..ff50ee80d223 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -172,14 +172,12 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
.cra_exit = ccp_aes_gcm_cra_exit,
.cra_module = THIS_MODULE,
},
@@ -229,11 +227,10 @@ static int ccp_register_aes_aead(struct list_head *head,
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->base.cra_blocksize = def->blocksize;
- alg->base.cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_aead(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ pr_err("%s aead algorithm registration error (%d)\n",
alg->base.cra_name, ret);
kfree(ccp_aead);
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 8e4a531f4f70..04b2517df955 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -24,7 +24,7 @@ struct ccp_aes_xts_def {
const char *drv_name;
};
-static struct ccp_aes_xts_def aes_xts_algs[] = {
+static const struct ccp_aes_xts_def aes_xts_algs[] = {
{
.name = "xts(aes)",
.drv_name = "xts-aes-ccp",
@@ -61,26 +61,25 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
- struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int ccpversion = ccp_version();
int ret;
- ret = xts_check_key(xfm, key, key_len);
+ ret = xts_verify_key(tfm, key, key_len);
if (ret)
return ret;
@@ -102,11 +101,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
-static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+static int ccp_aes_xts_crypt(struct skcipher_request *req,
unsigned int encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
unsigned int ccpversion = ccp_version();
unsigned int fallback = 0;
unsigned int unit;
@@ -116,7 +116,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!ctx->u.aes.key_len)
return -EINVAL;
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
/* Check conditions under which the CCP can fulfill a request. The
@@ -127,7 +127,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
*/
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
- if (req->nbytes == xts_unit_sizes[unit].size) {
+ if (req->cryptlen == xts_unit_sizes[unit].size) {
unit_size = unit;
break;
}
@@ -155,14 +155,14 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return ret;
}
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -177,7 +177,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.u.xts.iv = &rctx->iv_sg;
rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.xts.src = req->src;
- rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.src_len = req->cryptlen;
rctx->cmd.u.xts.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -185,19 +185,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
return ret;
}
-static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 1);
}
-static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 0);
}
-static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
@@ -212,14 +212,14 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -227,8 +227,8 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -239,30 +239,30 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_blocksize = AES_BLOCK_SIZE;
- alg->cra_ctxsize = sizeof(struct ccp_ctx);
- alg->cra_priority = CCP_CRA_PRIORITY;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
- alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
- alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
- alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
- alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
- alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
- alg->cra_init = ccp_aes_xts_cra_init;
- alg->cra_exit = ccp_aes_xts_cra_exit;
- alg->cra_module = THIS_MODULE;
-
- ret = crypto_register_alg(alg);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_blocksize = AES_BLOCK_SIZE;
+ alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->base.cra_priority = CCP_CRA_PRIORITY;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->setkey = ccp_aes_xts_setkey;
+ alg->encrypt = ccp_aes_xts_encrypt;
+ alg->decrypt = ccp_aes_xts_decrypt;
+ alg->min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->ivsize = AES_BLOCK_SIZE;
+ alg->init = ccp_aes_xts_init_tfm;
+ alg->exit = ccp_aes_xts_exit_tfm;
+
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
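
When the CCP cannot service an XTS request directly, the path above hands it to a software sync skcipher allocated at init time, using an on-stack subrequest. A hedged sketch of that fallback pattern, with a hypothetical example_fallback_crypt() wrapper rather than the driver's exact code:

static int example_fallback_crypt(struct crypto_sync_skcipher *fallback,
				  struct skcipher_request *req, bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe the on-stack request state */

	return ret;
}
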
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 58c6dddfc5e1..33328a153225 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -21,25 +21,24 @@
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -52,7 +51,7 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
@@ -64,10 +63,11 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -77,14 +77,14 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
(ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
- (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ (req->cryptlen & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = AES_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -102,7 +102,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.aes.iv = iv_sg;
rctx->cmd.u.aes.iv_len = iv_len;
rctx->cmd.u.aes.src = req->src;
- rctx->cmd.u.aes.src_len = req->nbytes;
+ rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -110,48 +110,44 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_aes_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_encrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, true);
}
-static int ccp_aes_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_decrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, false);
}
-static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
/* Restore the original pointer */
- req->info = rctx->rfc3686_info;
+ req->iv = rctx->rfc3686_info;
return ccp_aes_complete(async_req, ret);
}
-static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -162,10 +158,11 @@ static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ccp_aes_setkey(tfm, key, key_len);
}
-static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
u8 *iv;
/* Initialize the CTR block */
@@ -173,84 +170,72 @@ static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
iv += CTR_RFC3686_NONCE_SIZE;
- memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);
iv += CTR_RFC3686_IV_SIZE;
*(__be32 *)iv = cpu_to_be32(1);
/* Point to the new IV */
- rctx->rfc3686_info = req->info;
- req->info = rctx->rfc3686_iv;
+ rctx->rfc3686_info = req->iv;
+ req->iv = rctx->rfc3686_iv;
return ccp_aes_crypt(req, encrypt);
}
-static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, true);
}
-static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, false);
}
-static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_rfc3686_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_aes_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_cra_init,
- .cra_exit = ccp_aes_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_setkey,
- .encrypt = ccp_aes_encrypt,
- .decrypt = ccp_aes_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_aes_defaults = {
+ .setkey = ccp_aes_setkey,
+ .encrypt = ccp_aes_encrypt,
+ .decrypt = ccp_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = ccp_aes_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
-static struct crypto_alg ccp_aes_rfc3686_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_rfc3686_cra_init,
- .cra_exit = ccp_aes_rfc3686_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_rfc3686_setkey,
- .encrypt = ccp_aes_rfc3686_encrypt,
- .decrypt = ccp_aes_rfc3686_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- },
+static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
+ .setkey = ccp_aes_rfc3686_setkey,
+ .encrypt = ccp_aes_rfc3686_encrypt,
+ .decrypt = ccp_aes_rfc3686_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .init = ccp_aes_rfc3686_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_aes_def {
@@ -260,7 +245,7 @@ struct ccp_aes_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
static struct ccp_aes_def aes_algs[] = {
@@ -323,8 +308,8 @@ static struct ccp_aes_def aes_algs[] = {
static int ccp_register_aes_alg(struct list_head *head,
const struct ccp_aes_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -338,16 +323,16 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index d2c49b2f0323..9c129defdb50 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -20,28 +20,27 @@
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
- memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, DES3_EDE_BLOCK_SIZE);
return 0;
}
-static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -58,10 +57,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -71,14 +71,14 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
(ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
- (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+ (req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, DES3_EDE_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = DES3_EDE_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -97,7 +97,7 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.des3.iv = iv_sg;
rctx->cmd.u.des3.iv_len = iv_len;
rctx->cmd.u.des3.src = req->src;
- rctx->cmd.u.des3.src_len = req->nbytes;
+ rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -105,51 +105,43 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_des3_encrypt(struct ablkcipher_request *req)
+static int ccp_des3_encrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, true);
}
-static int ccp_des3_decrypt(struct ablkcipher_request *req)
+static int ccp_des3_decrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, false);
}
-static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+static int ccp_des3_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_des3_complete;
ctx->u.des3.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx));
return 0;
}
-static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_des3_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_des3_cra_init,
- .cra_exit = ccp_des3_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_des3_setkey,
- .encrypt = ccp_des3_encrypt,
- .decrypt = ccp_des3_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_des3_defaults = {
+ .setkey = ccp_des3_setkey,
+ .encrypt = ccp_des3_encrypt,
+ .decrypt = ccp_des3_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = ccp_des3_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_des3_def {
@@ -159,10 +151,10 @@ struct ccp_des3_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
-static struct ccp_des3_def des3_algs[] = {
+static const struct ccp_des3_def des3_algs[] = {
{
.mode = CCP_DES3_MODE_ECB,
.version = CCP_VERSION(5, 0),
@@ -186,8 +178,8 @@ static struct ccp_des3_def des3_algs[] = {
static int ccp_register_des3_alg(struct list_head *head,
const struct ccp_des3_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -201,16 +193,16 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8ee4cb45a3f3..88275b4867ea 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);
@@ -330,7 +330,7 @@ static int ccp_register_algs(void)
int ret;
if (!aes_disable) {
- ret = ccp_register_aes_algs(&cipher_algs);
+ ret = ccp_register_aes_algs(&skcipher_algs);
if (ret)
return ret;
@@ -338,7 +338,7 @@ static int ccp_register_algs(void)
if (ret)
return ret;
- ret = ccp_register_aes_xts_algs(&cipher_algs);
+ ret = ccp_register_aes_xts_algs(&skcipher_algs);
if (ret)
return ret;
@@ -348,7 +348,7 @@ static int ccp_register_algs(void)
}
if (!des3_disable) {
- ret = ccp_register_des3_algs(&cipher_algs);
+ ret = ccp_register_des3_algs(&skcipher_algs);
if (ret)
return ret;
}
@@ -371,7 +371,7 @@ static int ccp_register_algs(void)
static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
- struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+ struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
@@ -381,8 +381,8 @@ static void ccp_unregister_algs(void)
kfree(ahash_alg);
}
- list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&ablk_alg->alg);
+ list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&ablk_alg->alg);
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 9015b5da6ba3..90a009e6b5c1 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -21,6 +21,7 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/akcipher.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/rsa.h>
/* We want the module name in front of our messages */
@@ -31,12 +32,12 @@
#define CCP_CRA_PRIORITY 300
-struct ccp_crypto_ablkcipher_alg {
+struct ccp_crypto_skcipher_alg {
struct list_head entry;
u32 mode;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
};
struct ccp_crypto_aead {
@@ -66,12 +67,12 @@ struct ccp_crypto_akcipher_alg {
struct akcipher_alg alg;
};
-static inline struct ccp_crypto_ablkcipher_alg *
- ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+static inline struct ccp_crypto_skcipher_alg *
+ ccp_crypto_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+ return container_of(alg, struct ccp_crypto_skcipher_alg, alg);
}
static inline struct ccp_crypto_ahash_alg *
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 57eb53b8ac21..82ac4c14c04c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -789,6 +789,18 @@ static int ccp5_init(struct ccp_device *ccp)
/* Find available queues */
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ /*
+	 * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and fail
+ * the initialization (but not the load, as the PSP could get
+ * properly initialized).
+ */
+ if (qmr == 0xffffffff) {
+ dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
+ return 1;
+ }
+
for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
if (!(qmr & (1 << i)))
continue;
@@ -854,7 +866,7 @@ static int ccp5_init(struct ccp_device *ccp)
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
- ret = -EIO;
+ ret = 1;
goto e_pool;
}
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 73acf0fdb793..19ac509ed76e 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -641,18 +641,27 @@ int ccp_dev_init(struct sp_device *sp)
ccp->vdata->setup(ccp);
ret = ccp->vdata->perform->init(ccp);
- if (ret)
+ if (ret) {
+ /* A positive number means that the device cannot be initialized,
+ * but no additional message is required.
+ */
+ if (ret > 0)
+ goto e_quiet;
+
+ /* An unexpected problem occurred, and should be reported in the log */
goto e_err;
+ }
dev_notice(dev, "ccp enabled\n");
return 0;
e_err:
- sp->ccp_data = NULL;
-
dev_notice(dev, "ccp initialization failed\n");
+e_quiet:
+ sp->ccp_data = NULL;
+
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index a54f9367a580..0770a83bf1a5 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->entry);
INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c8da8eb160da..422193690fd4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1777,8 +1777,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
LSB_ITEM_SIZE);
break;
default:
+ kfree(hmac_buf);
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
memset(&hmac_cmd, 0, sizeof(hmac_cmd));
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 39fdd0641637..7ca2d3408e7a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -303,6 +303,9 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
{
int state, rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/*
* The SEV spec requires that FACTORY_RESET must be issued in
 * UNINIT state. Before we go further let's check if any guest is
@@ -347,6 +350,9 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
{
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (psp_master->sev_state == SEV_STATE_UNINIT) {
rc = __sev_platform_init_locked(&argp->error);
if (rc)
@@ -363,6 +369,9 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -549,6 +558,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -704,6 +716,16 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
struct sev_data_pdh_cert_export *data;
int ret;
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -750,13 +772,6 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
data->cert_chain_len = input.cert_chain_len;
cmd:
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_cert;
- }
-
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -938,8 +953,22 @@ static int sev_misc_init(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
- /* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
+
+ /*
+	 * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
+ */
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
+ return -ENODEV;
+ }
+
+ if (!(val & 1)) {
+ /* Device does not support the SEV feature */
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -1073,6 +1102,18 @@ void psp_pci_init(void)
/* Initialize the platform */
rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+	 * validation of SEV-related persistent data have
+	 * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sp->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
return;
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 82a084f02990..dd516b35ba86 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -23,6 +23,7 @@
#include <linux/dmaengine.h>
#include <linux/psp-sev.h>
#include <linux/miscdevice.h>
+#include <linux/capability.h>
#include "sp-dev.h"
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index d3e8faa03f15..64d318dc0d47 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -293,7 +293,8 @@ static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
return 4;
}
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 254b48797799..3112b58d0bb1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -16,7 +16,7 @@
#include "cc_cipher.h"
#include "cc_request_mgr.h"
-#define MAX_ABLKCIPHER_SEQ_LEN 6
+#define MAX_SKCIPHER_SEQ_LEN 6
#define template_skcipher template_u.skcipher
@@ -822,7 +822,7 @@ static int cc_cipher_process(struct skcipher_request *req,
void *iv = req->iv;
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
int rc;
unsigned int seq_len = 0;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 38ee38b37ae6..1b4a5664e604 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -93,7 +93,7 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -568,11 +568,11 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.crypto);
+ container_of(alg, struct chcr_alg_template, alg.skcipher);
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
@@ -757,14 +757,14 @@ static inline void create_wreq(struct chcr_context *ctx,
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- struct chcr_blkcipher_req_ctx *reqctx =
- ablkcipher_request_ctx(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(wrparam->req);
unsigned int temp = 0, transhdr_len, dst_size;
int error;
int nents;
@@ -807,9 +807,9 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
@@ -843,7 +843,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
if (reqctx->op && (ablkctx->ciph_mode ==
CHCR_SCMD_CIPHER_MODE_AES_CBC))
sg_pcopy_to_buffer(wrparam->req->src,
- sg_nents(wrparam->req->src), wrparam->req->info, 16,
+ sg_nents(wrparam->req->src), wrparam->req->iv, 16,
reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
return skb;
@@ -866,11 +866,11 @@ static inline int chcr_keyctx_ck_size(unsigned int keylen)
return ck_size;
}
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
@@ -886,7 +886,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
return err;
}
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -912,13 +912,13 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -943,13 +943,13 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -981,7 +981,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1017,12 +1017,12 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
u32 isfinal)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_aes_ctx aes;
int ret, i;
u8 *key;
@@ -1051,16 +1051,16 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
return 0;
}
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
+ ctr_add_iv(iv, req->iv, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
@@ -1071,7 +1071,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
/*Updated before sending last WR*/
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, req->iv, AES_BLOCK_SIZE);
else
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1085,16 +1085,16 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
* for subsequent update requests
*/
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
@@ -1108,25 +1108,25 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct cipher_wr_param wrparam;
struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
goto unmap;
- if (req->nbytes == reqctx->processed) {
+ if (req->cryptlen == reqctx->processed) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
- err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+ err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
goto complete;
}
@@ -1134,13 +1134,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
		/* CTR mode counter overflow */
- bytes = req->nbytes - reqctx->processed;
+ bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
if (err)
@@ -1153,13 +1153,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
- req->info,
+ req->cryptlen,
+ req->iv,
reqctx->op);
goto complete;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
@@ -1185,33 +1185,33 @@ complete:
return err;
}
-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
unsigned short qid,
struct sk_buff **skb,
unsigned short op_type)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
reqctx->processed = 0;
- if (!req->info)
+ if (!req->iv)
goto error;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes == 0) ||
- (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+ (req->cryptlen == 0) ||
+ (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
- ablkctx->enckey_len, req->nbytes, ivsize);
+ ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
}
err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
if (err)
goto error;
- if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
/*Min dsgl size*/
@@ -1219,14 +1219,14 @@ static int process_cipher(struct ablkcipher_request *req,
/* Can be sent as Imm*/
unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
- dnents = sg_nents_xlen(req->dst, req->nbytes,
+ dnents = sg_nents_xlen(req->dst, req->cryptlen,
CHCR_DST_SG_SIZE, 0);
phys_dsgl = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
SGE_MAX_WR_LEN;
- bytes = IV + req->nbytes;
+ bytes = IV + req->cryptlen;
} else {
reqctx->imm = 0;
@@ -1236,21 +1236,21 @@ static int process_cipher(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
- bytes = req->nbytes;
+ bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
- bytes = adjust_ctr_overflow(req->info, bytes);
+ bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
@@ -1259,7 +1259,7 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, IV);
+ memcpy(reqctx->iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
@@ -1268,7 +1268,7 @@ static int process_cipher(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
+ req->cryptlen,
reqctx->iv,
op_type);
goto error;
@@ -1296,9 +1296,9 @@ error:
return err;
}
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
@@ -1329,9 +1329,9 @@ error:
return err;
}
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
@@ -1398,27 +1398,28 @@ out:
return err;
}
-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+
+ return chcr_device_init(ctx);
}
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
@@ -1427,17 +1428,17 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ return chcr_device_init(ctx);
}
-static void chcr_cra_exit(struct crypto_tfm *tfm)
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
crypto_free_sync_skcipher(ablkctx->sw_cipher);
@@ -2056,8 +2057,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ chcr_handle_cipher_resp(skcipher_request_cast(req),
input, err);
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -2148,7 +2149,7 @@ out:
return err;
}
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int key_len)
{
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
@@ -2172,7 +2173,7 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -2576,12 +2577,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
u8 *buf = ulptx;
memcpy(buf, reqctx->iv, IV);
@@ -2599,13 +2600,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid)
{
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
@@ -2680,7 +2681,7 @@ void chcr_hash_dma_unmap(struct device *dev,
}
int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
int error;
@@ -2709,7 +2710,7 @@ err:
}
void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -3712,82 +3713,76 @@ static int chcr_aead_decrypt(struct aead_request *req)
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_cbc_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_cbc_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
- }
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = NULL,
- .cra_u .ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_xts_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_xts_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_ctr_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_ctr_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ .type = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_rfc3686_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = chcr_aes_rfc3686_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_rfc3686_init,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = chcr_aes_rfc3686_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
/* SHA */
@@ -4254,10 +4249,10 @@ static int chcr_unregister_alg(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
if (driver_algs[i].is_registered)
- crypto_unregister_alg(
- &driver_algs[i].alg.crypto);
+ crypto_unregister_skcipher(
+ &driver_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
if (driver_algs[i].is_registered)
@@ -4293,21 +4288,20 @@ static int chcr_register_alg(void)
if (driver_algs[i].is_registered)
continue;
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- driver_algs[i].alg.crypto.cra_priority =
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ driver_algs[i].alg.skcipher.base.cra_priority =
CHCR_CRA_PRIORITY;
- driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
- driver_algs[i].alg.crypto.cra_flags =
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
+ driver_algs[i].alg.skcipher.base.cra_flags =
+ CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK;
- driver_algs[i].alg.crypto.cra_ctxsize =
+ driver_algs[i].alg.skcipher.base.cra_ctxsize =
sizeof(struct chcr_context) +
sizeof(struct ablk_ctx);
- driver_algs[i].alg.crypto.cra_alignmask = 0;
- driver_algs[i].alg.crypto.cra_type =
- &crypto_ablkcipher_type;
- err = crypto_register_alg(&driver_algs[i].alg.crypto);
- name = driver_algs[i].alg.crypto.cra_driver_name;
+ driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
+
+ err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
+ name = driver_algs[i].alg.skcipher.base.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index d1e6b51df0ce..f58c2b5c7fc5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -287,7 +287,7 @@ struct hash_wr_param {
};
struct cipher_wr_param {
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
char *iv;
int bytes;
unsigned short qid;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 993c97e70565..6db2df8c8a05 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -160,9 +160,9 @@ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
return crypto_aead_ctx(tfm);
}
-static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
{
- return crypto_ablkcipher_ctx(tfm);
+ return crypto_skcipher_ctx(tfm);
}
static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
@@ -285,7 +285,7 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
-struct chcr_blkcipher_req_ctx {
+struct chcr_skcipher_req_ctx {
struct sk_buff *skb;
struct scatterlist *dstsg;
unsigned int processed;
@@ -302,7 +302,7 @@ struct chcr_alg_template {
u32 type;
u32 is_registered;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -321,12 +321,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned short qid);
void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
-int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
-void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 98bc5a4cd5e7..6c29b2c7bc65 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -97,7 +97,7 @@ static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
if (!skb)
return NULL;
- memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+ __skb_put_data(skb, flowc, flowclen);
skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
return skb;
@@ -1841,8 +1841,7 @@ skip_copy:
tp->urg_data = 0;
if (avail + offset >= skb->len) {
- if (likely(skb))
- chtls_free_skb(sk, skb);
+ chtls_free_skb(sk, skb);
buffers_freed++;
if (copied >= target &&
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index d81a1297cb9e..73a899e6f837 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -23,12 +24,12 @@ static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
-_writefield(u32 offset, void *value)
+_writefield(u32 offset, const void *value)
{
int i;
for (i = 0; i < 4; i++)
- iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+ iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}
/* Read a 128 bit field (either a writable key or IV) */
@@ -42,12 +43,12 @@ _readfield(u32 offset, void *value)
}
static int
-do_crypt(void *src, void *dst, int len, u32 flags)
+do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
u32 status;
u32 counter = AES_OP_TIMEOUT;
- iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+ iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
iowrite32(len, _iobase + AES_LENA_REG);
@@ -64,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags)
return counter ? 0 : 1;
}
-static unsigned int
-geode_aes_crypt(struct geode_aes_op *op)
+static void
+geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
+ void *dst, u32 len, u8 *iv, int mode, int dir)
{
u32 flags = 0;
unsigned long iflags;
int ret;
- if (op->len == 0)
- return 0;
-
	/* If the source and destination are the same, then
* we need to turn on the coherent flags, otherwise
* we don't need to worry
@@ -81,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op)
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
- if (op->dir == AES_DIR_ENCRYPT)
+ if (dir == AES_DIR_ENCRYPT)
flags |= AES_CTRL_ENCRYPT;
/* Start the critical section */
spin_lock_irqsave(&lock, iflags);
- if (op->mode == AES_MODE_CBC) {
+ if (mode == AES_MODE_CBC) {
flags |= AES_CTRL_CBC;
- _writefield(AES_WRITEIV0_REG, op->iv);
+ _writefield(AES_WRITEIV0_REG, iv);
}
- if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
- flags |= AES_CTRL_WRKEY;
- _writefield(AES_WRITEKEY0_REG, op->key);
- }
+ flags |= AES_CTRL_WRKEY;
+ _writefield(AES_WRITEKEY0_REG, tctx->key);
- ret = do_crypt(op->src, op->dst, op->len, flags);
+ ret = do_crypt(src, dst, len, flags);
BUG_ON(ret);
- if (op->mode == AES_MODE_CBC)
- _readfield(AES_WRITEIV0_REG, op->iv);
+ if (mode == AES_MODE_CBC)
+ _readfield(AES_WRITEIV0_REG, iv);
spin_unlock_irqrestore(&lock, iflags);
-
- return op->len;
}
/* CRYPTO-API Functions */
@@ -114,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op)
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
@@ -133,135 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+ tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tctx->fallback.cip->base.crt_flags |=
+ (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+ ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
}
return ret;
}
-static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
+static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
-}
-
-static int fallback_blk_dec(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
- return ret;
-}
-static int fallback_blk_enc(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
+ crypto_skcipher_clear_flags(tctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(tctx->fallback.skcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(tctx->fallback.skcipher) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_ENCRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_ENCRYPT);
}
static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_DECRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_DECRYPT);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- op->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ tctx->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(op->fallback.cip)) {
+ if (IS_ERR(tctx->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.cip);
+ return PTR_ERR(tctx->fallback.cip);
}
return 0;
@@ -269,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(op->fallback.cip);
- op->fallback.cip = NULL;
+ crypto_free_cipher(tctx->fallback.cip);
}
static struct crypto_alg geode_alg = {
@@ -285,7 +237,7 @@ static struct crypto_alg geode_alg = {
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
+ .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -298,209 +250,126 @@ static struct crypto_alg geode_alg = {
}
};
-static int
-geode_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- ret = geode_aes_crypt(op);
-
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ tctx->fallback.skcipher =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tctx->fallback.skcipher)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(tctx->fallback.skcipher);
}
- return err;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tctx->fallback.skcipher));
+ return 0;
}
-static int
-geode_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- return err;
+ crypto_free_skcipher(tctx->fallback.skcipher);
}
-static int fallback_init_blk(struct crypto_tfm *tfm)
+static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
+ if (dir == AES_DIR_DECRYPT)
+ return crypto_skcipher_decrypt(subreq);
+ else
+ return crypto_skcipher_encrypt(subreq);
+ }
- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ err = skcipher_walk_virt(&walk, req, false);
- if (IS_ERR(op->fallback.blk)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.blk);
+ while ((nbytes = walk.nbytes) != 0) {
+ geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv, mode, dir);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
- return 0;
+ return err;
}
-static void fallback_exit_blk(struct crypto_tfm *tfm)
+static int geode_cbc_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(op->fallback.blk);
- op->fallback.blk = NULL;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}
-static struct crypto_alg geode_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_cbc_encrypt,
- .decrypt = geode_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
-};
-
-static int
-geode_ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_cbc_decrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}
-static int
-geode_ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_ecb_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
+}
- return err;
+static int geode_ecb_decrypt(struct skcipher_request *req)
+{
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}
-static struct crypto_alg geode_ecb_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_ecb_encrypt,
- .decrypt = geode_ecb_decrypt,
- }
- }
+static struct skcipher_alg geode_skcipher_algs[] = {
+ {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_cbc_encrypt,
+ .decrypt = geode_cbc_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_ecb_encrypt,
+ .decrypt = geode_ecb_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
};
static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
- crypto_unregister_alg(&geode_ecb_alg);
- crypto_unregister_alg(&geode_cbc_alg);
+ crypto_unregister_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
pci_iounmap(dev, _iobase);
_iobase = NULL;
@@ -538,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret)
goto eiomap;
- ret = crypto_register_alg(&geode_ecb_alg);
+ ret = crypto_register_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
if (ret)
goto ealg;
- ret = crypto_register_alg(&geode_cbc_alg);
- if (ret)
- goto eecb;
-
dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
return 0;
- eecb:
- crypto_unregister_alg(&geode_ecb_alg);
-
ealg:
crypto_unregister_alg(&geode_alg);
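
[Editor's note] The hunks above replace geode's blkcipher entry points with skcipher ones and route short-key requests to a software fallback. As a minimal consumer-side sketch only (none of this is in the patch; the buffer, key and IV names are hypothetical, and the length is assumed to be a multiple of AES_BLOCK_SIZE), a kernel caller could exercise the registered cbc(aes) implementation synchronously like this:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative sketch only: encrypt "len" bytes in place with AES-128-CBC. */
static int example_cbc_aes_once(u8 *buf, unsigned int len,
				const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);	/* AES-128 stays on the Geode engine */
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

With a 16-byte key the request is handled by the hardware path; any other AES key size is served by the fallback skcipher allocated in geode_init_skcipher(), as the keylen check in geode_skcipher_crypt() shows.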
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index 5c6e131a8f9d..6d0a0cdc7647 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -46,21 +46,10 @@
#define AES_OP_TIMEOUT 0x50000
-struct geode_aes_op {
-
- void *src;
- void *dst;
-
- u32 mode;
- u32 dir;
- u32 flags;
- int len;
-
+struct geode_aes_tfm_ctx {
u8 key[AES_KEYSIZE_128];
- u8 *iv;
-
union {
- struct crypto_blkcipher *blk;
+ struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
u32 keylen;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a18e62df68d9..4e7323884ae3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -22,6 +22,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
static char hifn_pll_ref[sizeof("extNNN")] = "ext";
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
@@ -596,7 +597,7 @@ struct hifn_crypt_result {
struct hifn_crypto_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct hifn_device *dev;
};
@@ -1404,7 +1405,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
w->num = 0;
}
-static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
+static int skcipher_add(unsigned int *drestp, struct scatterlist *dst,
unsigned int size, unsigned int *nbytesp)
{
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
@@ -1433,11 +1434,11 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int hifn_cipher_walk(struct ablkcipher_request *req,
+static int hifn_cipher_walk(struct skcipher_request *req,
struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
- unsigned int nbytes = req->nbytes, offset, copy, diff;
+ unsigned int nbytes = req->cryptlen, offset, copy, diff;
int idx, tidx, err;
tidx = idx = 0;
@@ -1459,7 +1460,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
t = &w->cache[idx];
- err = ablkcipher_add(&dlen, dst, slen, &nbytes);
+ err = skcipher_add(&dlen, dst, slen, &nbytes);
if (err < 0)
return err;
@@ -1498,7 +1499,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
dst = &req->dst[idx];
- err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
+ err = skcipher_add(&dlen, dst, nbytes, &nbytes);
if (err < 0)
return err;
@@ -1518,13 +1519,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
return tidx;
}
-static int hifn_setup_session(struct ablkcipher_request *req)
+static int hifn_setup_session(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
struct hifn_device *dev = ctx->dev;
unsigned long dlen, flags;
- unsigned int nbytes = req->nbytes, idx = 0;
+ unsigned int nbytes = req->cryptlen, idx = 0;
int err = -EINVAL, sg_num;
struct scatterlist *dst;
@@ -1563,7 +1564,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
goto err_out;
}
- err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
+ err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->cryptlen, req);
if (err)
goto err_out;
@@ -1610,7 +1611,7 @@ static int hifn_start_device(struct hifn_device *dev)
return 0;
}
-static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
+static int skcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
unsigned int srest = *srestp, nbytes = *nbytesp, copy;
@@ -1660,12 +1661,12 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i)
BUG_ON(dev->started < 0);
}
-static void hifn_process_ready(struct ablkcipher_request *req, int error)
+static void hifn_process_ready(struct skcipher_request *req, int error)
{
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
int idx = 0, err;
struct scatterlist *dst, *t;
void *saddr;
@@ -1688,7 +1689,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
saddr = kmap_atomic(sg_page(t));
- err = ablkcipher_get(saddr, &t->length, t->offset,
+ err = skcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes);
if (err < 0) {
kunmap_atomic(saddr);
@@ -1910,7 +1911,7 @@ static void hifn_flush(struct hifn_device *dev)
{
unsigned long flags;
struct crypto_async_request *async_req;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i;
@@ -1926,7 +1927,7 @@ static void hifn_flush(struct hifn_device *dev)
spin_lock_irqsave(&dev->lock, flags);
while ((async_req = crypto_dequeue_request(&dev->queue))) {
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
spin_unlock_irqrestore(&dev->lock, flags);
hifn_process_ready(req, -ENODEV);
@@ -1936,14 +1937,14 @@ static void hifn_flush(struct hifn_device *dev)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1955,14 +1956,14 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1974,36 +1975,36 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_handle_req(struct ablkcipher_request *req)
+static int hifn_handle_req(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
int err = -EAGAIN;
- if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
+ if (dev->started + DIV_ROUND_UP(req->cryptlen, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
err = hifn_setup_session(req);
if (err == -EAGAIN) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
spin_unlock_irqrestore(&dev->lock, flags);
}
return err;
}
-static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto_req(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
unsigned ivsize;
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
- if (req->info && mode != ACRYPTO_MODE_ECB) {
+ if (req->iv && mode != ACRYPTO_MODE_ECB) {
if (type == ACRYPTO_TYPE_AES_128)
ivsize = HIFN_AES_IV_LENGTH;
else if (type == ACRYPTO_TYPE_DES)
@@ -2022,7 +2023,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
rctx->op = op;
rctx->mode = mode;
rctx->type = type;
- rctx->iv = req->info;
+ rctx->iv = req->iv;
rctx->ivsize = ivsize;
/*
@@ -2037,7 +2038,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
static int hifn_process_queue(struct hifn_device *dev)
{
struct crypto_async_request *async_req, *backlog;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
unsigned long flags;
int err = 0;
@@ -2053,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
err = hifn_handle_req(req);
if (err)
@@ -2063,7 +2064,7 @@ static int hifn_process_queue(struct hifn_device *dev)
return err;
}
-static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
int err;
@@ -2083,22 +2084,22 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
/*
 * AES encryption functions.
*/
-static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2107,22 +2108,22 @@ static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
/*
* AES decryption functions.
*/
-static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2131,22 +2132,22 @@ static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
/*
 * DES encryption functions.
*/
-static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2155,22 +2156,22 @@ static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
/*
* DES decryption functions.
*/
-static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2179,44 +2180,44 @@ static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
/*
 * 3DES encryption functions.
*/
-static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/* 3DES decryption functions. */
-static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
@@ -2226,16 +2227,16 @@ struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char drv_name[CRYPTO_MAX_ALG_NAME];
unsigned int bsize;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static struct hifn_alg_template hifn_alg_templates[] = {
+static const struct hifn_alg_template hifn_alg_templates[] = {
/*
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2245,7 +2246,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2255,7 +2256,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2266,7 +2267,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2280,7 +2281,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2290,7 +2291,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2300,7 +2301,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2311,7 +2312,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2325,7 +2326,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2335,7 +2336,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -2346,7 +2347,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2356,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2366,18 +2367,19 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
};
-static int hifn_cra_init(struct crypto_tfm *tfm)
+static int hifn_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
- struct hifn_context *ctx = crypto_tfm_ctx(tfm);
+ struct hifn_context *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = ha->dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct hifn_request_context));
+
return 0;
}
-static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
+static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_template *t)
{
struct hifn_crypto_alg *alg;
int err;
@@ -2386,26 +2388,25 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
if (!alg)
return -ENOMEM;
- snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
- snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
+ alg->alg = t->skcipher;
+ alg->alg.init = hifn_init_tfm;
+
+ snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
+ snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
- alg->alg.cra_priority = 300;
- alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->alg.cra_blocksize = t->bsize;
- alg->alg.cra_ctxsize = sizeof(struct hifn_context);
- alg->alg.cra_alignmask = 0;
- alg->alg.cra_type = &crypto_ablkcipher_type;
- alg->alg.cra_module = THIS_MODULE;
- alg->alg.cra_u.ablkcipher = t->ablkcipher;
- alg->alg.cra_init = hifn_cra_init;
+ alg->alg.base.cra_priority = 300;
+ alg->alg.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->alg.base.cra_blocksize = t->bsize;
+ alg->alg.base.cra_ctxsize = sizeof(struct hifn_context);
+ alg->alg.base.cra_alignmask = 0;
+ alg->alg.base.cra_module = THIS_MODULE;
alg->dev = dev;
list_add_tail(&alg->entry, &dev->alg_list);
- err = crypto_register_alg(&alg->alg);
+ err = crypto_register_skcipher(&alg->alg);
if (err) {
list_del(&alg->entry);
kfree(alg);
@@ -2420,7 +2421,7 @@ static void hifn_unregister_alg(struct hifn_device *dev)
list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
list_del(&a->entry);
- crypto_unregister_alg(&a->alg);
+ crypto_unregister_skcipher(&a->alg);
kfree(a);
}
}
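
[Editor's note] hifn's converted algorithms keep CRYPTO_ALG_ASYNC, so completion is delivered through the request callback that hifn_process_ready() ultimately invokes via req->base.complete. As a hedged illustration (my_ctx and my_skcipher_done are invented names, not part of this driver), a caller-supplied completion handler for such an async skcipher typically looks like:

#include <crypto/skcipher.h>
#include <linux/completion.h>

/* Hypothetical per-request bookkeeping kept by the caller. */
struct my_ctx {
	struct completion done;
	int err;
};

static void my_skcipher_done(struct crypto_async_request *areq, int err)
{
	struct my_ctx *ctx = areq->data;

	/* A backlogged request reports -EINPROGRESS once it has been accepted. */
	if (err == -EINPROGRESS)
		return;

	ctx->err = err;
	complete(&ctx->done);
}

The caller passes my_skcipher_done and &ctx to skcipher_request_set_callback(), then waits on ctx.done when crypto_skcipher_encrypt() returns -EINPROGRESS or -EBUSY; crypto_wait_req() wraps this same pattern.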
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index ebaf91e0146d..c0e7a85fe129 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -2,7 +2,7 @@
config CRYPTO_DEV_HISI_SEC
tristate "Support for Hisilicon SEC crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select SG_SPLIT
@@ -14,26 +14,47 @@ config CRYPTO_DEV_HISI_SEC
To compile this as a module, choose M here: the module
will be called hisi_sec.
+config CRYPTO_DEV_HISI_SEC2
+ tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_DES
+ select CRYPTO_DEV_HISI_QM
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+	  Support for the HiSilicon SEC Engine version 2 in the crypto
+	  subsystem. It provides AES, SM4 and 3DES algorithms with the
+	  ECB, CBC and XTS cipher modes.
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec2.
+
config CRYPTO_DEV_HISI_QM
tristate
- depends on ARM64 && PCI && PCI_MSI
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI && PCI_MSI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
-config CRYPTO_HISI_SGL
- tristate
- depends on ARM64
- help
- HiSilicon accelerator engines use a common hardware scatterlist
- interface for data format. Specific engine driver may use this
- module.
-
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HiSilicon ZIP accelerator"
- depends on ARM64 && PCI && PCI_MSI
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select CRYPTO_HISI_SGL
select SG_SPLIT
help
Support for HiSilicon ZIP Driver
+
+config CRYPTO_DEV_HISI_HPRE
+ tristate "Support for HISI HPRE accelerator"
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ select CRYPTO_DEV_HISI_QM
+ select CRYPTO_DH
+ select CRYPTO_RSA
+ help
+	  Support for HiSilicon HPRE (High Performance RSA Engine)
+ accelerator, which can accelerate RSA and DH algorithms.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 45a279741126..7f5f74c72baa 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
-obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
-obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
+obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
+hisi_qm-objs = qm.o sgl.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
diff --git a/drivers/crypto/hisilicon/hpre/Makefile b/drivers/crypto/hisilicon/hpre/Makefile
new file mode 100644
index 000000000000..4fd32b789e1e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o
+hisi_hpre-objs = hpre_main.o hpre_crypto.o
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
new file mode 100644
index 000000000000..ddf13ea9862a
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef __HISI_HPRE_H
+#define __HISI_HPRE_H
+
+#include <linux/list.h>
+#include "../qm.h"
+
+#define HPRE_SQE_SIZE sizeof(struct hpre_sqe)
+#define HPRE_PF_DEF_Q_NUM 64
+#define HPRE_PF_DEF_Q_BASE 0
+
+enum {
+ HPRE_CLUSTER0,
+ HPRE_CLUSTER1,
+ HPRE_CLUSTER2,
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM,
+};
+
+enum hpre_ctrl_dbgfs_file {
+ HPRE_CURRENT_QM,
+ HPRE_CLEAR_ENABLE,
+ HPRE_CLUSTER_CTRL,
+ HPRE_DEBUG_FILE_NUM,
+};
+
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+
+struct hpre_debugfs_file {
+ int index;
+ enum hpre_ctrl_dbgfs_file type;
+ spinlock_t lock;
+ struct hpre_debug *debug;
+};
+
+/*
+ * One HPRE controller has one PF and multiple VFs; this structure holds
+ * the global configuration that only the PF has.
+ * It is relevant only for the PF.
+ */
+struct hpre_debug {
+ struct dentry *debug_root;
+ struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
+};
+
+struct hpre {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct hpre_debug debug;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+enum hpre_alg_type {
+ HPRE_ALG_NC_NCRT = 0x0,
+ HPRE_ALG_NC_CRT = 0x1,
+ HPRE_ALG_KG_STD = 0x2,
+ HPRE_ALG_KG_CRT = 0x3,
+ HPRE_ALG_DH_G2 = 0x4,
+ HPRE_ALG_DH = 0x5,
+};
+
+struct hpre_sqe {
+ __le32 dw0;
+ __u8 task_len1;
+ __u8 task_len2;
+ __u8 mrttest_num;
+ __u8 resv1;
+ __le64 key;
+ __le64 in;
+ __le64 out;
+ __le16 tag;
+ __le16 resv2;
+#define _HPRE_SQE_ALIGN_EXT 7
+ __le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
+};
+
+struct hpre *hpre_find_device(int node);
+int hpre_algs_register(void);
+void hpre_algs_unregister(void);
+
+#endif
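
[Editor's note] struct hpre_sqe above is the hardware submission-queue element that hpre_crypto.c fills in before hisi_qp_send(). Purely as an illustrative sketch under stated assumptions (the helper name and the 2048-bit sizing are mine, and the "done" shift value is borrowed from hpre_crypto.c), a plain no-CRT RSA descriptor would be assembled from these fields roughly as follows, mirroring hpre_msg_request_set() and hpre_rsa_enc() further down:

#include <linux/kernel.h>
#include <linux/string.h>
#include "hpre.h"

/* HPRE_SQE_DONE_SHIFT is defined in hpre_crypto.c; repeated here for the sketch. */
#define EXAMPLE_SQE_DONE_SHIFT	30

static void example_fill_rsa_sqe(struct hpre_sqe *sqe, u16 tag,
				 dma_addr_t key, dma_addr_t in, dma_addr_t out)
{
	unsigned int key_bytes = 256;		/* 2048-bit modulus */

	memset(sqe, 0, sizeof(*sqe));
	/* alg type in the low bits of dw0 (NC_NCRT happens to be 0), done field preset */
	sqe->dw0 = cpu_to_le32(HPRE_ALG_NC_NCRT | (0x1 << EXAMPLE_SQE_DONE_SHIFT));
	/* operand length in 64-bit words, minus one */
	sqe->task_len1 = (key_bytes >> 3) - 1;
	sqe->key = cpu_to_le64(key);		/* DMA address of e||n (or d||n) */
	sqe->in = cpu_to_le64(in);
	sqe->out = cpu_to_le64(out);
	sqe->tag = cpu_to_le16(tag);		/* request id echoed back in the completion */
}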
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
new file mode 100644
index 000000000000..98f037e6ea3e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+#include <crypto/akcipher.h>
+#include <crypto/dh.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/kpp.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <linux/module.h>
+#include "hpre.h"
+
+struct hpre_ctx;
+
+#define HPRE_CRYPTO_ALG_PRI 1000
+#define HPRE_ALIGN_SZ 64
+#define HPRE_BITS_2_BYTES_SHIFT 3
+#define HPRE_RSA_512BITS_KSZ 64
+#define HPRE_RSA_1536BITS_KSZ 192
+#define HPRE_CRT_PRMS 5
+#define HPRE_CRT_Q 2
+#define HPRE_CRT_P 3
+#define HPRE_CRT_INV 4
+#define HPRE_DH_G_FLAG 0x02
+#define HPRE_TRY_SEND_TIMES 100
+#define HPRE_INVLD_REQ_ID (-1)
+#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev))
+
+#define HPRE_SQE_ALG_BITS 5
+#define HPRE_SQE_DONE_SHIFT 30
+#define HPRE_DH_MAX_P_SZ 512
+
+typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
+
+struct hpre_rsa_ctx {
+ /* low address: e--->n */
+ char *pubkey;
+ dma_addr_t dma_pubkey;
+
+ /* low address: d--->n */
+ char *prikey;
+ dma_addr_t dma_prikey;
+
+ /* low address: dq->dp->q->p->qinv */
+ char *crt_prikey;
+ dma_addr_t dma_crt_prikey;
+
+ struct crypto_akcipher *soft_tfm;
+};
+
+struct hpre_dh_ctx {
+ /*
+ * If base is g we compute the public key
+ * ya = g^xa mod p; [RFC2631 sec 2.1.1]
+	 * else, if base is the counterpart's public key, we
+ * compute the shared secret
+ * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ */
+ char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ dma_addr_t dma_xa_p;
+
+ char *g; /* m */
+ dma_addr_t dma_g;
+};
+
+struct hpre_ctx {
+ struct hisi_qp *qp;
+ struct hpre_asym_request **req_list;
+ spinlock_t req_lock;
+ unsigned int key_sz;
+ bool crt_g2_mode;
+ struct idr req_idr;
+ union {
+ struct hpre_rsa_ctx rsa;
+ struct hpre_dh_ctx dh;
+ };
+};
+
+struct hpre_asym_request {
+ char *src;
+ char *dst;
+ struct hpre_sqe req;
+ struct hpre_ctx *ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
+ int err;
+ int req_id;
+ hpre_cb cb;
+};
+
+static DEFINE_MUTEX(hpre_alg_lock);
+static unsigned int hpre_active_devs;
+
+static int hpre_alloc_req_id(struct hpre_ctx *ctx)
+{
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+
+ return id;
+}
+
+static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ idr_remove(&ctx->req_idr, req_id);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+}
+
+static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx;
+ int id;
+
+ ctx = hpre_req->ctx;
+ id = hpre_alloc_req_id(ctx);
+ if (id < 0)
+ return -EINVAL;
+
+ ctx->req_list[id] = hpre_req;
+ hpre_req->req_id = id;
+
+ return id;
+}
+
+static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ int id = hpre_req->req_id;
+
+ if (hpre_req->req_id >= 0) {
+ hpre_req->req_id = HPRE_INVLD_REQ_ID;
+ ctx->req_list[id] = NULL;
+ hpre_free_req_id(ctx, id);
+ }
+}
+
+static struct hisi_qp *hpre_get_qp_and_start(void)
+{
+ struct hisi_qp *qp;
+ struct hpre *hpre;
+ int ret;
+
+ /* find the proper hpre device, which is near the current CPU core */
+ hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
+ if (!hpre) {
+ pr_err("Can not find proper hpre device!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ qp = hisi_qm_create_qp(&hpre->qm, 0);
+ if (IS_ERR(qp)) {
+ pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0) {
+ hisi_qm_release_qp(qp);
+ pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return qp;
+}
+
+static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ enum dma_data_direction dma_dir;
+
+ if (is_src) {
+ hpre_req->src = NULL;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ hpre_req->dst = NULL;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+ *tmp = dma_map_single(dev, sg_virt(data),
+ len, dma_dir);
+ if (dma_mapping_error(dev, *tmp)) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ void *ptr;
+ int shift;
+
+ shift = ctx->key_sz - len;
+ if (shift < 0)
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ if (is_src) {
+ scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
+ hpre_req->src = ptr;
+ } else {
+ hpre_req->dst = ptr;
+ }
+
+ return 0;
+}
+
+static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ dma_addr_t tmp;
+ int ret;
+
+ /* when the data is dh's source, we should format it */
+ if ((sg_is_last(data) && len == ctx->key_sz) &&
+ ((is_dh && !is_src) || !is_dh))
+ ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
+ else
+ ret = hpre_prepare_dma_buf(hpre_req, data, len,
+ is_src, &tmp);
+ if (ret)
+ return ret;
+
+ if (is_src)
+ msg->in = cpu_to_le64(tmp);
+ else
+ msg->out = cpu_to_le64(tmp);
+
+ return 0;
+}
+
+static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst, struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t tmp;
+
+ tmp = le64_to_cpu(sqe->in);
+ if (!tmp)
+ return;
+
+ if (src) {
+ if (req->src)
+ dma_free_coherent(dev, ctx->key_sz,
+ req->src, tmp);
+ else
+ dma_unmap_single(dev, tmp,
+ ctx->key_sz, DMA_TO_DEVICE);
+ }
+
+ tmp = le64_to_cpu(sqe->out);
+ if (!tmp)
+ return;
+
+ if (req->dst) {
+ if (dst)
+ scatterwalk_map_and_copy(req->dst, dst, 0,
+ ctx->key_sz, 1);
+ dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
+ } else {
+ dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
+ }
+}
+
+static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
+ void **kreq)
+{
+ struct hpre_asym_request *req;
+ int err, id, done;
+
+#define HPRE_NO_HW_ERR 0
+#define HPRE_HW_TASK_DONE 3
+#define HREE_HW_ERR_MASK 0x7ff
+#define HREE_SQE_DONE_MASK 0x3
+ id = (int)le16_to_cpu(sqe->tag);
+ req = ctx->req_list[id];
+ hpre_rm_req_from_ctx(req);
+ *kreq = req;
+
+ err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
+ HREE_HW_ERR_MASK;
+
+ done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
+ HREE_SQE_DONE_MASK;
+
+ if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
+{
+ if (!ctx || !qp || qlen < 0)
+ return -EINVAL;
+
+ spin_lock_init(&ctx->req_lock);
+ ctx->qp = qp;
+
+ ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
+ if (!ctx->req_list)
+ return -ENOMEM;
+ ctx->key_sz = 0;
+ ctx->crt_g2_mode = false;
+ idr_init(&ctx->req_idr);
+
+ return 0;
+}
+
+static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ if (is_clear_all) {
+ idr_destroy(&ctx->req_idr);
+ kfree(ctx->req_list);
+ hisi_qm_release_qp(ctx->qp);
+ }
+
+ ctx->crt_g2_mode = false;
+ ctx->key_sz = 0;
+}
+
+static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct kpp_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.dh;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+}
+
+static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct akcipher_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.rsa;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ akcipher_request_complete(areq, ret);
+}
+
+static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
+{
+ struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_sqe *sqe = resp;
+
+ ctx->req_list[sqe->tag]->cb(ctx, resp);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx)
+{
+ struct hisi_qp *qp;
+
+ qp = hpre_get_qp_and_start();
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp->qp_ctx = ctx;
+ qp->req_cb = hpre_alg_cb;
+
+ return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+}
+
+static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (is_rsa) {
+ struct akcipher_request *akreq = req;
+
+ if (akreq->dst_len < ctx->key_sz) {
+ akreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = akcipher_request_ctx(akreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_rsa_cb;
+ h_req->areq.rsa = akreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ } else {
+ struct kpp_request *kreq = req;
+
+ if (kreq->dst_len < ctx->key_sz) {
+ kreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = kpp_request_ctx(kreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_dh_cb;
+ h_req->areq.dh = kreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
+ }
+
+ msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+
+ return 0;
+}
+
+#ifdef CONFIG_CRYPTO_DH
+static int hpre_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, false);
+ if (ret)
+ return ret;
+
+ if (req->src) {
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
+ if (ret)
+ goto clear_all;
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
+ if (ret)
+ goto clear_all;
+
+ if (ctx->crt_g2_mode && !req->src)
+ msg->dw0 |= HPRE_ALG_DH_G2;
+ else
+ msg->dw0 |= HPRE_ALG_DH;
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_is_dh_params_length_valid(unsigned int key_sz)
+{
+#define _HPRE_DH_GRP1 768
+#define _HPRE_DH_GRP2 1024
+#define _HPRE_DH_GRP5 1536
+#define _HPRE_DH_GRP14 2048
+#define _HPRE_DH_GRP15 3072
+#define _HPRE_DH_GRP16 4096
+ switch (key_sz) {
+ case _HPRE_DH_GRP1:
+ case _HPRE_DH_GRP2:
+ case _HPRE_DH_GRP5:
+ case _HPRE_DH_GRP14:
+ case _HPRE_DH_GRP15:
+ case _HPRE_DH_GRP16:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz;
+
+ if (params->p_size > HPRE_DH_MAX_P_SZ)
+ return -EINVAL;
+
+ if (hpre_is_dh_params_length_valid(params->p_size <<
+ HPRE_BITS_2_BYTES_SHIFT))
+ return -EINVAL;
+
+ sz = ctx->key_sz = params->p_size;
+ ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
+ if (!ctx->dh.xa_p)
+ return -ENOMEM;
+
+ memcpy(ctx->dh.xa_p + sz, params->p, sz);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
+ ctx->crt_g2_mode = true;
+ return 0;
+ }
+
+ ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
+ if (!ctx->dh.g) {
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
+
+ return 0;
+}
+
+static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->dh.g) {
+ memset(ctx->dh.g, 0, sz);
+ dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
+ ctx->dh.g = NULL;
+ }
+
+ if (ctx->dh.xa_p) {
+ memset(ctx->dh.xa_p, 0, sz);
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ hpre_dh_clear_ctx(ctx, false);
+
+ ret = hpre_dh_set_params(ctx, &params);
+ if (ret < 0)
+ goto err_clear_ctx;
+
+ memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+
+err_clear_ctx:
+ hpre_dh_clear_ctx(ctx, false);
+ return ret;
+}
+
+static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_dh_clear_ctx(ctx, true);
+}
+#endif
+
+static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
+{
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
+
+static bool hpre_rsa_key_size_is_support(unsigned int len)
+{
+ unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
+
+#define _RSA_1024BITS_KEY_WDTH 1024
+#define _RSA_2048BITS_KEY_WDTH 2048
+#define _RSA_3072BITS_KEY_WDTH 3072
+#define _RSA_4096BITS_KEY_WDTH 4096
+
+ switch (bits) {
+ case _RSA_1024BITS_KEY_WDTH:
+ case _RSA_2048BITS_KEY_WDTH:
+ case _RSA_3072BITS_KEY_WDTH:
+ case _RSA_4096BITS_KEY_WDTH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int hpre_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* For 512 and 1536 bits key size, use soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_encrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.pubkey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* For 512 and 1536 bits key size, use soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_decrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.prikey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ if (ctx->crt_g2_mode) {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
+ msg->dw0 |= HPRE_ALG_NC_CRT;
+ } else {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
+ msg->dw0 |= HPRE_ALG_NC_NCRT;
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
+ size_t vlen, bool private)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ ctx->key_sz = vlen;
+
+ /* if invalid key size provided, we use software tfm */
+ if (!hpre_rsa_key_size_is_support(ctx->key_sz))
+ return 0;
+
+ ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_pubkey,
+ GFP_KERNEL);
+ if (!ctx->rsa.pubkey)
+ return -ENOMEM;
+
+ if (private) {
+ ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.prikey) {
+ dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.pubkey,
+ ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
+ }
+ memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
+
+ /* Using hardware HPRE to do RSA */
+ return 1;
+}
+
+static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->rsa.pubkey = NULL;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ return -EINVAL;
+
+ memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_crt_para_get(char *para, const char *raw,
+ unsigned int raw_sz, unsigned int para_size)
+{
+ const char *ptr = raw;
+ size_t len = raw_sz;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len || len > para_size)
+ return -EINVAL;
+
+ memcpy(para + para_size - len, ptr, len);
+
+ return 0;
+}
+
+static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
+{
+ unsigned int hlf_ksz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+ u64 offset;
+ int ret;
+
+ ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
+ &ctx->rsa.dma_crt_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.crt_prikey)
+ return -ENOMEM;
+
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
+ rsa_key->dq_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
+ rsa_key->dp_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_Q;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_P;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_INV;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ ctx->crt_g2_mode = true;
+
+ return 0;
+
+free_key:
+ offset = hlf_ksz * HPRE_CRT_PRMS;
+ memset(ctx->rsa.crt_prikey, 0, offset);
+ dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
+ ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ ctx->crt_g2_mode = false;
+
+ return ret;
+}
+
+/* If is_clear_all is set, all the resources of the QP will be cleaned. */
+static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ unsigned int half_key_sz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->rsa.pubkey) {
+ dma_free_coherent(dev, ctx->key_sz << 1,
+ ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ }
+
+ if (ctx->rsa.crt_prikey) {
+ memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
+ ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ }
+
+ if (ctx->rsa.prikey) {
+ memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
+ ctx->rsa.dma_prikey);
+ ctx->rsa.prikey = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+/*
+ * Judge whether the key is in CRT form:
+ * CRT: return true, N-CRT: return false.
+ */
+static bool hpre_is_crt_key(struct rsa_key *key)
+{
+ u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
+ key->qinv_sz;
+
+#define LEN_OF_NCRT_PARA 5
+
+ /* N-CRT less than 5 parameters */
+ return len > LEN_OF_NCRT_PARA;
+}
+
+static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct rsa_key rsa_key;
+ int ret;
+
+ hpre_rsa_clear_ctx(ctx, false);
+
+ if (private)
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ return ret;
+
+ ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
+ if (ret <= 0)
+ return ret;
+
+ if (private) {
+ ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+
+ if (hpre_is_crt_key(&rsa_key)) {
+ ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
+ if (ret < 0)
+ goto free;
+ }
+ }
+
+ ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+
+ if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+
+free:
+ hpre_rsa_clear_ctx(ctx, false);
+ return ret;
+}
+
+static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, false);
+}
+
+static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, true);
+}
+
+static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
+ return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ if (IS_ERR(ctx->rsa.soft_tfm)) {
+ pr_err("Can not alloc_akcipher!\n");
+ return PTR_ERR(ctx->rsa.soft_tfm);
+ }
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ hpre_rsa_clear_ctx(ctx, true);
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+}
+
+static struct akcipher_alg rsa = {
+ .sign = hpre_rsa_dec,
+ .verify = hpre_rsa_enc,
+ .encrypt = hpre_rsa_enc,
+ .decrypt = hpre_rsa_dec,
+ .set_pub_key = hpre_rsa_setpubkey,
+ .set_priv_key = hpre_rsa_setprivkey,
+ .max_size = hpre_rsa_max_size,
+ .init = hpre_rsa_init_tfm,
+ .exit = hpre_rsa_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "rsa",
+ .cra_driver_name = "hpre-rsa",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+#ifdef CONFIG_CRYPTO_DH
+static struct kpp_alg dh = {
+ .set_secret = hpre_dh_set_secret,
+ .generate_public_key = hpre_dh_compute_value,
+ .compute_shared_secret = hpre_dh_compute_value,
+ .max_size = hpre_dh_max_size,
+ .init = hpre_dh_init_tfm,
+ .exit = hpre_dh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "dh",
+ .cra_driver_name = "hpre-dh",
+ .cra_module = THIS_MODULE,
+ },
+};
+#endif
+
+int hpre_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&hpre_alg_lock);
+ if (++hpre_active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+#ifdef CONFIG_CRYPTO_DH
+ ret = crypto_register_kpp(&dh);
+ if (ret) {
+ crypto_unregister_akcipher(&rsa);
+ goto unlock;
+ }
+#endif
+ }
+
+unlock:
+ mutex_unlock(&hpre_alg_lock);
+ return ret;
+}
+
+void hpre_algs_unregister(void)
+{
+ mutex_lock(&hpre_alg_lock);
+ if (--hpre_active_devs == 0) {
+ crypto_unregister_akcipher(&rsa);
+#ifdef CONFIG_CRYPTO_DH
+ crypto_unregister_kpp(&dh);
+#endif
+ }
+ mutex_unlock(&hpre_alg_lock);
+}
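
For context, here is a minimal, hedged sketch (illustrative only, not part of this patch) of how a kernel consumer could exercise the generic "rsa" akcipher that hpre_algs_register() exposes; the hardware "hpre-rsa" implementation wins by cra_priority. The function name, the DER-encoded key and the in/out buffers are placeholders supplied by the caller and assumed to be DMA-able (e.g. kmalloc'ed).

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt one message with whichever "rsa" implementation has the
 * highest priority, waiting synchronously for the async completion.
 */
static int example_rsa_encrypt(const void *der_key, unsigned int der_len,
			       void *in, unsigned int in_len,
			       void *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_key, der_len);
	if (ret)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&src, in, in_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}
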
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
new file mode 100644
index 000000000000..34e0424410bf
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/topology.h>
+#include "hpre.h"
+
+#define HPRE_VF_NUM 63
+#define HPRE_QUEUE_NUM_V2 1024
+#define HPRE_QM_ABNML_INT_MASK 0x100004
+#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HPRE_COMM_CNT_CLR_CE 0x0
+#define HPRE_CTRL_CNT_CLR_CE 0x301000
+#define HPRE_FSM_MAX_CNT 0x301008
+#define HPRE_VFG_AXQOS 0x30100c
+#define HPRE_VFG_AXCACHE 0x301010
+#define HPRE_RDCHN_INI_CFG 0x301014
+#define HPRE_AWUSR_FP_CFG 0x301018
+#define HPRE_BD_ENDIAN 0x301020
+#define HPRE_ECC_BYPASS 0x301024
+#define HPRE_RAS_WIDTH_CFG 0x301028
+#define HPRE_POISON_BYPASS 0x30102c
+#define HPRE_BD_ARUSR_CFG 0x301030
+#define HPRE_BD_AWUSR_CFG 0x301034
+#define HPRE_TYPES_ENB 0x301038
+#define HPRE_DATA_RUSER_CFG 0x30103c
+#define HPRE_DATA_WUSER_CFG 0x301040
+#define HPRE_INT_MASK 0x301400
+#define HPRE_INT_STATUS 0x301800
+#define HPRE_CORE_INT_ENABLE 0
+#define HPRE_CORE_INT_DISABLE 0x003fffff
+#define HPRE_RAS_ECC_1BIT_TH 0x30140c
+#define HPRE_RDCHN_INI_ST 0x301a00
+#define HPRE_CLSTR_BASE 0x302000
+#define HPRE_CORE_EN_OFFSET 0x04
+#define HPRE_CORE_INI_CFG_OFFSET 0x20
+#define HPRE_CORE_INI_STATUS_OFFSET 0x80
+#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c
+#define HPRE_CORE_IS_SCHD_OFFSET 0x90
+
+#define HPRE_RAS_CE_ENB 0x301410
+#define HPRE_HAC_RAS_CE_ENABLE 0x3f
+#define HPRE_RAS_NFE_ENB 0x301414
+#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0
+#define HPRE_RAS_FE_ENB 0x301418
+#define HPRE_HAC_RAS_FE_ENABLE 0
+
+#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
+#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
+#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
+#define HPRE_HAC_ECC1_CNT 0x301a04
+#define HPRE_HAC_ECC2_CNT 0x301a08
+#define HPRE_HAC_INT_STATUS 0x301800
+#define HPRE_HAC_SOURCE_INT 0x301600
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
+#define MASTER_TRANS_RETURN_RW 3
+#define HPRE_MASTER_TRANS_RETURN 0x300150
+#define HPRE_MASTER_GLOBAL_CTRL 0x300000
+#define HPRE_CLSTR_ADDR_INTRVL 0x1000
+#define HPRE_CLUSTER_INQURY 0x100
+#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
+#define HPRE_TIMEOUT_ABNML_BIT 6
+#define HPRE_PASID_EN_BIT 9
+#define HPRE_REG_RD_INTVRL_US 10
+#define HPRE_REG_RD_TMOUT_US 1000
+#define HPRE_DBGFS_VAL_MAX_LEN 20
+#define HPRE_PCI_DEVICE_ID 0xa258
+#define HPRE_PCI_VF_DEVICE_ID 0xa259
+#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_QM_USR_CFG_MASK 0xfffffffe
+#define HPRE_QM_AXI_CFG_MASK 0xffff
+#define HPRE_QM_VFG_AX_MASK 0xff
+#define HPRE_BD_USR_MASK 0x3
+#define HPRE_CLUSTER_CORE_MASK 0xf
+
+#define HPRE_VIA_MSI_DSM 1
+
+static LIST_HEAD(hpre_list);
+static DEFINE_MUTEX(hpre_list_lock);
+static const char hpre_name[] = "hisi_hpre";
+static struct dentry *hpre_debugfs_root;
+static const struct pci_device_id hpre_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
+
+struct hpre_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char * const hpre_debug_file_name[] = {
+ [HPRE_CURRENT_QM] = "current_qm",
+ [HPRE_CLEAR_ENABLE] = "rdclr_en",
+ [HPRE_CLUSTER_CTRL] = "cluster_ctrl",
+};
+
+static const struct hpre_hw_error hpre_hw_errors[] = {
+ { .int_msk = BIT(0), .msg = "hpre_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
+ { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
+ { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
+ { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
+ { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
+ { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
+ { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
+ { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
+ { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
+ { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
+ { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { /* sentinel */ }
+};
+
+static const u64 hpre_cluster_offsets[] = {
+ [HPRE_CLUSTER0] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER1] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER2] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER3] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
+};
+
+static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+ {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
+ {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
+ {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
+ {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
+ {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
+};
+
+static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+ {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
+ {"AXQOS ", HPRE_VFG_AXQOS},
+ {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
+ {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1},
+ {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1},
+ {"BD_ENDIAN ", HPRE_BD_ENDIAN},
+ {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
+ {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
+ {"POISON_BYPASS ", HPRE_POISON_BYPASS},
+ {"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
+ {"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
+ {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
+ {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
+ {"INT_STATUS ", HPRE_INT_STATUS},
+};
+
+static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = HPRE_QUEUE_NUM_V2;
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ if (rev_id != QM_HW_V2)
+ return -EINVAL;
+
+ q_num = HPRE_QUEUE_NUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || n == 0 || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = hpre_pf_q_num_set,
+ .get = param_get_int,
+};
+
+static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
+MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of HPRE (1-1024)");
+
+static inline void hpre_add_to_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_add_tail(&hpre->list, &hpre_list);
+ mutex_unlock(&hpre_list_lock);
+}
+
+static inline void hpre_remove_from_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_del(&hpre->list);
+ mutex_unlock(&hpre_list_lock);
+}
+
+struct hpre *hpre_find_device(int node)
+{
+ struct hpre *hpre, *ret = NULL;
+ int min_distance = INT_MAX;
+ struct device *dev;
+ int dev_node = 0;
+
+ mutex_lock(&hpre_list_lock);
+ list_for_each_entry(hpre, &hpre_list, list) {
+ dev = &hpre->qm.pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ ret = hpre;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ mutex_unlock(&hpre_list_lock);
+
+ return ret;
+}
+
+static int hpre_cfg_by_dsm(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ union acpi_object *obj;
+ guid_t guid;
+
+ if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
+ dev_err(dev, "Hpre GUID failed\n");
+ return -EINVAL;
+ }
+
+ /* Switch over to MSI handling due to non-standard PCI implementation */
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
+ 0, HPRE_VIA_MSI_DSM, NULL);
+ if (!obj) {
+ dev_err(dev, "ACPI handle failed!\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ int ret, i;
+ u32 val;
+
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+
+ /* HPRE needs more time to complete, so mask this abnormal interrupt */
+ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
+ writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+
+ writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
+ writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
+ writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
+ writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
+ writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
+
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
+ writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
+ val & BIT(0),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev, "read rd channel timeout fail!\n");
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+ /* initiate the clusters */
+ writel(HPRE_CLUSTER_CORE_MASK,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & HPRE_CLUSTER_CORE_MASK) ==
+ HPRE_CLUSTER_CORE_MASK),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d init status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
+
+ return ret;
+}
+
+static void hpre_cnt_regs_clear(struct hisi_qm *qm)
+{
+ unsigned long offset;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear clusterX/cluster_ctrl */
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
+ writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+ }
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void hpre_hw_error_disable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* disable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+}
+
+static void hpre_hw_error_enable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* enable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+}
+
+static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
+{
+ struct hpre *hpre = container_of(file->debug, struct hpre, debug);
+
+ return &hpre->qm;
+}
+
+static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ struct hpre_debug *debug = file->debug;
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ u32 num_vfs = hpre->num_vfs;
+ u32 vfq_num, tmp;
+
+ if (val > num_vfs)
+ return -EINVAL;
+
+ /* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
+ if (val == 0) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (val == num_vfs) {
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
+ } else {
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ HPRE_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ u32 tmp;
+
+ if (val != 1 && val != 0)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE +
+ cluster_index * HPRE_CLSTR_ADDR_INTRVL;
+
+ return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
+}
+
+static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
+ HPRE_CLSTR_ADDR_INTRVL;
+
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+
+ return 0;
+}
+
+static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ val = hpre_current_qm_read(file);
+ break;
+ case HPRE_CLEAR_ENABLE:
+ val = hpre_clear_enable_read(file);
+ break;
+ case HPRE_CLUSTER_CTRL:
+ val = hpre_cluster_inqry_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&file->lock);
+ ret = sprintf(tbuf, "%u\n", val);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ ret = hpre_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLEAR_ENABLE:
+ ret = hpre_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLUSTER_CTRL:
+ ret = hpre_cluster_inqry_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations hpre_ctrl_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hpre_ctrl_debug_read,
+ .write = hpre_ctrl_debug_write,
+};
+
+static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+ enum hpre_ctrl_dbgfs_file type, int indx)
+{
+ struct dentry *tmp, *file_dir;
+
+ if (dir)
+ file_dir = dir;
+ else
+ file_dir = dbg->debug_root;
+
+ if (type >= HPRE_DEBUG_FILE_NUM)
+ return -EINVAL;
+
+ spin_lock_init(&dbg->files[indx].lock);
+ dbg->files[indx].debug = dbg;
+ dbg->files[indx].type = type;
+ dbg->files[indx].index = indx;
+ tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_com_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
+ regset->base = qm->io_base;
+
+ tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ char buf[HPRE_DBGFS_VAL_MAX_LEN];
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d, *tmp;
+ int i, ret;
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ sprintf(buf, "cluster%d", i);
+
+ tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ if (!tmp_d)
+ return -ENOENT;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_cluster_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ regset->base = qm->io_base + hpre_cluster_offsets[i];
+
+ tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ if (!tmp)
+ return -ENOENT;
+ ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
+ i + HPRE_CLUSTER_CTRL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hpre_ctrl_debug_init(struct hpre_debug *debug)
+{
+ int ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
+ HPRE_CURRENT_QM);
+ if (ret)
+ return ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
+ HPRE_CLEAR_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = hpre_pf_comm_regs_debugfs_init(debug);
+ if (ret)
+ return ret;
+
+ return hpre_cluster_debugfs_init(debug);
+}
+
+static int hpre_debugfs_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct dentry *dir;
+ int ret;
+
+ dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
+ if (!dir)
+ return -ENOENT;
+
+ qm->debug.debug_root = dir;
+
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
+ hpre->debug.debug_root = dir;
+ ret = hpre_ctrl_debug_init(&hpre->debug);
+ if (ret)
+ goto failed_to_create;
+ }
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(qm->debug.debug_root);
+ return ret;
+}
+
+static void hpre_debugfs_exit(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
+static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id < 0)
+ return -ENODEV;
+
+ if (rev_id == QM_HW_V1) {
+ pci_warn(pdev, "HPRE version 1 is not supported!\n");
+ return -EINVAL;
+ }
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+ qm->sqe_size = HPRE_SQE_SIZE;
+ qm->dev_name = hpre_name;
+ qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ if (pdev->is_physfn) {
+ qm->qp_base = HPRE_PF_DEF_Q_BASE;
+ qm->qp_num = hpre_pf_q_num;
+ }
+ qm->use_dma_api = true;
+
+ return 0;
+}
+
+static void hpre_hw_err_init(struct hpre *hpre)
+{
+ hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
+ 0, QM_DB_RANDOM_INVALID);
+ hpre_hw_error_enable(hpre);
+}
+
+static int hpre_pf_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
+
+ ret = hpre_set_user_domain_and_cache(hpre);
+ if (ret)
+ return ret;
+
+ hpre_hw_err_init(hpre);
+
+ return 0;
+}
+
+static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_qm *qm;
+ struct hpre *hpre;
+ int ret;
+
+ hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
+ if (!hpre)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, hpre);
+
+ qm = &hpre->qm;
+ ret = hpre_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
+
+ ret = hisi_qm_init(qm);
+ if (ret)
+ return ret;
+
+ if (pdev->is_physfn) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ goto err_with_qm_init;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
+ /* v2 starts to support get vft by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_with_qm_init;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_with_err_init;
+
+ ret = hpre_debugfs_init(hpre);
+ if (ret)
+ dev_warn(&pdev->dev, "init debugfs fail!\n");
+
+ hpre_add_to_list(hpre);
+
+ ret = hpre_algs_register();
+ if (ret < 0) {
+ hpre_remove_from_list(hpre);
+ pci_err(pdev, "fail to register algs to crypto!\n");
+ goto err_with_qm_start;
+ }
+ return 0;
+
+err_with_qm_start:
+ hisi_qm_stop(qm);
+
+err_with_err_init:
+ if (pdev->is_physfn)
+ hpre_hw_error_disable(hpre);
+
+err_with_qm_init:
+ hisi_qm_uninit(qm);
+
+ return ret;
+}
+
+static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 qp_num = qm->qp_num;
+ int q_num, remain_q_num, i;
+ u32 q_base = qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+
+ /* If remaining queues are not enough, return error. */
+ if (remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int hpre_clear_vft_config(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 num_vfs = hpre->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ hpre->num_vfs = 0;
+
+ return 0;
+}
+
+static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
+ ret = hpre_vf_q_assign(hpre, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ hpre->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ hpre_clear_vft_config(hpre);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int hpre_sriov_disable(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The remove callback of hpre_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return hpre_clear_vft_config(hpre);
+}
+
+static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return hpre_sriov_enable(pdev, num_vfs);
+ else
+ return hpre_sriov_disable(pdev);
+}
+
+static void hpre_remove(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ hpre_algs_unregister();
+ hpre_remove_from_list(hpre);
+ if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
+ ret = hpre_sriov_disable(pdev);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
+ if (qm->fun_type == QM_HW_PF) {
+ hpre_cnt_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ }
+
+ hpre_debugfs_exit(hpre);
+ hisi_qm_stop(qm);
+ if (qm->fun_type == QM_HW_PF)
+ hpre_hw_error_disable(hpre);
+ hisi_qm_uninit(qm);
+}
+
+static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
+{
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &hpre->qm.pdev->dev;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
+}
+
+static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
+ if (err_sts) {
+ hpre_log_hw_error(hpre, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, hpre_ret;
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
+
+ /* log hpre error */
+ hpre_ret = hpre_hw_error_handle(hpre);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hpre_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers hpre_err_handler = {
+ .error_detected = hpre_error_detected,
+};
+
+static struct pci_driver hpre_pci_driver = {
+ .name = hpre_name,
+ .id_table = hpre_dev_ids,
+ .probe = hpre_probe,
+ .remove = hpre_remove,
+ .sriov_configure = hpre_sriov_configure,
+ .err_handler = &hpre_err_handler,
+};
+
+static void hpre_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
+ if (IS_ERR_OR_NULL(hpre_debugfs_root))
+ hpre_debugfs_root = NULL;
+}
+
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
+static int __init hpre_init(void)
+{
+ int ret;
+
+ hpre_register_debugfs();
+
+ ret = pci_register_driver(&hpre_pci_driver);
+ if (ret) {
+ hpre_unregister_debugfs();
+ pr_err("hpre: can't register hisi hpre driver.\n");
+ }
+
+ return ret;
+}
+
+static void __exit hpre_exit(void)
+{
+ pci_unregister_driver(&hpre_pci_driver);
+ hpre_unregister_debugfs();
+}
+
+module_init(hpre_init);
+module_exit(hpre_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f975c393a603..b57da5ef8b5b 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -59,17 +59,17 @@
#define QM_CQ_PHASE_SHIFT 0
#define QM_CQ_FLAG_SHIFT 1
-#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1)
+#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1)
+#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1)
+#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_DOORBELL_CMD_SQ 0
@@ -169,17 +169,17 @@
#define QM_MK_SQC_DW3_V2(sqe_sz) \
((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define INIT_QC_COMMON(qc, base, pasid) do { \
- (qc)->head = 0; \
- (qc)->tail = 0; \
- (qc)->base_l = lower_32_bits(base); \
- (qc)->base_h = upper_32_bits(base); \
- (qc)->dw3 = 0; \
- (qc)->w8 = 0; \
- (qc)->rsvd0 = 0; \
- (qc)->pasid = pasid; \
- (qc)->w11 = 0; \
- (qc)->rsvd1 = 0; \
+#define INIT_QC_COMMON(qc, base, pasid) do { \
+ (qc)->head = 0; \
+ (qc)->tail = 0; \
+ (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
+ (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
+ (qc)->dw3 = 0; \
+ (qc)->w8 = 0; \
+ (qc)->rsvd0 = 0; \
+ (qc)->pasid = cpu_to_le16(pasid); \
+ (qc)->w11 = 0; \
+ (qc)->rsvd1 = 0; \
} while (0)
enum vft_type {
@@ -331,12 +331,18 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
unsigned long tmp0 = 0, tmp1 = 0;
+ if (!IS_ENABLED(CONFIG_ARM64)) {
+ memcpy_toio(fun_base, src, 16);
+ wmb();
+ return;
+ }
+
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
"dsb sy\n"
: "=&r" (tmp0),
"=&r" (tmp1),
- "+Q" (*((char *)fun_base))
+ "+Q" (*((char __iomem *)fun_base))
: "Q" (*((char *)src))
: "memory");
}
@@ -350,12 +356,12 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
queue, cmd, (unsigned long long)dma_addr);
- mailbox.w0 = cmd |
+ mailbox.w0 = cpu_to_le16(cmd |
(op ? 0x1 << QM_MB_OP_SHIFT : 0) |
- (0x1 << QM_MB_BUSY_SHIFT);
- mailbox.queue_num = queue;
- mailbox.base_l = lower_32_bits(dma_addr);
- mailbox.base_h = upper_32_bits(dma_addr);
+ (0x1 << QM_MB_BUSY_SHIFT));
+ mailbox.queue_num = cpu_to_le16(queue);
+ mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
+ mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
mailbox.rsvd = 0;
mutex_lock(&qm->mailbox_lock);
@@ -442,7 +448,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
- u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK;
+ u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
return qm->qp_array[cqn];
}
@@ -464,7 +470,8 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
if (qp->req_cb) {
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
dma_rmb();
- qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
+ qp->req_cb(qp, qp->sqe + qm->sqe_size *
+ le16_to_cpu(cqe->sq_head));
qm_cq_head_update(qp);
cqe = qp->cqe + qp->qp_status.cq_head;
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
@@ -542,7 +549,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_NONE;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT;
+ type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
if (type < ARRAY_SIZE(qm_fifo_overflow))
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
@@ -646,7 +653,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
- qp_status->cqc_phase = 1;
+ qp_status->cqc_phase = true;
qp_status->flags = 0;
}
@@ -963,13 +970,11 @@ static const struct file_operations qm_regs_fops = {
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d, *tmp;
+ struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
- &qm_debug_fops);
- if (IS_ERR(tmp))
- return -ENOENT;
+ debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ &qm_debug_fops);
file->index = index;
mutex_init(&file->lock);
@@ -981,9 +986,6 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
- dev_info(&qm->pdev->dev,
- "QM v%d does not support hw error handle\n", qm->ver);
-
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1123,6 +1125,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
}
set_bit(qp_id, qm->qp_bitmap);
qm->qp_array[qp_id] = qp;
+ qm->qp_in_used++;
write_unlock(&qm->qps_lock);
@@ -1187,6 +1190,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
write_lock(&qm->qps_lock);
qm->qp_array[qp->qp_id] = NULL;
clear_bit(qp->qp_id, qm->qp_bitmap);
+ qm->qp_in_used--;
write_unlock(&qm->qps_lock);
kfree(qp);
@@ -1218,14 +1222,14 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
- sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size);
- sqc->w8 = QM_Q_DEPTH - 1;
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
+ sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size);
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
- sqc->cq_num = qp_id;
- sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type);
+ sqc->cq_num = cpu_to_le16(qp_id);
+ sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
@@ -1245,13 +1249,13 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
- cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4);
- cqc->w8 = QM_Q_DEPTH - 1;
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
+ cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- cqc->dw3 = QM_MK_CQC_DW3_V2(4);
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
- cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT;
+ cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
@@ -1392,6 +1396,24 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
/**
+ * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
+ * @qm: The qm whose free qp count is wanted.
+ *
+ * This function returns the number of free qps in the qm.
+ */
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
+{
+ int ret;
+
+ read_lock(&qm->qps_lock);
+ ret = qm->qp_num - qm->qp_in_used;
+ read_unlock(&qm->qps_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
*
@@ -1454,6 +1476,7 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_free_irq_vectors;
+ qm->qp_in_used = 0;
mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock);
@@ -1560,8 +1583,8 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm)
status->eq_head = 0;
status->aeq_head = 0;
- status->eqc_phase = 1;
- status->aeqc_phase = 1;
+ status->eqc_phase = true;
+ status->aeqc_phase = true;
}
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
@@ -1585,11 +1608,11 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- eqc->base_l = lower_32_bits(qm->eqe_dma);
- eqc->base_h = upper_32_bits(qm->eqe_dma);
+ eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
+ eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
- eqc->dw3 = QM_EQE_AEQE_SIZE;
- eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
+ eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -1606,9 +1629,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = lower_32_bits(qm->aeqe_dma);
- aeqc->base_h = upper_32_bits(qm->aeqe_dma);
- aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
@@ -1780,12 +1803,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- if (IS_ERR(qm_d))
- return -ENOENT;
qm->debug.qm_d = qm_d;
/* only show this in PF */
@@ -1796,12 +1817,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -1862,8 +1878,7 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
if (!qm->ops->hw_error_init) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
@@ -1877,11 +1892,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-int hisi_qm_hw_error_handle(struct hisi_qm *qm)
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
return PCI_ERS_RESULT_NONE;
}
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 70e672ae86bf..078b8f1f1b77 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -75,6 +75,8 @@
#define QM_Q_DEPTH 1024
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+
enum qp_state {
QP_STOP,
};
@@ -132,6 +134,7 @@ struct hisi_qm {
u32 sqe_size;
u32 qp_base;
u32 qp_num;
+ u32 qp_in_used;
u32 ctrl_qp_num;
struct qm_dma qdma;
@@ -204,12 +207,24 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi);
-int hisi_qm_hw_error_handle(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
#endif
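
The new SGL-pool prototypes above are what SEC (and other accelerator clients) use to hand scatterlists to the hardware. Below is a minimal hedged sketch of their expected lifetime; example_map_with_sgl_pool is an invented name, and dev, src and index are assumed to come from the caller.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include "qm.h"

static int example_map_with_sgl_pool(struct device *dev,
				     struct scatterlist *src, u32 index)
{
	struct hisi_acc_sgl_pool *pool;
	struct hisi_acc_hw_sgl *hw_sgl;
	dma_addr_t hw_sgl_dma;

	/* One pool entry per queue slot, each holding up to the max SGE count */
	pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
					HISI_ACC_SGL_SGE_NR_MAX);
	if (!pool)
		return -ENOMEM;

	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, pool, index,
					       &hw_sgl_dma);
	if (IS_ERR(hw_sgl)) {
		hisi_acc_free_sgl_pool(dev, pool);
		return PTR_ERR(hw_sgl);
	}

	/* hw_sgl_dma can now be written into a hardware descriptor (BD) */

	hisi_acc_sg_buf_unmap(dev, src, hw_sgl);
	hisi_acc_free_sgl_pool(dev, pool);

	return 0;
}
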
diff --git a/drivers/crypto/hisilicon/sec2/Makefile b/drivers/crypto/hisilicon/sec2/Makefile
new file mode 100644
index 000000000000..b4f6cf14be3a
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o
+hisi_sec2-objs = sec_main.o sec_crypto.o
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
new file mode 100644
index 000000000000..26754d0570ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
+
+#include <linux/list.h>
+
+#include "../qm.h"
+#include "sec_crypto.h"
+
+/* Cipher resource per hardware SEC queue */
+struct sec_cipher_res {
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+};
+
+/* Private cipher request of the SEC driver */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request issued from the Crypto layer */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ /* Cipher supported only at present */
+ struct sec_cipher_req c_req;
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ int fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @get_res: Get resources for TFM on the SEC device
+ * @resource_alloc: Allocate resources for queue context on the SEC device
+ * @resource_free: Free resources for queue context on the SEC device
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @do_transfer: Transfer request data (such as the IV) before filling the BD
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Completion callback for the request
+ * @process: Main processing logic of skcipher
+ */
+struct sec_req_op {
+ int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context, holding the cipher-related data */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
+
+/* SEC queue context, holding the queue-related data */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req **req_list;
+ struct idr req_idr;
+ void *alg_meta_data;
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+
+/* SEC crypto TFM context, holding the queue, cipher and other related data */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+ /* Threshold of fake-busy requests; above it, -EBUSY is returned to the user */
+ u32 fake_req_limit;
+
+ /* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
+ struct sec_cipher_ctx c_ctx;
+};
+
+enum sec_endian {
+ SEC_LE = 0,
+ SEC_32BE,
+ SEC_64BE
+};
+
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
+
+struct sec_dfx {
+ u64 send_cnt;
+ u64 recv_cnt;
+};
+
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+struct sec_dev *sec_find_device(int node);
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
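
The hlf_q_num, enc_qcyclic and dec_qcyclic fields above implement a per-TFM round robin: the first half of the TFM's queues serves encryption, the second half decryption. A small illustrative helper (example_pick_queue is a made-up name) mirroring the selection that sec_get_queue_id() performs in sec_crypto.c:

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * With ctx_q_num = 8 queues per TFM, hlf_q_num = 4: encrypt requests
 * rotate over queues 0-3 and decrypt requests over queues 4-7.
 */
static inline u32 example_pick_queue(atomic_t *cyclic, u32 hlf_q_num,
				     bool encrypt)
{
	u32 idx = (u32)atomic_inc_return(cyclic) % hlf_q_num;

	return encrypt ? idx : idx + hlf_q_num;
}
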
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
new file mode 100644
index 000000000000..dc1eb97d57f7
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+
+#include "sec.h"
+#include "sec_crypto.h"
+
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe (BD) bit-field offsets and masks */
+#define SEC_DE_OFFSET 1
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+
+static DEFINE_MUTEX(sec_algs_lock);
+static unsigned int sec_active_devs;
+
+/* Cyclically pick an encipher/decipher queue to balance load over the TFM's queues */
+static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
+ ctx->hlf_q_num;
+
+ return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
+
+static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
+
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ int req_id;
+
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (req_id < 0) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
+ }
+
+ req->qp_ctx = qp_ctx;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
+}
+
+static void sec_free_req_id(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ return;
+ }
+
+ qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ idr_remove(&qp_ctx->req_idr, req_id);
+ mutex_unlock(&qp_ctx->req_lock);
+}
+
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
+{
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ u16 done, flag;
+ u8 type;
+ struct sec_req *req;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (type == SEC_BD_TYPE2) {
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (req->err_type || done != 0x1 || flag != 0x2)
+ dev_err(SEC_CTX_DEV(req->ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ } else {
+ pr_err("err bd type [%d]\n", type);
+ return;
+ }
+
+ __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+
+ req->ctx->req_op->buf_unmap(req->ctx, req);
+
+ req->ctx->req_op->callback(req->ctx, req);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ mutex_unlock(&qp_ctx->req_lock);
+ __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+
+ if (ret == -EBUSY)
+ return -ENOBUFS;
+
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
+
+ return ret;
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
+ qp = hisi_qm_create_qp(qm, alg_type);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp->req_cb = sec_req_cb;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ mutex_init(&qp_ctx->req_lock);
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
+
+ qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
+ if (!qp_ctx->req_list)
+ goto err_destroy_idr;
+
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_in_pool) {
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_free_req_list;
+ }
+
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_out_pool) {
+ dev_err(dev, "fail to create sgl pool for output!\n");
+ goto err_free_c_in_pool;
+ }
+
+ ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ if (ret)
+ goto err_free_c_out_pool;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_queue_free;
+
+ return 0;
+
+err_queue_free:
+ ctx->req_op->resource_free(ctx, qp_ctx);
+err_free_c_out_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+err_free_c_in_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ hisi_qm_release_qp(qp);
+
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ hisi_qm_stop_qp(qp_ctx->qp);
+ ctx->req_op->resource_free(ctx, qp_ctx);
+
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+
+ idr_destroy(&qp_ctx->req_idr);
+ kfree(qp_ctx->req_list);
+ hisi_qm_release_qp(qp_ctx->qp);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx;
+ struct sec_dev *sec;
+ struct device *dev;
+ struct hisi_qm *qm;
+ int i, ret;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+
+ sec = sec_find_device(cpu_to_node(smp_processor_id()));
+ if (!sec) {
+ pr_err("find no Hisilicon SEC device!\n");
+ return -ENODEV;
+ }
+ ctx->sec = sec;
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+ ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+
+ /* Half of the queue depth is used as the fake-busy request limit. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ if (ret)
+ goto err_sec_release_qp_ctx;
+ }
+
+ c_ctx = &ctx->c_ctx;
+ c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
+ if (c_ctx->ivsize > SEC_IV_SIZE) {
+ dev_err(dev, "get error iv size!\n");
+ ret = -EINVAL;
+ goto err_sec_release_qp_ctx;
+ }
+ c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key) {
+ ret = -ENOMEM;
+ goto err_sec_release_qp_ctx;
+ }
+
+ return 0;
+
+err_sec_release_qp_ctx:
+ for (i = i - 1; i >= 0; i--)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+ return ret;
+}
+
+static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int i = 0;
+
+ if (c_ctx->c_key) {
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+ c_ctx->c_key = NULL;
+ }
+
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+}
+
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ switch (keylen) {
+ case SEC_DES3_2KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
+ break;
+ case SEC_DES3_3KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ if (c_mode == SEC_CMODE_XTS) {
+ switch (keylen) {
+ case SEC_XTS_MIN_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case SEC_XTS_MAX_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: xts mode key error!\n");
+ return -EINVAL;
+ }
+ } else {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aes key error!\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ if (c_mode == SEC_CMODE_XTS) {
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ return ret;
+ }
+ }
+
+ c_ctx->c_alg = c_alg;
+ c_ctx->c_mode = c_mode;
+
+ switch (c_alg) {
+ case SEC_CALG_3DES:
+ ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ break;
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
+ ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ return ret;
+ }
+
+ memcpy(c_ctx->c_key, key, keylen);
+
+ return 0;
+}
+
+#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
+static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
+ u32 keylen) \
+{ \
+ return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
+}
+
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
+
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
+
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
+
+static int sec_skcipher_get_res(struct sec_ctx *ctx,
+ struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
+ struct sec_cipher_req *c_req = &req->c_req;
+ int req_id = req->req_id;
+
+ c_req->c_ivin = c_res[req_id].c_ivin;
+ c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
+
+ return 0;
+}
+
+static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_res *res;
+ int i;
+
+ res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin) {
+ kfree(res);
+ return -ENOMEM;
+ }
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+ qp_ctx->alg_meta_data = res;
+
+ return 0;
+}
+
+static void sec_skcipher_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_cipher_res *res = qp_ctx->alg_meta_data;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!res)
+ return;
+
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
+ kfree(res);
+}
+
+static int sec_skcipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
+
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ return PTR_ERR(c_req->c_in);
+ }
+
+ if (dst == src) {
+ c_req->c_out = c_req->c_in;
+ c_req->c_out_dma = c_req->c_in_dma;
+ } else {
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
+ if (IS_ERR(c_req->c_out)) {
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
+ c_req->sk_req->src, c_req->sk_req->dst);
+}
+
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sk_req = c_req->sk_req;
+
+ if (sk_req->dst != sk_req->src)
+ hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+}
+
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (ret)
+ return ret;
+
+ ctx->req_op->do_transfer(ctx, req);
+
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (ret)
+ goto unmap_req_buf;
+
+ return ret;
+
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
+
+ return ret;
+}
+
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
+}
+
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ c_req->c_len = sk_req->cryptlen;
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+}
+
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 de = 0;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
+ else
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
+
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
+
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
+
+ /* Just set DST address type */
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
+
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
+
+ return 0;
+}
+
+static void sec_update_iv(struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ size_t sz;
+
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
+ iv_size, sk_req->cryptlen - iv_size);
+ if (sz != iv_size)
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+}
+
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+
+ /* Copy out the updated IV after encryption in CBC mode */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req);
+
+ if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+
+ sk_req->base.complete(&sk_req->base, req->err_type);
+}
+
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_put_queue_id(ctx, req);
+}
+
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int issue_id, ret;
+
+ /* To load balance */
+ issue_id = sec_get_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[issue_id];
+
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (req->req_id < 0) {
+ sec_put_queue_id(ctx, req);
+ return req->req_id;
+ }
+
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = 1;
+ else
+ req->fake_busy = 0;
+
+ ret = ctx->req_op->get_res(ctx, req);
+ if (ret) {
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_request_uninit(ctx, req);
+ dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
+ }
+
+ return ret;
+}
+
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_request_init(ctx, req);
+ if (ret)
+ return ret;
+
+ ret = sec_request_transfer(ctx, req);
+ if (ret)
+ goto err_uninit_req;
+
+ /* For CBC decryption, save the output IV before the data is processed */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req);
+
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (ret != -EBUSY && ret != -EINPROGRESS) {
+ dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ goto err_send_req;
+ }
+
+ return ret;
+
+err_send_req:
+ /* On failure, restore the user's original IV */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
+ ctx->c_ctx.ivsize);
+
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
+
+ return ret;
+}
+
+static struct sec_req_op sec_req_ops_tbl = {
+ .get_res = sec_skcipher_get_res,
+ .resource_alloc = sec_skcipher_resource_alloc,
+ .resource_free = sec_skcipher_resource_free,
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->req_op = &sec_req_ops_tbl;
+
+ return sec_skcipher_init(tfm);
+}
+
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_exit(tfm);
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct skcipher_request *sk_req)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!sk_req->src || !sk_req->dst) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
+ }
+
+ if (c_alg == SEC_CALG_3DES) {
+ if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
+}
+
+static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ if (!sk_req->cryptlen)
+ return 0;
+
+ ret = sec_skcipher_param_check(ctx, sk_req);
+ if (ret)
+ return ret;
+
+ req->c_req.sk_req = sk_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, true);
+}
+
+static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, false);
+}
+
+#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_skcipher_decrypt,\
+ .encrypt = sec_skcipher_encrypt,\
+ .min_keysize = sec_min_key_size,\
+ .max_keysize = sec_max_key_size,\
+ .ivsize = iv_size,\
+},
+
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
+ max_key_size, blk_size, iv_size) \
+ SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
+
+static struct skcipher_alg sec_algs[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+};
+
+int sec_register_to_crypto(void)
+{
+ int ret = 0;
+
+ /* Avoid registering the algorithms more than once */
+ mutex_lock(&sec_algs_lock);
+ if (++sec_active_devs == 1)
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+
+ return ret;
+}
+
+void sec_unregister_from_crypto(void)
+{
+ mutex_lock(&sec_algs_lock);
+ if (--sec_active_devs == 0)
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+}
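For reference, each GEN_SEC_SETKEY_FUNC() invocation above only stamps out a thin wrapper that fixes the (c_alg, c_mode) pair and defers to sec_skcipher_setkey(); a minimal sketch of what, for example, GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) expands to:

    static int sec_setkey_aes_xts(struct crypto_skcipher *tfm, const u8 *key,
                                  u32 keylen)
    {
            /* Everything except the algorithm/mode pair is common code. */
            return sec_skcipher_setkey(tfm, key, keylen, SEC_CALG_AES,
                                       SEC_CMODE_XTS);
    }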
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
new file mode 100644
index 000000000000..097dce828340
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
+
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_COMM_SCENE 0
+
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+struct sec_sqe_type2 {
+
+ /*
+ * mac_len: 0~5 bits
+ * a_key_len: 6~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+ * flag: 7-10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+ * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+ * src_addr_type: bit 7, continued in sdm_addr_type bits 0~1
+ */
+ __u8 sds_sa_type;
+
+ /*
+ * src_addr_type: 0~1 bits, not used now;
+ * set this field if PRP is supported, otherwise set it to zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ /* Just using type2 BD now */
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
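The bit-range comments in struct sec_sqe_type2 above map directly onto the shift constants used by sec_skcipher_bd_fill(); a minimal sketch of how one such word is packed, assuming SEC_CKEY_OFFSET and SEC_CMODE_OFFSET equal the documented bit positions (9 and 12):

    /* icvw_kmode: c_icv_len[5:0] | c_width[8:6] | c_key_len[11:9] | c_mode[15:12] */
    u16 kmode = 0;

    kmode |= (u16)SEC_CKEY_256BIT << 9;     /* c_key_len */
    kmode |= (u16)SEC_CMODE_XTS << 12;      /* c_mode */
    sec_sqe->type2.icvw_kmode = cpu_to_le16(kmode);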
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
new file mode 100644
index 000000000000..74f0654028c9
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/topology.h>
+
+#include "sec.h"
+
+#define SEC_VF_NUM 63
+#define SEC_QUEUE_NUM_V1 4096
+#define SEC_QUEUE_NUM_V2 1024
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
+
+#define SEC_XTS_MIV_ENABLE_REG 0x301384
+#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
+#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0xfffff7fd
+#define SEC_BD_ERR_CHK_EN2 0xffffbfff
+
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_SOURCE 0x301010
+#define SEC_CORE_INT_MASK 0x301000
+#define SEC_CORE_INT_STATUS 0x301008
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_CORE_INT_DISABLE 0x0
+#define SEC_CORE_INT_ENABLE 0x1ff
+
+#define SEC_RAS_CE_REG 0x50
+#define SEC_RAS_FE_REG 0x54
+#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_ENB_MSK 0x88
+#define SEC_RAS_FE_ENB_MSK 0x0
+#define SEC_RAS_NFE_ENB_MSK 0x177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x0100
+#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_QM_ABNORMAL_INT_MASK 0x100004
+
+#define SEC_CONTROL_REG 0x0200
+#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
+#define SEC_CLK_GATE_DISABLE (~BIT(3))
+#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
+#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
+
+#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG2 0x038c
+
+#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
+#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
+
+#define SEC_DELAY_10_US 10
+#define SEC_POLL_TIMEOUT_US 1000
+#define SEC_VF_CNT_MASK 0xffffffc0
+#define SEC_DBGFS_VAL_MAX_LEN 20
+
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
+
+struct sec_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static LIST_HEAD(sec_list);
+static DEFINE_MUTEX(sec_list_lock);
+
+static const struct sec_hw_error sec_hw_errors[] = {
+ {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
+ {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
+ {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
+ {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
+ {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
+ {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
+ {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
+ {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
+ {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
+ { /* sentinel */ }
+};
+
+struct sec_dev *sec_find_device(int node)
+{
+#define SEC_NUMA_MAX_DISTANCE 100
+ int min_distance = SEC_NUMA_MAX_DISTANCE;
+ int dev_node = 0, free_qp_num = 0;
+ struct sec_dev *sec, *ret = NULL;
+ struct hisi_qm *qm;
+ struct device *dev;
+
+ mutex_lock(&sec_list_lock);
+ list_for_each_entry(sec, &sec_list, list) {
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ free_qp_num = hisi_qm_get_free_qp_num(qm);
+ if (free_qp_num >= sec->ctx_q_num) {
+ ret = sec;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ }
+ mutex_unlock(&sec_list_lock);
+
+ return ret;
+}
+
+static const char * const sec_dbg_file_name[] = {
+ [SEC_CURRENT_QM] = "current_qm",
+ [SEC_CLEAR_ENABLE] = "clear_enable",
+};
+
+static struct debugfs_reg32 sec_dfx_regs[] = {
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_SAA_EN ", 0x301270},
+ {"SEC_BD_LATENCY_MIN ", 0x301600},
+ {"SEC_BD_LATENCY_MAX ", 0x301608},
+ {"SEC_BD_LATENCY_AVG ", 0x30160C},
+ {"SEC_BD_NUM_IN_SAA0 ", 0x301670},
+ {"SEC_BD_NUM_IN_SAA1 ", 0x301674},
+ {"SEC_BD_NUM_IN_SEC ", 0x301680},
+ {"SEC_ECC_1BIT_CNT ", 0x301C00},
+ {"SEC_ECC_1BIT_INFO ", 0x301C04},
+ {"SEC_ECC_2BIT_CNT ", 0x301C10},
+ {"SEC_ECC_2BIT_INFO ", 0x301C14},
+ {"SEC_BD_SAA0 ", 0x301C20},
+ {"SEC_BD_SAA1 ", 0x301C24},
+ {"SEC_BD_SAA2 ", 0x301C28},
+ {"SEC_BD_SAA3 ", 0x301C2C},
+ {"SEC_BD_SAA4 ", 0x301C30},
+ {"SEC_BD_SAA5 ", 0x301C34},
+ {"SEC_BD_SAA6 ", 0x301C38},
+ {"SEC_BD_SAA7 ", 0x301C3C},
+ {"SEC_BD_SAA8 ", 0x301C40},
+};
+
+static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ SEC_PF_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
+ pr_info("No device, suppose queue number is %d!\n", q_num);
+ } else {
+ rev_id = pdev->revision;
+
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = SEC_QUEUE_NUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = SEC_QUEUE_NUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = sec_pf_q_num_set,
+ .get = param_get_int,
+};
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
+
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 ctx_q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
+
+ if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
+ return -EINVAL;
+ }
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
+ .get = param_get_int,
+};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (2, 4, 6, ..., 1024)");
+
+static const struct pci_device_id sec_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
+
+static inline void sec_add_to_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_add_tail(&sec->list, &sec_list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static inline void sec_remove_from_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_del(&sec->list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static u8 sec_get_endian(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 reg;
+
+ /*
+ * A VF has no way to get the endian setting by reading an
+ * engine register, so default to little endian.
+ */
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
+ return SEC_LE;
+ }
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
+ SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
+ /* BD little endian mode */
+ if (!(reg & BIT(0)))
+ return SEC_LE;
+
+ /* BD 32-bits big endian mode */
+ else if (!(reg & BIT(1)))
+ return SEC_32BE;
+
+ /* BD 64-bits big endian mode */
+ else
+ return SEC_64BE;
+}
+
+static int sec_engine_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+ u32 reg;
+
+ /* disable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg &= SEC_CLK_GATE_DISABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ return ret;
+ }
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= (0x1 << SEC_TRNG_EN_SHIFT);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN2,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));
+
+ /* enable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= SEC_CLK_GATE_ENABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* config endian */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(sec);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* Enable multiple IVs in SM4 XTS mode */
+ writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
+ qm->io_base + SEC_XTS_MIV_ENABLE_REG);
+
+ return 0;
+}
+
+static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+
+ /* qm user domain */
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
+
+ /* qm cache */
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
+
+ /* enable sqc,cqc writeback */
+ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
+ CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
+
+ return sec_engine_init(sec);
+}
+
+/* sec_debug_regs_clear() - clear the sec debug regs */
+static void sec_debug_regs_clear(struct hisi_qm *qm)
+{
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void sec_hw_error_enable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ if (qm->ver == QM_HW_V1) {
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+ dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
+ return;
+ }
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+ /* clear any pending SEC hw error sources */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* enable SEC block master OOO when an m-bit error occurs */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_disable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* disable SEC block master OOO when an m-bit error occurs */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_init(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
+ QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
+ | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
+ QM_DB_RANDOM_INVALID);
+ sec_hw_error_enable(sec);
+}
+
+static void sec_hw_error_uninit(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ sec_hw_error_disable(sec);
+ writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
+}
+
+static u32 sec_current_qm_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
+ u32 vfq_num;
+ u32 tmp;
+
+ if (val > sec->num_vfs)
+ return -EINVAL;
+
+ /* Calculate and store curr_qm_qp_num according to the PF or VF device ID */
+ if (!val) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+
+ if (val == sec->num_vfs)
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num -
+ (sec->num_vfs - 1) * vfq_num;
+ else
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ SEC_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ u32 tmp;
+
+ if (val != 1 && val)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ val = sec_current_qm_read(file);
+ break;
+ case SEC_CLEAR_ENABLE:
+ val = sec_clear_enable_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+
+ spin_unlock_irq(&file->lock);
+ ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= SEC_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ ret = sec_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case SEC_CLEAR_ENABLE:
+ ret = sec_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
+ err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations sec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
+};
+
+static int sec_core_debug_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct sec_dfx *dfx = &sec->debug.dfx;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d;
+
+ tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOENT;
+
+ regset->regs = sec_dfx_regs;
+ regset->nregs = ARRAY_SIZE(sec_dfx_regs);
+ regset->base = qm->io_base;
+
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
+ debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+
+ debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+
+ return 0;
+}
+
+static int sec_debug_init(struct sec_dev *sec)
+{
+ int i;
+
+ for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = &sec->qm;
+
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
+ sec->qm.debug.debug_root,
+ sec->debug.files + i,
+ &sec_dbg_fops);
+ }
+
+ return sec_core_debug_init(sec);
+}
+
+static int sec_debugfs_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = sec_debug_init(sec);
+ if (ret)
+ goto failed_to_create;
+ }
+
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(sec_debugfs_root);
+
+ return ret;
+}
+
+static void sec_debugfs_exit(struct sec_dev *sec)
+{
+ debugfs_remove_recursive(sec->qm.debug.debug_root);
+}
+
+static int sec_pf_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
+ break;
+
+ case QM_HW_V2:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = sec_set_user_domain_and_cache(sec);
+ if (ret)
+ return ret;
+
+ sec_hw_error_init(sec);
+ sec_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -ENODEV;
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+ qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ qm->use_dma_api = true;
+
+ return hisi_qm_init(qm);
+}
+
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
+{
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+
+ return sec_pf_probe_init(sec);
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+ * There is no way to get the qm configuration from a VM on
+ * v1 hardware, so the PF is currently forced to use
+ * SEC_PF_DEF_Q_NUM, and only one VF can be triggered on v1
+ * hardware. v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2) {
+ /* v2 hardware supports getting the vft via mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void sec_probe_uninit(struct sec_dev *sec)
+{
+ sec_hw_error_uninit(sec);
+}
+
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sec_dev *sec;
+ struct hisi_qm *qm;
+ int ret;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, sec);
+
+ sec->ctx_q_num = ctx_q_num;
+
+ qm = &sec->qm;
+
+ ret = sec_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to pre init qm!\n");
+ return ret;
+ }
+
+ ret = sec_probe_init(qm, sec);
+ if (ret) {
+ pci_err(pdev, "Failed to probe!\n");
+ goto err_qm_uninit;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start sec qm!\n");
+ goto err_probe_uninit;
+ }
+
+ ret = sec_debugfs_init(sec);
+ if (ret)
+ pci_warn(pdev, "Failed to init debugfs!\n");
+
+ sec_add_to_list(sec);
+
+ ret = sec_register_to_crypto();
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ sec_remove_from_list(sec);
+ sec_debugfs_exit(sec);
+ hisi_qm_stop(qm);
+
+err_probe_uninit:
+ sec_probe_uninit(sec);
+
+err_qm_uninit:
+ sec_qm_uninit(qm);
+
+ return ret;
+}
+
+/* now we only support equal assignment */
+static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 qp_num = qm->qp_num;
+ u32 q_base = qp_num;
+ u32 q_num, remain_q_num;
+ int i, j, ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+ q_num = remain_q_num / num_vfs;
+
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int sec_clear_vft_config(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 num_vfs = sec->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ sec->num_vfs = 0;
+
+ return 0;
+}
+
+static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ int pre_existing_vfs, ret;
+ u32 num_vfs;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+
+ if (pre_existing_vfs) {
+ pci_err(pdev, "Can't enable VF. Please disable at first!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
+
+ ret = sec_vf_q_assign(sec, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ sec->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ sec_clear_vft_config(sec);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int sec_sriov_disable(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The remove callback of sec_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return sec_clear_vft_config(sec);
+}
+
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return sec_sriov_enable(pdev, num_vfs);
+ else
+ return sec_sriov_disable(pdev);
+}
+
+static void sec_remove(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &sec->qm;
+
+ sec_unregister_from_crypto();
+
+ sec_remove_from_list(sec);
+
+ if (qm->fun_type == QM_HW_PF && sec->num_vfs)
+ (void)sec_sriov_disable(pdev);
+
+ sec_debugfs_exit(sec);
+
+ (void)hisi_qm_stop(qm);
+
+ if (qm->fun_type == QM_HW_PF)
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(sec);
+
+ sec_qm_uninit(qm);
+}
+
+static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
+{
+ const struct sec_hw_error *errs = sec_hw_errors;
+ struct device *dev = &sec->qm.pdev->dev;
+ u32 err_val;
+
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
+
+ if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
+ err_val = readl(sec->qm.io_base +
+ SEC_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ dev_err(dev, "multi ecc sram addr=0x%x\n",
+ SEC_ECC_ADDR(err_val));
+ }
+ }
+ errs++;
+ }
+}
+
+static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
+ if (err_sts) {
+ sec_log_hw_error(sec, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, sec_ret;
+
+ if (!sec) {
+ pci_err(pdev, "Can't recover error during device init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&sec->qm);
+
+ /* log sec error */
+ sec_ret = sec_hw_error_handle(sec);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return sec_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers sec_err_handler = {
+ .error_detected = sec_error_detected,
+};
+
+static struct pci_driver sec_pci_driver = {
+ .name = "hisi_sec2",
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
+};
+
+static void sec_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
+}
+
+static void sec_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(sec_debugfs_root);
+}
+
+static int __init sec_init(void)
+{
+ int ret;
+
+ sec_register_debugfs();
+
+ ret = pci_register_driver(&sec_pci_driver);
+ if (ret < 0) {
+ sec_unregister_debugfs();
+ pr_err("Failed to register pci driver.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit sec_exit(void)
+{
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
+}
+
+module_init(sec_init);
+module_exit(sec_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
+MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index e083d172b618..012023c347b1 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -2,37 +2,13 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include "./sgl.h"
+#include <linux/slab.h>
+#include "qm.h"
#define HISI_ACC_SGL_SGE_NR_MIN 1
-#define HISI_ACC_SGL_SGE_NR_MAX 255
-#define HISI_ACC_SGL_SGE_NR_DEF 10
#define HISI_ACC_SGL_NR_MAX 256
#define HISI_ACC_SGL_ALIGN_SIZE 64
-
-static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp)
-{
- int ret;
- u32 n;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops acc_sgl_sge_ops = {
- .set = acc_sgl_sge_set,
- .get = param_get_int,
-};
-
-static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF;
-module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444);
-MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl(1-255)");
+#define HISI_ACC_MEM_BLOCK_NR 5
struct acc_hw_sge {
dma_addr_t buf;
@@ -55,37 +31,91 @@ struct hisi_acc_hw_sgl {
struct acc_hw_sge sge_entries[];
} __aligned(1);
+struct hisi_acc_sgl_pool {
+ struct mem_block {
+ struct hisi_acc_hw_sgl *sgl;
+ dma_addr_t sgl_dma;
+ size_t size;
+ } mem_block[HISI_ACC_MEM_BLOCK_NR];
+ u32 sgl_num_per_block;
+ u32 block_num;
+ u32 count;
+ u32 sge_nr;
+ size_t sgl_size;
+};
+
/**
* hisi_acc_create_sgl_pool() - Create a hw sgl pool.
* @dev: The device which hw sgl pool belongs to.
- * @pool: Pointer of pool.
* @count: Count of hisi_acc_hw_sgl in pool.
+ * @sge_nr: The count of sge in hw_sgl
*
* This function creates a hw sgl pool, after this user can get hw sgl memory
* from it.
*/
-int hisi_acc_create_sgl_pool(struct device *dev,
- struct hisi_acc_sgl_pool *pool, u32 count)
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr)
{
- u32 sgl_size;
- u32 size;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ struct hisi_acc_sgl_pool *pool;
+ struct mem_block *block;
+ u32 i, j;
- if (!dev || !pool || !count)
- return -EINVAL;
+ if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
+ return ERR_PTR(-EINVAL);
- sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr +
+ sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
- size = sgl_size * count;
+ block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
+ sgl_num_per_block = block_size / sgl_size;
+ block_num = count / sgl_num_per_block;
+ remain_sgl = count % sgl_num_per_block;
+
+ if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
+ (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ block = pool->mem_block;
+
+ for (i = 0; i < block_num; i++) {
+ block[i].sgl = dma_alloc_coherent(dev, block_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
+
+ block[i].size = block_size;
+ }
+
+ if (remain_sgl > 0) {
+ block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
- pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL);
- if (!pool->sgl)
- return -ENOMEM;
+ block[i].size = remain_sgl * sgl_size;
+ }
- pool->size = size;
+ pool->sgl_num_per_block = sgl_num_per_block;
+ pool->block_num = remain_sgl ? block_num + 1 : block_num;
pool->count = count;
pool->sgl_size = sgl_size;
+ pool->sge_nr = sge_nr;
- return 0;
+ return pool;
+
+err_free_mem:
+ for (j = 0; j < i; j++) {
+ dma_free_coherent(dev, block_size, block[j].sgl,
+ block[j].sgl_dma);
+ memset(block + j, 0, sizeof(*block));
+ }
+ kfree(pool);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
@@ -98,38 +128,57 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
*/
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
- dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
- memset(pool, 0, sizeof(struct hisi_acc_sgl_pool));
+ struct mem_block *block;
+ int i;
+
+ if (!dev || !pool)
+ return;
+
+ block = pool->mem_block;
+
+ for (i = 0; i < pool->block_num; i++)
+ dma_free_coherent(dev, block[i].size, block[i].sgl,
+ block[i].sgl_dma);
+
+ kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
-struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
- dma_addr_t *hw_sgl_dma)
+static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma)
{
- if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
+ struct mem_block *block;
+ u32 block_index, offset;
+
+ if (!pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
- *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
- return (void *)pool->sgl + pool->sgl_size * index;
-}
+ block = pool->mem_block;
+ block_index = index / pool->sgl_num_per_block;
+ offset = index % pool->sgl_num_per_block;
-void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
+ *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
+ return (void *)block[block_index].sgl + pool->sgl_size * offset;
+}
static void sg_map_to_hw_sg(struct scatterlist *sgl,
struct acc_hw_sge *hw_sge)
{
- hw_sge->buf = sgl->dma_address;
- hw_sge->len = sgl->dma_length;
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
- hw_sgl->entry_sum_in_sgl++;
+ u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
+
+ var++;
+ hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}
static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
- hw_sgl->entry_sum_in_chain = sum;
+ hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
/**
@@ -153,10 +202,13 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int sg_n = sg_nents(sgl);
- int i, ret;
+ int i, ret, sg_n;
- if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr)
+ if (!dev || !sgl || !pool || !hw_sgl_dma)
+ return ERR_PTR(-EINVAL);
+
+ sg_n = sg_nents(sgl);
+ if (sg_n > pool->sge_nr)
return ERR_PTR(-EINVAL);
ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
@@ -164,11 +216,12 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
return ERR_PTR(-EINVAL);
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (!curr_hw_sgl) {
- ret = -ENOMEM;
- goto err_unmap_sg;
+ if (IS_ERR(curr_hw_sgl)) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ return ERR_PTR(-ENOMEM);
}
- curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr;
+ curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
for_each_sg(sgl, sg, sg_n, i) {
@@ -177,14 +230,10 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sge++;
}
- update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr);
+ update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
-
-err_unmap_sg:
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
@@ -201,6 +250,9 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
struct hisi_acc_hw_sgl *hw_sgl)
{
+ if (!dev || !sgl || !hw_sgl)
+ return;
+
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
hw_sgl->entry_sum_in_chain = 0;
diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h
deleted file mode 100644
index 3ac8871c7acf..000000000000
--- a/drivers/crypto/hisilicon/sgl.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 HiSilicon Limited. */
-#ifndef HISI_ACC_SGL_H
-#define HISI_ACC_SGL_H
-
-struct hisi_acc_sgl_pool {
- struct hisi_acc_hw_sgl *sgl;
- dma_addr_t sgl_dma;
- size_t size;
- u32 count;
- size_t sgl_size;
-};
-
-struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
-void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
-int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool,
- u32 count);
-void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool);
-#endif
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index ffb00d987d02..79fc4dd3fe00 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -8,7 +8,6 @@
#include <linux/list.h>
#include "../qm.h"
-#include "../sgl.h"
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 59023545a1c4..795428c1d07e 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -22,6 +22,7 @@
#define HZIP_CTX_Q_NUM 2
#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
+#define HZIP_SGL_SGE_NR 10
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
@@ -41,7 +42,7 @@ enum hisi_zip_alg_type {
#define TO_HEAD(req_type) \
(((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \
- ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \
+ ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \
struct hisi_zip_req {
struct acomp_req *req;
@@ -67,7 +68,7 @@ struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
- struct hisi_acc_sgl_pool sgl_pool;
+ struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
struct hisi_zip_ctx *ctx;
};
@@ -78,6 +79,30 @@ struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
};
+static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ u16 n;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou16(val, 10, &n);
+ if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sgl_sge_nr_ops = {
+ .set = sgl_sge_nr_set,
+ .get = param_get_int,
+};
+
+static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
+module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
+MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
+
static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
@@ -265,14 +290,15 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_qp_ctx *tmp;
- int i, ret;
+ struct device *dev;
+ int i;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
- ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev,
- &tmp->sgl_pool,
- QM_Q_DEPTH << 1);
- if (ret < 0) {
+ dev = &tmp->qp->qm->pdev->dev;
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ sgl_sge_nr);
+ if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
goto err_free_sgl_pool0;
return -ENOMEM;
@@ -283,7 +309,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
err_free_sgl_pool0:
hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
- &ctx->qp_ctx[QPC_COMP].sgl_pool);
+ ctx->qp_ctx[QPC_COMP].sgl_pool);
return -ENOMEM;
}
@@ -293,7 +319,7 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
- &ctx->qp_ctx[i].sgl_pool);
+ ctx->qp_ctx[i].sgl_pool);
}
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
@@ -512,7 +538,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool;
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
dma_addr_t input;
dma_addr_t output;
int ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 1b2ee96c888d..e1bab1a91333 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -79,47 +79,80 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-LIST_HEAD(hisi_zip_list);
-DEFINE_MUTEX(hisi_zip_list_lock);
+static LIST_HEAD(hisi_zip_list);
+static DEFINE_MUTEX(hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
-static struct hisi_zip *find_zip_device_numa(int node)
+struct hisi_zip_resource {
+ struct hisi_zip *hzip;
+ int distance;
+ struct list_head list;
+};
+
+static void free_list(struct list_head *head)
{
- struct hisi_zip *zip = NULL;
- struct hisi_zip *hisi_zip;
- int min_distance = HZIP_NUMA_DISTANCE;
- struct device *dev;
+ struct hisi_zip_resource *res, *tmp;
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = &hisi_zip->qm.pdev->dev;
- if (node_distance(dev->numa_node, node) < min_distance) {
- zip = hisi_zip;
- min_distance = node_distance(dev->numa_node, node);
- }
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
-
- return zip;
}
-#endif
struct hisi_zip *find_zip_device(int node)
{
- struct hisi_zip *zip = NULL;
+ struct hisi_zip_resource *res, *tmp;
+ struct hisi_zip *ret = NULL;
+ struct hisi_zip *hisi_zip;
+ struct list_head *n;
+ struct device *dev;
+ LIST_HEAD(head);
mutex_lock(&hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
- zip = find_zip_device_numa(node);
-#else
- zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto err;
+
+ dev = &hisi_zip->qm.pdev->dev;
+ res->hzip = hisi_zip;
+ res->distance = node_distance(dev_to_node(dev), node);
+
+ n = &head;
+ list_for_each_entry(tmp, &head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
+ ret = tmp->hzip;
+ break;
+ }
+ }
+
+ free_list(&head);
+ } else {
+ ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
+ }
+
mutex_unlock(&hisi_zip_list_lock);
- return zip;
+ return ret;
+
+err:
+ free_list(&head);
+ mutex_unlock(&hisi_zip_list_lock);
+ return NULL;
}
struct hisi_zip_hw_error {
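
The reworked find_zip_device() above orders candidate devices by NUMA distance to
the requesting node and returns the nearest one that still has free queue pairs.
A plain-C sketch of that selection policy over a pre-collected candidate array
(hypothetical types, not the driver's API):

#include <stddef.h>

struct zip_candidate {
	int distance;	/* node_distance(device node, requesting node) */
	int free_qps;	/* free queue pairs left on this device */
};

/* Return the index of the closest candidate with free queues, or -1. */
static int pick_zip_device(const struct zip_candidate *c, size_t n)
{
	int best = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!c[i].free_qps)
			continue;
		if (best < 0 || c[i].distance < c[best].distance)
			best = (int)i;
	}
	return best;
}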
@@ -267,6 +300,10 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
static int uacce_mode;
module_param(uacce_mode, int, 0);
+static u32 vfs_num;
+module_param(vfs_num, uint, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
+
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
@@ -335,8 +372,7 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n",
- qm->ver);
+ dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
return;
}
@@ -511,7 +547,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
@@ -521,10 +557,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
else
sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
- if (!tmp_d)
- return -ENOENT;
-
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return -ENOENT;
@@ -533,9 +565,8 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
}
return 0;
@@ -543,7 +574,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
- struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
@@ -551,11 +581,9 @@ static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
ctrl->files[i].ctrl = ctrl;
ctrl->files[i].index = i;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ ctrl->debug_root, ctrl->files + i,
+ &ctrl_debug_fops);
}
return hisi_zip_core_debug_init(ctrl);
@@ -569,8 +597,6 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
int ret;
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
- if (!dev_d)
- return -ENOENT;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
@@ -652,90 +678,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
-static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
- struct hisi_qm *qm;
- int ret;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
- return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
-
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- switch (uacce_mode) {
- case 0:
- qm->use_dma_api = true;
- break;
- case 1:
- qm->use_dma_api = false;
- break;
- case 2:
- qm->use_dma_api = true;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hisi_qm_init(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
- return ret;
- }
-
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- }
-
- ret = hisi_qm_start(qm);
- if (ret)
- goto err_qm_uninit;
-
- ret = hisi_zip_debugfs_init(hisi_zip);
- if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- hisi_zip_add_to_list(hisi_zip);
-
- return 0;
-
-err_qm_uninit:
- hisi_qm_uninit(qm);
- return ret;
-}
-
/* Currently we only support equal assignment */
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
@@ -832,6 +774,100 @@ static int hisi_zip_sriov_disable(struct pci_dev *pdev)
return hisi_zip_clear_vft_config(hisi_zip);
}
+static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_zip *hisi_zip;
+ enum qm_hw_ver rev_id;
+ struct hisi_qm *qm;
+ int ret;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -EINVAL;
+
+ hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
+ if (!hisi_zip)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, hisi_zip);
+
+ qm = &hisi_zip->qm;
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
+ QM_HW_VF;
+ switch (uacce_mode) {
+ case 0:
+ qm->use_dma_api = true;
+ break;
+ case 1:
+ qm->use_dma_api = false;
+ break;
+ case 2:
+ qm->use_dma_api = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init qm!\n");
+ return ret;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
+ if (ret)
+ return ret;
+
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+ * There is no way to get the qm configuration from a VM on v1
+ * hardware, so currently force the PF to use HZIP_PF_DEF_Q_NUM and
+ * force only one VF to be triggered on v1 hardware.
+ *
+ * v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2)
+ /* v2 starts to support get vft by mailbox */
+ hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ ret = hisi_zip_debugfs_init(hisi_zip);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
+
+ hisi_zip_add_to_list(hisi_zip);
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ hisi_zip_remove_from_list(hisi_zip);
+ hisi_zip_debugfs_exit(hisi_zip);
+ hisi_qm_stop(qm);
+err_qm_uninit:
+ hisi_qm_uninit(qm);
+ return ret;
+}
+
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
@@ -945,7 +981,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : 0,
+ hisi_zip_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
@@ -955,8 +991,6 @@ static void hisi_zip_register_debugfs(void)
return;
hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
- if (IS_ERR_OR_NULL(hzip_debugfs_root))
- hzip_debugfs_root = NULL;
}
static void hisi_zip_unregister_debugfs(void)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4ab1bde8dd9b..64894d8b442a 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -75,9 +75,9 @@ static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
- int maxbanks, u32 probemask)
+ int maxbanks, u32 probemask, u32 stride)
{
- u32 val, addrhi, addrlo, addrmid;
+ u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
int actbank;
/*
@@ -87,32 +87,37 @@ static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
addrhi = 1 << (16 + maxbanks);
addrlo = 0;
actbank = min(maxbanks - 1, 0);
- while ((addrhi - addrlo) > 32) {
+ while ((addrhi - addrlo) > stride) {
/* write marker to lowest address in top half */
addrmid = (addrhi + addrlo) >> 1;
+ marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
- writel((addrmid | (addrlo << 16)) & probemask,
+ writel(marker,
priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- /* write marker to lowest address in bottom half */
- eip197_trc_cache_banksel(priv, addrlo, &actbank);
- writel((addrlo | (addrhi << 16)) & probemask,
- priv->base + EIP197_CLASSIFICATION_RAMS +
- (addrlo & 0xffff));
+ /* write invalid markers to possible aliases */
+ delta = 1 << __fls(addrmid);
+ while (delta >= stride) {
+ addralias = addrmid - delta;
+ eip197_trc_cache_banksel(priv, addralias, &actbank);
+ writel(~marker,
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ (addralias & 0xffff));
+ delta >>= 1;
+ }
/* read back marker from top half */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- if (val == ((addrmid | (addrlo << 16)) & probemask)) {
+ if ((val & probemask) == marker)
/* read back correct, continue with top half */
addrlo = addrmid;
- } else {
+ else
/* not read back correct, continue with bottom half */
addrhi = addrmid;
- }
}
return addrhi;
}
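
eip197_trc_cache_probe() above is a binary search over the cache RAM address
space: write a unique marker at the midpoint, clobber any lower-address aliases
with its inverse, and keep the top half only if the marker reads back intact.
A standalone sketch of the bisection (the read/write callbacks are hypothetical
stand-ins for the classification-RAM accessors):

#include <stdint.h>

typedef void (*ram_write_fn)(uint32_t addr, uint32_t val);
typedef uint32_t (*ram_read_fn)(uint32_t addr);

static uint32_t probe_ram_size(ram_write_fn wr, ram_read_fn rd,
			       uint32_t hi, uint32_t stride)
{
	uint32_t lo = 0;

	while (hi - lo > stride) {
		uint32_t mid = (hi + lo) >> 1;
		uint32_t marker = mid ^ 0xabadbabe;	/* unique per step */

		wr(mid, marker);
		if (rd(mid) == marker)
			lo = mid;	/* marker survived: RAM reaches mid */
		else
			hi = mid;	/* wrapped or absent: size below mid */
	}
	return hi;	/* lowest address not backed by RAM, i.e. the size */
}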
@@ -150,7 +155,7 @@ static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
htable_offset + i * sizeof(u32));
}
-static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, dsize, asize;
int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
@@ -183,7 +188,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed data RAM size in bytes */
- dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
+ dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
/*
* Now probe the administration RAM size pretty much the same way
@@ -196,11 +201,18 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed admin RAM size in admin words */
- asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
+ asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
/* Clear any ECC errors detected while probing! */
writel(0, priv->base + EIP197_TRC_ECCCTRL);
+ /* Sanity check probing results */
+ if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
+ dev_err(priv->dev, "Record cache probing failed (%d,%d).",
+ dsize, asize);
+ return -ENODEV;
+ }
+
/*
* Determine optimal configuration from RAM sizes
* Note that we assume that the physical RAM configuration is sane
@@ -221,9 +233,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Step #3: Determine log2 of hash table size */
cs_ht_sz = __fls(asize - cs_rc_max) - 2;
/* Step #4: determine current size of hash table in dwords */
- cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */
+ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
/* Step #5: add back excess words and see if we can fit more records */
- cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4));
+ cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
/* Clear the cache RAMs */
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
@@ -251,6 +263,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
+ return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
@@ -298,13 +311,14 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
const struct firmware *fw)
{
- const u32 *data = (const u32 *)fw->data;
+ const __be32 *data = (const __be32 *)fw->data;
int i;
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
writel(be32_to_cpu(data[i]),
- priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ i * sizeof(__be32));
/* Exclude final 2 NOPs from size */
return i - EIP197_FW_TERMINAL_NOPS;
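
The firmware loader hunk above treats the blob as an array of __be32 and
converts each word with be32_to_cpu() before writing it out, which keeps the
code correct on both endiannesses. A standalone sketch of decoding such a
big-endian word stream (plain C, hypothetical names):

#include <stdint.h>
#include <stddef.h>

/* Decode one big-endian 32-bit word from a byte stream. */
static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Convert fw[0..nbytes) of big-endian words into host-order values. */
static void fw_words_to_cpu(const uint8_t *fw, size_t nbytes, uint32_t *out)
{
	size_t i;

	for (i = 0; i + 4 <= nbytes; i += 4)
		out[i / 4] = be32_at(fw + i);
}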
@@ -471,6 +485,14 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
cd_size_rnd) - 1;
}
+ /*
+ * Since we're using command descriptors far larger than formally specified,
+ * we need to check whether we can fit even one on low-end EIP196's!
+ */
+ if (!cd_fetch_cnt) {
+ dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
+ return -ENODEV;
+ }
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
@@ -479,12 +501,12 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (cd_fetch_cnt * priv->config.cd_offset),
+ (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -527,13 +549,13 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
priv->config.rd_size,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((rd_fetch_cnt *
(rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (rd_fetch_cnt * priv->config.rd_offset),
+ (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -559,7 +581,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 val;
- int i, ret, pe;
+ int i, ret, pe, opbuflo, opbufhi;
dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
priv->config.pes, priv->config.rings);
@@ -595,8 +617,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(EIP197_DxE_THR_CTRL_RESET_PE,
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Reset HIA input interface arbiter (EIP197 only) */
+ if (priv->flags & EIP197_PE_ARB)
+ /* Reset HIA input interface arbiter (if present) */
writel(EIP197_HIA_RA_PE_CTRL_RESET,
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
@@ -639,9 +661,16 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
;
/* DMA transfer size to use */
+ if (priv->hwconfig.hwnumpes > 4) {
+ opbuflo = 9;
+ opbufhi = 10;
+ } else {
+ opbuflo = 7;
+ opbufhi = 8;
+ }
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
- EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97 but disabling
@@ -655,8 +684,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
/* Configure the processing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
- EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
/* Processing Engine configuration */
@@ -696,7 +725,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -719,7 +748,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -736,19 +765,28 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->flags & SAFEXCEL_HW_EIP197) {
- eip197_trc_cache_init(priv);
- priv->flags |= EIP197_TRC_CACHE;
+ if (priv->flags & EIP197_SIMPLE_TRC) {
+ writel(EIP197_STRC_CONFIG_INIT |
+ EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
+ EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
+ priv->base + EIP197_STRC_CONFIG);
+ writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
+ EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
+ } else if (priv->flags & SAFEXCEL_HW_EIP197) {
+ ret = eip197_trc_cache_init(priv);
+ if (ret)
+ return ret;
+ }
+ if (priv->flags & EIP197_ICE) {
ret = eip197_load_firmwares(priv);
if (ret)
return ret;
}
- safexcel_hw_setup_cdesc_rings(priv);
- safexcel_hw_setup_rdesc_rings(priv);
-
- return 0;
+ return safexcel_hw_setup_cdesc_rings(priv) ?:
+ safexcel_hw_setup_rdesc_rings(priv) ?:
+ 0;
}
/* Called with ring's lock taken */
@@ -836,20 +874,24 @@ finalize:
spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
- writel((rdesc * priv->config.rd_offset) << 2,
+ writel((rdesc * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
- writel((cdesc * priv->config.cd_offset) << 2,
+ writel((cdesc * priv->config.cd_offset),
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc)
+ void *rdp)
{
- if (likely((!rdesc->descriptor_overflow) &&
- (!rdesc->buffer_overflow) &&
- (!rdesc->result_data.error_code)))
+ struct safexcel_result_desc *rdesc = rdp;
+ struct result_data_desc *result_data = rdp + priv->config.res_offset;
+
+ if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
+ ((!rdesc->descriptor_overflow) &&
+ (!rdesc->buffer_overflow) &&
+ (!result_data->error_code))))
return 0;
if (rdesc->descriptor_overflow)
@@ -858,13 +900,14 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->buffer_overflow)
dev_err(priv->dev, "Buffer overflow detected");
- if (rdesc->result_data.error_code & 0x4066) {
+ if (result_data->error_code & 0x4066) {
/* Fatal error (bits 1,2,5,6 & 14) */
dev_err(priv->dev,
"result descriptor error (%x)",
- rdesc->result_data.error_code);
+ result_data->error_code);
+
return -EIO;
- } else if (rdesc->result_data.error_code &
+ } else if (result_data->error_code &
(BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
/*
* Give priority over authentication fails:
@@ -872,7 +915,7 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
* something wrong with the input!
*/
return -EINVAL;
- } else if (rdesc->result_data.error_code & BIT(9)) {
+ } else if (result_data->error_code & BIT(9)) {
/* Authentication failed */
return -EBADMSG;
}
@@ -1003,7 +1046,7 @@ handle_results:
acknowledge:
if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
- EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ (tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
/* If the number of requests overflowed the counter, try to proceed more
@@ -1120,6 +1163,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
irq_name, irq);
return irq;
}
+ } else {
+ return -ENXIO;
}
ret = devm_request_threaded_irq(dev, irq, handler,
@@ -1169,6 +1214,44 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
+ &safexcel_alg_crc32,
+ &safexcel_alg_cbcmac,
+ &safexcel_alg_xcbcmac,
+ &safexcel_alg_cmac,
+ &safexcel_alg_chacha20,
+ &safexcel_alg_chachapoly,
+ &safexcel_alg_chachapoly_esp,
+ &safexcel_alg_sm3,
+ &safexcel_alg_hmac_sm3,
+ &safexcel_alg_ecb_sm4,
+ &safexcel_alg_cbc_sm4,
+ &safexcel_alg_ofb_sm4,
+ &safexcel_alg_cfb_sm4,
+ &safexcel_alg_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
+ &safexcel_alg_sha3_224,
+ &safexcel_alg_sha3_256,
+ &safexcel_alg_sha3_384,
+ &safexcel_alg_sha3_512,
+ &safexcel_alg_hmac_sha3_224,
+ &safexcel_alg_hmac_sha3_256,
+ &safexcel_alg_hmac_sha3_384,
+ &safexcel_alg_hmac_sha3_512,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des,
+ &safexcel_alg_rfc4106_gcm,
+ &safexcel_alg_rfc4543_gcm,
+ &safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -1238,30 +1321,28 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask = 0;
-
- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
-
- /* Read number of PEs from the engine */
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Wider field width for all EIP197 type engines */
- mask = EIP197_N_PES_MASK;
- else
- /* Narrow field width for EIP97 type engine */
- mask = EIP97_N_PES_MASK;
+ u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
- priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+ priv->config.pes = priv->hwconfig.hwnumpes;
+ priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
+ /* Cannot currently support more rings than we have ring AICs! */
+ priv->config.rings = min_t(u32, priv->config.rings,
+ priv->hwconfig.hwnumraic);
- priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
-
- val = (val & GENMASK(27, 25)) >> 25;
- mask = BIT(val) - 1;
-
- priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+ priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
- priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+ /* result token follows the descriptor, but its offset must be rounded to the bus width */
+ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
+ /* the full descriptor size is this first part plus the result struct */
+ priv->config.rd_size = priv->config.res_offset +
+ EIP197_RD64_RESULT_SIZE;
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+
+ /* convert dwords to bytes */
+ priv->config.cd_offset *= sizeof(u32);
+ priv->config.rd_offset *= sizeof(u32);
+ priv->config.res_offset *= sizeof(u32);
}
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
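
The offset arithmetic in safexcel_configure() above rounds each descriptor size
(in 32-bit words) up to a multiple of the HIA bus width, 2^hwdataw words, and
then converts the result to bytes. A small standalone helper showing the same
(size + mask) & ~mask round-up (hypothetical name):

#include <stdint.h>

static uint32_t round_to_bus_bytes(uint32_t size_words, unsigned int hwdataw)
{
	uint32_t mask = (1u << hwdataw) - 1;
	uint32_t words = (size_words + mask) & ~mask;	/* round up to bus width */

	return words * (uint32_t)sizeof(uint32_t);	/* dwords -> bytes */
}

/* e.g. with hwdataw = 2 (a 4-word bus), a 10-word descriptor rounds up to
 * 12 words, so round_to_bus_bytes(10, 2) == 48 bytes.
 */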
@@ -1307,7 +1388,7 @@ static int safexcel_probe_generic(void *pdev,
int is_pci_dev)
{
struct device *dev = priv->dev;
- u32 peid, version, mask, val, hiaopt;
+ u32 peid, version, mask, val, hiaopt, hwopt, peopt;
int i, ret, hwctg;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
@@ -1369,13 +1450,16 @@ static int safexcel_probe_generic(void *pdev,
*/
version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
if (((priv->flags & SAFEXCEL_HW_EIP197) &&
- (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
+ (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
+ (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
((!(priv->flags & SAFEXCEL_HW_EIP197) &&
(EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
/*
* We did not find the device that matched our initial probing
* (or our initial probing failed) Report appropriate error.
*/
+ dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
+ version);
return -ENODEV;
}
@@ -1383,6 +1467,14 @@ static int safexcel_probe_generic(void *pdev,
hwctg = version >> 28;
peid = version & 255;
+ /* Detect EIP206 processing pipe */
+ version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
+ dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
+
/* Detect EIP96 packet engine and version */
version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
@@ -1391,10 +1483,13 @@ static int safexcel_probe_generic(void *pdev,
}
priv->hwconfig.pever = EIP197_VERSION_MASK(version);
+ hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
+ peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
+
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
EIP197_HWDATAW_MASK;
priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
@@ -1403,6 +1498,19 @@ static int safexcel_probe_generic(void *pdev,
priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
EIP197_RFSIZE_MASK) +
EIP197_RFSIZE_ADJUST;
+ priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
+ EIP197_N_PES_MASK;
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
+ priv->flags |= EIP197_PE_ARB;
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1)
+ priv->flags |= EIP197_ICE;
+ /* If not a full TRC, then assume simple TRC */
+ if (!(hwopt & EIP197_OPT_HAS_TRC))
+ priv->flags |= EIP197_SIMPLE_TRC;
+ /* EIP197 always has SOME form of TRC */
+ priv->flags |= EIP197_TRC_CACHE;
} else {
/* EIP97 */
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
@@ -1411,6 +1519,23 @@ static int safexcel_probe_generic(void *pdev,
EIP97_CFSIZE_MASK;
priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
EIP97_RFSIZE_MASK;
+ priv->hwconfig.hwnumpes = 1; /* by definition */
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ }
+
+ /* Scan for ring AIC's */
+ for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
+ version = readl(EIP197_HIA_AIC_R(priv) +
+ EIP197_HIA_AIC_R_VERSION(i));
+ if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
+ break;
+ }
+ priv->hwconfig.hwnumraic = i;
+ /* Low-end EIP196 may not have any ring AIC's ... */
+ if (!priv->hwconfig.hwnumraic) {
+ dev_err(priv->dev, "No ring interrupt controller present!\n");
+ return -ENODEV;
}
/* Get supported algorithms from EIP96 transform engine */
@@ -1418,10 +1543,12 @@ static int safexcel_probe_generic(void *pdev,
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
- dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
- peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
- priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
- priv->hwconfig.hwrfsize, priv->hwconfig.pever,
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
+ peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
+ priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
+ priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
+ priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
+ priv->hwconfig.ppver, priv->hwconfig.pever,
priv->hwconfig.algo_flags);
safexcel_configure(priv);
@@ -1545,7 +1672,6 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
}
}
-#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
@@ -1623,6 +1749,7 @@ static int safexcel_remove(struct platform_device *pdev)
safexcel_unregister_algorithms(priv);
safexcel_hw_reset_rings(priv);
+ clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1664,9 +1791,7 @@ static struct platform_driver crypto_safexcel = {
.of_match_table = safexcel_of_match_table,
},
};
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
@@ -1757,7 +1882,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev,
return rc;
}
-void safexcel_pci_remove(struct pci_dev *pdev)
+static void safexcel_pci_remove(struct pci_dev *pdev)
{
struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
int i;
@@ -1787,54 +1912,32 @@ static struct pci_driver safexcel_pci_driver = {
.probe = safexcel_pci_probe,
.remove = safexcel_pci_remove,
};
-#endif
-
-/* Unfortunately, we have to resort to global variables here */
-#if IS_ENABLED(CONFIG_PCI)
-int pcireg_rc = -EINVAL; /* Default safe value */
-#endif
-#if IS_ENABLED(CONFIG_OF)
-int ofreg_rc = -EINVAL; /* Default safe value */
-#endif
static int __init safexcel_init(void)
{
-#if IS_ENABLED(CONFIG_PCI)
+ int ret;
+
/* Register PCI driver */
- pcireg_rc = pci_register_driver(&safexcel_pci_driver);
-#endif
+ ret = pci_register_driver(&safexcel_pci_driver);
-#if IS_ENABLED(CONFIG_OF)
/* Register platform driver */
- ofreg_rc = platform_driver_register(&crypto_safexcel);
- #if IS_ENABLED(CONFIG_PCI)
- /* Return success if either PCI or OF registered OK */
- return pcireg_rc ? ofreg_rc : 0;
- #else
- return ofreg_rc;
- #endif
-#else
- #if IS_ENABLED(CONFIG_PCI)
- return pcireg_rc;
- #else
- return -EINVAL;
- #endif
-#endif
+ if (IS_ENABLED(CONFIG_OF) && !ret) {
+ ret = platform_driver_register(&crypto_safexcel);
+ if (ret)
+ pci_unregister_driver(&safexcel_pci_driver);
+ }
+
+ return ret;
}
static void __exit safexcel_exit(void)
{
-#if IS_ENABLED(CONFIG_OF)
/* Unregister platform driver */
- if (!ofreg_rc)
+ if (IS_ENABLED(CONFIG_OF))
platform_driver_unregister(&crypto_safexcel);
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* Unregister PCI driver if successfully registered before */
- if (!pcireg_rc)
- pci_unregister_driver(&safexcel_pci_driver);
-#endif
+ pci_unregister_driver(&safexcel_pci_driver);
}
module_init(safexcel_init);
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..b4624b5687ce 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -17,8 +17,11 @@
#define EIP197_HIA_VERSION_BE 0xca35
#define EIP197_HIA_VERSION_LE 0x35ca
#define EIP97_VERSION_LE 0x9e61
+#define EIP196_VERSION_LE 0x3bc4
#define EIP197_VERSION_LE 0x3ac5
#define EIP96_VERSION_LE 0x9f60
+#define EIP201_VERSION_LE 0x36c9
+#define EIP206_VERSION_LE 0x31ce
#define EIP197_REG_LO16(reg) (reg & 0xffff)
#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
@@ -26,12 +29,22 @@
((reg >> 4) & 0xf0) | \
((reg >> 12) & 0xf))
+/* EIP197 HIA OPTIONS ENCODING */
+#define EIP197_HIA_OPT_HAS_PE_ARB BIT(29)
+
+/* EIP206 OPTIONS ENCODING */
+#define EIP206_OPT_ICE_TYPE(n) ((n>>8)&3)
+
+/* EIP197 OPTIONS ENCODING */
+#define EIP197_OPT_HAS_TRC BIT(31)
+
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 18
+#define EIP197_MAX_TOKENS 19
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
+#define EIP197_MAX_RING_AIC 14
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
@@ -138,6 +151,7 @@
#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_VERSION(r) (0xe01c - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
#define EIP197_HIA_AIC_G_ACK 0xf810
@@ -157,12 +171,16 @@
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL2(n) (0x102c + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
+#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
+#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
+#define EIP197_OPTIONS 0xfff8
#define EIP197_VERSION 0xfffc
/* EIP197-specific registers, no indirection */
@@ -178,6 +196,7 @@
#define EIP197_TRC_ECCADMINSTAT 0xf0838
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
+#define EIP197_STRC_CONFIG 0xf43f0
#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n)))
#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n)))
#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n)))
@@ -213,7 +232,6 @@
/* EIP197_HIA_xDR_PROC_COUNT */
#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
-#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -228,6 +246,8 @@
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
/* EIP197_HIA_OPTIONS */
+#define EIP197_N_RINGS_OFFSET 0
+#define EIP197_N_RINGS_MASK GENMASK(3, 0)
#define EIP197_N_PES_OFFSET 4
#define EIP197_N_PES_MASK GENMASK(4, 0)
#define EIP97_N_PES_MASK GENMASK(2, 0)
@@ -237,13 +257,13 @@
#define EIP197_CFSIZE_OFFSET 9
#define EIP197_CFSIZE_ADJUST 4
#define EIP97_CFSIZE_OFFSET 8
-#define EIP197_CFSIZE_MASK GENMASK(3, 0)
-#define EIP97_CFSIZE_MASK GENMASK(4, 0)
+#define EIP197_CFSIZE_MASK GENMASK(2, 0)
+#define EIP97_CFSIZE_MASK GENMASK(3, 0)
#define EIP197_RFSIZE_OFFSET 12
#define EIP197_RFSIZE_ADJUST 4
#define EIP97_RFSIZE_OFFSET 12
-#define EIP197_RFSIZE_MASK GENMASK(3, 0)
-#define EIP97_RFSIZE_MASK GENMASK(4, 0)
+#define EIP197_RFSIZE_MASK GENMASK(2, 0)
+#define EIP97_RFSIZE_MASK GENMASK(3, 0)
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
@@ -327,13 +347,21 @@
#define EIP197_ADDRESS_MODE BIT(8)
#define EIP197_CONTROL_MODE BIT(9)
+/* EIP197_PE_EIP96_TOKEN_CTRL2 */
+#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
+
+/* EIP197_STRC_CONFIG */
+#define EIP197_STRC_CONFIG_INIT BIT(31)
+#define EIP197_STRC_CONFIG_LARGE_REC(s) (s<<8)
+#define EIP197_STRC_CONFIG_SMALL_REC(s) (s<<0)
+
/* EIP197_FLUE_CONFIG */
#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004
/* Context Control */
struct safexcel_context_record {
- u32 control0;
- u32 control1;
+ __le32 control0;
+ __le32 control1;
__le32 data[40];
} __packed;
@@ -358,10 +386,14 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 (0x8 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM4 (0xd << 17)
+#define CONTEXT_CONTROL_DIGEST_INITIAL (0x0 << 21)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CRC32 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
@@ -371,17 +403,25 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM3 (0x7 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256 (0xb << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224 (0xc << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512 (0xd << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384 (0xe << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_POLY1305 (0xf << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
/* control1 */
#define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0)
+#define CONTEXT_CONTROL_CHACHA20_MODE_256_32 (2 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17))
+#define CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK (12 << 0)
#define CONTEXT_CONTROL_IV0 BIT(5)
#define CONTEXT_CONTROL_IV1 BIT(6)
#define CONTEXT_CONTROL_IV2 BIT(7)
@@ -394,6 +434,13 @@ struct safexcel_context_record {
#define EIP197_XCM_MODE_GCM 1
#define EIP197_XCM_MODE_CCM 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP_GMAC 3
+#define EIP197_AEAD_IPSEC_IV_SIZE 8
+#define EIP197_AEAD_IPSEC_NONCE_SIZE 4
+#define EIP197_AEAD_IPSEC_COUNTER_SIZE 4
+#define EIP197_AEAD_IPSEC_CCM_NONCE_SIZE 3
+
/* The hash counter given to the engine in the context has a granularity of
* 64 bits.
*/
@@ -423,6 +470,8 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
+#define EIP197_MIN_DSIZE 1024
+#define EIP197_MIN_ASIZE 8
#define EIP197_CS_TRC_REC_WC 64
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
@@ -447,7 +496,7 @@ struct result_data_desc {
u16 application_id;
u16 rsvd1;
- u32 rsvd2;
+ u32 rsvd2[5];
} __packed;
@@ -465,16 +514,15 @@ struct safexcel_result_desc {
u32 data_lo;
u32 data_hi;
-
- struct result_data_desc result_data;
} __packed;
/*
* The EIP(1)97 only needs to fetch the descriptor part of
* the result descriptor, not the result token part!
*/
-#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\
- sizeof(struct result_data_desc)) /\
+#define EIP197_RD64_FETCH_SIZE (sizeof(struct safexcel_result_desc) /\
+ sizeof(u32))
+#define EIP197_RD64_RESULT_SIZE (sizeof(struct result_data_desc) /\
sizeof(u32))
struct safexcel_token {
@@ -561,6 +609,9 @@ struct safexcel_command_desc {
struct safexcel_control_data_desc control_data;
} __packed;
+#define EIP197_CD64_FETCH_SIZE (sizeof(struct safexcel_command_desc) /\
+ sizeof(u32))
+
/*
* Internal structures & functions
*/
@@ -604,6 +655,7 @@ struct safexcel_config {
u32 rd_size;
u32 rd_offset;
+ u32 res_offset;
};
struct safexcel_work_data {
@@ -654,6 +706,12 @@ enum safexcel_eip_version {
/* Priority we use for advertising our algorithms */
#define SAFEXCEL_CRA_PRIORITY 300
+/* SM3 digest result for zero length message */
+#define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \
+ "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \
+ "\x22\xBE\xC8\xC7\x28\xFE\xFB\x74" \
+ "\x7E\xD0\x35\xEB\x50\x82\xAA\x2B"
+
/* EIP algorithm presence flags */
enum safexcel_eip_algorithms {
SAFEXCEL_ALG_BC0 = BIT(5),
@@ -697,16 +755,23 @@ struct safexcel_register_offsets {
enum safexcel_flags {
EIP197_TRC_CACHE = BIT(0),
SAFEXCEL_HW_EIP197 = BIT(1),
+ EIP197_PE_ARB = BIT(2),
+ EIP197_ICE = BIT(3),
+ EIP197_SIMPLE_TRC = BIT(4),
};
struct safexcel_hwconfig {
enum safexcel_eip_algorithms algo_flags;
int hwver;
int hiaver;
+ int ppver;
int pever;
int hwdataw;
int hwcfsize;
int hwrfsize;
+ int hwnumpes;
+ int hwnumrings;
+ int hwnumraic;
};
struct safexcel_crypto_priv {
@@ -778,7 +843,7 @@ struct safexcel_inv_result {
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc);
+ void *rdp);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
@@ -853,5 +918,43 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
+extern struct safexcel_alg_template safexcel_alg_crc32;
+extern struct safexcel_alg_template safexcel_alg_cbcmac;
+extern struct safexcel_alg_template safexcel_alg_xcbcmac;
+extern struct safexcel_alg_template safexcel_alg_cmac;
+extern struct safexcel_alg_template safexcel_alg_chacha20;
+extern struct safexcel_alg_template safexcel_alg_chachapoly;
+extern struct safexcel_alg_template safexcel_alg_chachapoly_esp;
+extern struct safexcel_alg_template safexcel_alg_sm3;
+extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
+extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
+extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_rfc4106_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4543_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4309_ccm;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index ef51f8c2b473..c02995694b41 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -5,18 +5,22 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/chacha.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
+#include <crypto/poly1305.h>
#include <crypto/sha.h>
+#include <crypto/sm3.h>
+#include <crypto/sm4.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -33,6 +37,8 @@ enum safexcel_cipher_alg {
SAFEXCEL_DES,
SAFEXCEL_3DES,
SAFEXCEL_AES,
+ SAFEXCEL_CHACHA20,
+ SAFEXCEL_SM4,
};
struct safexcel_cipher_ctx {
@@ -41,8 +47,8 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- bool aead;
- int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
+ char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
__le32 key[16];
u32 nonce;
@@ -51,10 +57,11 @@ struct safexcel_cipher_ctx {
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
struct crypto_cipher *hkaes;
+ struct crypto_aead *fback;
};
struct safexcel_cipher_req {
@@ -70,24 +77,50 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
{
u32 block_sz = 0;
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20 ||
+ ctx->xcm == EIP197_XCM_MODE_CCM) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(1);
+ }
return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+ } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
+ (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 96 bit IV part */
memcpy(&cdesc->control_data.token[0], iv, 12);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ *(__be32 *)&cdesc->control_data.token[3] =
+ cpu_to_be32(1);
+ }
+
+ return;
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+ /* 96 bit nonce part */
+ memcpy(&cdesc->control_data.token[0], &iv[4], 12);
+ /* 32 bit counter */
+ cdesc->control_data.token[3] = *(u32 *)iv;
return;
} else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
@@ -112,10 +145,16 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
+ case SAFEXCEL_SM4:
+ block_sz = SM4_BLOCK_SIZE;
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
case SAFEXCEL_AES:
block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
+ default:
+ break;
}
memcpy(cdesc->control_data.token, iv, block_sz);
}
@@ -153,72 +192,84 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
if (direction == SAFEXCEL_ENCRYPT) {
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 13);
+ EIP197_MAX_TOKENS - 14);
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
} else {
cryptlen -= digestsize;
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
+ EIP197_MAX_TOKENS - 15);
- token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[13].packet_length = digestsize |
+ token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ token[14].packet_length = digestsize |
EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
- if (likely(cryptlen)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[10].packet_length = cryptlen;
- token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[10].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[6].packet_length = assoclen;
+ token[6].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[11].packet_length = cryptlen;
+ token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ /* Do not send to crypt engine in case of GMAC */
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
} else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
}
if (!ctx->xcm)
return;
- token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[8].packet_length = 0;
- token[8].instructions = AES_BLOCK_SIZE;
+ token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ token[9].packet_length = 0;
+ token[9].instructions = AES_BLOCK_SIZE;
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[9].packet_length = AES_BLOCK_SIZE;
- token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[10].packet_length = AES_BLOCK_SIZE;
+ token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
- if (ctx->xcm == EIP197_XCM_MODE_GCM) {
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
- } else {
+ if (ctx->xcm != EIP197_XCM_MODE_GCM) {
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
u8 *cbcmaciv = (u8 *)&token[1];
- u32 *aadlen = (u32 *)&token[5];
+ __le32 *aadlen = (__le32 *)&token[5];
/* Construct IV block B0 for the CBC-MAC */
token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -227,17 +278,18 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
EIP197_TOKEN_INS_TYPE_HASH;
/* Variable length IV part */
- memcpy(cbcmaciv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
/* fixup flags byte */
cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
/* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
/* insert lower 2 bytes of message length */
cbcmaciv[14] = cryptlen >> 8;
cbcmaciv[15] = cryptlen & 255;
if (assoclen) {
- *aadlen = cpu_to_le32(cpu_to_be16(assoclen));
+ *aadlen = cpu_to_le32((assoclen >> 8) |
+ ((assoclen & 0xff) << 8));
assoclen += 2;
}
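
The CCM branch above assembles the CBC-MAC IV, i.e. the B0 block of RFC 3610:
a flags byte (Adata bit, encoded tag length, length-field size L'), the nonce,
and the message length in the trailing length octets, with a 2-byte AAD length
prepended to the AAD when present. A standalone sketch of that B0 layout
(hypothetical helper; like the token code above it only fills in the low 16
bits of the message length):

#include <stdint.h>
#include <string.h>

/* iv[0] holds L' = L - 1; the nonce follows in iv[1..15-L]. */
static void ccm_build_b0(uint8_t b0[16], const uint8_t *iv,
			 uint32_t cryptlen, uint32_t assoclen,
			 uint32_t digestsize)
{
	uint8_t lprime = iv[0];

	memset(b0, 0, 16);
	memcpy(b0, iv, 15 - lprime);		/* L' byte plus the nonce */
	/* flags: bit 6 = Adata present, bits 3-5 = (M - 2) / 2, bits 0-2 = L' */
	b0[0] |= (uint8_t)(((assoclen > 0) << 6) | ((digestsize - 2) << 2));
	b0[14] = (uint8_t)(cryptlen >> 8);	/* message length, big endian */
	b0[15] = (uint8_t)(cryptlen & 0xff);
}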
@@ -252,13 +304,13 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
/* Align crypto data towards hash engine */
- token[10].stat = 0;
+ token[11].stat = 0;
- token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
cryptlen &= 15;
- token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
+ token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[7].instructions = EIP197_TOKEN_INS_LAST |
@@ -284,7 +336,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -309,25 +361,29 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
- int err = -EINVAL;
+ int err = -EINVAL, i;
- if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+ if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
- /* Minimum keysize is minimum AES key size + nonce size */
- if (keys.enckeylen < (AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE))
+ /* Must have at least space for the nonce here */
+ if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
- keys.enckeylen -= CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
}
/* Encryption key */
switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
+ if (unlikely(err))
+ goto badkey_expflags;
+ break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
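
In the CTR (RFC 3686) path above, the last four bytes of the supplied key
material are the per-association nonce and are stripped off before the cipher
key is used; the earlier code subtracted the wrong constant, which this hunk
corrects. A small sketch of that split (hypothetical helper; the caller is
assumed to have verified that keylen leaves room for the nonce):

#include <stdint.h>
#include <string.h>

#define RFC3686_NONCE_SIZE 4

static size_t split_rfc3686_key(const uint8_t *key, size_t keylen,
				uint8_t nonce[RFC3686_NONCE_SIZE])
{
	size_t cipher_keylen = keylen - RFC3686_NONCE_SIZE;

	/* the last 4 bytes of the key material are the nonce */
	memcpy(nonce, key + cipher_keylen, RFC3686_NONCE_SIZE);
	return cipher_keylen;	/* length of the actual cipher key */
}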
@@ -338,14 +394,24 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
if (unlikely(err))
goto badkey;
break;
+ case SAFEXCEL_SM4:
+ if (unlikely(keys.enckeylen != SM4_KEY_SIZE))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
goto badkey;
}
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
- memcmp(ctx->key, keys.enckey, keys.enckeylen))
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++) {
+ if (le32_to_cpu(ctx->key[i]) !=
+ ((u32 *)keys.enckey)[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+ }
+ }
/* Auth key */
switch (ctx->hash_alg) {
@@ -374,6 +440,11 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
+ if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -388,7 +459,8 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->base.needs_inv = true;
/* Now copy the keys into the context */
- memcpy(ctx->key, keys.enckey, keys.enckeylen);
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
+ ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
memcpy(ctx->ipad, &istate.state, ctx->state_sz);
@@ -423,6 +495,17 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
CONTEXT_CONTROL_DIGEST_XCM |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* Chacha20-Poly1305 */
+ cdesc->control_data.control0 =
+ CONTEXT_CONTROL_KEY_EN |
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 |
+ (sreq->direction == SAFEXCEL_ENCRYPT ?
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT :
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) |
+ ctx->hash_alg |
+ CONTEXT_CONTROL_SIZE(ctrl_size);
+ return 0;
} else {
ctrl_size += ctx->state_sz / sizeof(u32) * 2;
cdesc->control_data.control0 =
@@ -431,17 +514,21 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
}
- if (sreq->direction == SAFEXCEL_ENCRYPT)
- cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
- CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ if (sreq->direction == SAFEXCEL_ENCRYPT &&
+ (ctx->xcm == EIP197_XCM_MODE_CCM ||
+ ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC))
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT;
+ else if (sreq->direction == SAFEXCEL_ENCRYPT)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ else if (ctx->xcm == EIP197_XCM_MODE_CCM)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN;
else
cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
- CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
} else {
if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 =
@@ -480,6 +567,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->key_len >> ctx->xts);
return -EINVAL;
}
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20;
+ } else if (ctx->alg == SAFEXCEL_SM4) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_SM4;
}
return 0;
@@ -1295,7 +1388,7 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1451,13 +1544,11 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma) {
+ if (ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
- }
memcpy(ctx->key, key, len);
-
ctx->key_len = len;
return 0;
@@ -1777,6 +1868,312 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
},
};
+static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha1_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha1_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1972,7 +2369,7 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1991,8 +2388,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i + keylen / sizeof(u32)] !=
- cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) !=
+ aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2082,7 +2479,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2109,7 +2506,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
+ if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2199,7 +2596,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2303,3 +2700,938 @@ struct safexcel_alg_template safexcel_alg_ccm = {
},
},
};
+
+static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
+ const u8 *key)
+{
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, CHACHA_KEY_SIZE);
+ ctx->key_len = CHACHA_KEY_SIZE;
+}
+
+static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_chacha20 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_chacha20_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "safexcel-chacha20",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_chacha20_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm);
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP &&
+ len > EIP197_AEAD_IPSEC_NONCE_SIZE) {
+ /* ESP variant has nonce appended to key */
+ len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
+ ctx->nonce = *(u32 *)(key + len);
+ }
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+ u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1];
+ int ret = 0;
+
+ /*
+ * Instead of wasting time detecting umpteen silly corner cases,
+ * just dump all "small" requests to the fallback implementation.
+ * HW would not be faster on such small requests anyway.
+ */
+ if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
+ req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
+ req->cryptlen > POLY1305_DIGEST_SIZE)) {
+ return safexcel_queue_req(&req->base, creq, dir);
+ }
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ memcpy(key, ctx->key, CHACHA_KEY_SIZE);
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* ESP variant has nonce appended to the key */
+ key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce;
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE +
+ EIP197_AEAD_IPSEC_NONCE_SIZE);
+ } else {
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE);
+ }
+ if (ret) {
+ crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) &
+ CRYPTO_TFM_REQ_MASK);
+ return ret;
+ }
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static int safexcel_aead_chachapoly_encrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_chachapoly_decrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *aead = __crypto_aead_cast(tfm);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fback)));
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
+ CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
+ ctx->state_sz = 0; /* Precomputed by HW */
+ return 0;
+}
+
+static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->fback);
+ safexcel_aead_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapoly_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_chachapoly_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305-esp",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapolyesp_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (len != SM4_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, SM4_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, SM4_KEY_SIZE);
+ ctx->key_len = SM4_KEY_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm4_blk_encrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_sm4_blk_decrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "safexcel-ecb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ecb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "safexcel-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cbc_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ofb(sm4)",
+ .cra_driver_name = "safexcel-ofb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cfb(sm4)",
+ .cra_driver_name = "safexcel-cfb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+ /* exclude the nonce here */
+ len -= CTR_RFC3686_NONCE_SIZE;
+
+ return safexcel_skcipher_sm4_setkey(ctfm, key, len);
+}
+
+static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4ctr_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ /* Add nonce size */
+ .min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .base = {
+ .cra_name = "rfc3686(ctr(sm4))",
+ .cra_driver_name = "safexcel-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ctr_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->state_sz = SHA1_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_sm4_blk_encrypt,
+ .decrypt = safexcel_aead_sm4_blk_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?:
+ safexcel_aead_setkey(ctfm, key, len);
+}
+
+static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setauthsize(ctx->fback, authsize);
+}
+
+static int safexcel_aead_fallback_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen || req->assoclen) /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen)
+ /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ ctx->state_sz = SM3_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_fallback_setkey,
+ .setauthsize = safexcel_aead_fallback_setauthsize,
+ .encrypt = safexcel_aead_sm4cbc_sm3_encrypt,
+ .decrypt = safexcel_aead_sm4cbc_sm3_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sm3_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sha1_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sm3_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sm3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+
+ len -= CTR_RFC3686_NONCE_SIZE;
+ return safexcel_aead_gcm_setkey(ctfm, key, len);
+}
+
+static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int safexcel_rfc4106_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_encrypt(req);
+}
+
+static int safexcel_rfc4106_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_decrypt(req);
+}
+
+static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4106_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4106-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4106_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
+
+static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != GHASH_DIGEST_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4543_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4543_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4543-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4543_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
+
+static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* First byte of the nonce = L = always 3 for RFC4309 (4 byte ctr) */
+ *(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1;
+ /* last 3 bytes of key are the nonce! */
+ memcpy((u8 *)&ctx->nonce + 1, key + len -
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE,
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE);
+
+ len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE;
+ return safexcel_aead_ccm_setkey(ctfm, key, len);
+}
+
+static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ /* Borrowed from crypto/ccm.c */
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_ccm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
+ .alg.aead = {
+ .setkey = safexcel_rfc4309_ccm_setkey,
+ .setauthsize = safexcel_rfc4309_ccm_setauthsize,
+ .encrypt = safexcel_rfc4309_ccm_encrypt,
+ .decrypt = safexcel_rfc4309_ccm_decrypt,
+ .ivsize = EIP197_AEAD_IPSEC_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "safexcel-rfc4309-ccm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4309_ccm_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2effb6d21e8b..2134daef24f6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -5,9 +5,13 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
+#include <crypto/sha3.h>
+#include <crypto/skcipher.h>
+#include <crypto/sm3.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -19,9 +23,19 @@ struct safexcel_ahash_ctx {
struct safexcel_crypto_priv *priv;
u32 alg;
-
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 key_sz;
+ bool cbcmac;
+ bool do_fallback;
+ bool fb_init_done;
+ bool fb_do_setkey;
+
+ __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+ __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+
+ struct crypto_cipher *kaes;
+ struct crypto_ahash *fback;
+ struct crypto_shash *shpre;
+ struct shash_desc *shdesc;
};
struct safexcel_ahash_req {
@@ -31,6 +45,8 @@ struct safexcel_ahash_req {
bool needs_inv;
bool hmac_zlen;
bool len_is_le;
+ bool not_first;
+ bool xcbcmac;
int nents;
dma_addr_t result_dma;
@@ -39,7 +55,9 @@ struct safexcel_ahash_req {
u8 state_sz; /* expected state size, only set once */
u8 block_sz; /* block size, only set once */
- u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u8 digest_sz; /* output digest size, only set once */
+ __le32 state[SHA3_512_BLOCK_SIZE /
+ sizeof(__le32)] __aligned(sizeof(__le32));
u64 len;
u64 processed;
@@ -57,21 +75,31 @@ static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
- u32 input_length, u32 result_length)
+ u32 input_length, u32 result_length,
+ bool cbcmac)
{
struct safexcel_token *token =
(struct safexcel_token *)cdesc->control_data.token;
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = input_length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[1].packet_length = result_length;
- token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ input_length &= 15;
+ if (unlikely(cbcmac && input_length)) {
+ token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[1].packet_length = 16 - input_length;
+ token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ } else {
+ token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ }
+
+ token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[2].packet_length = result_length;
+ token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
@@ -82,29 +110,48 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_crypto_priv *priv = ctx->priv;
u64 count = 0;
- cdesc->control_data.control0 |= ctx->alg;
+ cdesc->control_data.control0 = ctx->alg;
/*
* Copy the input digest if needed, and setup the context
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (!req->processed) {
- /* First - and possibly only - block of basic hash only */
- if (req->finish) {
+ if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
+ if (req->xcbcmac)
+ memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
+ else
+ memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
+
+ if (!req->finish && req->xcbcmac)
cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_NO_FINISH_HASH |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ else
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ return;
+ } else if (!req->processed) {
+ /* First - and possibly only - block of basic hash only */
+ if (req->finish)
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- } else {
- cdesc->control_data.control0 |=
+ else
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
CONTEXT_CONTROL_NO_FINISH_HASH |
/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- }
return;
}
@@ -204,7 +251,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
}
if (sreq->result_dma) {
- dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+ dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
DMA_FROM_DEVICE);
sreq->result_dma = 0;
}
@@ -223,7 +270,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
- memcpy(sreq->state, ctx->opad, sreq->state_sz);
+ memcpy(sreq->state, ctx->opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
@@ -238,8 +285,14 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
+ if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
+ /* Undo final XOR with 0xffffffff ...*/
+ *(__le32 *)areq->result = ~sreq->state[0];
+ } else {
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
+ }
}
cache_len = safexcel_queued_len(sreq);
@@ -261,10 +314,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra = 0, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
+ u64 queued, len;
- queued = len = safexcel_queued_len(req);
+ queued = safexcel_queued_len(req);
if (queued <= HASH_CACHE_SIZE)
cache_len = queued;
else
@@ -287,15 +340,52 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
areq->nbytes - extra);
queued -= extra;
- len -= extra;
if (!queued) {
*commands = 0;
*results = 0;
return 0;
}
+
+ extra = 0;
}
+ if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
+ if (unlikely(cache_len < AES_BLOCK_SIZE)) {
+ /*
+ * Cache contains less than 1 full block, complete.
+ */
+ extra = AES_BLOCK_SIZE - cache_len;
+ if (queued > cache_len) {
+ /* More data follows: borrow bytes */
+ u64 tmp = queued - cache_len;
+
+ skip = min_t(u64, tmp, extra);
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ req->cache + cache_len,
+ skip, 0);
+ }
+ extra -= skip;
+ memset(req->cache + cache_len + skip, 0, extra);
+ if (!ctx->cbcmac && extra) {
+ // 10- padding for XCBCMAC & CMAC
+ req->cache[cache_len + skip] = 0x80;
+ // HW will use K2 instead of K3 - compensate!
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)req->cache)[i] ^=
+ cpu_to_be32(le32_to_cpu(
+ ctx->ipad[i] ^ ctx->ipad[i + 4]));
+ }
+ cache_len = AES_BLOCK_SIZE;
+ queued = queued + extra;
+ }
+
+ /* XCBC continue: XOR previous result into 1st word */
+ crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
+ }
+
+ len = queued;
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
@@ -306,8 +396,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
- req->cache_dma, cache_len, len,
- ctx->base.ctxr_dma);
+ req->cache_dma, cache_len,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -319,10 +409,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
goto send_command;
}
- /* Skip descriptor generation for zero-length requests */
- if (!areq->nbytes)
- goto send_command;
-
/* Now handle the current ahash request buffer(s) */
req->nents = dma_map_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src,
@@ -336,26 +422,34 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
+ if (unlikely(sglen <= skip)) {
+ skip -= sglen;
+ continue;
+ }
+
/* Do not overflow the request */
- if (queued < sglen)
+ if ((queued + skip) <= sglen)
sglen = queued;
+ else
+ sglen -= skip;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
- sg_dma_address(sg),
- sglen, len, ctx->base.ctxr_dma);
+ sg_dma_address(sg) + skip, sglen,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
}
- n_cdesc++;
- if (n_cdesc == 1)
+ if (!n_cdesc)
first_cdesc = cdesc;
+ n_cdesc++;
queued -= sglen;
if (!queued)
break;
+ skip = 0;
}
send_command:
@@ -363,9 +457,9 @@ send_command:
safexcel_context_control(ctx, req, first_cdesc);
/* Add the token */
- safexcel_hash_token(first_cdesc, len, req->state_sz);
+ safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);
- req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+ req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
@@ -374,7 +468,7 @@ send_command:
/* Add a result descriptor */
rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
- req->state_sz);
+ req->digest_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
goto unmap_result;
@@ -382,17 +476,20 @@ send_command:
safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
+ req->processed += len - extra;
*commands = n_cdesc;
*results = 1;
return 0;
unmap_result:
- dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+ dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
DMA_FROM_DEVICE);
unmap_sg:
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ if (req->nents) {
+ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ req->nents = 0;
+ }
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
@@ -590,16 +687,12 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (ctx->base.ctxr) {
if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
- req->processed &&
- (/* invalidate for basic hash continuation finish */
- (req->finish &&
- (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
+ /* invalidate for *any* non-XCBC continuation */
+ ((req->not_first && !req->xcbcmac) ||
/* invalidate if (i)digest changed */
memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
- /* invalidate for HMAC continuation finish */
- (req->finish && (req->processed != req->block_sz)) ||
/* invalidate for HMAC finish with odigest changed */
- (req->finish &&
+ (req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
ctx->opad, req->state_sz))))
/*
@@ -622,6 +715,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (!ctx->base.ctxr)
return -ENOMEM;
}
+ req->not_first = true;
ring = ctx->base.ring;
@@ -691,8 +785,34 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
memcpy(areq->result, sha512_zero_message_hash,
SHA512_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
+ memcpy(areq->result,
+ EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
+ }
return 0;
+ } else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
+ req->len == sizeof(u32) && !areq->nbytes)) {
+ /* Zero length CRC32 */
+ memcpy(areq->result, ctx->ipad, sizeof(u32));
+ return 0;
+ } else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length CBC MAC */
+ memset(areq->result, 0, AES_BLOCK_SIZE);
+ return 0;
+ } else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length (X)CBC/CMAC */
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)areq->result)[i] =
+ cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4])); // K3
+ areq->result[0] ^= 0x80; // 10- padding
+ crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
!areq->nbytes)) {
@@ -792,6 +912,7 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
+ ctx->fb_do_setkey = false;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -808,6 +929,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
return 0;
@@ -889,6 +1011,7 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
req->hmac = true;
@@ -1125,6 +1248,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1180,6 +1304,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1248,6 +1373,7 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1318,6 +1444,7 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1375,6 +1502,7 @@ static int safexcel_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1430,6 +1558,7 @@ static int safexcel_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1498,6 +1627,7 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1568,6 +1698,7 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1625,6 +1756,7 @@ static int safexcel_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
return 0;
@@ -1686,6 +1818,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
req->len_is_le = true; /* MD5 is little endian! ... */
req->hmac = true;
@@ -1740,3 +1873,1247 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
},
};
+
+static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret = safexcel_ahash_cra_init(tfm);
+
+ /* Default 'key' is all zeroes */
+ memset(ctx->ipad, 0, sizeof(u32));
+ return ret;
+}
+
+static int safexcel_crc32_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded key */
+ req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = sizeof(u32);
+ req->processed = sizeof(u32);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = sizeof(u32);
+ req->digest_sz = sizeof(u32);
+ req->block_sz = sizeof(u32);
+
+ return 0;
+}
+
+static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+ if (keylen != sizeof(u32)) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->ipad, key, sizeof(u32));
+ return 0;
+}
+
+static int safexcel_crc32_digest(struct ahash_request *areq)
+{
+ return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_crc32 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_crc32_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_crc32_digest,
+ .setkey = safexcel_crc32_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = sizeof(u32),
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "safexcel-crc32",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_crc32_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
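For reference, the generic "crc32" hash that this offload has to match is a plain little-endian CRC-32: the optional 4-byte key is the initial seed and there is no final inversion. The init routine above loads the engine state with the complement of that seed and primes len/processed, presumably because the engine works on the conventional pre-inverted CRC register and the completion path (not visible in this hunk) undoes the inversion. A software reference value, as an assumed cross-check rather than anything in the patch:

#include <linux/crc32.h>

/* Sketch: the value a full crc32 digest request must produce. */
static u32 crc32_reference(u32 seed, const void *data, size_t len)
{
	return crc32_le(seed, data, len);
}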
+
+static int safexcel_cbcmac_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded keys */
+ memcpy(req->state, ctx->ipad, ctx->key_sz);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = AES_BLOCK_SIZE;
+ req->processed = AES_BLOCK_SIZE;
+
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = ctx->key_sz;
+ req->digest_sz = AES_BLOCK_SIZE;
+ req->block_sz = AES_BLOCK_SIZE;
+ req->xcbcmac = true;
+
+ return 0;
+}
+
+static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = true;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+static int safexcel_cbcmac_digest(struct ahash_request *areq)
+{
+ return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_cbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cbcmac(aes)",
+ .cra_driver_name = "safexcel-cbcmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ /* precompute the XCBC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] =
+ cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
+
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
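Spelling out the derivation above: per RFC 3566 the XCBC subkeys are K1 = E_K(0x01..01), K2 = E_K(0x02..02) and K3 = E_K(0x03..03), so key_tmp ends up holding K2 | K3 | K1 in that order. ctx->ipad[0..7] therefore carries K2 and K3, byte-swapped for the engine, while ctx->kaes is re-keyed with K1 for the software zero-length path in safexcel_ahash_final(). A standalone sketch of the same derivation, under an assumed helper name and not part of the patch:

#include <crypto/aes.h>
#include <linux/crypto.h>

/* Sketch: RFC 3566 XCBC subkey derivation with the single-block cipher API. */
static int xcbc_derive_subkeys(struct crypto_cipher *aes, const u8 *key,
			       unsigned int keylen, u8 k1[AES_BLOCK_SIZE],
			       u8 k2[AES_BLOCK_SIZE], u8 k3[AES_BLOCK_SIZE])
{
	static const u8 c1[AES_BLOCK_SIZE] = { [0 ... AES_BLOCK_SIZE - 1] = 0x01 };
	static const u8 c2[AES_BLOCK_SIZE] = { [0 ... AES_BLOCK_SIZE - 1] = 0x02 };
	static const u8 c3[AES_BLOCK_SIZE] = { [0 ... AES_BLOCK_SIZE - 1] = 0x03 };
	int ret = crypto_cipher_setkey(aes, key, keylen);

	if (ret)
		return ret;

	crypto_cipher_encrypt_one(aes, k1, c1);	/* K1: CBC chaining key */
	crypto_cipher_encrypt_one(aes, k2, c2);	/* K2: complete final block */
	crypto_cipher_encrypt_one(aes, k3, c3);	/* K3: padded final block */
	return 0;
}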
+
+static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+ ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
+ return PTR_ERR_OR_ZERO(ctx->kaes);
+}
+
+static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(ctx->kaes);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_xcbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_xcbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "safexcel-xcbc-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ __be64 consts[4];
+ u64 _const[2];
+ u8 msb_mask, gfmask;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] =
+ cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
+
+ /* precompute the CMAC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ /* code below borrowed from crypto/cmac.c */
+ /* encrypt the zero block */
+ memset(consts, 0, AES_BLOCK_SIZE);
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+
+ gfmask = 0x87;
+ _const[0] = be64_to_cpu(consts[1]);
+ _const[1] = be64_to_cpu(consts[0]);
+
+ /* gf(2^128) multiply zero-ciphertext with u and u^2 */
+ for (i = 0; i < 4; i += 2) {
+ msb_mask = ((s64)_const[1] >> 63) & gfmask;
+ _const[1] = (_const[1] << 1) | (_const[0] >> 63);
+ _const[0] = (_const[0] << 1) ^ msb_mask;
+
+ consts[i + 0] = cpu_to_be64(_const[1]);
+ consts[i + 1] = cpu_to_be64(_const[0]);
+ }
+ /* end of code borrowed from crypto/cmac.c */
+
+ for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
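The block marked as borrowed from crypto/cmac.c is the NIST SP 800-38B subkey derivation: L = E_K(0^128), then K1 = L*x and K2 = L*x^2 in GF(2^128) with reduction polynomial x^128 + x^7 + x^2 + x + 1, which is where the 0x87 constant comes from. Each doubling is a 128-bit left shift that folds 0x87 into the low byte whenever the shifted-out bit was set. An equivalent byte-wise helper, purely illustrative and not part of the patch (apply it once to L for K1, twice for K2):

#include <linux/types.h>

/* Sketch: one GF(2^128) doubling of a big-endian 16-byte block. */
static void gf128_double_be(u8 b[16])
{
	u8 carry = (b[0] & 0x80) ? 0x87 : 0x00;
	int i;

	for (i = 0; i < 15; i++)
		b[i] = (b[i] << 1) | (b[i + 1] >> 7);
	b[15] = (b[15] << 1) ^ carry;
}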
+
+struct safexcel_alg_template safexcel_alg_cmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "safexcel-cmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sm3_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "safexcel-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
+ SM3_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from ipad precompute */
+ memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
+ /* Already processed the key^ipad part now! */
+ req->len = SM3_BLOCK_SIZE;
+ req->processed = SM3_BLOCK_SIZE;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+ req->hmac = true;
+
+ return 0;
+}
+
+static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sm3_digest,
+ .setkey = safexcel_hmac_sm3_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "safexcel-hmac-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_224_DIGEST_SIZE;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_fbcheck(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ ahash_request_set_tfm(subreq, ctx->fback);
+ ahash_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(subreq, req->src, req->result,
+ req->nbytes);
+ if (!ctx->fb_init_done) {
+ if (ctx->fb_do_setkey) {
+				/* Set the HMAC key for the software fallback */
+ u8 key[SHA3_224_BLOCK_SIZE];
+
+ memcpy(key, ctx->ipad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ memcpy(key +
+ crypto_ahash_blocksize(ctx->fback) / 2,
+ ctx->opad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ ret = crypto_ahash_setkey(ctx->fback, key,
+ crypto_ahash_blocksize(ctx->fback));
+ memzero_explicit(key,
+ crypto_ahash_blocksize(ctx->fback));
+ ctx->fb_do_setkey = false;
+ }
+ ret = ret ?: crypto_ahash_init(subreq);
+ ctx->fb_init_done = true;
+ }
+ }
+ return ret;
+}
+
+static int safexcel_sha3_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
+}
+
+static int safexcel_sha3_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
+}
+
+static int safexcel_sha3_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback |= !req->nbytes;
+ if (ctx->do_fallback)
+		/* Update, export/import or zero length: cannot use the HW */
+ return safexcel_sha3_fbcheck(req) ?:
+ crypto_ahash_finup(subreq);
+ else
+ return safexcel_ahash_finup(req);
+}
+
+static int safexcel_sha3_digest_fallback(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ ctx->fb_init_done = false;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
+}
+
+static int safexcel_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
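This digest helper shows the whole SHA3 strategy of the driver: the engine is only used for a one-shot hash of a complete, non-empty message (init plus finup), while update(), export()/import() and zero-length input are routed to the software ahash allocated in safexcel_sha3_cra_init() through safexcel_sha3_fbcheck(). Callers are unaffected either way; a typical one-shot request, which would stay on the hardware, is sketched below with assumed helper and buffer names, generic API usage only:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>

/* Sketch: one-shot SHA3-256 digest; buf must be DMA-able (e.g. kmalloc'd). */
static int sha3_oneshot(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}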
+
+static int safexcel_sha3_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
+}
+
+static int safexcel_sha3_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
+}
+
+static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ /* Update statesize from fallback algorithm! */
+ crypto_hash_alg_common(ahash)->statesize =
+ crypto_ahash_statesize(ctx->fback);
+ crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ctx->fback)));
+ return 0;
+}
+
+static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_224_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "safexcel-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_256_DIGEST_SIZE;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_256_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "safexcel-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_384_DIGEST_SIZE;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_384_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "safexcel-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_512_DIGEST_SIZE;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_512_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "safexcel-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_sha3_cra_init(tfm);
+ if (ret)
+ return ret;
+
+ /* Allocate precalc basic digest implementation */
+ ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shpre))
+ return PTR_ERR(ctx->shpre);
+
+ ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
+ crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
+ if (!ctx->shdesc) {
+ crypto_free_shash(ctx->shpre);
+ return -ENOMEM;
+ }
+ ctx->shdesc->tfm = ctx->shpre;
+ return 0;
+}
+
+static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ crypto_free_shash(ctx->shpre);
+ kfree(ctx->shdesc);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret = 0;
+
+ if (keylen > crypto_ahash_blocksize(tfm)) {
+ /*
+		 * If the key is larger than the blocksize, then hash it
+		 * first using the basic digest (shpre) fallback
+		 */
+ ret = crypto_shash_digest(ctx->shdesc, key, keylen,
+ (u8 *)ctx->ipad);
+ keylen = crypto_shash_digestsize(ctx->shpre);
+
+ /*
+ * If the digest is larger than half the blocksize, we need to
+ * move the rest to opad due to the way our HMAC infra works.
+ */
+ if (keylen > crypto_ahash_blocksize(tfm) / 2)
+			/* Buffers overlap, need to use memmove instead of memcpy! */
+ memmove(ctx->opad,
+ (u8 *)ctx->ipad +
+ crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ /*
+ * Copy the key to our ipad & opad buffers
+ * Note that ipad and opad each contain one half of the key,
+ * to match the existing HMAC driver infrastructure.
+ */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memcpy(ctx->ipad, key, keylen);
+ } else {
+ memcpy(ctx->ipad, key,
+ crypto_ahash_blocksize(tfm) / 2);
+ memcpy(ctx->opad,
+ key + crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ }
+ }
+
+ /* Pad key with zeroes */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memset((u8 *)ctx->ipad + keylen, 0,
+ crypto_ahash_blocksize(tfm) / 2 - keylen);
+ memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ memset((u8 *)ctx->opad + keylen -
+ crypto_ahash_blocksize(tfm) / 2, 0,
+ crypto_ahash_blocksize(tfm) - keylen);
+ }
+
+ /* If doing fallback, still need to set the new key! */
+ ctx->fb_do_setkey = true;
+ return ret;
+}
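The repeated division by two follows from how this driver stores HMAC precomputes: ipad holds the first half of the key block and opad the second half, zero-padded to the full block, and a key longer than the block is first reduced to digest size with the shpre shash allocated in safexcel_hmac_sha3_cra_init(). The SHA3 block ("rate") sizes behind those halves, expressed as a small assumed helper rather than anything in the patch:

#include <linux/types.h>

/* Sketch: SHA3 rate and per-half HMAC key storage, in bytes. */
static inline unsigned int sha3_rate(unsigned int digest_size)
{
	return 200 - 2 * digest_size;	/* 144, 136, 104, 72 for SHA3-224..512 */
}

static inline unsigned int sha3_hmac_key_half(unsigned int digest_size)
{
	return sha3_rate(digest_size) / 2;	/* 72, 68, 52, 36 */
}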
+
+static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_224_BLOCK_SIZE;
+ req->processed = SHA3_224_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_224_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_224_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_224_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-224)",
+ .cra_driver_name = "safexcel-hmac-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_224_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_256_BLOCK_SIZE;
+ req->processed = SHA3_256_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_256_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_256_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_256_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-256)",
+ .cra_driver_name = "safexcel-hmac-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_256_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_384_BLOCK_SIZE;
+ req->processed = SHA3_384_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_384_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_384_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_384_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-384)",
+ .cra_driver_name = "safexcel-hmac-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_384_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_512_BLOCK_SIZE;
+ req->processed = SHA3_512_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_512_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_512_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_512_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-512)",
+ .cra_driver_name = "safexcel-hmac-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_512_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 0f269b89cfd4..9237ba745c2f 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,7 +14,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
- cdr->offset = sizeof(u32) * priv->config.cd_offset;
+ cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
&cdr->base_dma, GFP_KERNEL);
@@ -24,7 +24,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
- rdr->offset = sizeof(u32) * priv->config.rd_offset;
+ rdr->offset = priv->config.rd_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
@@ -180,6 +180,7 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->first_seg = first;
rdesc->last_seg = last;
+ rdesc->result_size = EIP197_RD64_RESULT_SIZE;
rdesc->particle_size = len;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9181523ba760..391e3b4df364 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -23,6 +23,7 @@
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -137,7 +138,7 @@ struct crypt_ctl {
/* Used by Host: 4*4 bytes*/
unsigned ctl_flags;
union {
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
@@ -186,7 +187,7 @@ struct ixp_ctx {
};
struct ixp_alg {
- struct crypto_alg crypto;
+ struct skcipher_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
@@ -239,17 +240,17 @@ static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}
static int setup_crypt_desc(void)
@@ -378,8 +379,8 @@ static void one_packet(dma_addr_t phys)
break;
}
case CTL_FLAG_PERFORM_ABLK: {
- struct ablkcipher_request *req = crypt->data.ablk_req;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = crypt->data.ablk_req;
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
if (req_ctx->dst) {
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
@@ -571,10 +572,10 @@ static int init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static int init_tfm_ablk(struct crypto_tfm *tfm)
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
- return init_tfm(tfm);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
+ return init_tfm(crypto_skcipher_tfm(tfm));
}
static int init_tfm_aead(struct crypto_aead *tfm)
@@ -590,6 +591,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
free_sa_dir(&ctx->decrypt);
}
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+ exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
static void exit_tfm_aead(struct crypto_aead *tfm)
{
exit_tfm(crypto_aead_tfm(tfm));
@@ -809,10 +815,10 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
return buf;
}
-static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int ret;
@@ -845,17 +851,17 @@ out:
return ret;
}
-static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
ablk_setkey(tfm, key, key_len);
}
-static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
/* the nonce is stored in bytes at end of key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
@@ -868,16 +874,16 @@ static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ablk_setkey(tfm, key, key_len);
}
-static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+static int ablk_perform(struct skcipher_request *req, int encrypt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
@@ -902,8 +908,8 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
- BUG_ON(ivsize && !req->info);
- memcpy(crypt->iv, req->info, ivsize);
+ BUG_ON(ivsize && !req->iv);
+ memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
@@ -941,22 +947,22 @@ free_buf_dest:
return -ENOMEM;
}
-static int ablk_encrypt(struct ablkcipher_request *req)
+static int ablk_encrypt(struct skcipher_request *req)
{
return ablk_perform(req, 1);
}
-static int ablk_decrypt(struct ablkcipher_request *req)
+static int ablk_decrypt(struct skcipher_request *req)
{
return ablk_perform(req, 0);
}
-static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
- u8 *info = req->info;
+ u8 *info = req->iv;
int ret;
/* set up counter block */
@@ -967,9 +973,9 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- req->info = iv;
+ req->iv = iv;
ret = ablk_perform(req, 1);
- req->info = info;
+ req->iv = info;
return ret;
}
@@ -1212,107 +1218,91 @@ static int aead_decrypt(struct aead_request *req)
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
- .cra_name = "cbc(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
- .cra_name = "ecb(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
- .cra_name = "ctr(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_rfc3686_setkey,
- .encrypt = ablk_rfc3686_crypt,
- .decrypt = ablk_rfc3686_crypt }
- }
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_rfc3686_setkey,
+ .encrypt = ablk_rfc3686_crypt,
+ .decrypt = ablk_rfc3686_crypt,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
@@ -1421,10 +1411,10 @@ static int __init ixp_module_init(void)
return err;
}
for (i=0; i< num; i++) {
- struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
+ struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
- if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s"IXP_POSTFIX, cra->cra_name) >=
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
{
continue;
@@ -1434,26 +1424,24 @@ static int __init ixp_module_init(void)
}
/* block ciphers */
- cra->cra_type = &crypto_ablkcipher_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- if (!cra->cra_ablkcipher.setkey)
- cra->cra_ablkcipher.setkey = ablk_setkey;
- if (!cra->cra_ablkcipher.encrypt)
- cra->cra_ablkcipher.encrypt = ablk_encrypt;
- if (!cra->cra_ablkcipher.decrypt)
- cra->cra_ablkcipher.decrypt = ablk_decrypt;
- cra->cra_init = init_tfm_ablk;
-
- cra->cra_ctxsize = sizeof(struct ixp_ctx);
- cra->cra_module = THIS_MODULE;
- cra->cra_alignmask = 3;
- cra->cra_priority = 300;
- cra->cra_exit = exit_tfm;
- if (crypto_register_alg(cra))
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ if (!cra->setkey)
+ cra->setkey = ablk_setkey;
+ if (!cra->encrypt)
+ cra->encrypt = ablk_encrypt;
+ if (!cra->decrypt)
+ cra->decrypt = ablk_decrypt;
+ cra->init = init_tfm_ablk;
+ cra->exit = exit_tfm_ablk;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+ if (crypto_register_skcipher(cra))
printk(KERN_ERR "Failed to register '%s'\n",
- cra->cra_name);
+ cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
}
@@ -1504,7 +1492,7 @@ static void __exit ixp_module_exit(void)
for (i=0; i< num; i++) {
if (ixp4xx_algos[i].registered)
- crypto_unregister_alg(&ixp4xx_algos[i].crypto);
+ crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
platform_device_unregister(pdev);
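The ixp4xx changes are a mechanical conversion from the removed ablkcipher interface to skcipher: struct skcipher_alg with the common fields under .base, per-request context size set through crypto_skcipher_set_reqsize(), registration through crypto_register_skcipher()/crypto_unregister_skcipher(), and the request fields renamed (nbytes becomes cryptlen, info becomes iv). Callers reach the converted algorithms through the standard skcipher API; the sketch below is generic usage under assumed names, not code from this driver:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* Sketch: in-place CBC-AES encryption; len must be a multiple of 16 and
 * buf must be DMA-able (e.g. kmalloc'd), not on the stack.
 */
static int cbc_aes_encrypt_buf(const u8 *key, unsigned int keylen,
			       u8 iv[16], void *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}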
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index d63a6ee905c9..f1ed3b85c0d2 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -232,13 +232,13 @@ struct mv_cesa_sec_accel_desc {
};
/**
- * struct mv_cesa_blkcipher_op_ctx - cipher operation context
+ * struct mv_cesa_skcipher_op_ctx - cipher operation context
* @key: cipher key
* @iv: cipher IV
*
* Context associated to a cipher operation.
*/
-struct mv_cesa_blkcipher_op_ctx {
+struct mv_cesa_skcipher_op_ctx {
u32 key[8];
u32 iv[4];
};
@@ -265,7 +265,7 @@ struct mv_cesa_hash_op_ctx {
struct mv_cesa_op_ctx {
struct mv_cesa_sec_accel_desc desc;
union {
- struct mv_cesa_blkcipher_op_ctx blkcipher;
+ struct mv_cesa_skcipher_op_ctx skcipher;
struct mv_cesa_hash_op_ctx hash;
} ctx;
};
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 84ceddfee76b..d8e8c857770c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -209,7 +209,7 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
} else {
memcpy_fromio(skreq->iv,
@@ -470,7 +470,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -523,7 +523,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
@@ -575,7 +575,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -628,7 +628,7 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
@@ -694,7 +694,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
key = ctx->aes.key_enc;
for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
- tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);
+ tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);
if (ctx->aes.key_length == 24)
cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
@@ -755,7 +755,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90c9644fb8a8..90880a81c534 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -11,6 +11,7 @@
#include <crypto/aes.h>
#include <crypto/gcm.h>
+#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -414,7 +415,7 @@ exit:
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
size_t len)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_info *info = &ctx->info;
u32 cnt = 0;
@@ -450,7 +451,7 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
return;
}
- mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+ mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
AES_BLOCK_SIZE);
ctr:
info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
@@ -552,13 +553,13 @@ static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
aes->resume = mtk_aes_transfer_complete;
- return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
+ return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
static inline struct mtk_aes_ctr_ctx *
@@ -571,7 +572,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct scatterlist *src, *dst;
u32 start, end, ctr, blocks;
size_t datalen;
@@ -579,11 +580,11 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Check for transfer completion. */
cctx->offset += aes->total;
- if (cctx->offset >= req->nbytes)
+ if (cctx->offset >= req->cryptlen)
return mtk_aes_transfer_complete(cryp, aes);
/* Compute data length. */
- datalen = req->nbytes - cctx->offset;
+ datalen = req->cryptlen - cctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(cctx->iv[3]);
@@ -591,7 +592,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
start = ctr;
end = start + blocks - 1;
if (end < start) {
- ctr |= 0xffffffff;
+ ctr = 0xffffffff;
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
@@ -620,12 +621,12 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
- memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
cctx->offset = 0;
aes->total = 0;
aes->resume = mtk_aes_ctr_transfer;
@@ -634,10 +635,10 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
}
/* Check and set the AES key to transform state buffer */
-static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
+static int mtk_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
@@ -651,7 +652,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -661,10 +662,10 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
return 0;
}
-static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
+static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct mtk_aes_reqctx *rctx;
struct mtk_cryp *cryp;
@@ -672,185 +673,168 @@ static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
if (!cryp)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}
-static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ECB);
}
-static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}
-static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CBC);
}
-static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}
-static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
-static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}
-static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_OFB);
}
-static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}
-static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int mtk_aes_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_start;
return 0;
}
-static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cbc_encrypt,
- .decrypt = mtk_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cbc_encrypt,
+ .decrypt = mtk_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ecb_encrypt,
- .decrypt = mtk_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ecb_encrypt,
+ .decrypt = mtk_aes_ecb_decrypt,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_ctr_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ctr_encrypt,
- .decrypt = mtk_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ctr_encrypt,
+ .decrypt = mtk_aes_ctr_decrypt,
+ .init = mtk_aes_ctr_init_tfm,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ofb_encrypt,
- .decrypt = mtk_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ofb_encrypt,
+ .decrypt = mtk_aes_ofb_decrypt,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cfb_encrypt,
- .decrypt = mtk_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cfb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cfb_encrypt,
+ .decrypt = mtk_aes_cfb_decrypt,
},
};
@@ -1259,7 +1243,7 @@ static void mtk_aes_unregister_algs(void)
crypto_unregister_aead(&aes_gcm_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int mtk_aes_register_algs(void)
@@ -1267,7 +1251,7 @@ static int mtk_aes_register_algs(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1280,7 +1264,7 @@ static int mtk_aes_register_algs(void)
err_aes_algs:
for (; i--; )
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
return err;
}
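
Editor's note — the mediatek hunks above are dominated by the mechanical request-side mapping: ablkcipher_request_cast() becomes skcipher_request_cast(), ablkcipher_request_ctx() becomes skcipher_request_ctx(), req->nbytes becomes req->cryptlen, and req->info becomes req->iv. A reduced, hedged sketch of a dequeue handler in the new idiom, with invented foo_* names and the actual hardware programming elided:

#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-request state; not taken from any driver in this diff. */
struct foo_reqctx {
	u64 mode;
};

/*
 * Dequeue handler in the skcipher idiom: the driver-level
 * crypto_async_request is cast back to a skcipher_request, and the
 * length and IV are read from cryptlen/iv rather than nbytes/info.
 */
static int foo_handle_one(struct crypto_async_request *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct foo_reqctx *rctx = skcipher_request_ctx(req);

	pr_debug("mode %llu: %u bytes, iv %p\n",
		 (unsigned long long)rctx->mode, req->cryptlen, req->iv);

	/* A real driver would program req->src/req->dst and the IV here. */
	return req->cryptlen ? -EINPROGRESS : 0;
}
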
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bf8d2197bc11..f438b425c655 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -211,11 +211,11 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
* Encryption (AES128)
*/
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
- struct ablkcipher_request *req, int init)
+ struct skcipher_request *req, int init)
{
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -274,9 +274,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
struct dcp *sdcp = global_sdcp;
- struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+ struct skcipher_request *req = skcipher_request_cast(arq);
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
@@ -305,7 +305,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
if (!rctx->ecb) {
/* Copy the CBC IV just past the key. */
- memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+ memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
/* CBC needs the INIT set. */
init = 1;
} else {
@@ -316,10 +316,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
- limit_hit = tlen > req->nbytes;
+ limit_hit = tlen > req->cryptlen;
if (limit_hit)
- len = req->nbytes - (tlen - len);
+ len = req->cryptlen - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -375,10 +375,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
/* Copy the IV for CBC for chaining */
if (!rctx->ecb) {
if (rctx->enc)
- memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
else
- memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
}
@@ -422,17 +422,17 @@ static int dcp_chan_thread_aes(void *data)
return 0;
}
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (enc)
ret = crypto_skcipher_encrypt(subreq);
@@ -444,12 +444,12 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
return ret;
}
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
struct dcp *sdcp = global_sdcp;
struct crypto_async_request *arq = &req->base;
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
if (unlikely(actx->key_len != AES_KEYSIZE_128))
@@ -468,30 +468,30 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
return ret;
}
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 1);
}
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 1);
}
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 0);
}
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 0);
}
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
- struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
unsigned int ret;
/*
@@ -525,10 +525,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -536,13 +536,13 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
return PTR_ERR(blk);
actx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
return 0;
}
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(actx->fallback);
}
@@ -854,54 +854,44 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
}
/* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_ecb_encrypt,
- .decrypt = mxs_dcp_aes_ecb_decrypt
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_cbc_encrypt,
- .decrypt = mxs_dcp_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
},
};
@@ -1104,8 +1094,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
- ret = crypto_register_algs(dcp_aes_algs,
- ARRAY_SIZE(dcp_aes_algs));
+ ret = crypto_register_skciphers(dcp_aes_algs,
+ ARRAY_SIZE(dcp_aes_algs));
if (ret) {
/* Failed to register algorithm. */
dev_err(dev, "Failed to register AES crypto!\n");
@@ -1139,7 +1129,7 @@ err_unregister_sha1:
err_unregister_aes:
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
err_destroy_aes_thread:
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
@@ -1164,7 +1154,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
crypto_unregister_ahash(&dcp_sha1_alg);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
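
Editor's note — the mxs-dcp hunks also carry the fallback pattern used by several drivers in this pull: a crypto_sync_skcipher allocated at init time and driven through SYNC_SKCIPHER_REQUEST_ON_STACK() whenever the hardware cannot take a request. A stand-alone sketch of that pattern, with invented foo_* names:

#include <linux/err.h>
#include <crypto/skcipher.h>

struct foo_ctx {
	struct crypto_sync_skcipher *fallback;
};

/* Allocate the software fallback at tfm init time. */
static int foo_init_fallback(struct foo_ctx *ctx, const char *name)
{
	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

/* Forward a request the hardware cannot handle to the fallback cipher. */
static int foo_do_fallback(struct foo_ctx *ctx, struct skcipher_request *req,
			   bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	ret = enc ? crypto_skcipher_encrypt(subreq)
		  : crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);
	return ret;
}
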
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 40b81013213e..63bd565048f4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
@@ -657,7 +658,7 @@ static int n2_hmac_async_digest(struct ahash_request *req)
ctx->hash_key_len);
}
-struct n2_cipher_context {
+struct n2_skcipher_context {
int key_len;
int enc_type;
union {
@@ -683,7 +684,7 @@ struct n2_crypto_chunk {
};
struct n2_request_context {
- struct ablkcipher_walk walk;
+ struct skcipher_walk walk;
struct list_head chunk_list;
struct n2_crypto_chunk chunk;
u8 temp_iv[16];
@@ -708,29 +709,29 @@ struct n2_request_context {
* is not a valid sequence.
*/
-struct n2_cipher_alg {
+struct n2_skcipher_alg {
struct list_head entry;
u8 enc_type;
- struct crypto_alg alg;
+ struct skcipher_alg skcipher;
};
-static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct n2_cipher_alg, alg);
+ return container_of(alg, struct n2_skcipher_alg, skcipher);
}
-struct n2_cipher_request_context {
- struct ablkcipher_walk walk;
+struct n2_skcipher_request_context {
+ struct skcipher_walk walk;
};
-static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
@@ -745,7 +746,7 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -754,15 +755,15 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(skcipher, key);
if (err)
return err;
@@ -773,15 +774,15 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(skcipher, key);
if (err)
return err;
@@ -792,12 +793,12 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
u8 *s = ctx->key.arc4;
u8 *x = s + 256;
u8 *y = x + 1;
@@ -822,7 +823,7 @@ static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
int this_len = nbytes;
@@ -830,10 +831,11 @@ static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
return this_len > (1 << 16) ? (1 << 16) : this_len;
}
-static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
+ struct n2_crypto_chunk *cp,
struct spu_queue *qp, bool encrypt)
{
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
struct cwq_initial_entry *ent;
bool in_place;
int i;
@@ -877,18 +879,17 @@ static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
-static int n2_compute_chunks(struct ablkcipher_request *req)
+static int n2_compute_chunks(struct skcipher_request *req)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct skcipher_walk *walk = &rctx->walk;
struct n2_crypto_chunk *chunk;
unsigned long dest_prev;
unsigned int tot_len;
bool prev_in_place;
int err, nbytes;
- ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
- err = ablkcipher_walk_phys(req, walk);
+ err = skcipher_walk_async(walk, req);
if (err)
return err;
@@ -910,12 +911,12 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
bool in_place;
int this_len;
- src_paddr = (page_to_phys(walk->src.page) +
- walk->src.offset);
- dest_paddr = (page_to_phys(walk->dst.page) +
- walk->dst.offset);
+ src_paddr = (page_to_phys(walk->src.phys.page) +
+ walk->src.phys.offset);
+ dest_paddr = (page_to_phys(walk->dst.phys.page) +
+ walk->dst.phys.offset);
in_place = (src_paddr == dest_paddr);
- this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+ this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
if (chunk->arr_len != 0) {
if (in_place != prev_in_place ||
@@ -946,7 +947,7 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
prev_in_place = in_place;
tot_len += this_len;
- err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ err = skcipher_walk_done(walk, nbytes - this_len);
if (err)
break;
}
@@ -958,15 +959,14 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
return err;
}
-static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
struct n2_crypto_chunk *c, *tmp;
if (final_iv)
memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
- ablkcipher_walk_complete(&rctx->walk);
list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
list_del(&c->entry);
if (unlikely(c != &rctx->chunk))
@@ -975,10 +975,10 @@ static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
}
-static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
unsigned long flags, hv_ret;
@@ -1017,20 +1017,20 @@ out:
return err;
}
-static int n2_encrypt_ecb(struct ablkcipher_request *req)
+static int n2_encrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, true);
}
-static int n2_decrypt_ecb(struct ablkcipher_request *req)
+static int n2_decrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, false);
}
-static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned long flags, hv_ret, iv_paddr;
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
@@ -1107,32 +1107,32 @@ out:
return err;
}
-static int n2_encrypt_chaining(struct ablkcipher_request *req)
+static int n2_encrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, true);
}
-static int n2_decrypt_chaining(struct ablkcipher_request *req)
+static int n2_decrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, false);
}
-struct n2_cipher_tmpl {
+struct n2_skcipher_tmpl {
const char *name;
const char *drv_name;
u8 block_size;
u8 enc_type;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static const struct n2_cipher_tmpl cipher_tmpls[] = {
+static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
/* ARC4: only ECB is supported (chaining bits ignored) */
{ .name = "ecb(arc4)",
.drv_name = "ecb-arc4",
.block_size = 1,
.enc_type = (ENC_TYPE_ALG_RC4_STREAM |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 1,
.max_keysize = 256,
.setkey = n2_arc4_setkey,
@@ -1147,7 +1147,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1160,7 +1160,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1174,7 +1174,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1189,7 +1189,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1202,7 +1202,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1216,7 +1216,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1230,7 +1230,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = n2_aes_setkey,
@@ -1243,7 +1243,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1257,7 +1257,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_COUNTER),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1268,9 +1268,9 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
},
};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
struct n2_hash_tmpl {
const char *name;
@@ -1344,14 +1344,14 @@ static int algs_registered;
static void __n2_unregister_algs(void)
{
- struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_skcipher_alg *skcipher, *skcipher_tmp;
struct n2_ahash_alg *alg, *alg_tmp;
struct n2_hmac_alg *hmac, *hmac_tmp;
- list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&cipher->alg);
- list_del(&cipher->entry);
- kfree(cipher);
+ list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&skcipher->skcipher);
+ list_del(&skcipher->entry);
+ kfree(skcipher);
}
list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
crypto_unregister_ahash(&hmac->derived.alg);
@@ -1365,44 +1365,42 @@ static void __n2_unregister_algs(void)
}
}
-static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
return 0;
}
-static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
- struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct crypto_alg *alg;
+ struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct skcipher_alg *alg;
int err;
if (!p)
return -ENOMEM;
- alg = &p->alg;
+ alg = &p->skcipher;
+ *alg = tmpl->skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->cra_priority = N2_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->cra_blocksize = tmpl->block_size;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->base.cra_priority = N2_CRA_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->base.cra_blocksize = tmpl->block_size;
p->enc_type = tmpl->enc_type;
- alg->cra_ctxsize = sizeof(struct n2_cipher_context);
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_u.ablkcipher = tmpl->ablkcipher;
- alg->cra_init = n2_cipher_cra_init;
- alg->cra_module = THIS_MODULE;
-
- list_add(&p->entry, &cipher_algs);
- err = crypto_register_alg(alg);
+ alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
+ alg->base.cra_module = THIS_MODULE;
+ alg->init = n2_skcipher_init_tfm;
+
+ list_add(&p->entry, &skcipher_algs);
+ err = crypto_register_skcipher(alg);
if (err) {
- pr_err("%s alg registration failed\n", alg->cra_name);
+ pr_err("%s alg registration failed\n", alg->base.cra_name);
list_del(&p->entry);
kfree(p);
} else {
- pr_info("%s alg registered\n", alg->cra_name);
+ pr_info("%s alg registered\n", alg->base.cra_name);
}
return err;
}
@@ -1517,7 +1515,7 @@ static int n2_register_algs(void)
}
}
for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
if (err) {
__n2_unregister_algs();
goto out;
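
Editor's note — the n2 hunks replace the removed ablkcipher_walk_*() helpers with skcipher_walk_async()/skcipher_walk_done(), and the physical page/offset of each span now comes from walk->src.phys.* / walk->dst.phys.*. A stripped-down sketch of that loop structure: the 64 KiB cap mirrors the descriptor limit seen above, foo_queue_span() is an invented stand-in for the driver's submit path, and error unwinding is elided.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <crypto/internal/skcipher.h>

static void foo_queue_span(struct page *src_page, unsigned long src_off,
			   struct page *dst_page, unsigned long dst_off,
			   unsigned int len)
{
	/* Stand-in for the driver's real descriptor-building code. */
	pr_debug("span of %u bytes, in place: %d\n", len,
		 src_page == dst_page && src_off == dst_off);
}

/* Walk the request and hand each contiguous span to the hardware. */
static int foo_walk_request(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_async(&walk, req);
	if (err)
		return err;

	while (walk.nbytes) {
		/* Cap each descriptor at 64 KiB, as the driver above does. */
		unsigned int len = min(walk.nbytes, 1U << 16);

		foo_queue_span(walk.src.phys.page, walk.src.phys.offset,
			       walk.dst.phys.page, walk.dst.phys.offset, len);

		err = skcipher_walk_done(&walk, walk.nbytes - len);
		if (err)
			break;
	}
	return err;
}
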
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index e631f9979127..92e921eceed7 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int cbc_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,11 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_cbc.iv);
+ rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
@@ -83,56 +82,46 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_encrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return cbc_aes_nx_crypt(req, 1);
}
-static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_decrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return cbc_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_cbc_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_nx_set_key,
- .encrypt = cbc_aes_nx_encrypt,
- .decrypt = cbc_aes_nx_decrypt,
- }
+struct skcipher_alg nx_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_cbc_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
};
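
Editor's note — with nx_cbc_aes_alg now a struct skcipher_alg, callers reach it through the ordinary skcipher request API rather than the removed blkcipher_desc interface. A caller-side sketch (not part of this patch) that allocates "cbc(aes)" and runs one request to completion; it works for both synchronous and asynchronous providers such as cbc-aes-nx. Error paths are trimmed and len is assumed to be a multiple of AES_BLOCK_SIZE.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* One-shot in-place CBC encryption of a linear buffer. */
static int foo_cbc_encrypt_buf(const u8 *key, unsigned int keylen,
			       u8 *iv, void *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					   CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() hides the async completion behind a wait. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
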
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5be8f01c5da8..4c9362eebefd 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,67 +481,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 191e226a11a1..6d5ce1a66f1e 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -19,11 +19,11 @@
#include "nx.h"
-static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -51,11 +51,11 @@ static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -69,12 +69,10 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
-static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -83,10 +81,11 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_ctr.iv);
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -96,59 +95,51 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
- memcpy(iv + CTR_RFC3686_NONCE_SIZE,
- desc->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = iv;
-
- return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ return ctr_aes_nx_crypt(req, iv);
}
-struct crypto_alg nx_ctr3686_aes_alg = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ctr3686_aes_nx_set_key,
- .encrypt = ctr3686_aes_nx_crypt,
- .decrypt = ctr3686_aes_nx_crypt,
- }
+struct skcipher_alg nx_ctr3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ctr_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
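
Editor's note — the rfc3686 entry above keeps base.cra_blocksize at 1, so callers retain stream-cipher semantics, and newly advertises .chunksize = AES_BLOCK_SIZE as the real processing granularity, which is how the skcipher API expresses this property for counter-like modes. A small declaration sketch with invented foo_* names; registration and the actual CTR processing are omitted, and the nonce handling follows the usual rfc3686 key layout (nonce appended to the AES key).

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/skcipher.h>
#include <crypto/ctr.h>
#include <crypto/aes.h>

struct foo_ctr_ctx {
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

static int foo_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct foo_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	/* rfc3686 carries the nonce in the last 4 bytes of the key blob. */
	memcpy(ctx->nonce, key + keylen - CTR_RFC3686_NONCE_SIZE,
	       CTR_RFC3686_NONCE_SIZE);
	return 0;
}

static int foo_ctr_crypt(struct skcipher_request *req)
{
	/* Placeholder: CTR encrypt and decrypt share a single entry point. */
	return -ENOSYS;
}

static struct skcipher_alg foo_ctr_alg = {
	.base.cra_name		= "rfc3686(ctr(aes))",
	.base.cra_driver_name	= "rfc3686-ctr-aes-foo",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= 1,		/* stream-cipher semantics */
	.base.cra_ctxsize	= sizeof(struct foo_ctr_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize	= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.ivsize		= CTR_RFC3686_IV_SIZE,
	.chunksize	= AES_BLOCK_SIZE,	/* real processing granularity */
	.setkey		= foo_ctr_setkey,
	.encrypt	= foo_ctr_crypt,
	.decrypt	= foo_ctr_crypt,
};
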
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index c67570470c9d..77e338dc33f1 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int ecb_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,10 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, NULL);
+ rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src,
+ &to_process, processed, NULL);
if (rc)
goto out;
@@ -83,7 +81,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
@@ -92,46 +90,36 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_encrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return ecb_aes_nx_crypt(req, 1);
}
-static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_decrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return ecb_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 0xf,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ecb_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_nx_set_key,
- .encrypt = ecb_aes_nx_encrypt,
- .decrypt = ecb_aes_nx_decrypt,
- }
+struct skcipher_alg nx_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_alignmask = 0xf,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ecb_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
};
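
As an aside for readers following the conversion in these nx hunks: the sketch below shows the caller side of the skcipher interface that the converted algorithms now plug into. It is illustrative only — the algorithm name, key and buffer are hypothetical, error handling is abbreviated, and nothing here is taken from the diff itself.

#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Sketch: encrypt one AES block in place through the skcipher API. */
static int example_skcipher_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	static u8 key[AES_KEYSIZE_128];
	static u8 buf[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), NULL);

	/* wait synchronously even if the driver completes asynchronously */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
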
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 7d3d67871270..19c6ed5baea4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
- unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
- memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
do {
/*
@@ -240,8 +239,7 @@ out:
return rc;
}
-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
- int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
len = AES_BLOCK_SIZE;
/* Encrypt the counter/IV */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
&len, nx_ctx->ap->sglen);
if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = rctx->iv;
/* initialize the counter */
- *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
if (nbytes == 0) {
if (assoclen == 0)
- rc = gcm_empty(req, &desc, enc);
+ rc = gcm_empty(req, rctx->iv, enc);
else
- rc = gmac(req, &desc, assoclen);
+ rc = gmac(req, rctx->iv, assoclen);
if (rc)
goto out;
else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+ rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
req->src, &to_process,
processed + req->assoclen,
csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
if (rc)
goto out;
- memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_s0,
@@ -471,11 +467,6 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
.base = {
.cra_name = "gcm(aes)",
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 28817880c76d..f03c238f5a31 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -243,25 +243,25 @@ static long int trim_sg_list(struct nx_sg *sg,
* scatterlists based on them.
*
* @nx_ctx: NX crypto context for the lists we're building
- * @desc: the block cipher descriptor for the operation
+ * @iv: iv data, if the algorithm requires it
* @dst: destination scatterlist
* @src: source scatterlist
* @nbytes: length of data described in the scatterlists
* @offset: number of bytes to fast-forward past at the beginning of
* scatterlists.
- * @iv: destination for the iv data, if the algorithm requires it
+ * @oiv: destination for the iv data, if the algorithm requires it
*
- * This is common code shared by all the AES algorithms. It uses the block
- * cipher walk routines to traverse input and output scatterlists, building
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
* corresponding NX scatterlists
*/
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
- struct blkcipher_desc *desc,
+ const u8 *iv,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int *nbytes,
unsigned int offset,
- u8 *iv)
+ u8 *oiv)
{
unsigned int delta = 0;
unsigned int total = *nbytes;
@@ -274,8 +274,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
- if (iv)
- memcpy(iv, desc->info, AES_BLOCK_SIZE);
+ if (oiv)
+ memcpy(oiv, iv, AES_BLOCK_SIZE);
*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
@@ -511,10 +511,10 @@ static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
return true;
}
-static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
- crypto_register_alg(alg) : 0;
+ crypto_register_skcipher(alg) : 0;
}
static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -531,10 +531,10 @@ static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
crypto_register_shash(alg) : 0;
}
-static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
if (nx_check_props(NULL, fc, mode))
- crypto_unregister_alg(alg);
+ crypto_unregister_skcipher(alg);
}
static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -573,15 +573,16 @@ static int nx_register_algs(void)
nx_driver.of.status = NX_OKAY;
- rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
if (rc)
goto out;
- rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CTR);
if (rc)
goto out_unreg_cbc;
@@ -633,11 +634,11 @@ out_unreg_gcm4106:
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
- nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
return rc;
}
@@ -704,21 +705,21 @@ int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
NX_MODE_AES_GCM);
}
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CTR);
}
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CBC);
}
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_ECB);
}
@@ -752,6 +753,11 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
+}
+
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
@@ -798,10 +804,12 @@ static int nx_remove(struct vio_dev *viodev)
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
- nx_unregister_alg(&nx_ctr3686_aes_alg,
- NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
+ NX_MODE_AES_ECB);
}
return 0;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..91c54289124a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -145,19 +145,20 @@ struct crypto_aead;
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
-int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int *,
- unsigned int, u8 *);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int *nbytes, unsigned int offset, u8 *oiv);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int *);
@@ -175,11 +176,11 @@ void nx_debugfs_fini(struct nx_crypto_driver *);
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
-extern struct crypto_alg nx_cbc_aes_alg;
-extern struct crypto_alg nx_ecb_aes_alg;
+extern struct skcipher_alg nx_cbc_aes_alg;
+extern struct skcipher_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct skcipher_alg nx_ctr3686_aes_alg;
extern struct aead_alg nx_ccm_aes_alg;
extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index e0d44a5512ab..1975bcbee997 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -38,23 +38,23 @@ void nx_debugfs_init(struct nx_crypto_driver *drv)
drv->dfs_root = root;
debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.aes_ops);
+ root, &drv->stats.aes_ops.counter);
debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha256_ops);
+ root, &drv->stats.sha256_ops.counter);
debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha512_ops);
+ root, &drv->stats.sha512_ops.counter);
debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.aes_bytes);
+ root, &drv->stats.aes_bytes.counter);
debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha256_bytes);
+ root, &drv->stats.sha256_bytes.counter);
debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha512_bytes);
+ root, &drv->stats.sha512_bytes.counter);
debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.errors);
+ root, &drv->stats.errors.counter);
debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error);
+ root, &drv->stats.last_error.counter);
debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error_pid);
+ root, &drv->stats.last_error_pid.counter);
}
void
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 2f53fbb74100..a1fc03ed01f3 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -142,8 +142,8 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
rctx = aead_request_ctx(dd->aead_req);
@@ -382,11 +382,11 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -403,10 +403,10 @@ int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -414,10 +414,10 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
static int omap_aes_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
@@ -427,8 +427,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -469,8 +469,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
if (!dd)
@@ -505,26 +505,26 @@ static void omap_aes_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd;
int ret;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < aes_fallback_sz) {
+ if (req->cryptlen < aes_fallback_sz) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
ret = crypto_skcipher_encrypt(subreq);
@@ -545,10 +545,10 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -571,32 +571,32 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, 0);
}
-static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CBC);
}
-static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}
-static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CTR);
}
@@ -606,10 +606,10 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *req);
-static int omap_aes_cra_init(struct crypto_tfm *tfm)
+static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -618,7 +618,7 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
ctx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -657,9 +657,9 @@ static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
return 0;
}
-static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
crypto_free_sync_skcipher(ctx->fallback);
@@ -671,7 +671,10 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
{
struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- omap_aes_cra_exit(crypto_aead_tfm(tfm));
+ if (ctx->fallback)
+ crypto_free_sync_skcipher(ctx->fallback);
+
+ ctx->fallback = NULL;
if (ctx->ctr)
crypto_free_skcipher(ctx->ctr);
@@ -679,78 +682,69 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
}
};
-static struct crypto_alg algs_ctr[] = {
+static struct skcipher_alg algs_ctr[] = {
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- }
-} ,
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+}
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -1121,7 +1115,7 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct aead_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
@@ -1215,9 +1209,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1230,9 +1224,8 @@ static int omap_aes_probe(struct platform_device *pdev)
!dd->pdata->aead_algs_info->registered) {
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- algp = &aalg->base;
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.cra_name);
err = crypto_register_aead(aalg);
if (err)
@@ -1257,7 +1250,7 @@ err_aead_algs:
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1290,7 +1283,7 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d4b1f87a1c9..2d3575231e31 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -112,7 +112,7 @@ struct omap_aes_reqctx {
#define OMAP_AES_CACHE_SIZE 0
struct omap_aes_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -162,7 +162,7 @@ struct omap_aes_dev {
struct aead_queue aead_queue;
spinlock_t lock;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *aead_req;
struct crypto_engine *engine;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index b19d7e5d55ec..4c4dbc2b377e 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
@@ -98,7 +99,7 @@ struct omap_des_reqctx {
#define OMAP_DES_CACHE_SIZE 0
struct omap_des_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -139,7 +140,7 @@ struct omap_des_dev {
struct tasklet_struct done_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
@@ -261,8 +262,8 @@ static int omap_des_write_ctrl(struct omap_des_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & FLAGS_CBC) && dd->req->info)
- omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ omap_des_write_n(dd, DES_REG_IV(dd, 0), (void *)dd->req->iv, 2);
if (dd->flags & FLAGS_CBC)
val |= DES_REG_CTRL_CBC;
@@ -456,8 +457,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err;
pr_debug("total: %d\n", dd->total);
@@ -491,11 +492,11 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
static void omap_des_finish_req(struct omap_des_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -514,10 +515,10 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
}
static int omap_des_handle_queue(struct omap_des_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -525,9 +526,9 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
static int omap_des_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
int ret;
@@ -538,8 +539,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -568,8 +569,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
if (dd->out_sg_len < 0)
return dd->out_sg_len;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
@@ -582,9 +583,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
if (!dd)
@@ -619,18 +620,18 @@ static void omap_des_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_des_reqctx *rctx = skcipher_request_ctx(req);
struct omap_des_dev *dd;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -646,15 +647,15 @@ static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -664,15 +665,15 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -682,22 +683,22 @@ static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, 0);
}
-static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_CBC);
}
@@ -707,13 +708,13 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq);
-static int omap_des_cra_init(struct crypto_tfm *tfm)
+static int omap_des_init_tfm(struct crypto_skcipher *tfm)
{
- struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
pr_debug("enter\n");
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
ctx->enginectx.op.prepare_request = omap_des_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -722,103 +723,78 @@ static int omap_des_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void omap_des_cra_exit(struct crypto_tfm *tfm)
-{
- pr_debug("enter\n");
-}
-
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
}
};
@@ -976,7 +952,7 @@ static int omap_des_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_des_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct resource *res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -1071,9 +1047,9 @@ static int omap_des_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1086,7 +1062,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1119,7 +1095,7 @@ static int omap_des_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
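
A reduced sketch of the crypto_engine flow that both omap-aes and omap-des follow after this conversion. The "my_" names and the global engine pointer are hypothetical, and the per-request context size is assumed to have been set with crypto_skcipher_set_reqsize() in the tfm init hook, as the diffs above do.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_reqctx {
	unsigned long mode;	/* e.g. encrypt/decrypt and chaining flags */
};

static struct crypto_engine *my_engine;	/* hypothetical, set up at probe */

/* .encrypt/.decrypt entry point: record the mode and queue the request;
 * the engine later invokes the driver's prepare/do_one_request callbacks. */
static int my_queue_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	rctx->mode = mode;
	return crypto_transfer_skcipher_request_to_engine(my_engine, req);
}

/* Completion path (tasklet or irq): hand the finished request back. */
static void my_finish(struct skcipher_request *req, int err)
{
	crypto_finalize_skcipher_request(my_engine, req, err);
}
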
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a0661250078..c5b60f50e1b5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -97,9 +98,9 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
return aes_ctx_common(crypto_tfm_ctx(tfm));
}
-static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
- return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+ return aes_ctx_common(crypto_skcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -162,6 +163,12 @@ ok:
return 0;
}
+static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
/* ====== Encryption/decryption routines ====== */
/* These are the real call to PadLock. */
@@ -338,25 +345,24 @@ static struct crypto_alg aes_alg = {
}
};
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -364,25 +370,24 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.decrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -390,48 +395,41 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+static struct skcipher_alg ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
walk.iv, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.decrypt);
@@ -439,25 +437,24 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -465,26 +462,20 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
- }
- }
+static struct skcipher_alg cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
};
static const struct x86_cpu_id padlock_cpu_id[] = {
@@ -506,13 +497,13 @@ static int __init padlock_init(void)
return -ENODEV;
}
- if ((ret = crypto_register_alg(&aes_alg)))
+ if ((ret = crypto_register_alg(&aes_alg)) != 0)
goto aes_err;
- if ((ret = crypto_register_alg(&ecb_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
goto ecb_aes_err;
- if ((ret = crypto_register_alg(&cbc_aes_alg)))
+ if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
goto cbc_aes_err;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
@@ -527,7 +518,7 @@ out:
return ret;
cbc_aes_err:
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
crypto_unregister_alg(&aes_alg);
aes_err:
@@ -537,8 +528,8 @@ aes_err:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&cbc_aes_alg);
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&cbc_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
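
The padlock hunks above all follow the same skcipher_walk shape; the generic pattern is sketched below, with a hypothetical cipher_blocks() helper standing in for the hardware call.

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical hardware helper: processes full AES blocks. */
void cipher_blocks(u8 *dst, const u8 *src, unsigned int nblocks);

static int my_ecb_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* false == not atomic; the walk may sleep between chunks */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		cipher_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes / AES_BLOCK_SIZE);
		/* report the unprocessed remainder (a partial tail block) */
		err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
	}

	return err;
}
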
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3cbefb41b099..29da449b3e9e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -134,7 +134,7 @@ struct spacc_engine {
struct spacc_alg {
unsigned long ctrl_default;
unsigned long type;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct spacc_engine *engine;
struct list_head entry;
int key_offs;
@@ -173,7 +173,7 @@ struct spacc_aead_ctx {
static int spacc_ablk_submit(struct spacc_req *req);
-static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
{
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}
@@ -733,13 +733,13 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm)
* Set the DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -753,13 +753,13 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the 3DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -773,15 +773,15 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
-static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -822,15 +822,15 @@ sw_setkey_failed:
return err;
}
-static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -844,12 +844,12 @@ out:
static int spacc_ablk_need_fallback(struct spacc_req *req)
{
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
struct spacc_ablk_ctx *ctx;
- struct crypto_tfm *tfm = req->req->tfm;
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- ctx = crypto_tfm_ctx(tfm);
+ ctx = crypto_skcipher_ctx(tfm);
return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES &&
@@ -859,39 +859,39 @@ static int spacc_ablk_need_fallback(struct spacc_req *req)
static void spacc_ablk_complete(struct spacc_req *req)
{
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
if (ablk_req->src != ablk_req->dst) {
spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
- ablk_req->nbytes, DMA_TO_DEVICE);
+ ablk_req->cryptlen, DMA_TO_DEVICE);
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_FROM_DEVICE);
+ ablk_req->cryptlen, DMA_FROM_DEVICE);
} else
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_BIDIRECTIONAL);
+ ablk_req->cryptlen, DMA_BIDIRECTIONAL);
req->req->complete(req->req, req->result);
}
static int spacc_ablk_submit(struct spacc_req *req)
{
- struct crypto_tfm *tfm = req->req->tfm;
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
struct spacc_engine *engine = ctx->generic.engine;
u32 ctrl;
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
- ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+ ctx->key_len, ablk_req->iv, alg->ivsize,
NULL, 0);
writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
- writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
@@ -907,11 +907,11 @@ static int spacc_ablk_submit(struct spacc_req *req)
return -EINPROGRESS;
}
-static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+static int spacc_ablk_do_fallback(struct skcipher_request *req,
unsigned alg_type, bool is_encrypt)
{
struct crypto_tfm *old_tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
@@ -924,7 +924,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
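The fallback above is the standard sync-skcipher pattern, and it also shows the two request-field renames (nbytes becomes cryptlen, info becomes iv). A condensed sketch, assuming sw_cipher was allocated earlier with crypto_alloc_sync_skcipher():

#include <crypto/skcipher.h>

static int example_do_fallback(struct skcipher_request *req,
                               struct crypto_sync_skcipher *sw_cipher,
                               bool is_encrypt)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, sw_cipher);
        int err;

        skcipher_request_set_sync_tfm(subreq, sw_cipher);
        skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->cryptlen, req->iv);
        err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
                           crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}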
@@ -932,12 +932,13 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
return err;
}
-static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
bool is_encrypt)
{
- struct crypto_alg *alg = req->base.tfm->__crt_alg;
- struct spacc_engine *engine = to_spacc_alg(alg)->engine;
- struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
+ struct spacc_req *dev_req = skcipher_request_ctx(req);
unsigned long flags;
int err = -ENOMEM;
@@ -956,17 +957,17 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
*/
if (req->src != req->dst) {
dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
- req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+ req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
if (!dev_req->src_ddt)
goto out;
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+ req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out_free_src;
} else {
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+ req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out;
@@ -999,65 +1000,65 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
out_free_ddts:
spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
- req->nbytes, req->src == req->dst ?
+ req->cryptlen, req->src == req->dst ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
if (req->src != req->dst)
spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
- req->src, req->nbytes, DMA_TO_DEVICE);
+ req->src, req->cryptlen, DMA_TO_DEVICE);
out:
return err;
}
-static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
struct spacc_engine *engine = spacc_alg->engine;
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher = crypto_alloc_sync_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->cra_name);
+ alg->base.cra_name);
return PTR_ERR(ctx->sw_cipher);
}
}
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req));
return 0;
}
-static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->sw_cipher);
}
-static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+static int spacc_ablk_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 1);
+ return spacc_ablk_setup(req, spacc_alg->type, 1);
}
-static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+static int spacc_ablk_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 0);
+ return spacc_ablk_setup(req, spacc_alg->type, 0);
}
static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
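The init/exit hunk above replaces the old tfm->crt_ablkcipher.reqsize assignment with crypto_skcipher_set_reqsize(), which is what lets skcipher_request_ctx() hand back per-request driver state later. A small sketch of that pairing, with a hypothetical example_req structure:

#include <crypto/internal/skcipher.h>

struct example_req {
        struct skcipher_request *req;
        int result;
};

static int example_init_tfm(struct crypto_skcipher *tfm)
{
        /* reserve room for one struct example_req per skcipher_request */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct example_req));
        return 0;
}

static int example_encrypt(struct skcipher_request *req)
{
        struct example_req *dev_req = skcipher_request_ctx(req);

        dev_req->req = req;
        /* hand dev_req to the hardware queue here */
        return -EINPROGRESS;
}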
@@ -1233,27 +1234,24 @@ static struct spacc_alg ipsec_engine_algs[] = {
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1261,25 +1259,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = AES_MAX_KEY_SIZE,
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1287,26 +1283,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1314,25 +1307,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1340,26 +1330,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1367,25 +1354,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
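For comparison, this is what one of the flattened templates looks like when written from scratch rather than as a diff; the .base.cra_* members replace the old crypto_alg fields, and neither .cra_type nor CRYPTO_ALG_TYPE_ABLKCIPHER is needed any more. The example_* callbacks refer to the illustrative sketches in the notes above (example_decrypt would mirror example_encrypt):

#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_alg example_cbc_aes_alg = {
        .base.cra_name          = "cbc(aes)",
        .base.cra_driver_name   = "cbc-aes-example",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_ASYNC |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct example_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .setkey                 = example_aes_setkey,
        .encrypt                = example_encrypt,
        .decrypt                = example_decrypt,
        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .init                   = example_init_tfm,
};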
@@ -1581,25 +1565,23 @@ static struct spacc_alg l2_engine_algs[] = {
.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
SPA_CTRL_CIPH_MODE_F8,
.alg = {
- .cra_name = "f8(kasumi)",
- .cra_driver_name = "f8-kasumi-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 8,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_kasumi_f8_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = 16,
- .max_keysize = 16,
- .ivsize = 8,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "f8(kasumi)",
+ .base.cra_driver_name = "f8-kasumi-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 8,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_kasumi_f8_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = 16,
+ .max_keysize = 16,
+ .ivsize = 8,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
@@ -1721,7 +1703,7 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
- err = crypto_register_alg(&engine->algs[i].alg);
+ err = crypto_register_skcipher(&engine->algs[i].alg);
if (!err) {
list_add_tail(&engine->algs[i].entry,
&engine->registered_algs);
@@ -1729,10 +1711,10 @@ static int spacc_probe(struct platform_device *pdev)
}
if (err)
dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
else
dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
}
INIT_LIST_HEAD(&engine->registered_aeads);
@@ -1781,7 +1763,7 @@ static int spacc_remove(struct platform_device *pdev)
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ crypto_unregister_skcipher(&alg->alg);
}
clk_disable_unprepare(engine->clk);
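The probe/remove hunks above swap crypto_register_alg()/crypto_unregister_alg() for the skcipher variants but keep the same loop structure. A generic sketch of such a loop with explicit unwinding on failure (names are illustrative; the driver itself tracks registered algorithms on a list instead):

#include <linux/device.h>
#include <crypto/internal/skcipher.h>

static int example_register_algs(struct device *dev,
                                 struct skcipher_alg *algs, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = crypto_register_skcipher(&algs[i]);
                if (err) {
                        dev_err(dev, "failed to register \"%s\"\n",
                                algs[i].base.cra_name);
                        goto unwind;
                }
        }
        return 0;

unwind:
        while (--i >= 0)
                crypto_unregister_skcipher(&algs[i]);
        return err;
}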
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6ab7e5a88756..2006322345de 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_QAT
tristate
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
select CRYPTO_HMAC
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index b50eb55f8f57..35bca76b640f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -48,6 +48,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
@@ -122,7 +123,7 @@ struct qat_alg_aead_ctx {
char opad[SHA512_BLOCK_SIZE];
};
-struct qat_alg_ablkcipher_ctx {
+struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr;
@@ -130,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -463,10 +464,10 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return 0;
}
-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -485,28 +486,28 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
@@ -577,21 +578,21 @@ error:
return -EFAULT;
}
-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
- const uint8_t *key,
- unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen,
+ int mode)
{
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key;
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -832,12 +833,12 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
areq->base.complete(&areq->base, res);
}
-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
- struct qat_crypto_request *qat_req)
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ struct skcipher_request *sreq = qat_req->skcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -846,11 +847,11 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
- memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
qat_req->iv_paddr);
- areq->base.complete(&areq->base, res);
+ sreq->base.complete(&sreq->base, res);
}
void qat_alg_callback(void *resp)
@@ -949,21 +950,21 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
@@ -990,7 +991,7 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
goto out_free_enc;
}
- ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret)
goto out_free_all;
@@ -1012,51 +1013,51 @@ out_free_instance:
return ret;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->enc_cd)
- return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else
- return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CTR_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
}
-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_XTS_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
}
-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1073,17 +1074,17 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -1097,26 +1098,26 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_encrypt(req);
+ return qat_alg_skcipher_encrypt(req);
}
-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1133,17 +1134,17 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -1157,12 +1158,12 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_decrypt(req);
+ return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
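The *_blk_encrypt/*_blk_decrypt wrappers above only add a length check, since the CBC and XTS requests must cover a whole number of AES blocks before being queued. The equivalent check in isolation, with example_encrypt standing in for the real submission path:

static int example_blk_encrypt(struct skcipher_request *req)
{
        /* reject partial blocks before touching the hardware */
        if (req->cryptlen % AES_BLOCK_SIZE)
                return -EINVAL;

        return example_encrypt(req);
}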
@@ -1218,18 +1219,18 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
qat_crypto_put_instance(inst);
}
-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
ctx->tfm = tfm;
return 0;
}
-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
@@ -1308,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
.maxauthsize = SHA512_DIGEST_SIZE,
} };
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qat_aes_cbc",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+static struct skcipher_alg qat_skciphers[] = { {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "qat_aes_cbc",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_cbc_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "qat_aes_ctr",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_ctr_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "qat_aes_ctr",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_ctr_setkey,
+ .decrypt = qat_alg_skcipher_decrypt,
+ .encrypt = qat_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "qat_aes_xts",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "qat_aes_xts",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_xts_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
} };
int qat_algs_register(void)
{
- int ret = 0, i;
+ int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs != 1)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_skciphers(qat_skciphers,
+ ARRAY_SIZE(qat_skciphers));
if (ret)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
- qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
-
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret)
goto unreg_algs;
@@ -1403,7 +1387,7 @@ unlock:
return ret;
unreg_algs:
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock;
}
@@ -1414,7 +1398,7 @@ void qat_algs_unregister(void)
goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock:
mutex_unlock(&algs_lock);
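Because the flags are now set statically in the table, the flag-fixup loops at registration time go away and the whole array is handed to crypto_register_skciphers(), which also unregisters any entries it had already registered if a later one fails. A minimal sketch, assuming a table shaped like qat_skciphers above:

#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_alg example_skciphers[3]; /* filled in as above */

static int example_algs_register(void)
{
        return crypto_register_skciphers(example_skciphers,
                                         ARRAY_SIZE(example_skciphers));
}

static void example_algs_unregister(void)
{
        crypto_unregister_skciphers(example_skciphers,
                                    ARRAY_SIZE(example_skciphers));
}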
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index c77a80020cde..300bb919a33a 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -79,11 +79,11 @@ struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req;
union {
struct qat_alg_aead_ctx *aead_ctx;
- struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *skcipher_ctx;
};
union {
struct aead_request *aead_req;
- struct ablkcipher_request *ablkcipher_req;
+ struct skcipher_request *skcipher_req;
};
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
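The request structure keeps a union of back-pointers, so the completion path only changes which member it reads. A reduced sketch of how the skcipher side of such a union is completed (example_* names are illustrative):

#include <crypto/aead.h>
#include <crypto/skcipher.h>

struct example_crypto_request {
        union {
                struct aead_request *aead_req;
                struct skcipher_request *skcipher_req;
        };
        int result;
};

static void example_skcipher_complete(struct example_crypto_request *ereq)
{
        struct skcipher_request *sreq = ereq->skcipher_req;

        /* hand the result back to the caller of crypto_skcipher_encrypt() */
        sreq->base.complete(&sreq->base, ereq->result);
}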
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 19a7f899acff..8caa04e1ec43 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
common.o \
dma.o \
sha.o \
- ablkcipher.o
+ skcipher.o
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5cab8f0706a8..7770660bc853 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
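to_cipher_tmpl() above works because the registered skcipher_alg is embedded in the driver's template, so container_of() can walk back from the generic handle. The same pattern in generic form:

#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

struct example_tmpl {
        unsigned long flags;
        struct skcipher_alg alg;        /* must be the alg that was registered */
};

static inline struct example_tmpl *to_example_tmpl(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

        return container_of(alg, struct example_tmpl, alg);
}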
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 3fb510164326..da1188abc9ba 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
return 0;
}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
default:
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 47fb523357ac..282d4317470d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 08d4ce3bfddf..0a44a6eeacf5 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,7 +22,7 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+ &skcipher_ops,
&ahash_ops,
};
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0984a719144d..40a59214d2e1 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -12,11 +12,11 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
- dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+ dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
return PTR_ERR(dma->txchan);
- dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+ dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
ret = PTR_ERR(dma->rxchan);
goto error_rx;
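This hunk is unrelated to the skcipher conversion: dma_request_slave_channel_reason() was a wrapper around dma_request_chan(), so calling the latter directly keeps the same ERR_PTR-based error handling. A short sketch:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_request_tx(struct device *dev, struct dma_chan **chan)
{
        *chan = dma_request_chan(dev, "tx");
        if (IS_ERR(*chan))
                return PTR_ERR(*chan);

        return 0;
}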
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 0853e74583ad..95ab16fc8fd6 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -495,7 +495,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
- base->cra_flags = CRYPTO_ALG_ASYNC;
+ base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c
index 7a98bf5cc967..fee07323f8f9 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -12,14 +12,14 @@
#include "cipher.h"
-static LIST_HEAD(ablkcipher_algs);
+static LIST_HEAD(skcipher_algs);
-static void qce_ablkcipher_done(void *data)
+static void qce_skcipher_done(void *data)
{
struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
u32 status;
@@ -32,7 +32,7 @@ static void qce_ablkcipher_done(void *data)
error = qce_dma_terminate_all(&qce->dma);
if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
error);
if (diff_dst)
@@ -43,18 +43,18 @@ static void qce_ablkcipher_done(void *data)
error = qce_check_status(qce, &status);
if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
qce->async_req_done(tmpl->qce, error);
}
static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
struct scatterlist *sg;
@@ -62,17 +62,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
gfp_t gfp;
int ret;
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
else
rctx->dst_nents = rctx->src_nents;
if (rctx->src_nents < 0) {
@@ -125,13 +125,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
+ qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
if (ret)
goto error_terminate;
@@ -149,10 +149,10 @@ error_free:
return ret;
}
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
@@ -177,13 +177,13 @@ fallback:
return ret;
}
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des_key(ablk, key);
+ err = verify_skcipher_des_key(ablk, key);
if (err)
return err;
@@ -192,13 +192,13 @@ static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des3_key(ablk, key);
+ err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
@@ -207,12 +207,11 @@ static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
int ret;
@@ -227,7 +226,7 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -237,36 +236,36 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qce_skcipher_encrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 1);
+ return qce_skcipher_crypt(req, 1);
}
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qce_skcipher_decrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 0);
+ return qce_skcipher_crypt(req, 0);
}
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
-struct qce_ablkcipher_def {
+struct qce_skcipher_def {
unsigned long flags;
const char *name;
const char *drv_name;
@@ -276,7 +275,7 @@ struct qce_ablkcipher_def {
unsigned int max_keysize;
};
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
+static const struct qce_skcipher_def skcipher_def[] = {
{
.flags = QCE_ALG_AES | QCE_MODE_ECB,
.name = "ecb(aes)",
@@ -351,90 +350,91 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
},
};
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
struct qce_device *qce)
{
struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg;
int ret;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl)
return -ENOMEM;
- alg = &tmpl->alg.crypto;
+ alg = &tmpl->alg.skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
- IS_DES(def->flags) ? qce_des_setkey :
- qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
tmpl->alg_flags = def->flags;
tmpl->qce = qce;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
return ret;
}
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
return 0;
}
-static void qce_ablkcipher_unregister(struct qce_device *qce)
+static void qce_skcipher_unregister(struct qce_device *qce)
{
struct qce_alg_template *tmpl, *n;
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
list_del(&tmpl->entry);
kfree(tmpl);
}
}
-static int qce_ablkcipher_register(struct qce_device *qce)
+static int qce_skcipher_register(struct qce_device *qce)
{
int ret, i;
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
if (ret)
goto err;
}
return 0;
err:
- qce_ablkcipher_unregister(qce);
+ qce_skcipher_unregister(qce);
return ret;
}
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
};
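qce allocates its software fallback in the new init hook; note that crypto_tfm_alg_name() still wants the base crypto_tfm, hence the &tfm->base (equivalently crypto_skcipher_tfm(tfm)). A condensed sketch with an illustrative context struct:

#include <linux/err.h>
#include <crypto/internal/skcipher.h>

struct example_fb_ctx {
        struct crypto_sync_skcipher *fallback;
};

static int example_fb_init(struct crypto_skcipher *tfm)
{
        struct example_fb_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* ask the API for a software implementation of the same algorithm */
        ctx->fallback = crypto_alloc_sync_skcipher(
                                crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)),
                                0, CRYPTO_ALG_NEED_FALLBACK);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void example_fb_exit(struct crypto_skcipher *tfm)
{
        struct example_fb_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->fallback);
}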
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 6e23764e6c8a..785277aca71e 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
- rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_skcipher.o \
rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index e5714ef24bf2..f385587f99af 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -264,8 +264,8 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- err = crypto_register_alg(
- &rk_cipher_algs[i]->alg.crypto);
+ err = crypto_register_skcipher(
+ &rk_cipher_algs[i]->alg.skcipher);
else
err = crypto_register_ahash(
&rk_cipher_algs[i]->alg.hash);
@@ -277,7 +277,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
@@ -290,7 +290,7 @@ static void rk_crypto_unregister(void)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 18e2b3f29336..2b49c677afdb 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
@@ -256,7 +257,7 @@ enum alg_type {
struct rk_crypto_tmp {
struct rk_crypto_info *dev;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
} alg;
enum alg_type type;
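rk3288 keeps ciphers and hashes in one template array, so the union simply swaps crypto_alg for skcipher_alg and registration dispatches on the type tag. A stripped-down sketch of that dispatch (example_* names are illustrative):

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

struct example_rk_tmpl {
        enum { EXAMPLE_ALG_CIPHER, EXAMPLE_ALG_HASH } type;
        union {
                struct skcipher_alg skcipher;
                struct ahash_alg hash;
        } alg;
};

static int example_register_tmpl(struct example_rk_tmpl *tmpl)
{
        if (tmpl->type == EXAMPLE_ALG_CIPHER)
                return crypto_register_skcipher(&tmpl->alg.skcipher);

        return crypto_register_ahash(&tmpl->alg.hash);
}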
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
deleted file mode 100644
index d0f4b2d18059..000000000000
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Crypto acceleration support for Rockchip RK3288
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Author: Zain Wang <zain.wang@rock-chips.com>
- *
- * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
- */
-#include "rk3288_crypto.h"
-
-#define RK_CRYPTO_DEC BIT(0)
-
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
-{
- if (base->complete)
- base->complete(base, err);
-}
-
-static int rk_handle_req(struct rk_crypto_info *dev,
- struct ablkcipher_request *req)
-{
- if (!IS_ALIGNED(req->nbytes, dev->align_size))
- return -EINVAL;
- else
- return dev->enqueue(dev, &req->base);
-}
-
-static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
-}
-
-static int rk_des_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = 0;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
- RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 ivsize, block, conf_reg = 0;
-
- block = crypto_tfm_alg_blocksize(tfm);
- ivsize = crypto_ablkcipher_ivsize(cipher);
-
- if (block == DES_BLOCK_SIZE) {
- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
- RK_CRYPTO_TDES_BYTESWAP_KEY |
- RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
- conf_reg = RK_CRYPTO_DESSEL;
- } else {
- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
- RK_CRYPTO_AES_KEY_CHANGE |
- RK_CRYPTO_AES_BYTESWAP_KEY |
- RK_CRYPTO_AES_BYTESWAP_IV;
- if (ctx->keylen == AES_KEYSIZE_192)
- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
- else if (ctx->keylen == AES_KEYSIZE_256)
- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
- }
- conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
- RK_CRYPTO_BYTESWAP_BRFIFO;
- CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
- CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
- RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
-}
-
-static void crypto_dma_start(struct rk_crypto_info *dev)
-{
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
- CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
- _SBF(RK_CRYPTO_BLOCK_START, 16));
-}
-
-static int rk_set_data_start(struct rk_crypto_info *dev)
-{
- int err;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
- dev->sg_src->offset + dev->sg_src->length - ivsize;
-
- /* Store the iv that need to be updated in chain mode.
- * And update the IV buffer to contain the next IV for decryption mode.
- */
- if (ctx->mode & RK_CRYPTO_DEC) {
- memcpy(ctx->iv, src_last_blk, ivsize);
- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
- ivsize, dev->total - ivsize);
- }
-
- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
- if (!err)
- crypto_dma_start(dev);
- return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- unsigned long flags;
- int err = 0;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->dst_nents = sg_nents(req->dst);
- dev->aligned = 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- rk_ablk_hw_init(dev);
- err = rk_set_data_start(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- return err;
-}
-
-static void rk_iv_copyback(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
-
- /* Update the IV buffer to contain the next IV for encryption mode. */
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
- if (dev->aligned) {
- memcpy(req->info, sg_virt(dev->sg_dst) +
- dev->sg_dst->length - ivsize, ivsize);
- } else {
- memcpy(req->info, dev->addr_vir +
- dev->count - ivsize, ivsize);
- }
- }
-}
-
-static void rk_update_iv(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *new_iv = NULL;
-
- if (ctx->mode & RK_CRYPTO_DEC) {
- new_iv = ctx->iv;
- } else {
- new_iv = page_address(sg_page(dev->sg_dst)) +
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
- }
-
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
-}
-
-/* return:
- * true some err was occurred
- * fault no err, continue
- */
-static int rk_ablk_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
-
- dev->unload_data(dev);
- if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
- dev->addr_vir, dev->count,
- dev->total - dev->left_bytes -
- dev->count)) {
- err = -EINVAL;
- goto out_rx;
- }
- }
- if (dev->left_bytes) {
- rk_update_iv(dev);
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
- dev->sg_dst = sg_next(dev->sg_dst);
- }
- err = rk_set_data_start(dev);
- } else {
- rk_iv_copyback(dev);
- /* here show the calculation is over without any err */
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
- }
-out_rx:
- return err;
-}
-
-static int rk_ablk_cra_init(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct rk_crypto_tmp *algt;
-
- algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
-
- ctx->dev = algt->dev;
- ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
- ctx->dev->start = rk_ablk_start;
- ctx->dev->update = rk_ablk_rx;
- ctx->dev->complete = rk_crypto_complete;
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
-
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
-}
-
-static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- free_page((unsigned long)ctx->dev->addr_vir);
- ctx->dev->disable_clk(ctx->dev);
-}
-
-struct rk_crypto_tmp rk_ecb_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_ecb_encrypt,
- .decrypt = rk_aes_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_cbc_encrypt,
- .decrypt = rk_aes_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_ecb_encrypt,
- .decrypt = rk_des_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_cbc_encrypt,
- .decrypt = rk_des_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_ecb_encrypt,
- .decrypt = rk_des3_ede_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_cbc_encrypt,
- .decrypt = rk_des3_ede_cbc_decrypt,
- }
- }
-};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
new file mode 100644
index 000000000000..ca4de4ddfe1f
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC BIT(0)
+
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
+{
+ if (base->complete)
+ base->complete(base, err);
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+ struct skcipher_request *req)
+{
+ if (!IS_ALIGNED(req->cryptlen, dev->align_size))
+ return -EINVAL;
+ else
+ return dev->enqueue(dev, &req->base);
+}
+
+static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+ return 0;
+}
+
+static int rk_des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des3_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = 0;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ u32 ivsize, block, conf_reg = 0;
+
+ block = crypto_tfm_alg_blocksize(tfm);
+ ivsize = crypto_skcipher_ivsize(cipher);
+
+ if (block == DES_BLOCK_SIZE) {
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ RK_CRYPTO_TDES_BYTESWAP_KEY |
+ RK_CRYPTO_TDES_BYTESWAP_IV;
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+ conf_reg = RK_CRYPTO_DESSEL;
+ } else {
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ RK_CRYPTO_AES_KEY_CHANGE |
+ RK_CRYPTO_AES_BYTESWAP_KEY |
+ RK_CRYPTO_AES_BYTESWAP_IV;
+ if (ctx->keylen == AES_KEYSIZE_192)
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+ }
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO;
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+ dev->sg_src->offset + dev->sg_src->length - ivsize;
+
+	/* Store the IV that needs to be updated in chain mode.
+	 * Also update the IV buffer to contain the next IV for decryption mode.
+	 */
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
+ ivsize, dev->total - ivsize);
+ }
+
+ err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+ if (!err)
+ crypto_dma_start(dev);
+ return err;
+}
+
+static int rk_ablk_start(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ unsigned long flags;
+ int err = 0;
+
+ dev->left_bytes = req->cryptlen;
+ dev->total = req->cryptlen;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->src_nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->dst_nents = sg_nents(req->dst);
+ dev->aligned = 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ rk_ablk_hw_init(dev);
+ err = rk_set_data_start(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return err;
+}
+
+static void rk_iv_copyback(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->iv, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->iv, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
+}
+
+static void rk_update_iv(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *new_iv = NULL;
+
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ new_iv = ctx->iv;
+ } else {
+ new_iv = page_address(sg_page(dev->sg_dst)) +
+ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ }
+
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+ else if (ivsize == AES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+}
+
+/* return:
+ *	true	an error occurred
+ *	false	no error, continue
+ */
+static int rk_ablk_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+
+ dev->unload_data(dev);
+ if (!dev->aligned) {
+ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
+ dev->addr_vir, dev->count,
+ dev->total - dev->left_bytes -
+ dev->count)) {
+ err = -EINVAL;
+ goto out_rx;
+ }
+ }
+ if (dev->left_bytes) {
+ rk_update_iv(dev);
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_err(dev->dev, "[%s:%d] Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ dev->sg_dst = sg_next(dev->sg_dst);
+ }
+ err = rk_set_data_start(dev);
+ } else {
+ rk_iv_copyback(dev);
+		/* the calculation finished without any error */
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
+ }
+out_rx:
+ return err;
+}
+
+static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt;
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+ ctx->dev = algt->dev;
+ ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
+ ctx->dev->start = rk_ablk_start;
+ ctx->dev->update = rk_ablk_rx;
+ ctx->dev->complete = rk_crypto_complete;
+ ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+ return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+}
+
+static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ free_page((unsigned long)ctx->dev->addr_vir);
+ ctx->dev->disable_clk(ctx->dev);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_ecb_encrypt,
+ .decrypt = rk_aes_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_cbc_encrypt,
+ .decrypt = rk_aes_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_ecb_encrypt,
+ .decrypt = rk_des_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_cbc_encrypt,
+ .decrypt = rk_des_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_ecb_encrypt,
+ .decrypt = rk_des3_ede_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_cbc_encrypt,
+ .decrypt = rk_des3_ede_cbc_decrypt,
+ }
+};
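
The rk3288 conversion above ends with plain struct skcipher_alg definitions registered through crypto_register_skcipher(). For orientation, here is a minimal, self-contained sketch of that registration pattern under the new API. It is illustrative only: the foo_* driver name, its context layout and the stubbed encrypt/decrypt handlers are hypothetical, while the skcipher_alg fields and the crypto_register_skcipher()/crypto_unregister_skcipher() calls are the same API used by the drivers in this series.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-tfm context for the sketch. */
struct foo_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int foo_setkey(struct crypto_skcipher *tfm, const u8 *key,
		      unsigned int keylen)
{
	struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static int foo_encrypt(struct skcipher_request *req)
{
	/* req->cryptlen, req->src, req->dst and req->iv replace the old
	 * ablkcipher fields nbytes, src, dst and info.
	 */
	return -EOPNOTSUPP;	/* hardware path omitted in this sketch */
}

static int foo_decrypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* hardware path omitted in this sketch */
}

static struct skcipher_alg foo_cbc_aes = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-foo",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct foo_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= foo_setkey,
	.encrypt		= foo_encrypt,
	.decrypt		= foo_decrypt,
};

static int __init foo_mod_init(void)
{
	return crypto_register_skcipher(&foo_cbc_aes);
}

static void __exit foo_mod_exit(void)
{
	crypto_unregister_skcipher(&foo_cbc_aes);
}

module_init(foo_mod_init);
module_exit(foo_mod_exit);
MODULE_LICENSE("GPL");

Callers continue to reach such an algorithm through crypto_alloc_skcipher("cbc(aes)", 0, 0); only the driver-facing side of the API changes in this series.
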
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 010f1bb20dad..d66e20a2f54c 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -303,7 +303,7 @@ struct s5p_aes_dev {
void __iomem *aes_ioaddr;
int irq_fc;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct s5p_aes_ctx *ctx;
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
@@ -456,7 +456,7 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
if (!*sg)
return;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
free_pages((unsigned long)sg_virt(*sg), get_order(len));
kfree(*sg);
@@ -478,27 +478,27 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
- struct ablkcipher_request *req = dev->req;
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = dev->req;
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
- dev->req->nbytes);
+ dev->req->cryptlen);
s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->nbytes, 1);
+ dev->req->cryptlen, 1);
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
if (reqctx->mode & FLAGS_AES_CBC)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
else if (reqctx->mode & FLAGS_AES_CTR)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct ablkcipher_request *req, int err)
+static void s5p_aes_complete(struct skcipher_request *req, int err)
{
req->base.complete(&req->base, err);
}
@@ -523,7 +523,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
if (!*dst)
return -ENOMEM;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
if (!pages) {
kfree(*dst);
@@ -531,7 +531,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
return -ENOMEM;
}
- s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+ s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
@@ -660,7 +660,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
@@ -1870,7 +1870,7 @@ static bool s5p_is_sg_aligned(struct scatterlist *sg)
}
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1897,7 +1897,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1925,7 +1925,7 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
- struct ablkcipher_request *req = dev->req;
+ struct skcipher_request *req = dev->req;
u32 aes_control;
unsigned long flags;
int err;
@@ -1938,12 +1938,12 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- iv = req->info;
+ iv = req->iv;
ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
iv = NULL;
- ctr = req->info;
+ ctr = req->iv;
} else {
iv = NULL; /* AES_ECB */
ctr = NULL;
@@ -2021,21 +2021,21 @@ static void s5p_tasklet_cb(unsigned long data)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- dev->req = ablkcipher_request_cast(async_req);
+ dev->req = skcipher_request_cast(async_req);
dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
- reqctx = ablkcipher_request_ctx(dev->req);
+ reqctx = skcipher_request_ctx(dev->req);
s5p_aes_crypt_start(dev, reqctx->mode);
}
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
unsigned long flags;
int err;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
return err;
@@ -2049,17 +2049,17 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
return err;
}
-static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!req->nbytes)
+ if (!req->cryptlen)
return 0;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -2070,10 +2070,10 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return s5p_aes_handle_req(dev, req);
}
-static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
+static int s5p_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
@@ -2087,106 +2087,97 @@ static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, 0);
}
-static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}
-static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CBC);
}
-static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
-static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CTR);
}
-static int s5p_aes_cra_init(struct crypto_tfm *tfm)
+static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = s5p_dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
return 0;
}
-static struct crypto_alg algs[] = {
+static struct skcipher_alg algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ecb_encrypt,
- .decrypt = s5p_aes_ecb_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ecb_encrypt,
+ .decrypt = s5p_aes_ecb_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_cbc_encrypt,
- .decrypt = s5p_aes_cbc_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_cbc_encrypt,
+ .decrypt = s5p_aes_cbc_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ctr_crypt,
- .decrypt = s5p_aes_ctr_crypt,
- }
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ .init = s5p_aes_init_tfm,
},
};
@@ -2297,7 +2288,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
for (i = 0; i < ARRAY_SIZE(algs); i++) {
- err = crypto_register_alg(&algs[i]);
+ err = crypto_register_skcipher(&algs[i]);
if (err)
goto err_algs;
}
@@ -2334,11 +2325,11 @@ err_hash:
err_algs:
if (i < ARRAY_SIZE(algs))
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
err);
for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
+ crypto_unregister_skcipher(&algs[j]);
tasklet_kill(&pdata->tasklet);
@@ -2362,7 +2353,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
+ crypto_unregister_skcipher(&algs[i]);
tasklet_kill(&pdata->tasklet);
if (pdata->use_hash) {
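
The s5p-sss changes above show the other half of the conversion: the driver's internal queue still carries generic crypto_async_request objects, and only the cast back to the request type and the field names change (ablkcipher_request_cast()/nbytes/info become skcipher_request_cast()/cryptlen/iv). A hedged sketch of that queue handling follows; the foo_* names are hypothetical, and locking and hardware programming are omitted.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>

static int foo_enqueue(struct crypto_queue *queue, struct skcipher_request *req)
{
	/* was: ablkcipher_enqueue_request(queue, req) */
	return crypto_enqueue_request(queue, &req->base);
}

static void foo_dequeue_one(struct crypto_queue *queue)
{
	struct crypto_async_request *async_req, *backlog;
	struct skcipher_request *req;

	backlog = crypto_get_backlog(queue);
	async_req = crypto_dequeue_request(queue);
	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* was: ablkcipher_request_cast(async_req) */
	req = skcipher_request_cast(async_req);

	pr_debug("processing %u bytes, iv=%p\n", req->cryptlen, req->iv);
	/* hardware programming omitted in this sketch */
}
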
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8ac8ec6decd5..d4ea2f11ca68 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -547,7 +547,7 @@ unmap_in:
return -EINVAL;
}
-static int sahara_aes_process(struct ablkcipher_request *req)
+static int sahara_aes_process(struct skcipher_request *req)
{
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
@@ -558,20 +558,20 @@ static int sahara_aes_process(struct ablkcipher_request *req)
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- req->nbytes, req->src, req->dst);
+ req->cryptlen, req->src, req->dst);
/* assign new request to device */
- dev->total = req->nbytes;
+ dev->total = req->cryptlen;
dev->in_sg = req->src;
dev->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- if ((dev->flags & FLAGS_CBC) && req->info)
- memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
/* assign new context to device */
dev->ctx = ctx;
@@ -597,10 +597,10 @@ static int sahara_aes_process(struct ablkcipher_request *req)
return 0;
}
-static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
ctx->keylen = keylen;
@@ -630,16 +630,16 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
- req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
dev_err(dev->device,
"request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -648,7 +648,7 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
mutex_lock(&dev->queue_mutex);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
@@ -656,10 +656,10 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return err;
}
-static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -669,7 +669,7 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -678,10 +678,10 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -691,7 +691,7 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -700,10 +700,10 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, 0);
}
-static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -713,7 +713,7 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -722,10 +722,10 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -735,7 +735,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -744,10 +744,10 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_CBC);
}
-static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
@@ -756,14 +756,14 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->fallback);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));
return 0;
}
-static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
@@ -1071,8 +1071,8 @@ static int sahara_queue_manage(void *data)
ret = sahara_sha_process(req);
} else {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
+ struct skcipher_request *req =
+ skcipher_request_cast(async_req);
ret = sahara_aes_process(req);
}
@@ -1189,48 +1189,42 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "sahara-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+	.min_keysize	= AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "sahara-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+	.min_keysize	= AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
}
};
@@ -1318,7 +1312,7 @@ static int sahara_register_algs(struct sahara_dev *dev)
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1348,7 +1342,7 @@ err_sha_v3_algs:
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -1358,7 +1352,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
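
The sahara hunks above also carry the software-fallback idiom over to the new API: a synchronous skcipher allocated with crypto_alloc_sync_skcipher() at tfm-init time is handed the same src/dst/cryptlen/iv whenever the hardware cannot process a request. Below is a minimal sketch of that path, with hypothetical foo_* names and the fallback assumed to have been allocated in the driver's init_tfm callback.

#include <crypto/skcipher.h>

/* Hypothetical context holding the software fallback tfm. */
struct foo_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int foo_fallback_encrypt(struct foo_ctx *ctx,
				struct skcipher_request *req)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int err;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe the on-stack request */
	return err;
}
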
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index ba5ea6434f9c..d347a1d6e351 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -19,6 +19,7 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#define DRIVER_NAME "stm32-cryp"
@@ -137,7 +138,7 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *areq;
size_t authsize;
@@ -395,8 +396,8 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
- struct ablkcipher_request *req = cryp->req;
- u32 *tmp = req->info;
+ struct skcipher_request *req = cryp->req;
+ u32 *tmp = (void *)req->iv;
if (!tmp)
return;
@@ -616,7 +617,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
case CR_TDES_CBC:
case CR_AES_CBC:
case CR_AES_CTR:
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->iv);
break;
default:
@@ -667,7 +668,7 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
else
- crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+ crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
@@ -685,11 +686,11 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq);
-static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
@@ -714,11 +715,11 @@ static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
return 0;
}
-static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int stm32_cryp_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = skcipher_request_ctx(req);
struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
if (!cryp)
@@ -726,7 +727,7 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
- return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
@@ -743,10 +744,10 @@ static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
-static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -754,7 +755,7 @@ static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -764,17 +765,17 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des_key(tfm, key) ?:
+ return verify_skcipher_des_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
@@ -818,32 +819,32 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
}
-static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
}
-static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
@@ -868,47 +869,47 @@ static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
}
-static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
}
-static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
}
-static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
}
-static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
-static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
@@ -919,7 +920,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!req && !areq)
return -EINVAL;
- ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+ ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
crypto_aead_ctx(crypto_aead_reqtfm(areq));
cryp = ctx->cryp;
@@ -927,7 +928,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
+ rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
@@ -939,7 +940,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
cryp->areq = NULL;
- cryp->total_in = req->nbytes;
+ cryp->total_in = req->cryptlen;
cryp->total_out = cryp->total_in;
} else {
/*
@@ -1016,8 +1017,8 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
return stm32_cryp_prepare_req(req, NULL);
@@ -1025,11 +1026,11 @@ static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
if (!cryp)
@@ -1724,150 +1725,129 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-static struct crypto_alg crypto_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "stm32-ecb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ecb_encrypt,
- .decrypt = stm32_cryp_aes_ecb_decrypt,
- }
+static struct skcipher_alg crypto_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "stm32-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "stm32-cbc-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_cbc_encrypt,
- .decrypt = stm32_cryp_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "stm32-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "stm32-ctr-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ctr_encrypt,
- .decrypt = stm32_cryp_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "stm32-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
},
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "stm32-ecb-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_ecb_encrypt,
- .decrypt = stm32_cryp_des_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "stm32-ecb-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "stm32-cbc-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_cbc_encrypt,
- .decrypt = stm32_cryp_des_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "stm32-cbc-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "stm32-ecb-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_ecb_encrypt,
- .decrypt = stm32_cryp_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "stm32-ecb-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "stm32-cbc-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_cbc_encrypt,
- .decrypt = stm32_cryp_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "stm32-cbc-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
},
};
@@ -2010,7 +1990,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine2;
}
- ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
if (ret) {
dev_err(dev, "Could not register algs\n");
goto err_algs;
@@ -2027,7 +2007,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return 0;
err_aead_algs:
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -2059,7 +2039,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
return ret;
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
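Beyond the request-type rename, the stm32 hunks above move the request-context size out of tfm->crt_ablkcipher.reqsize and into the skcipher init hook, and flatten each crypto_alg entry into a struct skcipher_alg whose generic fields live under .base. A minimal sketch of that shape, assuming hypothetical my_* helpers that stand in for a driver's real setkey/encrypt/decrypt paths:

#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

struct my_reqctx {
	unsigned long mode;
};

static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{ return 0; }
static int my_encrypt(struct skcipher_request *req) { return 0; }
static int my_decrypt(struct skcipher_request *req) { return 0; }

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	/* replaces "tfm->crt_ablkcipher.reqsize = sizeof(...)" */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx));
	return 0;
}

static struct skcipher_alg my_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mydrv",
	.base.cra_priority	= 200,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct my_reqctx),	/* a real driver puts its tfm context here */
	.base.cra_module	= THIS_MODULE,

	.init			= my_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= my_setkey,
	.encrypt		= my_encrypt,
	.decrypt		= my_decrypt,
};

Note that CRYPTO_ALG_TYPE_ABLKCIPHER disappears from .cra_flags entirely; the type is implied by registering through crypto_register_skcipher().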
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 56e3068c9947..d71d65846e47 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -35,7 +35,7 @@
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
@@ -1490,10 +1490,10 @@ static int aead_decrypt(struct aead_request *req)
return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
if (ctx->keylen)
@@ -1507,39 +1507,39 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des3_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
keylen == AES_KEYSIZE_256)
- return ablkcipher_setkey(cipher, key, keylen);
+ return skcipher_setkey(cipher, key, keylen);
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct ablkcipher_request *areq)
+ struct skcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1547,20 +1547,20 @@ static void common_nonsnoop_unmap(struct device *dev,
DMA_BIDIRECTIONAL);
}
-static void ablkcipher_done(struct device *dev,
+static void skcipher_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
- struct ablkcipher_request *areq = context;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct skcipher_request *areq = context;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
- memcpy(areq->info, ctx->iv, ivsize);
+ memcpy(areq->iv, ctx->iv, ivsize);
kfree(edesc);
@@ -1568,17 +1568,17 @@ static void ablkcipher_done(struct device *dev,
}
static int common_nonsnoop(struct talitos_edesc *edesc,
- struct ablkcipher_request *areq,
+ struct skcipher_request *areq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->nbytes;
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ unsigned int cryptlen = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
int sg_count, ret;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
@@ -1638,65 +1638,65 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
return ret;
}
-static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
areq, bool encrypt)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- areq->info, 0, areq->nbytes, 0, ivsize, 0,
+ areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
areq->base.flags, encrypt);
}
-static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+static int skcipher_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, true);
+ edesc = skcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+static int skcipher_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, false);
+ edesc = skcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
@@ -1704,6 +1704,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
struct talitos_desc *desc = &edesc->desc;
@@ -1714,6 +1715,9 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+ if (req_ctx->last)
+ memcpy(areq->result, req_ctx->hw_context,
+ crypto_ahash_digestsize(tfm));
if (req_ctx->psrc)
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
@@ -1845,7 +1849,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (req_ctx->last)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
- areq->result, DMA_FROM_DEVICE);
+ req_ctx->hw_context, DMA_FROM_DEVICE);
else
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
@@ -2253,7 +2257,7 @@ struct talitos_alg_template {
u32 type;
u32 priority;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -2698,123 +2702,102 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- /* ABLKCIPHER algorithms. */
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ /* SKCIPHER algorithms. */
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-talitos",
+ .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_3DES,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -3032,46 +3015,45 @@ static int talitos_init_common(struct talitos_ctx *ctx,
return 0;
}
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
return talitos_init_common(ctx, talitos_alg);
}
-static int talitos_cra_init_aead(struct crypto_aead *tfm)
+static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
{
- struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.aead);
+ algt.alg.skcipher);
return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_cra_init(tfm);
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
ctx->keylen = 0;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct talitos_ahash_req_ctx));
- return 0;
+ return talitos_init_common(ctx, talitos_alg);
}
static void talitos_cra_exit(struct crypto_tfm *tfm)
@@ -3112,7 +3094,8 @@ static int talitos_remove(struct platform_device *ofdev)
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
@@ -3156,15 +3139,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt = *template;
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg = &t_alg->algt.alg.crypto;
- alg->cra_init = talitos_cra_init;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ alg = &t_alg->algt.alg.skcipher.base;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
- ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
+ t_alg->algt.alg.skcipher.setkey =
+ t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
+ t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
+ t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
@@ -3461,10 +3443,10 @@ static int talitos_probe(struct platform_device *ofdev)
}
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = crypto_register_alg(
- &t_alg->algt.alg.crypto);
- alg = &t_alg->algt.alg.crypto;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(
+ &t_alg->algt.alg.skcipher);
+ alg = &t_alg->algt.alg.skcipher.base;
break;
case CRYPTO_ALG_TYPE_AEAD:
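The talitos hunks follow the same field renames seen elsewhere in this merge: the request length moves from ->nbytes to ->cryptlen and the IV pointer from ->info to ->iv, while the tfm and its context are reached through crypto_skcipher_reqtfm()/crypto_skcipher_ctx(). A minimal sketch of a request entry point under those renames; my_hw_submit() is a hypothetical stand-in for the driver's descriptor path, declared here only so the sketch is self-contained.

#include <linux/scatterlist.h>
#include <crypto/internal/skcipher.h>

static int my_hw_submit(struct scatterlist *src, struct scatterlist *dst,
			unsigned int len, u8 *iv, unsigned int ivsize,
			bool encrypt);		/* hypothetical hardware path */

static int my_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);

	if (!req->cryptlen)			/* was req->nbytes */
		return 0;
	if (req->cryptlen % blocksize)
		return -EINVAL;

	return my_hw_submit(req->src, req->dst, req->cryptlen,
			    req->iv /* was req->info */, ivsize, encrypt);
}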
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b1c6f739f77b..b731895aa241 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -8,7 +8,7 @@ config CRYPTO_DEV_UX500_CRYP
tristate "UX500 crypto driver for CRYP block"
depends on CRYPTO_DEV_UX500
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This selects the crypto driver for the UX500_CRYP hardware. It supports
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 1628ae7a1467..95fb694a2667 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -30,6 +30,7 @@
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/platform_data/crypto-ux500.h>
@@ -828,10 +829,10 @@ static int get_nents(struct scatterlist *sg, int nbytes)
return nents;
}
-static int ablk_dma_crypt(struct ablkcipher_request *areq)
+static int ablk_dma_crypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
int bytes_written = 0;
@@ -840,8 +841,8 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- ctx->datalen = areq->nbytes;
- ctx->outlen = areq->nbytes;
+ ctx->datalen = areq->cryptlen;
+ ctx->outlen = areq->cryptlen;
ret = cryp_get_device_data(ctx, &device_data);
if (ret)
@@ -885,11 +886,11 @@ out:
return 0;
}
-static int ablk_crypt(struct ablkcipher_request *areq)
+static int ablk_crypt(struct skcipher_request *areq)
{
- struct ablkcipher_walk walk;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct skcipher_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
unsigned long src_paddr;
unsigned long dst_paddr;
@@ -902,21 +903,20 @@ static int ablk_crypt(struct ablkcipher_request *areq)
if (ret)
goto out;
- ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
- ret = ablkcipher_walk_phys(areq, &walk);
+ ret = skcipher_walk_async(&walk, areq);
if (ret) {
- pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!",
__func__);
goto out;
}
while ((nbytes = walk.nbytes) > 0) {
ctx->iv = walk.iv;
- src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset);
ctx->indata = phys_to_virt(src_paddr);
- dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset);
ctx->outdata = phys_to_virt(dst_paddr);
ctx->datalen = nbytes - (nbytes % ctx->blocksize);
@@ -926,11 +926,10 @@ static int ablk_crypt(struct ablkcipher_request *areq)
goto out;
nbytes -= ctx->datalen;
- ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ ret = skcipher_walk_done(&walk, nbytes);
if (ret)
goto out;
}
- ablkcipher_walk_complete(&walk);
out:
/* Release the device */
@@ -948,10 +947,10 @@ out:
return ret;
}
-static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -983,15 +982,15 @@ static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1002,15 +1001,15 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des3_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1021,10 +1020,10 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int cryp_blk_encrypt(struct ablkcipher_request *areq)
+static int cryp_blk_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1039,10 +1038,10 @@ static int cryp_blk_encrypt(struct ablkcipher_request *areq)
return ablk_crypt(areq);
}
-static int cryp_blk_decrypt(struct ablkcipher_request *areq)
+static int cryp_blk_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1058,19 +1057,19 @@ static int cryp_blk_decrypt(struct ablkcipher_request *areq)
struct cryp_algo_template {
enum cryp_algo_mode algomode;
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
};
-static int cryp_cra_init(struct crypto_tfm *tfm)
+static int cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct cryp_algo_template *cryp_alg = container_of(alg,
struct cryp_algo_template,
- crypto);
+ skcipher);
ctx->config.algomode = cryp_alg->algomode;
- ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+ ctx->blocksize = crypto_skcipher_blocksize(tfm);
return 0;
}
@@ -1078,205 +1077,147 @@ static int cryp_cra_init(struct crypto_tfm *tfm)
static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "aes",
- .cra_driver_name = "aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_AES_CBC,
- .crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_AES_CTR,
- .crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_DES_CBC,
- .crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_CBC,
- .crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
}
};
@@ -1293,18 +1234,18 @@ static int cryp_algs_register_all(void)
pr_debug("[%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
- ret = crypto_register_alg(&cryp_algs[i].crypto);
+ ret = crypto_register_skcipher(&cryp_algs[i].skcipher);
if (ret) {
count = i;
pr_err("[%s] alg registration failed",
- cryp_algs[i].crypto.cra_driver_name);
+ cryp_algs[i].skcipher.base.cra_driver_name);
goto unreg;
}
}
return 0;
unreg:
for (i = 0; i < count; i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
return ret;
}
@@ -1318,7 +1259,7 @@ static void cryp_algs_unregister_all(void)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
}
static int ux500_cryp_probe(struct platform_device *pdev)
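The ux500 hunks above also replace the removed ablkcipher_walk API: skcipher_walk_async() starts a physical-address walk over the request, the page/offset pair moves under walk.src.phys and walk.dst.phys, and skcipher_walk_done() both advances and completes the walk, so there is no separate *_walk_complete() call. A minimal sketch of that loop, assuming each step is fully consumed; my_process() is a hypothetical stand-in for the hardware transfer.

#include <linux/io.h>
#include <linux/mm.h>
#include <crypto/internal/skcipher.h>

static void my_process(void *src, void *dst, unsigned int len);	/* hypothetical */

static int my_walk_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int ret;

	ret = skcipher_walk_async(&walk, req);
	if (ret)
		return ret;

	while ((nbytes = walk.nbytes) > 0) {
		void *src = phys_to_virt(page_to_phys(walk.src.phys.page) +
					 walk.src.phys.offset);
		void *dst = phys_to_virt(page_to_phys(walk.dst.phys.page) +
					 walk.dst.phys.offset);

		my_process(src, dst, nbytes);
		ret = skcipher_walk_done(&walk, 0);	/* 0: nothing left over */
		if (ret)
			return ret;
	}
	return 0;
}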
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c172a6953477..c24f2db8d5e8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -140,7 +140,6 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
{
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *channel = NULL;
- dma_cookie_t cookie;
if (direction != DMA_TO_DEVICE) {
dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
@@ -176,7 +175,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
desc->callback = hash_dma_callback;
desc->callback_param = ctx;
- cookie = dmaengine_submit(desc);
+ dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 01b625e4e5ad..fb294174e408 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
default m
help
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 42d19205166b..4b71e80951b7 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -8,6 +8,7 @@
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
@@ -16,10 +17,10 @@
#include "virtio_crypto_common.h"
-struct virtio_crypto_ablkcipher_ctx {
+struct virtio_crypto_skcipher_ctx {
struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
@@ -30,8 +31,8 @@ struct virtio_crypto_sym_request {
/* Cipher or aead */
uint32_t type;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+ struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
@@ -41,7 +42,7 @@ struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct crypto_alg algo;
+ struct skcipher_alg algo;
};
/*
@@ -49,9 +50,9 @@ struct virtio_crypto_algo {
* and crypto algorithms registion.
*/
static DEFINE_MUTEX(algs_lock);
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err);
static void virtio_crypto_dataq_sym_callback
@@ -59,7 +60,7 @@ static void virtio_crypto_dataq_sym_callback
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
int error;
/* Finish the encrypt or decrypt process */
@@ -79,8 +80,8 @@ static void virtio_crypto_dataq_sym_callback
error = -EIO;
break;
}
- ablk_req = vc_sym_req->ablkcipher_req;
- virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req = vc_sym_req->skcipher_req;
+ virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
}
@@ -105,15 +106,13 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
- pr_err("virtio_crypto: Unsupported key length: %d\n",
- key_len);
return -EINVAL;
}
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
uint32_t alg, const uint8_t *key,
unsigned int keylen,
int encrypt)
@@ -202,8 +201,8 @@ static int virtio_crypto_alg_ablkcipher_init_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_close_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_close_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
@@ -263,8 +262,8 @@ static int virtio_crypto_alg_ablkcipher_close_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_sessions(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_sessions(
+ struct virtio_crypto_skcipher_ctx *ctx,
const uint8_t *key, unsigned int keylen)
{
uint32_t alg;
@@ -280,30 +279,30 @@ static int virtio_crypto_alg_ablkcipher_init_sessions(
goto bad_key;
/* Create encryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 1);
if (ret)
return ret;
/* Create decryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 0);
if (ret) {
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
return ret;
}
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/* Note: kernel crypto API realization */
-static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const uint8_t *key,
unsigned int keylen)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
uint32_t alg;
int ret;
@@ -325,11 +324,11 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
ctx->vcrypto = vcrypto;
} else {
/* Rekeying, we should close the created sessions previously */
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
}
- ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
if (ret) {
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
@@ -341,14 +340,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
struct data_queue *data_vq)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -361,7 +360,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int sg_total;
uint8_t *iv;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -398,7 +397,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
req_data->u.sym_req.u.cipher.para.src_data_len =
- cpu_to_le32(req->nbytes);
+ cpu_to_le32(req->cryptlen);
dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
if (unlikely(dst_len > U32_MAX)) {
@@ -408,9 +407,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
}
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
- req->nbytes, dst_len);
+ req->cryptlen, dst_len);
- if (unlikely(req->nbytes + dst_len + ivsize +
+ if (unlikely(req->cryptlen + dst_len + ivsize +
sizeof(vc_req->status) > vcrypto->max_size)) {
pr_err("virtio_crypto: The length is too big\n");
err = -EINVAL;
@@ -436,7 +435,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
err = -ENOMEM;
goto free;
}
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
+ if (!vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
vc_sym_req->iv = iv;
@@ -473,83 +477,93 @@ free:
return err;
}
-static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+ ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
-static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (!ctx->vcrypto)
return;
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
}
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq)
{
- struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
+ struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
+ ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
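
The hunks above keep the standard crypto_engine pattern: the skcipher encrypt/decrypt entry points only queue the request on the engine, the engine later invokes the do_one_request() hook, and the driver finishes the request asynchronously via crypto_finalize_skcipher_request(). A minimal sketch of that contract for a hypothetical driver "foo" (driver name, context layout and the probe-time engine assignment are assumptions, not part of this patch):

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct foo_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first so the engine finds it */
	struct crypto_engine *engine;		/* assumed to be set at probe time */
};

static int foo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	pr_debug("foo: processing %u bytes\n", req->cryptlen);
	/*
	 * Program the hardware here; its completion handler would later call
	 * crypto_finalize_skcipher_request(engine, req, err).
	 */
	return 0;
}

static int foo_encrypt(struct skcipher_request *req)
{
	struct foo_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

	/* Only queue the request; the engine calls foo_do_one_request() later. */
	return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}

static int foo_init(struct crypto_skcipher *tfm)
{
	struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.do_one_request = foo_do_one_request;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
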
@@ -558,12 +572,16 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err)
{
- crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ if (vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
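
The two scatterwalk_map_and_copy() calls added in this file implement the skcipher output-IV convention for CBC: on completion req->iv must hold the last ciphertext block so that chained requests behave like the generic cbc(aes) template. On decryption the block is captured from req->src before submission, because an in-place request would overwrite it; on encryption it is read from req->dst after completion. A sketch of that rule with a hypothetical helper (not part of the patch):

#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

static void example_store_output_iv(struct skcipher_request *req, bool encrypt)
{
	/*
	 * The ciphertext lives in req->dst after encryption and in req->src
	 * before decryption; for in-place decryption it must be read before
	 * the operation overwrites it.
	 */
	struct scatterlist *cipher_sg = encrypt ? req->dst : req->src;

	scatterwalk_map_and_copy(req->iv, cipher_sg,
				 req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
}
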
@@ -573,27 +591,21 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
.algo = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "virtio_crypto_aes_cbc",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = virtio_crypto_skcipher_init,
+ .exit = virtio_crypto_skcipher_exit,
+ .setkey = virtio_crypto_skcipher_setkey,
+ .decrypt = virtio_crypto_skcipher_decrypt,
+ .encrypt = virtio_crypto_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
} };
@@ -613,14 +625,14 @@ int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
- ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
- virtio_crypto_algs[i].algo.cra_name);
+ virtio_crypto_algs[i].algo.base.cra_name);
}
unlock:
@@ -644,7 +656,7 @@ void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 1)
- crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+ crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
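
For reference, a hypothetical in-kernel caller of the registered algorithm through the generic skcipher API (not part of this patch; the request is served by whichever "cbc(aes)" implementation has the highest priority, which may or may not be virtio_crypto_aes_cbc):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* In-place CBC encryption of a contiguous buffer; len must be a multiple
 * of AES_BLOCK_SIZE and iv must point to one block of IV material.
 */
static int example_cbc_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait for the (possibly asynchronous) completion. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
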
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 1c6e00da5a29..a24f85c589e7 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -112,7 +112,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node,
uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq);
void
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index cab32cfec9c4..709670d2b553 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-TARGET := linux-ppc64le
+override flavour := linux-ppc64le
else
-TARGET := linux-ppc64
+override flavour := linux-ppc64
endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
targets += aesp8-ppc.S ghashp8-ppc.S