author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 10:42:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 10:42:15 -0700
commit		cb906953d2c3fd450655d9fa833f03690ad50c23 (patch)
tree		06c5665afb24baee3ac49f62db61ca97918079b4 /arch/arm64/crypto
parent		6c373ca89399c5a3f7ef210ad8f63dc3437da345 (diff)
parent		3abafaf2192b1712079edfd4232b19877d6f41a5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 4.1:
New interfaces:
- user-space interface for AEAD
- user-space interface for RNG (i.e., pseudo RNG)
New hashes:
- ARMv8 SHA1/256
- ARMv8 AES
- ARMv8 GHASH
- ARM assembler and NEON SHA256
- MIPS OCTEON SHA1/256/512
- MIPS img-hash SHA1/256 and MD5
- Power 8 VMX AES/CBC/CTR/GHASH
- PPC assembler AES, SHA1/256 and MD5
- Broadcom IPROC RNG driver
Cleanups/fixes:
- prevent internal helper algos from being exposed to user-space
- merge common code from assembly/C SHA implementations
- misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (169 commits)
crypto: arm - workaround for building with old binutils
crypto: arm/sha256 - avoid sha256 code on ARMv7-M
crypto: x86/sha512_ssse3 - move SHA-384/512 SSSE3 implementation to base layer
crypto: x86/sha256_ssse3 - move SHA-224/256 SSSE3 implementation to base layer
crypto: x86/sha1_ssse3 - move SHA-1 SSSE3 implementation to base layer
crypto: arm64/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer
crypto: arm64/sha1-ce - move SHA-1 ARMv8 implementation to base layer
crypto: arm/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer
crypto: arm/sha256 - move SHA-224/256 ASM/NEON implementation to base layer
crypto: arm/sha1-ce - move SHA-1 ARMv8 implementation to base layer
crypto: arm/sha1_neon - move SHA-1 NEON implementation to base layer
crypto: arm/sha1 - move SHA-1 ARM asm implementation to base layer
crypto: sha512-generic - move to generic glue implementation
crypto: sha256-generic - move to generic glue implementation
crypto: sha1-generic - move to generic glue implementation
crypto: sha512 - implement base layer for SHA-512
crypto: sha256 - implement base layer for SHA-256
crypto: sha1 - implement base layer for SHA-1
crypto: api - remove instance when test failed
crypto: api - Move alg ref count init to crypto_check_alg
...
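Most of the shortlog above is a single refactor: the new sha1_base/sha256_base/sha512_base headers absorb the partial-block buffering, padding, and length bookkeeping that every SHA driver used to duplicate, so an arch glue file shrinks to a block transform plus one-line wrappers. A minimal sketch of the resulting driver shape, with my_sha256_block() standing in for a hypothetical arch-specific core and "sha256-my-arch" a made-up driver name (the real arm64 versions appear in the diff below):

	#include <crypto/internal/hash.h>
	#include <crypto/sha.h>
	#include <crypto/sha256_base.h>
	#include <linux/module.h>

	/* hypothetical compression core; real drivers point this at
	 * NEON/CE/asm code that absorbs 'blocks' 64-byte blocks */
	static void my_sha256_block(struct sha256_state *sst, u8 const *src,
				    int blocks)
	{
		/* arch-specific transform of sst->state goes here */
	}

	static int my_sha256_update(struct shash_desc *desc, const u8 *data,
				    unsigned int len)
	{
		/* base layer handles buffering and block counting */
		return sha256_base_do_update(desc, data, len, my_sha256_block);
	}

	static int my_sha256_final(struct shash_desc *desc, u8 *out)
	{
		/* base layer appends the MD padding and bit count */
		sha256_base_do_finalize(desc, my_sha256_block);
		return sha256_base_finish(desc, out);
	}

	static struct shash_alg my_alg = {
		.init		= sha256_base_init,
		.update		= my_sha256_update,
		.final		= my_sha256_final,
		.descsize	= sizeof(struct sha256_state),
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-my-arch",
			.cra_priority		= 100,	/* arbitrary */
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
		},
	};

	static int __init my_mod_init(void)
	{
		return crypto_register_shash(&my_alg);
	}
	module_init(my_mod_init);

	static void __exit my_mod_exit(void)
	{
		crypto_unregister_shash(&my_alg);
	}
	module_exit(my_mod_exit);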
Diffstat (limited to 'arch/arm64/crypto')
-rw-r--r--	arch/arm64/crypto/aes-glue.c		|  12
-rw-r--r--	arch/arm64/crypto/sha1-ce-core.S	|  33
-rw-r--r--	arch/arm64/crypto/sha1-ce-glue.c	| 151
-rw-r--r--	arch/arm64/crypto/sha2-ce-core.S	|  29
-rw-r--r--	arch/arm64/crypto/sha2-ce-glue.c	| 227
5 files changed, 130 insertions(+), 322 deletions(-)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index b1b5b893eb20..05d9e16c0dfd 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -284,7 +284,8 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_name		= "__ecb-aes-" MODE,
 	.cra_driver_name	= "__driver-ecb-aes-" MODE,
 	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
+				  CRYPTO_ALG_INTERNAL,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
 	.cra_alignmask		= 7,
@@ -302,7 +303,8 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_name		= "__cbc-aes-" MODE,
 	.cra_driver_name	= "__driver-cbc-aes-" MODE,
 	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
+				  CRYPTO_ALG_INTERNAL,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
 	.cra_alignmask		= 7,
@@ -320,7 +322,8 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_name		= "__ctr-aes-" MODE,
 	.cra_driver_name	= "__driver-ctr-aes-" MODE,
 	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
+				  CRYPTO_ALG_INTERNAL,
 	.cra_blocksize		= 1,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
 	.cra_alignmask		= 7,
@@ -338,7 +341,8 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_name		= "__xts-aes-" MODE,
 	.cra_driver_name	= "__driver-xts-aes-" MODE,
 	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
+				  CRYPTO_ALG_INTERNAL,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
 	.cra_alignmask		= 7,
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 09d57d98609c..033aae6d732a 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -66,8 +66,8 @@
 	.word		0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
 
 	/*
-	 * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
-	 *			  u8 *head, long bytes)
+	 * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+	 *			  int blocks)
 	 */
 ENTRY(sha1_ce_transform)
 	/* load round constants */
@@ -78,25 +78,22 @@ ENTRY(sha1_ce_transform)
 	ld1r		{k3.4s}, [x6]
 
 	/* load state */
-	ldr		dga, [x2]
-	ldr		dgb, [x2, #16]
+	ldr		dga, [x0]
+	ldr		dgb, [x0, #16]
 
-	/* load partial state (if supplied) */
-	cbz		x3, 0f
-	ld1		{v8.4s-v11.4s}, [x3]
-	b		1f
+	/* load sha1_ce_state::finalize */
+	ldr		w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
 
 	/* load input */
 0:	ld1		{v8.4s-v11.4s}, [x1], #64
-	sub		w0, w0, #1
+	sub		w2, w2, #1
 
-1:
 CPU_LE(	rev32	v8.16b, v8.16b	)
 CPU_LE(	rev32	v9.16b, v9.16b	)
 CPU_LE(	rev32	v10.16b, v10.16b	)
 CPU_LE(	rev32	v11.16b, v11.16b	)
 
-2:	add		t0.4s, v8.4s, k0.4s
+1:	add		t0.4s, v8.4s, k0.4s
 	mov		dg0v.16b, dgav.16b
 
 	add_update	c, ev, k0, 8, 9, 10, 11, dgb
@@ -127,15 +124,15 @@ CPU_LE(	rev32	v11.16b, v11.16b	)
 	add		dgbv.2s, dgbv.2s, dg1v.2s
 	add		dgav.4s, dgav.4s, dg0v.4s
 
-	cbnz		w0, 0b
+	cbnz		w2, 0b
 
 	/*
 	 * Final block: add padding and total bit count.
-	 * Skip if we have no total byte count in x4. In that case, the input
-	 * size was not a round multiple of the block size, and the padding is
-	 * handled by the C code.
+	 * Skip if the input size was not a round multiple of the block size,
+	 * the padding is handled by the C code in that case.
 	 */
 	cbz		x4, 3f
+	ldr		x4, [x0, #:lo12:sha1_ce_offsetof_count]
 	movi		v9.2d, #0
 	mov		x8, #0x80000000
 	movi		v10.2d, #0
@@ -144,10 +141,10 @@ CPU_LE(	rev32	v11.16b, v11.16b	)
 	mov		x4, #0
 	mov		v11.d[0], xzr
 	mov		v11.d[1], x7
-	b		2b
+	b		1b
 
 	/* store new state */
-3:	str		dga, [x2]
-	str		dgb, [x2, #16]
+3:	str		dga, [x0]
+	str		dgb, [x0, #16]
 	ret
 ENDPROC(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 6fe83f37a750..114e7cc5de8c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,144 +12,81 @@
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
+#include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 
+#define ASM_EXPORT(sym, val) \
+	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
+
 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
 
-asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
-				  u8 *head, long bytes);
+struct sha1_ce_state {
+	struct sha1_state	sst;
+	u32			finalize;
+};
 
-static int sha1_init(struct shash_desc *desc)
-{
-	struct sha1_state *sctx = shash_desc_ctx(desc);
+asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+				  int blocks);
 
-	*sctx = (struct sha1_state){
-		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
-	};
-	return 0;
-}
-
-static int sha1_update(struct shash_desc *desc, const u8 *data,
-		       unsigned int len)
+static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
 {
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
-	sctx->count += len;
-
-	if ((partial + len) >= SHA1_BLOCK_SIZE) {
-		int blocks;
-
-		if (partial) {
-			int p = SHA1_BLOCK_SIZE - partial;
+	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-			memcpy(sctx->buffer + partial, data, p);
-			data += p;
-			len -= p;
-		}
-
-		blocks = len / SHA1_BLOCK_SIZE;
-		len %= SHA1_BLOCK_SIZE;
-
-		kernel_neon_begin_partial(16);
-		sha1_ce_transform(blocks, data, sctx->state,
-				  partial ? sctx->buffer : NULL, 0);
-		kernel_neon_end();
+	sctx->finalize = 0;
+	kernel_neon_begin_partial(16);
+	sha1_base_do_update(desc, data, len,
+			    (sha1_block_fn *)sha1_ce_transform);
+	kernel_neon_end();
 
-		data += blocks * SHA1_BLOCK_SIZE;
-		partial = 0;
-	}
-	if (len)
-		memcpy(sctx->buffer + partial, data, len);
 	return 0;
 }
 
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+			 unsigned int len, u8 *out)
 {
-	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	__be64 bits = cpu_to_be64(sctx->count << 3);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	u32 padlen = SHA1_BLOCK_SIZE
-		     - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
-
-	sha1_update(desc, padding, padlen);
-	sha1_update(desc, (const u8 *)&bits, sizeof(bits));
-
-	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha1_state){};
-	return 0;
-}
-
-static int sha1_finup(struct shash_desc *desc, const u8 *data,
-		      unsigned int len, u8 *out)
-{
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	__be32 *dst = (__be32 *)out;
-	int blocks;
-	int i;
-
-	if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
-		sha1_update(desc, data, len);
-		return sha1_final(desc, out);
-	}
+	ASM_EXPORT(sha1_ce_offsetof_count,
+		   offsetof(struct sha1_ce_state, sst.count));
+	ASM_EXPORT(sha1_ce_offsetof_finalize,
+		   offsetof(struct sha1_ce_state, finalize));
 
 	/*
-	 * Use a fast path if the input is a multiple of 64 bytes. In
-	 * this case, there is no need to copy data around, and we can
-	 * perform the entire digest calculation in a single invocation
-	 * of sha1_ce_transform()
+	 * Allow the asm code to perform the finalization if there is no
+	 * partial data and the input is a round multiple of the block size.
 	 */
-	blocks = len / SHA1_BLOCK_SIZE;
+	sctx->finalize = finalize;
 
 	kernel_neon_begin_partial(16);
-	sha1_ce_transform(blocks, data, sctx->state, NULL, len);
+	sha1_base_do_update(desc, data, len,
+			    (sha1_block_fn *)sha1_ce_transform);
+	if (!finalize)
+		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
 	kernel_neon_end();
-
-	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha1_state){};
-	return 0;
+	return sha1_base_finish(desc, out);
 }
 
-static int sha1_export(struct shash_desc *desc, void *out)
+static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	struct sha1_state *dst = out;
-
-	*dst = *sctx;
-	return 0;
-}
-
-static int sha1_import(struct shash_desc *desc, const void *in)
-{
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	struct sha1_state const *src = in;
-
-	*sctx = *src;
-	return 0;
+	kernel_neon_begin_partial(16);
+	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	kernel_neon_end();
+	return sha1_base_finish(desc, out);
 }
 
 static struct shash_alg alg = {
-	.init			= sha1_init,
-	.update			= sha1_update,
-	.final			= sha1_final,
-	.finup			= sha1_finup,
-	.export			= sha1_export,
-	.import			= sha1_import,
-	.descsize		= sizeof(struct sha1_state),
+	.init			= sha1_base_init,
+	.update			= sha1_ce_update,
+	.final			= sha1_ce_final,
+	.finup			= sha1_ce_finup,
+	.descsize		= sizeof(struct sha1_ce_state),
 	.digestsize		= SHA1_DIGEST_SIZE,
-	.statesize		= sizeof(struct sha1_state),
 	.base			= {
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sha1-ce",
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 7f29fc031ea8..5df9d9d470ad 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -73,8 +73,8 @@
 	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
 
 	/*
-	 * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
-	 *			  u8 *head, long bytes)
+	 * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+	 *			  int blocks)
 	 */
 ENTRY(sha2_ce_transform)
 	/* load round constants */
@@ -85,24 +85,21 @@ ENTRY(sha2_ce_transform)
 	ld1		{v12.4s-v15.4s}, [x8]
 
 	/* load state */
-	ldp		dga, dgb, [x2]
+	ldp		dga, dgb, [x0]
 
-	/* load partial input (if supplied) */
-	cbz		x3, 0f
-	ld1		{v16.4s-v19.4s}, [x3]
-	b		1f
+	/* load sha256_ce_state::finalize */
+	ldr		w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
 
 	/* load input */
 0:	ld1		{v16.4s-v19.4s}, [x1], #64
-	sub		w0, w0, #1
+	sub		w2, w2, #1
 
-1:
 CPU_LE(	rev32	v16.16b, v16.16b	)
 CPU_LE(	rev32	v17.16b, v17.16b	)
 CPU_LE(	rev32	v18.16b, v18.16b	)
 CPU_LE(	rev32	v19.16b, v19.16b	)
 
-2:	add		t0.4s, v16.4s, v0.4s
+1:	add		t0.4s, v16.4s, v0.4s
 	mov		dg0v.16b, dgav.16b
 	mov		dg1v.16b, dgbv.16b
@@ -131,15 +128,15 @@ CPU_LE(	rev32	v19.16b, v19.16b	)
 	add		dgbv.4s, dgbv.4s, dg1v.4s
 
 	/* handled all input blocks? */
-	cbnz		w0, 0b
+	cbnz		w2, 0b
 
 	/*
 	 * Final block: add padding and total bit count.
-	 * Skip if we have no total byte count in x4. In that case, the input
-	 * size was not a round multiple of the block size, and the padding is
-	 * handled by the C code.
+	 * Skip if the input size was not a round multiple of the block size,
+	 * the padding is handled by the C code in that case.
 	 */
 	cbz		x4, 3f
+	ldr		x4, [x0, #:lo12:sha256_ce_offsetof_count]
 	movi		v17.2d, #0
 	mov		x8, #0x80000000
 	movi		v18.2d, #0
@@ -148,9 +145,9 @@ CPU_LE(	rev32	v19.16b, v19.16b	)
 	mov		x4, #0
 	mov		v19.d[0], xzr
 	mov		v19.d[1], x7
-	b		2b
+	b		1b
 
 	/* store new state */
-3:	stp		dga, dgb, [x2]
+3:	stp		dga, dgb, [x0]
 	ret
 ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index ae67e88c28b9..1340e44c048b 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,206 +12,82 @@
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
+#include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 
+#define ASM_EXPORT(sym, val) \
+	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
+
 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
 
-asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
-				 u8 *head, long bytes);
-
-static int sha224_init(struct shash_desc *desc)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	*sctx = (struct sha256_state){
-		.state = {
-			SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
-			SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-		}
-	};
-	return 0;
-}
-
-static int sha256_init(struct shash_desc *desc)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	*sctx = (struct sha256_state){
-		.state = {
-			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
-			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-		}
-	};
-	return 0;
-}
-
-static int sha2_update(struct shash_desc *desc, const u8 *data,
-		       unsigned int len)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
-	sctx->count += len;
-
-	if ((partial + len) >= SHA256_BLOCK_SIZE) {
-		int blocks;
-
-		if (partial) {
-			int p = SHA256_BLOCK_SIZE - partial;
-
-			memcpy(sctx->buf + partial, data, p);
-			data += p;
-			len -= p;
-		}
+struct sha256_ce_state {
+	struct sha256_state	sst;
+	u32			finalize;
+};
 
-		blocks = len / SHA256_BLOCK_SIZE;
-		len %= SHA256_BLOCK_SIZE;
+asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+				  int blocks);
 
-		kernel_neon_begin_partial(28);
-		sha2_ce_transform(blocks, data, sctx->state,
-				  partial ? sctx->buf : NULL, 0);
-		kernel_neon_end();
-
-		data += blocks * SHA256_BLOCK_SIZE;
-		partial = 0;
-	}
-	if (len)
-		memcpy(sctx->buf + partial, data, len);
-	return 0;
-}
-
-static void sha2_final(struct shash_desc *desc)
+static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
+			    unsigned int len)
 {
-	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	__be64 bits = cpu_to_be64(sctx->count << 3);
-	u32 padlen = SHA256_BLOCK_SIZE
-		     - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
-
-	sha2_update(desc, padding, padlen);
-	sha2_update(desc, (const u8 *)&bits, sizeof(bits));
-}
-
-static int sha224_final(struct shash_desc *desc, u8 *out)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	sha2_final(desc);
-
-	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha256_state){};
-	return 0;
-}
+	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-static int sha256_final(struct shash_desc *desc, u8 *out)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	sha2_final(desc);
-
-	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
+	sctx->finalize = 0;
+	kernel_neon_begin_partial(28);
+	sha256_base_do_update(desc, data, len,
+			      (sha256_block_fn *)sha2_ce_transform);
+	kernel_neon_end();
 
-	*sctx = (struct sha256_state){};
 	return 0;
 }
 
-static void sha2_finup(struct shash_desc *desc, const u8 *data,
-		       unsigned int len)
+static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
+			   unsigned int len, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	int blocks;
+	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
-		sha2_update(desc, data, len);
-		sha2_final(desc);
-		return;
-	}
+	ASM_EXPORT(sha256_ce_offsetof_count,
+		   offsetof(struct sha256_ce_state, sst.count));
+	ASM_EXPORT(sha256_ce_offsetof_finalize,
+		   offsetof(struct sha256_ce_state, finalize));
 
 	/*
-	 * Use a fast path if the input is a multiple of 64 bytes. In
-	 * this case, there is no need to copy data around, and we can
-	 * perform the entire digest calculation in a single invocation
-	 * of sha2_ce_transform()
+	 * Allow the asm code to perform the finalization if there is no
+	 * partial data and the input is a round multiple of the block size.
 	 */
-	blocks = len / SHA256_BLOCK_SIZE;
+	sctx->finalize = finalize;
 
 	kernel_neon_begin_partial(28);
-	sha2_ce_transform(blocks, data, sctx->state, NULL, len);
+	sha256_base_do_update(desc, data, len,
+			      (sha256_block_fn *)sha2_ce_transform);
+	if (!finalize)
+		sha256_base_do_finalize(desc,
+					(sha256_block_fn *)sha2_ce_transform);
 	kernel_neon_end();
+	return sha256_base_finish(desc, out);
 }
 
-static int sha224_finup(struct shash_desc *desc, const u8 *data,
-			unsigned int len, u8 *out)
+static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	sha2_finup(desc, data, len);
-
-	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha256_state){};
-	return 0;
-}
-
-static int sha256_finup(struct shash_desc *desc, const u8 *data,
-			unsigned int len, u8 *out)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	sha2_finup(desc, data, len);
-
-	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha256_state){};
-	return 0;
-}
-
-static int sha2_export(struct shash_desc *desc, void *out)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct sha256_state *dst = out;
-
-	*dst = *sctx;
-	return 0;
-}
-
-static int sha2_import(struct shash_desc *desc, const void *in)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct sha256_state const *src = in;
-
-	*sctx = *src;
-	return 0;
+	kernel_neon_begin_partial(28);
+	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	kernel_neon_end();
+	return sha256_base_finish(desc, out);
 }
 
 static struct shash_alg algs[] = { {
-	.init			= sha224_init,
-	.update			= sha2_update,
-	.final			= sha224_final,
-	.finup			= sha224_finup,
-	.export			= sha2_export,
-	.import			= sha2_import,
-	.descsize		= sizeof(struct sha256_state),
+	.init			= sha224_base_init,
+	.update			= sha256_ce_update,
+	.final			= sha256_ce_final,
+	.finup			= sha256_ce_finup,
+	.descsize		= sizeof(struct sha256_ce_state),
 	.digestsize		= SHA224_DIGEST_SIZE,
-	.statesize		= sizeof(struct sha256_state),
 	.base			= {
 		.cra_name		= "sha224",
		.cra_driver_name	= "sha224-ce",
@@ -221,15 +97,12 @@ static struct shash_alg algs[] = { {
 		.cra_module		= THIS_MODULE,
 	}
 }, {
-	.init			= sha256_init,
-	.update			= sha2_update,
-	.final			= sha256_final,
-	.finup			= sha256_finup,
-	.export			= sha2_export,
-	.import			= sha2_import,
-	.descsize		= sizeof(struct sha256_state),
+	.init			= sha256_base_init,
+	.update			= sha256_ce_update,
+	.final			= sha256_ce_final,
+	.finup			= sha256_ce_finup,
+	.descsize		= sizeof(struct sha256_ce_state),
 	.digestsize		= SHA256_DIGEST_SIZE,
-	.statesize		= sizeof(struct sha256_state),
 	.base			= {
 		.cra_name		= "sha256",
 		.cra_driver_name	= "sha256-ce",
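None of this changes what consumers see: the CRYPTO_ALG_INTERNAL flag added in aes-glue.c only keeps the "__driver-*" helper algorithms from being allocated directly (notably from user-space), while the accelerated hashes still register under their generic names and win selection by priority. A minimal 4.1-era sketch of an in-kernel caller that would transparently pick up "sha256-ce" on CPUs with the Crypto Extensions; the demo_sha256() wrapper itself is hypothetical:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int demo_sha256(const u8 *data, unsigned int len, u8 *digest)
	{
		/* resolves to the highest-priority "sha256" provider,
		 * e.g. sha256-ce when the CPU advertises the extensions */
		struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
			err = crypto_shash_digest(desc, data, len, digest);
		}

		crypto_free_shash(tfm);
		return err;
	}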