author		Herbert Xu <herbert@gondor.apana.org.au>	2015-05-28 11:16:41 +0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-05-28 11:16:41 +0800
commit		6d7e3d89954af773c70948a54cfa2aaa15907c1c (patch)
tree		6908f5e0e1af7294c00dc4e06ed583ef62f594df
parent		d725332208ef13241fc435eece790c9d0ea16a4e (diff)
parent		f858c7bcca8c20761a20593439fe998b4b67e86b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge the crypto tree for 4.1 to pull in the changeset that disables
algif_aead.
-rw-r--r--	arch/arm64/crypto/crc32-arm64.c			22
-rw-r--r--	arch/arm64/crypto/sha1-ce-glue.c		3
-rw-r--r--	arch/arm64/crypto/sha2-ce-glue.c		3
-rw-r--r--	arch/s390/crypto/ghash_s390.c			25
-rw-r--r--	arch/x86/crypto/sha512-avx2-asm.S		2
-rw-r--r--	crypto/Kconfig					9
-rw-r--r--	crypto/algif_aead.c				9
-rw-r--r--	drivers/char/hw_random/bcm63xx-rng.c		18
-rw-r--r--	drivers/crypto/Kconfig				3
-rw-r--r--	include/linux/compiler-gcc.h			16
-rw-r--r--	include/linux/compiler-intel.h			3
-rw-r--r--	include/linux/compiler.h			4
-rw-r--r--	lib/string.c					2
13 files changed, 77 insertions, 42 deletions
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
index 9499199924ae..6a37c3c6b11d 100644
--- a/arch/arm64/crypto/crc32-arm64.c
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
+	put_unaligned_le32(ctx->crc, out);
+	return 0;
+}
+
+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
 	put_unaligned_le32(~ctx->crc, out);
 	return 0;
 }
 
 static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+	put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
 	return 0;
 }
 
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
 {
 	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
 
+	mctx->key = 0;
+	return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
 	mctx->key = ~0;
 	return 0;
 }
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
 	.setkey			=	chksum_setkey,
 	.init			=	chksum_init,
 	.update			=	chksumc_update,
-	.final			=	chksum_final,
+	.final			=	chksumc_final,
 	.finup			=	chksumc_finup,
 	.digest			=	chksumc_digest,
 	.descsize		=	sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
 		.cra_alignmask		=	0,
 		.cra_ctxsize		=	sizeof(struct chksum_ctx),
 		.cra_module		=	THIS_MODULE,
-		.cra_init		=	crc32_cra_init,
+		.cra_init		=	crc32c_cra_init,
 	}
 };
 
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 114e7cc5de8c..aefda9868627 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
+	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+
+	sctx->finalize = 0;
 	kernel_neon_begin_partial(16);
 	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
 	kernel_neon_end();
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 1340e44c048b..7cd587564a41 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
+	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+	sctx->finalize = 0;
 	kernel_neon_begin_partial(28);
 	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
 	kernel_neon_end();
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 7940dc90e80b..b258110da952 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -16,11 +16,12 @@
 #define GHASH_DIGEST_SIZE	16
 
 struct ghash_ctx {
-	u8 icv[16];
-	u8 key[16];
+	u8 key[GHASH_BLOCK_SIZE];
 };
 
 struct ghash_desc_ctx {
+	u8 icv[GHASH_BLOCK_SIZE];
+	u8 key[GHASH_BLOCK_SIZE];
 	u8 buffer[GHASH_BLOCK_SIZE];
 	u32 bytes;
 };
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
 	memset(dctx, 0, sizeof(*dctx));
+	memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
 
 	return 0;
 }
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
 	}
 
 	memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-	memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
 
 	return 0;
 }
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
 			 const u8 *src, unsigned int srclen)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	unsigned int n;
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
 			src += n;
 
 			if (!dctx->bytes) {
-				ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+				ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
 						      GHASH_BLOCK_SIZE);
 				if (ret != GHASH_BLOCK_SIZE)
 					return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+		ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
 		if (ret != n)
 			return -EIO;
 		src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
 	return 0;
 }
 
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 
 		memset(pos, 0, dctx->bytes);
 
-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+		ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
 		if (ret != GHASH_BLOCK_SIZE)
 			return -EIO;
+
+		dctx->bytes = 0;
 	}
 
-	dctx->bytes = 0;
 	return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	int ret;
 
-	ret = ghash_flush(ctx, dctx);
+	ret = ghash_flush(dctx);
 	if (!ret)
-		memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+		memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
 	return ret;
 }
 
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index a4771dcd1fcf..1f20b35d8573 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -79,7 +79,7 @@ NUM_BLKS = %rdx
 c = %rcx
 d = %r8
 e = %rdx
-y3 = %rdi
+y3 = %rsi
 
 TBL = %rbp
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index af011a96e97d..0ff4cd44e4f8 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1532,15 +1532,6 @@ config CRYPTO_USER_API_RNG
 	  This option enables the user-spaces interface for random
 	  number generator algorithms.
 
-config CRYPTO_USER_API_AEAD
-	tristate "User-space interface for AEAD cipher algorithms"
-	depends on NET
-	select CRYPTO_AEAD
-	select CRYPTO_USER_API
-	help
-	  This option enables the user-spaces interface for AEAD
-	  cipher algorithms.
-
 config CRYPTO_HASH_INFO
 	bool
 
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 53702e9c6820..a55e4e6fa3d8 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -34,7 +34,7 @@ struct aead_ctx {
 	/*
 	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-	 * bytes
+	 * pages
 	 */
 #define RSGL_MAX_ENTRIES	ALG_MAX_PAGES
 	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -436,11 +436,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
 		if (err < 0)
 			goto unlock;
 		usedpages += err;
-		/* chain the new scatterlist with initial list */
+		/* chain the new scatterlist with previous one */
 		if (cnt)
-			scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-						 ctx->rsgl[cnt].sg, 1,
-						 sg_nents(ctx->rsgl[cnt-1].sg));
+			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+
 		/* we do not need more iovecs as we have sufficient memory */
 		if (outlen <= usedpages)
 			break;
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index d1494ecd9e11..4b31f1387f37 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
 	val &= ~RNG_EN;
 	__raw_writel(val, priv->regs + RNG_CTRL);
 
-	clk_didsable_unprepare(prov->clk);
+	clk_disable_unprepare(priv->clk);
 }
 
 static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
 	priv->rng.name = pdev->name;
 	priv->rng.init = bcm63xx_rng_init;
 	priv->rng.cleanup = bcm63xx_rng_cleanup;
-	prov->rng.data_present = bcm63xx_rng_data_present;
+	priv->rng.data_present = bcm63xx_rng_data_present;
 	priv->rng.data_read = bcm63xx_rng_data_read;
 
 	priv->clk = devm_clk_get(&pdev->dev, "ipsec");
 	if (IS_ERR(priv->clk)) {
-		error = PTR_ERR(priv->clk);
-		dev_err(&pdev->dev, "no clock for device: %d\n", error);
-		return error;
+		ret = PTR_ERR(priv->clk);
+		dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+		return ret;
 	}
 
 	if (!devm_request_mem_region(&pdev->dev, r->start,
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	error = devm_hwrng_register(&pdev->dev, &priv->rng);
-	if (error) {
+	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to register rng device: %d\n",
-			error);
-		return error;
+			ret);
+		return ret;
 	}
 
 	dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0889e496d886..6b0579fdbbb2 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -466,8 +466,9 @@ config CRYPTO_DEV_VMX
 source "drivers/crypto/vmx/Kconfig"
 
 config CRYPTO_DEV_IMGTEC_HASH
-	depends on MIPS || COMPILE_TEST
 	tristate "Imagination Technologies hardware hash accelerator"
+	depends on MIPS || COMPILE_TEST
+	depends on HAS_DMA
 	select CRYPTO_ALGAPI
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cdf13ca7cac3..371e560d13cf 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -9,10 +9,24 @@
 		   + __GNUC_MINOR__ * 100 \
 		   + __GNUC_PATCHLEVEL__)
 
-
 /* Optimization barrier */
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proofed that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1727e6..0c9a2f2c2802 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,12 @@
 /* Intel ECC compiler doesn't support gcc specific asm stmts.
  * It uses intrinsics to do the equivalent things.
  */
+#undef barrier_data
 #undef RELOC_HIDE
 #undef OPTIMIZER_HIDE_VAR
 
+#define barrier_data(ptr) barrier()
+
 #define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
      __ptr = (unsigned long) (ptr);				\
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 0e41ca0e5927..867722591be2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define barrier() __memory_barrier()
 #endif
 
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
 /* Unreachable code */
 #ifndef unreachable
 # define unreachable() do { } while (1)
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
 void memzero_explicit(void *s, size_t count)
 {
 	memset(s, 0, count);
-	barrier();
+	barrier_data(s);
 }
 EXPORT_SYMBOL(memzero_explicit);