Diffstat (limited to 'include/crypto')
-rw-r--r--   include/crypto/aead.h               | 34
-rw-r--r--   include/crypto/algapi.h             |  7
-rw-r--r--   include/crypto/arc4.h               | 10
-rw-r--r--   include/crypto/chacha.h             |  2
-rw-r--r--   include/crypto/crypto_wq.h          |  8
-rw-r--r--   include/crypto/drbg.h               |  2
-rw-r--r--   include/crypto/internal/hash.h      |  6
-rw-r--r--   include/crypto/internal/skcipher.h  | 60
-rw-r--r--   include/crypto/skcipher.h           | 92
9 files changed, 77 insertions, 144 deletions
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 61bb10490492..3c245b1859e7 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -317,21 +317,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
  *
  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
  */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_alg *alg = aead->base.__crt_alg;
-	unsigned int cryptlen = req->cryptlen;
-	int ret;
-
-	crypto_stats_get(alg);
-	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else
-		ret = crypto_aead_alg(aead)->encrypt(req);
-	crypto_stats_aead_encrypt(cryptlen, alg, ret);
-	return ret;
-}
+int crypto_aead_encrypt(struct aead_request *req);
 
 /**
  * crypto_aead_decrypt() - decrypt ciphertext
@@ -355,23 +341,7 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
  * integrity of the ciphertext or the associated data was violated);
  * < 0 if an error occurred.
  */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_alg *alg = aead->base.__crt_alg;
-	unsigned int cryptlen = req->cryptlen;
-	int ret;
-
-	crypto_stats_get(alg);
-	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else if (req->cryptlen < crypto_aead_authsize(aead))
-		ret = -EINVAL;
-	else
-		ret = crypto_aead_alg(aead)->decrypt(req);
-	crypto_stats_aead_decrypt(cryptlen, alg, ret);
-	return ret;
-}
+int crypto_aead_decrypt(struct aead_request *req);
 
 /**
  * DOC: Asynchronous AEAD Request Handle
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 743d626479ef..dc1106af95c3 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -189,7 +189,6 @@ void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
 			   struct crypto_async_request *request);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
-int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 {
 	return queue->qlen;
@@ -371,12 +370,6 @@ static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
 	return req->__ctx;
 }
 
-static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
-					  struct crypto_ablkcipher *tfm)
-{
-	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
-}
-
 static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
 						     u32 type, u32 mask)
 {
diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h
index 5b2c24ab0139..f3c22fe01704 100644
--- a/include/crypto/arc4.h
+++ b/include/crypto/arc4.h
@@ -6,8 +6,18 @@
 #ifndef _CRYPTO_ARC4_H
 #define _CRYPTO_ARC4_H
 
+#include <linux/types.h>
+
 #define ARC4_MIN_KEY_SIZE	1
 #define ARC4_MAX_KEY_SIZE	256
 #define ARC4_BLOCK_SIZE		1
 
+struct arc4_ctx {
+	u32 S[256];
+	u32 x, y;
+};
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len);
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len);
+
 #endif /* _CRYPTO_ARC4_H */
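Note: the new arc4.h declarations form a bare library interface with
caller-allocated state, usable without the crypto_tfm machinery. A minimal
sketch of a kernel-internal caller, assuming only the declarations above
(the wrapper function itself is hypothetical, not part of this diff):

#include <crypto/arc4.h>
#include <linux/string.h>

/* Hypothetical example: RC4-process a buffer in place. The same call
 * decrypts, since the stream cipher operation is its own inverse.
 */
static void example_arc4(const u8 *key, unsigned int keylen,
			 u8 *buf, unsigned int len)
{
	struct arc4_ctx ctx;

	/* keylen must be ARC4_MIN_KEY_SIZE..ARC4_MAX_KEY_SIZE bytes */
	arc4_setkey(&ctx, key, keylen);
	arc4_crypt(&ctx, buf, buf, len);

	/* the ctx holds the expanded key schedule; wipe it when done */
	memzero_explicit(&ctx, sizeof(ctx));
}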
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 1fc70a69d550..d1e723c6a37d 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -41,7 +41,7 @@ static inline void chacha20_block(u32 *state, u8 *stream)
 }
 
 void hchacha_block(const u32 *in, u32 *out, int nrounds);
 
-void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv);
+void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv);
 
 int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keysize);
diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h
deleted file mode 100644
index 23114746ac08..000000000000
--- a/include/crypto/crypto_wq.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef CRYPTO_WQ_H
-#define CRYPTO_WQ_H
-
-#include <linux/workqueue.h>
-
-extern struct workqueue_struct *kcrypto_wq;
-#endif
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 3fb581bf3b87..8c9af21efce1 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -129,6 +129,8 @@ struct drbg_state {
 
 	bool seeded;		/* DRBG fully seeded? */
 	bool pr;		/* Prediction resistance enabled? */
+	bool fips_primed;	/* Continuous test primed? */
+	unsigned char *prev;	/* FIPS 140-2 continuous test value */
 	struct work_struct seed_work;	/* asynchronous seeding support */
 	struct crypto_rng *jent;
 	const struct drbg_state_ops *d_ops;
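Note: the two new drbg_state fields carry the state for a FIPS 140-2
continuous entropy test: each fresh entropy block is compared against the
previous one to catch a stuck source. A sketch of the shape such a test
takes, assuming these fields; this is illustrative and not the actual
drbg.c code:

#include <crypto/drbg.h>
#include <linux/string.h>

/* Hypothetical helper: returns 0 if the entropy block passed, -EAGAIN if
 * it matched the previous block (or merely primed the comparison).
 */
static int drbg_fips_ct_sketch(struct drbg_state *drbg,
			       const unsigned char *entropy,
			       unsigned int len)
{
	if (drbg->fips_primed &&
	    !memcmp(drbg->prev, entropy, len))
		return -EAGAIN;	/* two identical blocks: stuck source */

	/* remember this block for the next comparison */
	memcpy(drbg->prev, entropy, len);
	drbg->fips_primed = true;
	return drbg->fips_primed ? 0 : -EAGAIN;
}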
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 31e0662fa429..bfc9db7b100d 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -196,12 +196,6 @@ static inline struct ahash_request *ahash_dequeue_request(
 	return ahash_request_cast(crypto_dequeue_request(queue));
 }
 
-static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
-				     struct crypto_ahash *tfm)
-{
-	return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
-}
-
 static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
 {
 	return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index fe0376d5a471..d68faa5759ad 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -200,6 +200,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
 	return alg->max_keysize;
 }
 
+static inline unsigned int crypto_skcipher_alg_chunksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->chunksize;
+}
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply)
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
 /* Helpers for simple block cipher modes of operation */
 struct skcipher_ctx_simple {
 	struct crypto_cipher *cipher;	/* underlying block cipher */
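Note: to make the chunk size / walk size doc comments above concrete, take
a stream cipher such as ChaCha20: the block size is 1 byte, but the
IV/counter only advances in 64-byte keystream blocks, so the chunk size is
64; a SIMD implementation may declare a walk size of several chunks. A
hypothetical internal caller holding a crypto_skcipher handle would see
something like this (the walksize value is illustrative and depends on the
implementation):

	unsigned int bs = crypto_skcipher_blocksize(tfm); /* 1 */
	unsigned int cs = crypto_skcipher_chunksize(tfm); /* 64 */
	unsigned int ws = crypto_skcipher_walksize(tfm);  /* e.g. 4 * 64 */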
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index ce7fa0973580..37c164234d97 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -288,66 +288,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
 	return crypto_skcipher_ivsize(&tfm->base);
 }
 
-static inline unsigned int crypto_skcipher_alg_chunksize(
-	struct skcipher_alg *alg)
-{
-	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		return alg->base.cra_blocksize;
-
-	if (alg->base.cra_ablkcipher.encrypt)
-		return alg->base.cra_blocksize;
-
-	return alg->chunksize;
-}
-
-static inline unsigned int crypto_skcipher_alg_walksize(
-	struct skcipher_alg *alg)
-{
-	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		return alg->base.cra_blocksize;
-
-	if (alg->base.cra_ablkcipher.encrypt)
-		return alg->base.cra_blocksize;
-
-	return alg->walksize;
-}
-
-/**
- * crypto_skcipher_chunksize() - obtain chunk size
- * @tfm: cipher handle
- *
- * The block size is set to one for ciphers such as CTR. However,
- * you still need to provide incremental updates in multiples of
- * the underlying block size as the IV does not have sub-block
- * granularity. This is known in this API as the chunk size.
- *
- * Return: chunk size in bytes
- */
-static inline unsigned int crypto_skcipher_chunksize(
-	struct crypto_skcipher *tfm)
-{
-	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
-}
-
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply)
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
-	struct crypto_skcipher *tfm)
-{
-	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
-}
-
 /**
  * crypto_skcipher_blocksize() - obtain block size of cipher
  * @tfm: cipher handle
@@ -479,21 +419,7 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
  *
  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
  */
-static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_alg *alg = tfm->base.__crt_alg;
-	unsigned int cryptlen = req->cryptlen;
-	int ret;
-
-	crypto_stats_get(alg);
-	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else
-		ret = tfm->encrypt(req);
-	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
-	return ret;
-}
+int crypto_skcipher_encrypt(struct skcipher_request *req);
 
 /**
  * crypto_skcipher_decrypt() - decrypt ciphertext
@@ -506,21 +432,7 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
  *
  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
  */
-static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_alg *alg = tfm->base.__crt_alg;
-	unsigned int cryptlen = req->cryptlen;
-	int ret;
-
-	crypto_stats_get(alg);
-	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		ret = -ENOKEY;
-	else
-		ret = tfm->decrypt(req);
-	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
-	return ret;
-}
+int crypto_skcipher_decrypt(struct skcipher_request *req);
 
 /**
  * DOC: Symmetric Key Cipher Request Handle
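Note: moving crypto_skcipher_encrypt()/crypto_skcipher_decrypt() (and the
AEAD counterparts above) out of line does not change the calling
convention. For reference, a sketch of the usual synchronous-wait calling
pattern; the "cbc(aes)" algorithm choice, the key length, and the wrapper
function are illustrative, not part of this diff:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: AES-128-CBC-encrypt the data described by src/dst
 * (already mapped into scatterlists), waiting for completion even on
 * asynchronous implementations.
 */
static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);	/* AES-128 */
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* Fails with -ENOKEY if no key was set, as with the old inline. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}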