author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-05 17:33:35 +0000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-05 17:33:35 +0000
commit     cfb4b571e8b56b65d1a893bda5153647fda823b9 (patch)
tree       d507f6ab8f0eb8bc52ad7b9371d442ecafef35d4 /arch
parent     6992ca0dd017ebaa2bf8e9dcc49f1c3b7033b082 (diff)
parent     55d0a513a0e202c68af2c8f4b1e923a345227bbb (diff)
Merge tag 's390-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Vasily Gorbik:
"The second round of s390 fixes and features for 5.6:
- Add KPROBES_ON_FTRACE support
- Add EP11 AES secure keys support
- PAES rework and prerequisites for paes-s390 ciphers selftests
- Fix page table upgrade for hugetlbfs"
* tag 's390-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/pkey/zcrypt: Support EP11 AES secure keys
s390/zcrypt: extend EP11 card and queue sysfs attributes
s390/zcrypt: add new low level ep11 functions support file
s390/zcrypt: ep11 structs rework, export zcrypt_send_ep11_cprb
s390/zcrypt: enable card/domain autoselect on ep11 cprbs
s390/crypto: enable clear key values for paes ciphers
s390/pkey: Add support for key blob with clear key value
s390/crypto: Rework on paes implementation
s390: support KPROBES_ON_FTRACE
s390/mm: fix dynamic pagetable upgrade for hugetlbfs
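The clear key support listed above ("s390/crypto: enable clear key values for paes ciphers", "s390/pkey: Add support for key blob with clear key value") lets setkey() be called on the paes ciphers with a raw 16-, 24- or 32-byte AES key; the driver wraps such a key into a pkey clear-key token before converting it to a CPACF protected key. The standalone sketch below only restates the token layout that _key_to_kb() builds in the paes_s390.c hunk further down; the struct and helper names here are illustrative and not part of the kernel or its uapi.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the kernel-internal clearkey_header built by _key_to_kb() */
struct clearkey_token {
	uint8_t  type;		/* left 0 by the kernel code */
	uint8_t  res0[3];
	uint8_t  version;	/* 0x02 = TOKVER_CLEAR_KEY */
	uint8_t  res1[3];
	uint32_t keytype;	/* 1/2/3 = PKEY_KEYTYPE_AES_128/192/256 */
	uint32_t len;		/* clear key length in bytes */
	uint8_t  clearkey[32];	/* up to 32 bytes of raw key material */
} __attribute__((packed));

/* Wrap a raw AES key the same way the kernel does: keytype = (keylen - 8) >> 3 */
static size_t build_clearkey_token(struct clearkey_token *t,
				   const uint8_t *key, uint32_t keylen)
{
	memset(t, 0, sizeof(*t));
	t->version = 0x02;
	t->keytype = (keylen - 8) >> 3;	/* 16 -> 1, 24 -> 2, 32 -> 3 */
	t->len = keylen;
	memcpy(t->clearkey, key, keylen);
	return offsetof(struct clearkey_token, clearkey) + keylen;
}

For a 32-byte key this yields a 48-byte token with keytype 3 (PKEY_KEYTYPE_AES_256), which the pkey layer then converts into a protected key usable by the CPACF instructions.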
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                    |   1
-rw-r--r--  arch/s390/crypto/paes_s390.c         | 230
-rw-r--r--  arch/s390/include/asm/kprobes.h      |   1
-rw-r--r--  arch/s390/include/asm/page.h         |   2
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h    |  69
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h  |  32
-rw-r--r--  arch/s390/kernel/ftrace.c            |  80
-rw-r--r--  arch/s390/kernel/kprobes.c           |  61
-rw-r--r--  arch/s390/kernel/mcount.S            |   6
-rw-r--r--  arch/s390/mm/hugetlbpage.c           | 100
10 files changed, 388 insertions, 194 deletions
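The reworked comments in arch/s390/include/uapi/asm/pkey.h below describe how an EP11 AES secure key is generated through the version-2 pkey ioctls: key type PKEY_TYPE_EP11 with keygenflags 0 selects the EP11 defaults (XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT, XCP_BLOB_PROTKEY_EXTRACTABLE). A hedged userspace sketch follows; it assumes the PKEY_GENSECK2 ioctl defined next to struct pkey_genseck2 in the installed pkey.h, the /dev/pkey misc device, and an example APQN of card 0x0a, domain 0x000b (a CEX7 in EP11 mode) — adjust these for the local setup.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_apqn apqn = { .card = 0x0a, .domain = 0x000b };	/* example APQN */
	unsigned char blob[MAXEP11AESKEYBLOBSIZE];
	struct pkey_genseck2 gs = {
		.apqns        = &apqn,
		.apqn_entries = 1,
		.type         = PKEY_TYPE_EP11,		/* new key type in this update */
		.size         = PKEY_SIZE_AES_256,
		.keygenflags  = 0,			/* 0 = EP11 defaults as documented below */
		.key          = blob,
		.keylen       = sizeof(blob),		/* in: buffer size, out: blob size */
	};
	int fd = open("/dev/pkey", O_RDWR);

	if (fd < 0 || ioctl(fd, PKEY_GENSECK2, &gs) < 0) {
		perror("pkey");
		return 1;
	}
	printf("EP11 AES secure key blob: %u bytes\n", gs.keylen);
	close(fd);
	return 0;
}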
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 734996784d03..8abe77536d9d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -156,6 +156,7 @@ config S390 select HAVE_KERNEL_UNCOMPRESSED select HAVE_KERNEL_XZ select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES select HAVE_KVM select HAVE_LIVEPATCH diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index e2a85783f804..f3caeb17c85b 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -5,7 +5,7 @@ * s390 implementation of the AES Cipher Algorithm with protected keys. * * s390 Version: - * Copyright IBM Corp. 2017,2019 + * Copyright IBM Corp. 2017,2020 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Harald Freudenberger <freude@de.ibm.com> */ @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/cpufeature.h> #include <linux/init.h> +#include <linux/mutex.h> #include <linux/spinlock.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> @@ -32,11 +33,11 @@ * is called. As paes can handle different kinds of key blobs * and padding is also possible, the limits need to be generous. */ -#define PAES_MIN_KEYSIZE 64 -#define PAES_MAX_KEYSIZE 256 +#define PAES_MIN_KEYSIZE 16 +#define PAES_MAX_KEYSIZE 320 static u8 *ctrblk; -static DEFINE_SPINLOCK(ctrblk_lock); +static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; @@ -53,19 +54,46 @@ struct key_blob { unsigned int keylen; }; -static inline int _copy_key_to_kb(struct key_blob *kb, - const u8 *key, - unsigned int keylen) -{ - if (keylen <= sizeof(kb->keybuf)) +static inline int _key_to_kb(struct key_blob *kb, + const u8 *key, + unsigned int keylen) +{ + struct clearkey_header { + u8 type; + u8 res0[3]; + u8 version; + u8 res1[3]; + u32 keytype; + u32 len; + } __packed * h; + + switch (keylen) { + case 16: + case 24: + case 32: + /* clear key value, prepare pkey clear key token in keybuf */ + memset(kb->keybuf, 0, sizeof(kb->keybuf)); + h = (struct clearkey_header *) kb->keybuf; + h->version = 0x02; /* TOKVER_CLEAR_KEY */ + h->keytype = (keylen - 8) >> 3; + h->len = keylen; + memcpy(kb->keybuf + sizeof(*h), key, keylen); + kb->keylen = sizeof(*h) + keylen; kb->key = kb->keybuf; - else { - kb->key = kmalloc(keylen, GFP_KERNEL); - if (!kb->key) - return -ENOMEM; + break; + default: + /* other key material, let pkey handle this */ + if (keylen <= sizeof(kb->keybuf)) + kb->key = kb->keybuf; + else { + kb->key = kmalloc(keylen, GFP_KERNEL); + if (!kb->key) + return -ENOMEM; + } + memcpy(kb->key, key, keylen); + kb->keylen = keylen; + break; } - memcpy(kb->key, key, keylen); - kb->keylen = keylen; return 0; } @@ -82,16 +110,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb) struct s390_paes_ctx { struct key_blob kb; struct pkey_protkey pk; + spinlock_t pk_lock; unsigned long fc; }; struct s390_pxts_ctx { struct key_blob kb[2]; struct pkey_protkey pk[2]; + spinlock_t pk_lock; unsigned long fc; }; -static inline int __paes_convert_key(struct key_blob *kb, +static inline int __paes_keyblob2pkey(struct key_blob *kb, struct pkey_protkey *pk) { int i, ret; @@ -106,22 +136,18 @@ static inline int __paes_convert_key(struct key_blob *kb, return ret; } -static int __paes_set_key(struct s390_paes_ctx *ctx) +static inline int __paes_convert_key(struct s390_paes_ctx *ctx) { - unsigned long fc; + struct pkey_protkey pkey; - if (__paes_convert_key(&ctx->kb, &ctx->pk)) + if (__paes_keyblob2pkey(&ctx->kb, &pkey)) return -EINVAL; - /* Pick the correct 
function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0; - - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + spin_lock_bh(&ctx->pk_lock); + memcpy(&ctx->pk, &pkey, sizeof(pkey)); + spin_unlock_bh(&ctx->pk_lock); - return ctx->fc ? 0 : -EINVAL; + return 0; } static int ecb_paes_init(struct crypto_skcipher *tfm) @@ -129,6 +155,7 @@ static int ecb_paes_init(struct crypto_skcipher *tfm) struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; + spin_lock_init(&ctx->pk_lock); return 0; } @@ -140,6 +167,24 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm) _free_kb_keybuf(&ctx->kb); } +static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx) +{ + unsigned long fc; + + if (__paes_convert_key(ctx)) + return -EINVAL; + + /* Pick the correct function code based on the protected key type */ + fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : + (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : + (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0; + + /* Check if the function code is available */ + ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + + return ctx->fc ? 0 : -EINVAL; +} + static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -147,11 +192,11 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); - rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); + rc = _key_to_kb(&ctx->kb, in_key, key_len); if (rc) return rc; - return __paes_set_key(ctx); + return __ecb_paes_set_key(ctx); } static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) @@ -161,18 +206,31 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) struct skcipher_walk walk; unsigned int nbytes, n, k; int ret; + struct { + u8 key[MAXPROTKEYSIZE]; + } param; ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + + spin_lock_bh(&ctx->pk_lock); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey, + k = cpacf_km(ctx->fc | modifier, ¶m, walk.dst.virt.addr, walk.src.virt.addr, n); if (k) ret = skcipher_walk_done(&walk, nbytes - k); if (k < n) { - if (__paes_set_key(ctx) != 0) + if (__paes_convert_key(ctx)) return skcipher_walk_done(&walk, -EIO); + spin_lock_bh(&ctx->pk_lock); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); } } return ret; @@ -210,6 +268,7 @@ static int cbc_paes_init(struct crypto_skcipher *tfm) struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; + spin_lock_init(&ctx->pk_lock); return 0; } @@ -221,11 +280,11 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm) _free_kb_keybuf(&ctx->kb); } -static int __cbc_paes_set_key(struct s390_paes_ctx *ctx) +static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx) { unsigned long fc; - if (__paes_convert_key(&ctx->kb, &ctx->pk)) + if (__paes_convert_key(ctx)) return -EINVAL; /* Pick the correct function code based on the protected key type */ @@ -246,7 +305,7 @@ static int 
cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); - rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); + rc = _key_to_kb(&ctx->kb, in_key, key_len); if (rc) return rc; @@ -268,8 +327,12 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) ret = skcipher_walk_virt(&walk, req, false); if (ret) return ret; + memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); + spin_lock_bh(&ctx->pk_lock); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); @@ -280,9 +343,11 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) ret = skcipher_walk_done(&walk, nbytes - k); } if (k < n) { - if (__cbc_paes_set_key(ctx) != 0) + if (__paes_convert_key(ctx)) return skcipher_walk_done(&walk, -EIO); + spin_lock_bh(&ctx->pk_lock); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); } } return ret; @@ -322,6 +387,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm) ctx->kb[0].key = NULL; ctx->kb[1].key = NULL; + spin_lock_init(&ctx->pk_lock); return 0; } @@ -334,12 +400,27 @@ static void xts_paes_exit(struct crypto_skcipher *tfm) _free_kb_keybuf(&ctx->kb[1]); } -static int __xts_paes_set_key(struct s390_pxts_ctx *ctx) +static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx) +{ + struct pkey_protkey pkey0, pkey1; + + if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) || + __paes_keyblob2pkey(&ctx->kb[1], &pkey1)) + return -EINVAL; + + spin_lock_bh(&ctx->pk_lock); + memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0)); + memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1)); + spin_unlock_bh(&ctx->pk_lock); + + return 0; +} + +static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx) { unsigned long fc; - if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) || - __paes_convert_key(&ctx->kb[1], &ctx->pk[1])) + if (__xts_paes_convert_key(ctx)) return -EINVAL; if (ctx->pk[0].type != ctx->pk[1].type) @@ -371,10 +452,10 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, _free_kb_keybuf(&ctx->kb[0]); _free_kb_keybuf(&ctx->kb[1]); - rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len); + rc = _key_to_kb(&ctx->kb[0], in_key, key_len); if (rc) return rc; - rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len); + rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len); if (rc) return rc; @@ -416,15 +497,17 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) ret = skcipher_walk_virt(&walk, req, false); if (ret) return ret; + keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
16 : 0; -retry: + memset(&pcc_param, 0, sizeof(pcc_param)); memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); + spin_lock_bh(&ctx->pk_lock); memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); - cpacf_pcc(ctx->fc, pcc_param.key + offset); - memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); + spin_unlock_bh(&ctx->pk_lock); + cpacf_pcc(ctx->fc, pcc_param.key + offset); memcpy(xts_param.init, pcc_param.xts, 16); while ((nbytes = walk.nbytes) != 0) { @@ -435,11 +518,15 @@ retry: if (k) ret = skcipher_walk_done(&walk, nbytes - k); if (k < n) { - if (__xts_paes_set_key(ctx) != 0) + if (__xts_paes_convert_key(ctx)) return skcipher_walk_done(&walk, -EIO); - goto retry; + spin_lock_bh(&ctx->pk_lock); + memcpy(xts_param.key + offset, + ctx->pk[0].protkey, keylen); + spin_unlock_bh(&ctx->pk_lock); } } + return ret; } @@ -476,6 +563,7 @@ static int ctr_paes_init(struct crypto_skcipher *tfm) struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; + spin_lock_init(&ctx->pk_lock); return 0; } @@ -487,11 +575,11 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm) _free_kb_keybuf(&ctx->kb); } -static int __ctr_paes_set_key(struct s390_paes_ctx *ctx) +static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx) { unsigned long fc; - if (__paes_convert_key(&ctx->kb, &ctx->pk)) + if (__paes_convert_key(ctx)) return -EINVAL; /* Pick the correct function code based on the protected key type */ @@ -513,7 +601,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); - rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); + rc = _key_to_kb(&ctx->kb, in_key, key_len); if (rc) return rc; @@ -543,49 +631,65 @@ static int ctr_paes_crypt(struct skcipher_request *req) struct skcipher_walk walk; unsigned int nbytes, n, k; int ret, locked; - - locked = spin_trylock(&ctrblk_lock); + struct { + u8 key[MAXPROTKEYSIZE]; + } param; ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + + spin_lock_bh(&ctx->pk_lock); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); + + locked = mutex_trylock(&ctrblk_lock); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { n = AES_BLOCK_SIZE; if (nbytes >= 2*AES_BLOCK_SIZE && locked) n = __ctrblk_init(ctrblk, walk.iv, nbytes); ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk.iv; - k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr, + k = cpacf_kmctr(ctx->fc, ¶m, walk.dst.virt.addr, walk.src.virt.addr, n, ctrptr); if (k) { if (ctrptr == ctrblk) memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE, AES_BLOCK_SIZE); crypto_inc(walk.iv, AES_BLOCK_SIZE); - ret = skcipher_walk_done(&walk, nbytes - n); + ret = skcipher_walk_done(&walk, nbytes - k); } if (k < n) { - if (__ctr_paes_set_key(ctx) != 0) { + if (__paes_convert_key(ctx)) { if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); return skcipher_walk_done(&walk, -EIO); } + spin_lock_bh(&ctx->pk_lock); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); } } if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { while (1) { - if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf, + if (cpacf_kmctr(ctx->fc, ¶m, buf, walk.src.virt.addr, AES_BLOCK_SIZE, walk.iv) == AES_BLOCK_SIZE) break; - if (__ctr_paes_set_key(ctx) != 0) + if (__paes_convert_key(ctx)) return skcipher_walk_done(&walk, -EIO); + spin_lock_bh(&ctx->pk_lock); + memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); + spin_unlock_bh(&ctx->pk_lock); } memcpy(walk.dst.virt.addr, buf, nbytes); crypto_inc(walk.iv, AES_BLOCK_SIZE); - ret = skcipher_walk_done(&walk, 0); + ret = skcipher_walk_done(&walk, nbytes); } return ret; @@ -618,12 +722,12 @@ static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg) static void paes_s390_fini(void) { - if (ctrblk) - free_page((unsigned long) ctrblk); __crypto_unregister_skcipher(&ctr_paes_alg); __crypto_unregister_skcipher(&xts_paes_alg); __crypto_unregister_skcipher(&cbc_paes_alg); __crypto_unregister_skcipher(&ecb_paes_alg); + if (ctrblk) + free_page((unsigned long) ctrblk); } static int __init paes_s390_init(void) @@ -661,14 +765,14 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { - ret = crypto_register_skcipher(&ctr_paes_alg); - if (ret) - goto out_err; ctrblk = (u8 *) __get_free_page(GFP_KERNEL); if (!ctrblk) { ret = -ENOMEM; goto out_err; } + ret = crypto_register_skcipher(&ctr_paes_alg); + if (ret) + goto out_err; } return 0; diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index b106aa29bf55..09cdb632a490 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t; struct arch_specific_insn { /* copy of original instruction */ kprobe_opcode_t *insn; - unsigned int is_ftrace_insn : 1; }; struct prev_kprobe { diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a4d38092530a..85e944f04c70 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -33,6 +33,8 @@ #define ARCH_HAS_PREPARE_HUGEPAGE #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + #include <asm/setup.h> #ifndef __ASSEMBLY__ diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h index e22f0720bbb8..d27d7d329263 100644 --- a/arch/s390/include/uapi/asm/pkey.h +++ b/arch/s390/include/uapi/asm/pkey.h @@ -25,10 +25,11 @@ #define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */ #define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */ #define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have 
always 136 bytes */ +#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */ +#define MAXEP11AESKEYBLOBSIZE 320 /* max EP11 AES key blob size */ -/* Minimum and maximum size of a key blob */ +/* Minimum size of a key blob */ #define MINKEYBLOBSIZE SECKEYBLOBSIZE -#define MAXKEYBLOBSIZE MAXAESCIPHERKEYSIZE /* defines for the type field within the pkey_protkey struct */ #define PKEY_KEYTYPE_AES_128 1 @@ -39,6 +40,7 @@ enum pkey_key_type { PKEY_TYPE_CCA_DATA = (__u32) 1, PKEY_TYPE_CCA_CIPHER = (__u32) 2, + PKEY_TYPE_EP11 = (__u32) 3, }; /* the newer ioctls use a pkey_key_size enum for key size information */ @@ -200,7 +202,7 @@ struct pkey_kblob2pkey { /* * Generate secure key, version 2. - * Generate either a CCA AES secure key or a CCA AES cipher key. + * Generate CCA AES secure key, CCA AES cipher key or EP11 AES secure key. * There needs to be a list of apqns given with at least one entry in there. * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain * is not supported. The implementation walks through the list of apqns and @@ -210,10 +212,13 @@ struct pkey_kblob2pkey { * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to * generate a list of apqns based on the key type to generate. * The keygenflags argument is passed to the low level generation functions - * individual for the key type and has a key type specific meaning. Currently - * only CCA AES cipher keys react to this parameter: Use one or more of the - * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher - * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * individual for the key type and has a key type specific meaning. When + * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_* + * flags to widen the export possibilities. By default a cipher key is + * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * The keygenflag argument for generating an EP11 AES key should either be 0 + * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and + * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags. */ struct pkey_genseck2 { struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/ @@ -229,8 +234,8 @@ struct pkey_genseck2 { /* * Generate secure key from clear key value, version 2. - * Construct a CCA AES secure key or CCA AES cipher key from a given clear key - * value. + * Construct an CCA AES secure key, CCA AES cipher key or EP11 AES secure + * key from a given clear key value. * There needs to be a list of apqns given with at least one entry in there. * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain * is not supported. The implementation walks through the list of apqns and @@ -240,10 +245,13 @@ struct pkey_genseck2 { * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to * generate a list of apqns based on the key type to generate. * The keygenflags argument is passed to the low level generation functions - * individual for the key type and has a key type specific meaning. Currently - * only CCA AES cipher keys react to this parameter: Use one or more of the - * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher - * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * individual for the key type and has a key type specific meaning. When + * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_* + * flags to widen the export possibilities. 
By default a cipher key is + * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC). + * The keygenflag argument for generating an EP11 AES key should either be 0 + * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and + * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags. */ struct pkey_clr2seck2 { struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */ @@ -266,14 +274,19 @@ struct pkey_clr2seck2 { * with one apqn able to handle this key. * The function also checks for the master key verification patterns * of the key matching to the current or alternate mkvp of the apqn. - * Currently CCA AES secure keys and CCA AES cipher keys are supported. - * The flags field is updated with some additional info about the apqn mkvp + * For CCA AES secure keys and CCA AES cipher keys this means to check + * the key's mkvp against the current or old mkvp of the apqns. The flags + * field is updated with some additional info about the apqn mkvp * match: If the current mkvp matches to the key's mkvp then the * PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches to * the key's mkvp the PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the * alternate mkvp is the old master key verification pattern. * CCA AES secure keys are also checked to have the CPACF export allowed * bit enabled (XPRTCPAC) in the kmf1 field. + * EP11 keys are also supported and the wkvp of the key is checked against + * the current wkvp of the apqns. There is no alternate for this type of + * key and so on a match the flag PKEY_FLAGS_MATCH_CUR_MKVP always is set. + * EP11 keys are also checked to have XCP_BLOB_PROTKEY_EXTRACTABLE set. * The ioctl returns 0 as long as the given or found apqn matches to * matches with the current or alternate mkvp to the key's mkvp. If the given * apqn does not match or there is no such apqn found, -1 with errno @@ -313,16 +326,20 @@ struct pkey_kblob2pkey2 { /* * Build a list of APQNs based on a key blob given. * Is able to find out which type of secure key is given (CCA AES secure - * key or CCA AES cipher key) and tries to find all matching crypto cards - * based on the MKVP and maybe other criterias (like CCA AES cipher keys - * need a CEX5C or higher). The list of APQNs is further filtered by the key's - * mkvp which needs to match to either the current mkvp or the alternate mkvp - * (which is the old mkvp on CCA adapters) of the apqns. The flags argument may - * be used to limit the matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is - * given, only the current mkvp of each apqn is compared. Likewise with the - * PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to - * return apqns where either the current or the alternate mkvp + * key, CCA AES cipher key or EP11 AES key) and tries to find all matching + * crypto cards based on the MKVP and maybe other criterias (like CCA AES + * cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE + * need a CEX7 and EP11 api version 4). The list of APQNs is further filtered + * by the key's mkvp which needs to match to either the current mkvp (CCA and + * EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The + * flags argument may be used to limit the matching apqns. If the + * PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is + * compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it + * is assumed to return apqns where either the current or the alternate mkvp * matches. 
At least one of the matching flags needs to be given. + * The flags argument for EP11 keys has no further action and is currently + * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only + * the wkvp from the key to match against the apqn's wkvp. * The list of matching apqns is stored into the space given by the apqns * argument and the number of stored entries goes into apqn_entries. If the list * is empty (apqn_entries is 0) the apqn_entries field is updated to the number @@ -356,6 +373,10 @@ struct pkey_apqns4key { * If both are given, it is assumed to return apqns where either the * current or the alternate mkvp matches. If no match flag is given * (flags is 0) the mkvp values are ignored for the match process. + * For EP11 keys there is only the current wkvp. So if the apqns should also + * match to a given wkvp, then the PKEY_FLAGS_MATCH_CUR_MKVP flag should be + * set. The wkvp value is 32 bytes but only the leftmost 16 bytes are compared + * against the leftmost 16 byte of the wkvp of the apqn. * The list of matching apqns is stored into the space given by the apqns * argument and the number of stored entries goes into apqn_entries. If the list * is empty (apqn_entries is 0) the apqn_entries field is updated to the number diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index f9e5e1f0821d..5a2177e96e88 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -161,17 +161,17 @@ struct ica_xcRB { * @payload_len: Payload length */ struct ep11_cprb { - __u16 cprb_len; - unsigned char cprb_ver_id; - unsigned char pad_000[2]; - unsigned char flags; - unsigned char func_id[2]; - __u32 source_id; - __u32 target_id; - __u32 ret_code; - __u32 reserved1; - __u32 reserved2; - __u32 payload_len; + __u16 cprb_len; + __u8 cprb_ver_id; + __u8 pad_000[2]; + __u8 flags; + __u8 func_id[2]; + __u32 source_id; + __u32 target_id; + __u32 ret_code; + __u32 reserved1; + __u32 reserved2; + __u32 payload_len; } __attribute__((packed)); /** @@ -197,13 +197,13 @@ struct ep11_target_dev { */ struct ep11_urb { __u16 targets_num; - __u64 targets; + __u8 __user *targets; __u64 weight; __u64 req_no; __u64 req_len; - __u64 req; + __u8 __user *req; __u64 resp_len; - __u64 resp; + __u8 __user *resp; } __attribute__((packed)); /** @@ -237,7 +237,9 @@ struct zcrypt_device_matrix_ext { struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT]; }; -#define AUTOSELECT 0xFFFFFFFF +#define AUTOSELECT 0xFFFFFFFF +#define AUTOSEL_AP ((__u16) 0xFFFF) +#define AUTOSEL_DOM ((__u16) 0xFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 1bb85f60c0dd..4cd9b1ada834 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) #endif } -static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) -{ -#ifdef CONFIG_KPROBES - if (insn->opc == BREAKPOINT_INSTRUCTION) - return 1; -#endif - return 0; -} - static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) { #ifdef CONFIG_KPROBES @@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, /* Initial code replacement */ ftrace_generate_orig_insn(&orig); ftrace_generate_nop_insn(&new); - } else if (is_kprobe_on_ftrace(&old)) { - /* - * If we find a breakpoint instruction, a kprobe has been - * placed at the beginning of the function. 
We write the - * constant KPROBE_ON_FTRACE_NOP into the remaining four - * bytes of the original instruction so that the kprobes - * handler can execute a nop, if it reaches this breakpoint. - */ - ftrace_generate_kprobe_call_insn(&orig); - ftrace_generate_kprobe_nop_insn(&new); } else { /* Replace ftrace call with a nop. */ ftrace_generate_call_insn(&orig, rec->ip); @@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - if (is_kprobe_on_ftrace(&old)) { - /* - * If we find a breakpoint instruction, a kprobe has been - * placed at the beginning of the function. We write the - * constant KPROBE_ON_FTRACE_CALL into the remaining four - * bytes of the original instruction so that the kprobes - * handler can execute a brasl if it reaches this breakpoint. - */ - ftrace_generate_kprobe_nop_insn(&orig); - ftrace_generate_kprobe_call_insn(&new); - } else { - /* Replace nop with an ftrace call. */ - ftrace_generate_nop_insn(&orig); - ftrace_generate_call_insn(&new, rec->ip); - } + /* Replace nop with an ftrace call. */ + ftrace_generate_nop_insn(&orig); + ftrace_generate_call_insn(&new, rec->ip); + /* Verify that the to be replaced code matches what we expect. */ if (memcmp(&orig, &old, sizeof(old))) return -EINVAL; @@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_KPROBES_ON_FTRACE +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb; + struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip); + + if (unlikely(!p) || kprobe_disabled(p)) + return; + + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + return; + } + + __this_cpu_write(current_kprobe, p); + + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + instruction_pointer_set(regs, ip); + + if (!p->pre_handler || !p->pre_handler(p, regs)) { + + instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE); + + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + return 0; +} +#endif diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 6f1388391620..548d0ea9808d 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = { static void copy_instruction(struct kprobe *p) { - unsigned long ip = (unsigned long) p->addr; s64 disp, new_disp; u64 addr, new_addr; - if (ftrace_location(ip) == ip) { - /* - * If kprobes patches the instruction that is morphed by - * ftrace make sure that kprobes always sees the branch - * "jg .+24" that skips the mcount block or the "brcl 0,0" - * in case of hotpatch. 
- */ - ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); - p->ainsn.is_ftrace_insn = 1; - } else - memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); + memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); p->opcode = p->ainsn.insn[0]; if (!probe_is_insn_relative_long(p->ainsn.insn)) return; @@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p) } NOKPROBE_SYMBOL(arch_prepare_kprobe); -int arch_check_ftrace_location(struct kprobe *p) -{ - return 0; -} - struct swap_insn_args { struct kprobe *p; unsigned int arm_kprobe : 1; @@ -149,28 +133,11 @@ struct swap_insn_args { static int swap_instruction(void *data) { struct swap_insn_args *args = data; - struct ftrace_insn new_insn, *insn; struct kprobe *p = args->p; - size_t len; - - new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode; - len = sizeof(new_insn.opc); - if (!p->ainsn.is_ftrace_insn) - goto skip_ftrace; - len = sizeof(new_insn); - insn = (struct ftrace_insn *) p->addr; - if (args->arm_kprobe) { - if (is_ftrace_nop(insn)) - new_insn.disp = KPROBE_ON_FTRACE_NOP; - else - new_insn.disp = KPROBE_ON_FTRACE_CALL; - } else { - ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr); - if (insn->disp == KPROBE_ON_FTRACE_NOP) - ftrace_generate_nop_insn(&new_insn); - } -skip_ftrace: - s390_kernel_write(p->addr, &new_insn, len); + u16 opc; + + opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode; + s390_kernel_write(p->addr, &opc, sizeof(opc)); return 0; } NOKPROBE_SYMBOL(swap_instruction); @@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs) unsigned long ip = regs->psw.addr; int fixup = probe_get_fixup_type(p->ainsn.insn); - /* Check if the kprobes location is an enabled ftrace caller */ - if (p->ainsn.is_ftrace_insn) { - struct ftrace_insn *insn = (struct ftrace_insn *) p->addr; - struct ftrace_insn call_insn; - - ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr); - /* - * A kprobe on an enabled ftrace call site actually single - * stepped an unconditional branch (ftrace nop equivalent). - * Now we need to fixup things and pretend that a brasl r0,... - * was executed instead. 
- */ - if (insn->disp == KPROBE_ON_FTRACE_CALL) { - ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE; - regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn); - } - } - if (fixup & FIXUP_PSW_NORMAL) ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index f942341429b1..7458dcfd6464 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -42,6 +42,9 @@ ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller + lghi %r14,0 # save condition code + ipm %r14 # don't put any instructions + sllg %r14,%r14,16 # clobbering CC before this point lgr %r1,%r15 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP @@ -54,6 +57,9 @@ ENTRY(ftrace_caller) # allocate pt_regs and stack frame for ftrace_trace_function aghi %r15,-STACK_FRAME_SIZE stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) + stg %r14,(STACK_PTREGS_PSW)(%r15) + lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address + stosm (STACK_PTREGS_PSW)(%r15),0 aghi %r1,-TRACED_FUNC_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r0,(STACK_PTREGS_PSW+8)(%r15) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index b0246c705a19..5674710a4841 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -2,7 +2,7 @@ /* * IBM System z Huge TLB Page Support for Kernel. * - * Copyright IBM Corp. 2007,2016 + * Copyright IBM Corp. 2007,2020 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com> */ @@ -11,6 +11,9 @@ #include <linux/mm.h> #include <linux/hugetlb.h> +#include <linux/mman.h> +#include <linux/sched/mm.h> +#include <linux/security.h> /* * If the bit selected by single-bit bitmask "a" is set within "x", move @@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); + +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int rc; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + goto check_asce_limit; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma))) + goto check_asce_limit; + } + + if (mm->get_unmapped_area == arch_get_unmapped_area) + addr = hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + addr = hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); + if (addr & ~PAGE_MASK) + return addr; + +check_asce_limit: + if (addr + len > current->mm->context.asce_limit && + addr + len <= TASK_SIZE) { + rc = crst_table_upgrade(mm, addr + len); + if (rc) + return (unsigned long) rc; + } + return addr; +} |
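A closing usage note for the KPROBES_ON_FTRACE support merged above: on s390 a kprobe placed at a function entry now coincides with the ftrace patch site and is dispatched through kprobe_ftrace_handler() (added to arch/s390/kernel/ftrace.c in this merge) rather than through a breakpoint instruction. Below is a minimal, hypothetical module exercising that path; the probed symbol is only an example and everything shown is standard kprobes API, not code from this merge.

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int entry_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s (%px)\n", p->symbol_name, p->addr);
	return 0;	/* continue with the probed function */
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* example target, adjust as needed */
	.pre_handler = entry_pre_handler,
};

static int __init kp_example_init(void)
{
	/* with CONFIG_KPROBES_ON_FTRACE=y this probe uses the ftrace site */
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");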