author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 16:22:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 16:22:26 -0800
commit     586592478b1fa8bb8cd6875a9191468e9b1a8b13 (patch)
tree       b93ea8074b11ffa3c10ec4c0ed910479a6564bb0 /arch/s390/lib
parent     0b03beface02d519693edb8020f9811c67d5c88f (diff)
parent     343dbdb7cb8997a2cb0fd804d6563b8a6de8d49b (diff)
Merge tag 's390-5.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:
- Add support for the hugetlb_cma command line option to allocate
gigantic hugepages using CMA.
- Add arch_get_random_long() support.
- Add ap bus userspace notifications.
- Increase the default size of the vmalloc area to 512GB and otherwise
let it scale dynamically with the size of physical memory. This should
fix all cases where the vmalloc area was not large enough.
- Completely get rid of set_fs() (i.e. no longer select SET_FS) and
rework address space handling along the way, making it much simpler
(a before/after sketch follows this list).
- Reimplement the getcpu vdso call in C.
- Add support for extended SCLP responses (> 4k). This allows, for
example, handling of potentially large system configurations.
- Simplify KASAN by removing 3-level page table support and supporting
only 4 levels from now on.
- Improve debuggability of the kernel decompressor code, which now also
prints stack traces and symbols to the console in case of problems.
- Remove more power management leftovers.
- Other various fixes and improvements all over the place.
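To make the set_fs() item concrete, here is a minimal before/after sketch of
the pattern that goes away. This is illustrative only and not code from this
merge: read_old_style()/read_new_style() are hypothetical helpers, and the
"old" half assumes a pre-5.10 kernel that still provides set_fs()/KERNEL_DS.

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Old style: widen the address space limit so that a helper which expects a
 * user pointer will also accept a kernel buffer. On s390 this is what forced
 * the mm_segment/SACF juggling removed in the diff further down. */
static ssize_t read_old_style(struct file *file, void *buf, size_t count,
                              loff_t *pos)
{
        mm_segment_t old_fs = get_fs();
        ssize_t ret;

        set_fs(KERNEL_DS);
        ret = vfs_read(file, (char __user *)buf, count, pos);
        set_fs(old_fs);
        return ret;
}

/* New style: use a helper that takes kernel pointers directly, so no per-task
 * address space override is needed at all. */
static ssize_t read_new_style(struct file *file, void *buf, size_t count,
                              loff_t *pos)
{
        return kernel_read(file, buf, count, pos);
}

With set_fs() gone, the kernel no longer tracks a per-task address space
limit, which is what allows the enable_sacf_uaccess()/disable_sacf_uaccess()
helpers in the uaccess.c diff below to be deleted.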
* tag 's390-5.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (62 commits)
s390/mm: add support to allocate gigantic hugepages using CMA
s390/crypto: add arch_get_random_long() support
s390/smp: perform initial CPU reset also for SMT siblings
s390/mm: use invalid asce for user space when switching to init_mm
s390/idle: fix accounting with machine checks
s390/idle: add missing mt_cycles calculation
s390/boot: add build-id to decompressor
s390/kexec_file: fix diag308 subcode when loading crash kernel
s390/cio: fix use-after-free in ccw_device_destroy_console
s390/cio: remove pm support from ccw bus driver
s390/cio: remove pm support from css-bus driver
s390/cio: remove pm support from IO subchannel drivers
s390/cio: remove pm support from chsc subchannel driver
s390/vmur: remove unused pm related functions
s390/tape: remove unsupported PM functions
s390/cio: remove pm support from eadm-sch drivers
s390: remove pm support from console drivers
s390/dasd: remove unused pm related functions
s390/zfcp: remove pm support from zfcp driver
s390/ap: let bus_register() add the AP bus sysfs attributes
...
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--   arch/s390/lib/delay.c     13
-rw-r--r--   arch/s390/lib/uaccess.c  105
2 files changed, 34 insertions, 84 deletions
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 8c0c68e7770e..68d61f2835df 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -13,11 +13,19 @@
 #include <linux/export.h>
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
+#include <linux/jump_label.h>
 #include <linux/irq.h>
 #include <asm/vtimer.h>
 #include <asm/div64.h>
 #include <asm/idle.h>
 
+static DEFINE_STATIC_KEY_FALSE(udelay_ready);
+
+void __init udelay_enable(void)
+{
+        static_branch_enable(&udelay_ready);
+}
+
 void __delay(unsigned long loops)
 {
         /*
@@ -76,6 +84,11 @@ void __udelay(unsigned long long usecs)
 {
         unsigned long flags;
 
+        if (!static_branch_likely(&udelay_ready)) {
+                udelay_simple(usecs);
+                return;
+        }
+
         preempt_disable();
         local_irq_save(flags);
         if (in_irq()) {
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0267405ab7c6..e8f642446fed 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -16,6 +16,22 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
+#ifdef CONFIG_DEBUG_USER_ASCE
+void debug_user_asce(void)
+{
+        unsigned long cr1, cr7;
+
+        __ctl_store(cr1, 1, 1);
+        __ctl_store(cr7, 7, 7);
+        if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
+                return;
+        panic("incorrect ASCE on kernel exit\n"
+              "cr1: %016lx cr7: %016lx\n"
+              "kernel: %016llx user: %016llx\n",
+              cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+}
+#endif /*CONFIG_DEBUG_USER_ASCE */
+
 #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
 static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
@@ -40,71 +56,10 @@ static inline int copy_with_mvcos(void)
 }
 #endif
 
-void set_fs(mm_segment_t fs)
-{
-        current->thread.mm_segment = fs;
-        if (fs == USER_DS) {
-                __ctl_load(S390_lowcore.user_asce, 1, 1);
-                clear_cpu_flag(CIF_ASCE_PRIMARY);
-        } else {
-                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
-                set_cpu_flag(CIF_ASCE_PRIMARY);
-        }
-        if (fs & 1) {
-                if (fs == USER_DS_SACF)
-                        __ctl_load(S390_lowcore.user_asce, 7, 7);
-                else
-                        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
-                set_cpu_flag(CIF_ASCE_SECONDARY);
-        }
-}
-EXPORT_SYMBOL(set_fs);
-
-mm_segment_t enable_sacf_uaccess(void)
-{
-        mm_segment_t old_fs;
-        unsigned long asce, cr;
-        unsigned long flags;
-
-        old_fs = current->thread.mm_segment;
-        if (old_fs & 1)
-                return old_fs;
-        /* protect against a concurrent page table upgrade */
-        local_irq_save(flags);
-        current->thread.mm_segment |= 1;
-        asce = S390_lowcore.kernel_asce;
-        if (likely(old_fs == USER_DS)) {
-                __ctl_store(cr, 1, 1);
-                if (cr != S390_lowcore.kernel_asce) {
-                        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
-                        set_cpu_flag(CIF_ASCE_PRIMARY);
-                }
-                asce = S390_lowcore.user_asce;
-        }
-        __ctl_store(cr, 7, 7);
-        if (cr != asce) {
-                __ctl_load(asce, 7, 7);
-                set_cpu_flag(CIF_ASCE_SECONDARY);
-        }
-        local_irq_restore(flags);
-        return old_fs;
-}
-EXPORT_SYMBOL(enable_sacf_uaccess);
-
-void disable_sacf_uaccess(mm_segment_t old_fs)
-{
-        current->thread.mm_segment = old_fs;
-        if (old_fs == USER_DS && test_facility(27)) {
-                __ctl_load(S390_lowcore.user_asce, 1, 1);
-                clear_cpu_flag(CIF_ASCE_PRIMARY);
-        }
-}
-EXPORT_SYMBOL(disable_sacf_uaccess);
-
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
                                                  unsigned long size)
 {
-        register unsigned long reg0 asm("0") = 0x01UL;
+        register unsigned long reg0 asm("0") = 0x81UL;
         unsigned long tmp1, tmp2;
 
         tmp1 = -4096UL;
@@ -135,9 +90,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
                                                 unsigned long size)
 {
         unsigned long tmp1, tmp2;
-        mm_segment_t old_fs;
 
-        old_fs = enable_sacf_uaccess();
         tmp1 = -256UL;
         asm volatile(
                 " sacf 0\n"
@@ -164,7 +117,6 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
                 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
                 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                 : : "cc", "memory");
-        disable_sacf_uaccess(old_fs);
         return size;
 }
 
@@ -179,7 +131,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
 static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
                                                unsigned long size)
 {
-        register unsigned long reg0 asm("0") = 0x010000UL;
+        register unsigned long reg0 asm("0") = 0x810000UL;
         unsigned long tmp1, tmp2;
 
         tmp1 = -4096UL;
@@ -210,9 +162,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
                                               unsigned long size)
 {
         unsigned long tmp1, tmp2;
-        mm_segment_t old_fs;
 
-        old_fs = enable_sacf_uaccess();
         tmp1 = -256UL;
         asm volatile(
                 " sacf 0\n"
@@ -239,7 +189,6 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
                 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
                 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                 : : "cc", "memory");
-        disable_sacf_uaccess(old_fs);
         return size;
 }
 
@@ -254,7 +203,7 @@ EXPORT_SYMBOL(raw_copy_to_user);
 static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
                                                unsigned long size)
 {
-        register unsigned long reg0 asm("0") = 0x010001UL;
+        register unsigned long reg0 asm("0") = 0x810081UL;
         unsigned long tmp1, tmp2;
 
         tmp1 = -4096UL;
@@ -277,10 +226,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
 static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
                                              unsigned long size)
 {
-        mm_segment_t old_fs;
         unsigned long tmp1;
 
-        old_fs = enable_sacf_uaccess();
         asm volatile(
                 " sacf 256\n"
                 " aghi %0,-1\n"
@@ -304,7 +251,6 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
                 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
                 : : "cc", "memory");
-        disable_sacf_uaccess(old_fs);
         return size;
 }
 
@@ -318,7 +264,7 @@ EXPORT_SYMBOL(raw_copy_in_user);
 
 static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
 {
-        register unsigned long reg0 asm("0") = 0x010000UL;
+        register unsigned long reg0 asm("0") = 0x810000UL;
         unsigned long tmp1, tmp2;
 
         tmp1 = -4096UL;
@@ -346,10 +292,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
 
 static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
 {
-        mm_segment_t old_fs;
         unsigned long tmp1, tmp2;
 
-        old_fs = enable_sacf_uaccess();
         asm volatile(
                 " sacf 256\n"
                 " aghi %0,-1\n"
@@ -378,7 +322,6 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
                 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
                 : : "cc", "memory");
-        disable_sacf_uaccess(old_fs);
         return size;
 }
 
@@ -414,15 +357,9 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
 
 unsigned long __strnlen_user(const char __user *src, unsigned long size)
 {
-        mm_segment_t old_fs;
-        unsigned long len;
-
         if (unlikely(!size))
                 return 0;
-        old_fs = enable_sacf_uaccess();
-        len = strnlen_user_srst(src, size);
-        disable_sacf_uaccess(old_fs);
-        return len;
+        return strnlen_user_srst(src, size);
 }
 EXPORT_SYMBOL(__strnlen_user);
 
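A note on the delay.c hunk above: it uses the common boot-ordering pattern of
a static key that starts out false and routes every caller to a simple
fallback until the full implementation is safe to use. A condensed, annotated
sketch of that pattern follows; it mirrors the names from the diff, and
udelay_simple() is assumed to be the pre-existing TOD-clock based busy-wait
fallback declared by the architecture's asm/delay.h.

#include <linux/jump_label.h>
#include <linux/init.h>
#include <asm/delay.h>          /* assumed to declare udelay_simple() */

static DEFINE_STATIC_KEY_FALSE(udelay_ready);

/* Flipped once from early init code, after the machinery that the full
 * __udelay() implementation depends on has been set up. */
void __init udelay_enable(void)
{
        static_branch_enable(&udelay_ready);
}

void __udelay(unsigned long long usecs)
{
        /* Before udelay_enable(): take the simple busy-wait path. Once the
         * key is enabled the branch is patched out, so the check is free on
         * the fast path. */
        if (!static_branch_likely(&udelay_ready)) {
                udelay_simple(usecs);
                return;
        }
        /* ... full implementation (clock-comparator based wait) ... */
}

The same idea shows up elsewhere in the kernel whenever a facility cannot be
used arbitrarily early but its callers cannot easily be audited one by one.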