author     Linus Torvalds <torvalds@linux-foundation.org>    2010-02-27 16:20:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-02-27 16:20:17 -0800
commit     7981164791d18d5ed1dcdfa9598949ed158a5333
tree       6565e7406dd55eb5014efd3e54109159a47cb10e
parent     f1dd6ad599732fc89f36fdd65a2c2cf3c63a8711
parent     a8d6356cdabf4495aaae7d3e89eb058b1909761c
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (35 commits)
[S390] time: remove unused code
[S390] zcore: Add prefix registers to dump header
[S390] correct vdso version string
[S390] add support for compressed kernels
[S390] Define new s390 ELF note sections in elf.h
[S390] codepage conversion of kernel parameter line
[S390] seq_file: convert drivers/s390/
[S390] add z9-ec/z10 instruction to kernel disassembler
[S390] dasd: correct offline processing
[S390] dasd: fix refcounting.
[S390] dasd: fix online/offline race
[S390] use kprobes_built_in() in mm/fault code
[S390] bug: use relative pointers in bug table entries
[S390] Cleanup struct _lowcore usage and defines.
[S390] free_initmem: reduce code duplication
[S390] Replace ENOTSUPP usage with EOPNOTSUPP
[S390] spinlock: check virtual cpu running status
[S390] sysinfo: fix SYSIB 3,2,2 structure
[S390] add MACHINE_IS_LPAR flag
[S390] qdio: optimize cache line usage of struct qdio_irq
...
95 files changed, 2207 insertions, 1501 deletions
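Two of the merged changes are directly user-visible: the bzImage target for compressed kernels and the /proc/cio_settle interface documented in the CommonIO update in the diff below. As a rough illustration of the latter, here is a minimal userspace sketch — an assumption-laden example, not part of this merge — that opens /proc/cio_settle and issues a write, which according to the new documentation blocks until all queued cio actions have been handled:

/*
 * Hypothetical helper: block until the channel subsystem has processed
 * all queued work, via the /proc/cio_settle file added in this merge.
 * The written payload is arbitrary; the documentation only requires
 * that a write request be issued.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/cio_settle");
		return 1;
	}
	/* The write blocks until pending cio actions are handled. */
	if (write(fd, "1\n", 2) < 0)
		perror("write /proc/cio_settle");
	close(fd);
	return 0;
}

A plain shell redirect into the file should work equally well, since the kernel only cares that a write request arrives. Building the compressed image itself would, after this merge, presumably be a matter of "make ARCH=s390 bzImage", per the archhelp text added to arch/s390/Makefile.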
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO index 339207d11d95..d378cba66456 100644 --- a/Documentation/s390/CommonIO +++ b/Documentation/s390/CommonIO @@ -87,6 +87,12 @@ Command line parameters compatibility, by the device number in hexadecimal (0xabcd or abcd). Device numbers given as 0xabcd will be interpreted as 0.0.abcd. +* /proc/cio_settle + + A write request to this file is blocked until all queued cio actions are + handled. This will allow userspace to wait for pending work affecting + device availability after changing cio_ignore or the hardware configuration. + * For some of the information present in the /proc filesystem in 2.4 (namely, /proc/subchannels and /proc/chpids), see driver-model.txt. Information formerly in /proc/irq_count is now in /proc/interrupts. diff --git a/Documentation/s390/driver-model.txt b/Documentation/s390/driver-model.txt index bde473df748d..ed265cf54cde 100644 --- a/Documentation/s390/driver-model.txt +++ b/Documentation/s390/driver-model.txt @@ -223,8 +223,8 @@ touched by the driver - it should use the ccwgroup device's driver_data for its private data. To implement a ccwgroup driver, please refer to include/asm/ccwgroup.h. Keep in -mind that most drivers will need to implement both a ccwgroup and a ccw driver -(unless you have a meta ccw driver, like cu3088 for lcs and ctc). +mind that most drivers will need to implement both a ccwgroup and a ccw +driver. 2. Channel paths diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c80235206c01..19deda8d8875 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -54,6 +54,9 @@ config GENERIC_BUG depends on BUG default y +config GENERIC_BUG_RELATIVE_POINTERS + def_bool y + config NO_IOMEM def_bool y @@ -95,6 +98,9 @@ config S390 select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE select HAVE_PERF_EVENTS + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_LZMA select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 2283933a9a93..45e0c6199f36 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" +config DEBUG_STRICT_USER_COPY_CHECKS + bool "Strict user copy size checks" + ---help--- + Enabling this option turns a certain set of sanity checks for user + copy operations into compile time warnings. + + The copy_from_user() etc checks are there to help test if there + are sufficient security checks on the length argument of + the copy operation, by having gcc prove that the argument is + within bounds. + + If unsure, or if you run an older (pre 4.4) gcc, say N. 
+ endmenu diff --git a/arch/s390/Makefile b/arch/s390/Makefile index fc8fb20e7fc0..0da10746e0e5 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -14,6 +14,7 @@ # ifndef CONFIG_64BIT +LD_BFD := elf32-s390 LDFLAGS := -m elf_s390 KBUILD_CFLAGS += -m31 KBUILD_AFLAGS += -m31 @@ -21,6 +22,7 @@ UTS_MACHINE := s390 STACK_SIZE := 8192 CHECKFLAGS += -D__s390__ -msize-long else +LD_BFD := elf64-s390 LDFLAGS := -m elf64_s390 MODFLAGS += -fpic -D__PIC__ KBUILD_CFLAGS += -m64 @@ -30,6 +32,8 @@ STACK_SIZE := 16384 CHECKFLAGS += -D__s390__ -D__s390x__ endif +export LD_BFD + cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) @@ -85,7 +89,9 @@ KBUILD_AFLAGS += $(aflags-y) OBJCOPYFLAGS := -O binary LDFLAGS_vmlinux := -e start -head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o +head-y := arch/s390/kernel/head.o +head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) +head-y += arch/s390/kernel/init_task.o core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ @@ -99,12 +105,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/ boot := arch/s390/boot -all: image +all: image bzImage install: vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ -image: vmlinux +image bzImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ zfcpdump: @@ -116,4 +122,5 @@ archclean: # Don't use tabs in echo arguments define archhelp echo '* image - Kernel image for IPL ($(boot)/image)' + echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)' endef diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 4d97eef36b8d..8800cf090694 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -9,10 +9,18 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \ EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. targets := image +targets += bzImage +subdir- := compressed $(obj)/image: vmlinux FORCE $(call if_changed,objcopy) +$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/compressed/vmlinux: FORCE + $(Q)$(MAKE) $(build)=$(obj)/compressed $@ + install: $(CONFIGURE) $(obj)/image sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ System.map Kerntypes "$(INSTALL_PATH)" diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile new file mode 100644 index 000000000000..6e4a67ad07e1 --- /dev/null +++ b/arch/s390/boot/compressed/Makefile @@ -0,0 +1,60 @@ +# +# linux/arch/s390/boot/compressed/Makefile +# +# create a compressed vmlinux image from the original vmlinux +# + +BITS := $(if $(CONFIG_64BIT),64,31) + +targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ + vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o + +KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 +KBUILD_CFLAGS += $(cflags-y) +KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) +KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + +GCOV_PROFILE := n + +OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) +OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o + +LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T +$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) + $(call if_changed,ld) + @: + +sed-sizes := -e 's/^\([0-9a-fA-F]*\) . 
\(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p' + +quiet_cmd_sizes = GEN $@ + cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@ + +$(obj)/sizes.h: vmlinux + $(call if_changed,sizes) + +AFLAGS_head$(BITS).o += -I$(obj) +$(obj)/head$(BITS).o: $(obj)/sizes.h + +CFLAGS_misc.o += -I$(obj) +$(obj)/misc.o: $(obj)/sizes.h + +OBJCOPYFLAGS_vmlinux.bin := -R .comment -S +$(obj)/vmlinux.bin: vmlinux + $(call if_changed,objcopy) + +vmlinux.bin.all-y := $(obj)/vmlinux.bin + +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_LZMA) := lzma + +$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) + $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) + $(call if_changed,lzma) + +LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T +$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) + $(call if_changed,ld) diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S new file mode 100644 index 000000000000..2a5523a32bcc --- /dev/null +++ b/arch/s390/boot/compressed/head31.S @@ -0,0 +1,51 @@ +/* + * Startup glue code to uncompress the kernel + * + * Copyright IBM Corp. 2010 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/init.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/page.h> +#include "sizes.h" + +__HEAD + .globl startup_continue +startup_continue: + basr %r13,0 # get base +.LPG1: + # setup stack + l %r15,.Lstack-.LPG1(%r13) + ahi %r15,-96 + l %r1,.Ldecompress-.LPG1(%r13) + basr %r14,%r1 + # setup registers for memory mover & branch to target + lr %r4,%r2 + l %r2,.Loffset-.LPG1(%r13) + la %r4,0(%r2,%r4) + l %r3,.Lmvsize-.LPG1(%r13) + lr %r5,%r3 + # move the memory mover someplace safe + la %r1,0x200 + mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) + # decompress image is started at 0x11000 + lr %r6,%r2 + br %r1 +mover: + mvcle %r2,%r4,0 + jo mover + br %r6 +mover_end: + + .align 8 +.Lstack: + .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) +.Ldecompress: + .long decompress_kernel +.Loffset: + .long 0x11000 +.Lmvsize: + .long SZ__bss_start diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S new file mode 100644 index 000000000000..2982cb140550 --- /dev/null +++ b/arch/s390/boot/compressed/head64.S @@ -0,0 +1,48 @@ +/* + * Startup glue code to uncompress the kernel + * + * Copyright IBM Corp. 
2010 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/init.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/page.h> +#include "sizes.h" + +__HEAD + .globl startup_continue +startup_continue: + basr %r13,0 # get base +.LPG1: + # setup stack + lg %r15,.Lstack-.LPG1(%r13) + aghi %r15,-160 + brasl %r14,decompress_kernel + # setup registers for memory mover & branch to target + lgr %r4,%r2 + lg %r2,.Loffset-.LPG1(%r13) + la %r4,0(%r2,%r4) + lg %r3,.Lmvsize-.LPG1(%r13) + lgr %r5,%r3 + # move the memory mover someplace safe + la %r1,0x200 + mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) + # decompress image is started at 0x11000 + lgr %r6,%r2 + br %r1 +mover: + mvcle %r2,%r4,0 + jo mover + br %r6 +mover_end: + + .align 8 +.Lstack: + .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) +.Loffset: + .quad 0x11000 +.Lmvsize: + .quad SZ__bss_start diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c new file mode 100644 index 000000000000..a97d69525829 --- /dev/null +++ b/arch/s390/boot/compressed/misc.c @@ -0,0 +1,158 @@ +/* + * Definitions and wrapper functions for kernel decompressor + * + * Copyright IBM Corp. 2010 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <asm/uaccess.h> +#include <asm/page.h> +#include <asm/ipl.h> +#include "sizes.h" + +/* + * gzip declarations + */ +#define STATIC static + +#undef memset +#undef memcpy +#undef memmove +#define memzero(s, n) memset((s), 0, (n)) + +/* Symbols defined by linker scripts */ +extern char input_data[]; +extern int input_len; +extern int _text; +extern int _end; + +static void error(char *m); + +static unsigned long free_mem_ptr; +static unsigned long free_mem_end_ptr; + +#ifdef CONFIG_HAVE_KERNEL_BZIP2 +#define HEAP_SIZE 0x400000 +#else +#define HEAP_SIZE 0x10000 +#endif + +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + +extern _sclp_print_early(const char *); + +int puts(const char *s) +{ + _sclp_print_early(s); + return 0; +} + +void *memset(void *s, int c, size_t n) +{ + char *xs; + + if (c == 0) + return __builtin_memset(s, 0, n); + + xs = (char *) s; + if (n > 0) + do { + *xs++ = c; + } while (--n > 0); + return s; +} + +void *memcpy(void *__dest, __const void *__src, size_t __n) +{ + return __builtin_memcpy(__dest, __src, __n); +} + +void *memmove(void *__dest, __const void *__src, size_t __n) +{ + char *d; + const char *s; + + if (__dest <= __src) + return __builtin_memcpy(__dest, __src, __n); + d = __dest + __n; + s = __src + __n; + while (__n--) + *--d = *--s; + return __dest; +} + +static void error(char *x) +{ + unsigned long long psw = 0x000a0000deadbeefULL; + + puts("\n\n"); + puts(x); + puts("\n\n -- System halted"); + + asm volatile("lpsw %0" : : "Q" (psw)); +} + +/* + * Safe guard the ipl parameter block against a memory area that will be + * overwritten. The validity check for the ipl parameter block is complex + * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to + * the ipl parameter block intersects with the passed memory area we can + * safely assume that we can read from that memory. In that case just copy + * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter + * block. 
+ */ +static void check_ipl_parmblock(void *start, unsigned long size) +{ + void *src, *dst; + + src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr; + if (src + PAGE_SIZE <= start || src >= start + size) + return; + dst = (void *) IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; +} + +unsigned long decompress_kernel(void) +{ + unsigned long output_addr; + unsigned char *output; + + free_mem_ptr = (unsigned long)&_end; + free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; + output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); + + check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); + +#ifdef CONFIG_BLK_DEV_INITRD + /* + * Move the initrd right behind the end of the decompressed + * kernel image. + */ + if (INITRD_START && INITRD_SIZE && + INITRD_START < (unsigned long) output + SZ__bss_start) { + check_ipl_parmblock(output + SZ__bss_start, + INITRD_START + INITRD_SIZE); + memmove(output + SZ__bss_start, + (void *) INITRD_START, INITRD_SIZE); + INITRD_START = (unsigned long) output + SZ__bss_start; + } +#endif + + puts("Uncompressing Linux... "); + decompress(input_data, input_len, NULL, NULL, output, NULL, error); + puts("Ok, booting the kernel.\n"); + return (unsigned long) output; +} + diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S new file mode 100644 index 000000000000..d80f79d8dd9c --- /dev/null +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -0,0 +1,55 @@ +#include <asm-generic/vmlinux.lds.h> + +#ifdef CONFIG_64BIT +OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") +OUTPUT_ARCH(s390:64-bit) +#else +OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") +OUTPUT_ARCH(s390) +#endif + +ENTRY(startup) + +SECTIONS +{ + /* Be careful parts of head_64.S assume startup_32 is at + * address 0. + */ + . = 0; + .head.text : { + _head = . ; + HEAD_TEXT + _ehead = . ; + } + .rodata.compressed : { + *(.rodata.compressed) + } + .text : { + _text = .; /* Text */ + *(.text) + *(.text.*) + _etext = . ; + } + .rodata : { + _rodata = . ; + *(.rodata) /* read-only data */ + *(.rodata.*) + _erodata = . ; + } + .data : { + _data = . ; + *(.data) + *(.data.*) + _edata = . ; + } + . = ALIGN(256); + .bss : { + _bss = . ; + *(.bss) + *(.bss.*) + *(COMMON) + . = ALIGN(8); /* For convenience during zeroing */ + _ebss = .; + } + _end = .; +} diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr new file mode 100644 index 000000000000..f02382ae5c48 --- /dev/null +++ b/arch/s390/boot/compressed/vmlinux.scr @@ -0,0 +1,10 @@ +SECTIONS +{ + .rodata.compressed : { + input_len = .; + LONG(input_data_end - input_data) input_data = .; + *(.data) + output_len = . 
- 4; + input_data_end = .; + } +} diff --git a/arch/s390/defconfig b/arch/s390/defconfig index b416aa11b91e..7ae71cc56973 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -36,6 +36,13 @@ CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_LZO is not set CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 2b92d501425f..87cf523192e9 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -488,7 +488,7 @@ out: static int diag224(void *ptr) { - int rc = -ENOTSUPP; + int rc = -EOPNOTSUPP; asm volatile( " diag %1,%2,0x224\n" @@ -507,7 +507,7 @@ static int diag224_get_name_table(void) return -ENOMEM; if (diag224(diag224_cpu_names)) { kfree(diag224_cpu_names); - return -ENOTSUPP; + return -EOPNOTSUPP; } EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); return 0; diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 2a113d6a7dfd..451bfbb9db3d 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -18,8 +18,6 @@ #define ATOMIC_INIT(i) { (i) } -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - #define __CS_LOOP(ptr, op_val, op_string) ({ \ int old_val, new_val; \ asm volatile( \ @@ -35,26 +33,6 @@ new_val; \ }) -#else /* __GNUC__ */ - -#define __CS_LOOP(ptr, op_val, op_string) ({ \ - int old_val, new_val; \ - asm volatile( \ - " l %0,0(%3)\n" \ - "0: lr %1,%0\n" \ - op_string " %1,%4\n" \ - " cs %0,%1,0(%3)\n" \ - " jl 0b" \ - : "=&d" (old_val), "=&d" (new_val), \ - "=m" (((atomic_t *)(ptr))->counter) \ - : "a" (ptr), "d" (op_val), \ - "m" (((atomic_t *)(ptr))->counter) \ - : "cc", "memory"); \ - new_val; \ -}) - -#endif /* __GNUC__ */ - static inline int atomic_read(const atomic_t *v) { barrier(); @@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) asm volatile( " cs %0,%2,%1" : "+d" (old), "=Q" (v->counter) : "d" (new), "Q" (v->counter) : "cc", "memory"); -#else /* __GNUC__ */ - asm volatile( - " cs %0,%3,0(%2)" - : "+d" (old), "=m" (v->counter) - : "a" (v), "d" (new), "m" (v->counter) - : "cc", "memory"); -#endif /* __GNUC__ */ return old; } @@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - #define __CSG_LOOP(ptr, op_val, op_string) ({ \ long long old_val, new_val; \ asm volatile( \ @@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) new_val; \ }) -#else /* __GNUC__ */ - -#define __CSG_LOOP(ptr, op_val, op_string) ({ \ - long long old_val, new_val; \ - asm volatile( \ - " lg %0,0(%3)\n" \ - "0: lgr %1,%0\n" \ - op_string " %1,%4\n" \ - " csg %0,%1,0(%3)\n" \ - " jl 0b" \ - : "=&d" (old_val), "=&d" (new_val), \ - "=m" (((atomic_t *)(ptr))->counter) \ - : "a" (ptr), "d" (op_val), \ - "m" (((atomic_t *)(ptr))->counter) \ - : "cc", "memory"); \ - new_val; \ -}) - -#endif /* __GNUC__ */ - static inline long long atomic64_read(const atomic64_t *v) { barrier(); @@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) static inline long 
long atomic64_cmpxchg(atomic64_t *v, long long old, long long new) { -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) asm volatile( " csg %0,%2,%1" : "+d" (old), "=Q" (v->counter) : "d" (new), "Q" (v->counter) : "cc", "memory"); -#else /* __GNUC__ */ - asm volatile( - " csg %0,%3,0(%2)" - : "+d" (old), "=m" (v->counter) - : "a" (v), "d" (new), "m" (v->counter) - : "cc", "memory"); -#endif /* __GNUC__ */ return old; } @@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v) register_pair rp; asm volatile( - " lm %0,%N0,0(%1)" - : "=&d" (rp) - : "a" (&v->counter), "m" (v->counter) - ); + " lm %0,%N0,%1" + : "=&d" (rp) : "Q" (v->counter) ); return rp.pair; } @@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i) register_pair rp = {.pair = i}; asm volatile( - " stm %1,%N1,0(%2)" - : "=m" (v->counter) - : "d" (rp), "a" (&v->counter) - ); + " stm %1,%N1,%0" + : "=Q" (v->counter) : "d" (rp) ); } static inline long long atomic64_xchg(atomic64_t *v, long long new) @@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new) register_pair rp_old; asm volatile( - " lm %0,%N0,0(%2)\n" - "0: cds %0,%3,0(%2)\n" + " lm %0,%N0,%1\n" + "0: cds %0,%2,%1\n" " jl 0b\n" - : "=&d" (rp_old), "+m" (v->counter) - : "a" (&v->counter), "d" (rp_new) + : "=&d" (rp_old), "=Q" (v->counter) + : "d" (rp_new), "Q" (v->counter) : "cc"); return rp_old.pair; } @@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, register_pair rp_new = {.pair = new}; asm volatile( - " cds %0,%3,0(%2)" - : "+&d" (rp_old), "+m" (v->counter) - : "a" (&v->counter), "d" (rp_new) + " cds %0,%2,%1" + : "+&d" (rp_old), "=Q" (v->counter) + : "d" (rp_new), "Q" (v->counter) : "cc"); return rp_old.pair; } diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index b30606f6d523..2e05972c5085 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -71,8 +71,6 @@ extern const char _sb_findmap[]; #define __BITOPS_AND "nr" #define __BITOPS_XOR "xr" -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ asm volatile( \ " l %0,%2\n" \ @@ -85,22 +83,6 @@ extern const char _sb_findmap[]; : "d" (__val), "Q" (*(unsigned long *) __addr) \ : "cc"); -#else /* __GNUC__ */ - -#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ - asm volatile( \ - " l %0,0(%4)\n" \ - "0: lr %1,%0\n" \ - __op_string " %1,%3\n" \ - " cs %0,%1,0(%4)\n" \ - " jl 0b" \ - : "=&d" (__old), "=&d" (__new), \ - "=m" (*(unsigned long *) __addr) \ - : "d" (__val), "a" (__addr), \ - "m" (*(unsigned long *) __addr) : "cc"); - -#endif /* __GNUC__ */ - #else /* __s390x__ */ #define __BITOPS_ALIGN 7 @@ -109,8 +91,6 @@ extern const char _sb_findmap[]; #define __BITOPS_AND "ngr" #define __BITOPS_XOR "xgr" -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ asm volatile( \ " lg %0,%2\n" \ @@ -123,23 +103,6 @@ extern const char _sb_findmap[]; : "d" (__val), "Q" (*(unsigned long *) __addr) \ : "cc"); -#else /* __GNUC__ */ - -#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ - asm volatile( \ - " lg %0,0(%4)\n" \ - "0: lgr %1,%0\n" \ - __op_string " %1,%3\n" \ - " csg %0,%1,0(%4)\n" \ - " jl 0b" \ - : "=&d" (__old), "=&d" (__new), \ - "=m" (*(unsigned long *) __addr) \ - : "d" (__val), "a" (__addr), \ - "m" (*(unsigned long *) __addr) : "cc"); - - -#endif /* __GNUC__ */ - 
#endif /* __s390x__ */ #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) @@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); asm volatile( - " oc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) : "a" (addr), - "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); + " oc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); } static inline void @@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); asm volatile( - " nc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) : "a" (addr), - "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); + " nc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); } static inline void @@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); asm volatile( - " xc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) : "a" (addr), - "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); + " xc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); } static inline void @@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( - " oc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), - "m" (*(char *) addr) : "cc", "memory"); + " oc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) + : "cc", "memory"); return (ch >> (nr & 7)) & 1; } #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) @@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( - " nc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) - : "a" (addr), "a" (_ni_bitmap + (nr & 7)), - "m" (*(char *) addr) : "cc", "memory"); + " nc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) + : "cc", "memory"); return (ch >> (nr & 7)) & 1; } #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) @@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( - " xc 0(1,%1),0(%2)" - : "=m" (*(char *) addr) - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), - "m" (*(char *) addr) : "cc", "memory"); + " xc %O0(1,%R0),%1" + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) + : "cc", "memory"); return (ch >> (nr & 7)) & 1; } #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) @@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p, p = (unsigned long *)((unsigned long) p + offset); #ifndef __s390x__ asm volatile( - " ic %0,0(%1)\n" - " icm %0,2,1(%1)\n" - " icm %0,4,2(%1)\n" - " icm %0,8,3(%1)" - : "=&d" (word) : "a" (p), "m" (*p) : "cc"); + " ic %0,%O1(%R1)\n" + " icm %0,2,%O1+1(%R1)\n" + " icm %0,4,%O1+2(%R1)\n" + " icm %0,8,%O1+3(%R1)" + : "=&d" (word) : "Q" (*p) : "cc"); #else asm volatile( " lrvg %0,%1" diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index b1066b9fb5f8..9beeb9db9b23 100644 --- a/arch/s390/include/asm/bug.h +++ 
b/arch/s390/include/asm/bug.h @@ -5,12 +5,6 @@ #ifdef CONFIG_BUG -#ifdef CONFIG_64BIT -#define S390_LONG ".quad" -#else -#define S390_LONG ".long" -#endif - #ifdef CONFIG_DEBUG_BUGVERBOSE #define __EMIT_BUG(x) do { \ @@ -21,7 +15,7 @@ "2: .asciz \""__FILE__"\"\n" \ ".previous\n" \ ".section __bug_table,\"a\"\n" \ - "3:\t" S390_LONG "\t1b,2b\n" \ + "3: .long 1b-3b,2b-3b\n" \ " .short %0,%1\n" \ " .org 3b+%2\n" \ ".previous\n" \ @@ -37,7 +31,7 @@ "0: j 0b+2\n" \ "1:\n" \ ".section __bug_table,\"a\"\n" \ - "2:\t" S390_LONG "\t1b\n" \ + "2: .long 1b-2b\n" \ " .short %0\n" \ " .org 2b+%1\n" \ ".previous\n" \ diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h index 2185a6d619d3..749a97e61bea 100644 --- a/arch/s390/include/asm/crw.h +++ b/arch/s390/include/asm/crw.h @@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int); extern int crw_register_handler(int rsc, crw_handler_t handler); extern void crw_unregister_handler(int rsc); extern void crw_handle_channel_report(void); +void crw_wait_for_channel_report(void); #define NR_RSCS 16 diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h index 80ef58c61970..538e1b36a726 100644 --- a/arch/s390/include/asm/etr.h +++ b/arch/s390/include/asm/etr.h @@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl) int rc = -ENOSYS; asm volatile( - " .insn s,0xb2160000,0(%2)\n" + " .insn s,0xb2160000,%1\n" "0: la %0,0\n" "1:\n" EX_TABLE(0b,1b) - : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); + : "+d" (rc) : "Q" (*ctrl)); return rc; } @@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib) int rc = -ENOSYS; asm volatile( - " .insn s,0xb2170000,0(%2)\n" + " .insn s,0xb2170000,%1\n" "0: la %0,0\n" "1:\n" EX_TABLE(0b,1b) - : "+d" (rc) : "m" (*aib), "a" (aib)); + : "+d" (rc) : "Q" (*aib)); return rc; } @@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func) int rc = -ENOSYS; asm volatile( - " .insn s,0xb2b30000,0(%2)\n" + " .insn s,0xb2b30000,%1\n" "0: la %0,0\n" "1:\n" EX_TABLE(0b,1b) - : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); + : "+d" (rc) : "Q" (*aib), "d" (reg0)); return rc; } diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index c2fb432f576a..15b3ac253898 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - /* store then or system mask. */ #define __raw_local_irq_stosm(__or) \ ({ \ @@ -36,40 +34,6 @@ asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ }) -#else /* __GNUC__ */ - -/* store then or system mask. */ -#define __raw_local_irq_stosm(__or) \ -({ \ - unsigned long __mask; \ - asm volatile( \ - " stosm 0(%1),%2" \ - : "=m" (__mask) \ - : "a" (&__mask), "i" (__or) : "memory"); \ - __mask; \ -}) - -/* store then and system mask. */ -#define __raw_local_irq_stnsm(__and) \ -({ \ - unsigned long __mask; \ - asm volatile( \ - " stnsm 0(%1),%2" \ - : "=m" (__mask) \ - : "a" (&__mask), "i" (__and) : "memory"); \ - __mask; \ -}) - -/* set system mask. */ -#define __raw_local_irq_ssm(__mask) \ -({ \ - asm volatile( \ - " ssm 0(%0)" \ - : : "a" (&__mask), "m" (__mask) : "memory"); \ -}) - -#endif /* __GNUC__ */ - /* interrupt control.. 
*/ static inline unsigned long raw_local_irq_enable(void) { diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index c25dfac7dd76..05527c040b7a 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -1,141 +1,16 @@ /* - * include/asm-s390/lowcore.h - * - * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Hartmut Penner (hp@de.ibm.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com), - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Copyright IBM Corp. 1999,2010 + * Author(s): Hartmut Penner <hp@de.ibm.com>, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Denis Joseph Barrow, */ #ifndef _ASM_S390_LOWCORE_H #define _ASM_S390_LOWCORE_H -#define __LC_IPL_PARMBLOCK_PTR 0x0014 -#define __LC_EXT_PARAMS 0x0080 -#define __LC_CPU_ADDRESS 0x0084 -#define __LC_EXT_INT_CODE 0x0086 - -#define __LC_SVC_ILC 0x0088 -#define __LC_SVC_INT_CODE 0x008a -#define __LC_PGM_ILC 0x008c -#define __LC_PGM_INT_CODE 0x008e - -#define __LC_PER_ATMID 0x0096 -#define __LC_PER_ADDRESS 0x0098 -#define __LC_PER_ACCESS_ID 0x00a1 -#define __LC_AR_MODE_ID 0x00a3 - -#define __LC_SUBCHANNEL_ID 0x00b8 -#define __LC_SUBCHANNEL_NR 0x00ba -#define __LC_IO_INT_PARM 0x00bc -#define __LC_IO_INT_WORD 0x00c0 -#define __LC_STFL_FAC_LIST 0x00c8 -#define __LC_MCCK_CODE 0x00e8 - -#define __LC_DUMP_REIPL 0x0e00 - -#ifndef __s390x__ -#define __LC_EXT_OLD_PSW 0x0018 -#define __LC_SVC_OLD_PSW 0x0020 -#define __LC_PGM_OLD_PSW 0x0028 -#define __LC_MCK_OLD_PSW 0x0030 -#define __LC_IO_OLD_PSW 0x0038 -#define __LC_EXT_NEW_PSW 0x0058 -#define __LC_SVC_NEW_PSW 0x0060 -#define __LC_PGM_NEW_PSW 0x0068 -#define __LC_MCK_NEW_PSW 0x0070 -#define __LC_IO_NEW_PSW 0x0078 -#define __LC_SAVE_AREA 0x0200 -#define __LC_RETURN_PSW 0x0240 -#define __LC_RETURN_MCCK_PSW 0x0248 -#define __LC_SYNC_ENTER_TIMER 0x0250 -#define __LC_ASYNC_ENTER_TIMER 0x0258 -#define __LC_EXIT_TIMER 0x0260 -#define __LC_USER_TIMER 0x0268 -#define __LC_SYSTEM_TIMER 0x0270 -#define __LC_STEAL_TIMER 0x0278 -#define __LC_LAST_UPDATE_TIMER 0x0280 -#define __LC_LAST_UPDATE_CLOCK 0x0288 -#define __LC_CURRENT 0x0290 -#define __LC_THREAD_INFO 0x0294 -#define __LC_KERNEL_STACK 0x0298 -#define __LC_ASYNC_STACK 0x029c -#define __LC_PANIC_STACK 0x02a0 -#define __LC_KERNEL_ASCE 0x02a4 -#define __LC_USER_ASCE 0x02a8 -#define __LC_USER_EXEC_ASCE 0x02ac -#define __LC_CPUID 0x02b0 -#define __LC_INT_CLOCK 0x02c8 -#define __LC_MACHINE_FLAGS 0x02d8 -#define __LC_FTRACE_FUNC 0x02dc -#define __LC_IRB 0x0300 -#define __LC_PFAULT_INTPARM 0x0080 -#define __LC_CPU_TIMER_SAVE_AREA 0x00d8 -#define __LC_CLOCK_COMP_SAVE_AREA 0x00e0 -#define __LC_PSW_SAVE_AREA 0x0100 -#define __LC_PREFIX_SAVE_AREA 0x0108 -#define __LC_AREGS_SAVE_AREA 0x0120 -#define __LC_FPREGS_SAVE_AREA 0x0160 -#define __LC_GPREGS_SAVE_AREA 0x0180 -#define __LC_CREGS_SAVE_AREA 0x01c0 -#else /* __s390x__ */ -#define __LC_LAST_BREAK 0x0110 -#define __LC_EXT_OLD_PSW 0x0130 -#define __LC_SVC_OLD_PSW 0x0140 -#define __LC_PGM_OLD_PSW 0x0150 -#define __LC_MCK_OLD_PSW 0x0160 -#define __LC_IO_OLD_PSW 0x0170 -#define __LC_RESTART_PSW 0x01a0 -#define __LC_EXT_NEW_PSW 0x01b0 -#define __LC_SVC_NEW_PSW 0x01c0 -#define __LC_PGM_NEW_PSW 0x01d0 -#define __LC_MCK_NEW_PSW 0x01e0 -#define __LC_IO_NEW_PSW 0x01f0 -#define __LC_SAVE_AREA 0x0200 -#define __LC_RETURN_PSW 0x0280 -#define __LC_RETURN_MCCK_PSW 0x0290 -#define __LC_SYNC_ENTER_TIMER 0x02a0 -#define __LC_ASYNC_ENTER_TIMER 0x02a8 -#define __LC_EXIT_TIMER 0x02b0 -#define 
__LC_USER_TIMER 0x02b8 -#define __LC_SYSTEM_TIMER 0x02c0 -#define __LC_STEAL_TIMER 0x02c8 -#define __LC_LAST_UPDATE_TIMER 0x02d0 -#define __LC_LAST_UPDATE_CLOCK 0x02d8 -#define __LC_CURRENT 0x02e0 -#define __LC_THREAD_INFO 0x02e8 -#define __LC_KERNEL_STACK 0x02f0 -#define __LC_ASYNC_STACK 0x02f8 -#define __LC_PANIC_STACK 0x0300 -#define __LC_KERNEL_ASCE 0x0308 -#define __LC_USER_ASCE 0x0310 -#define __LC_USER_EXEC_ASCE 0x0318 -#define __LC_CPUID 0x0320 -#define __LC_INT_CLOCK 0x0340 -#define __LC_VDSO_PER_CPU 0x0350 -#define __LC_MACHINE_FLAGS 0x0358 -#define __LC_FTRACE_FUNC 0x0360 -#define __LC_IRB 0x0380 -#define __LC_PASTE 0x03c0 -#define __LC_PFAULT_INTPARM 0x11b8 -#define __LC_FPREGS_SAVE_AREA 0x1200 -#define __LC_GPREGS_SAVE_AREA 0x1280 -#define __LC_PSW_SAVE_AREA 0x1300 -#define __LC_PREFIX_SAVE_AREA 0x1318 -#define __LC_FP_CREG_SAVE_AREA 0x131c -#define __LC_TODREG_SAVE_AREA 0x1324 -#define __LC_CPU_TIMER_SAVE_AREA 0x1328 -#define __LC_CLOCK_COMP_SAVE_AREA 0x1331 -#define __LC_AREGS_SAVE_AREA 0x1340 -#define __LC_CREGS_SAVE_AREA 0x1380 -#endif /* __s390x__ */ - -#ifndef __ASSEMBLY__ - -#include <asm/cpu.h> -#include <asm/ptrace.h> #include <linux/types.h> +#include <asm/ptrace.h> +#include <asm/cpu.h> void restart_int_handler(void); void ext_int_handler(void); @@ -144,7 +19,12 @@ void pgm_check_handler(void); void mcck_int_handler(void); void io_int_handler(void); -struct save_area_s390 { +#ifdef CONFIG_32BIT + +#define LC_ORDER 0 +#define LC_PAGES 1 + +struct save_area { u32 ext_save; u64 timer; u64 clk_cmp; @@ -156,54 +36,13 @@ struct save_area_s390 { u64 fp_regs[4]; u32 gp_regs[16]; u32 ctrl_regs[16]; -} __attribute__((packed)); +} __packed; -struct save_area_s390x { - u64 fp_regs[16]; - u64 gp_regs[16]; - u8 psw[16]; - u8 pad1[8]; - u32 pref_reg; - u32 fp_ctrl_reg; - u8 pad2[4]; - u32 tod_reg; - u64 timer; - u64 clk_cmp; - u8 pad3[8]; - u32 acc_regs[16]; - u64 ctrl_regs[16]; -} __attribute__((packed)); - -union save_area { - struct save_area_s390 s390; - struct save_area_s390x s390x; -}; - -#define SAVE_AREA_BASE_S390 0xd4 -#define SAVE_AREA_BASE_S390X 0x1200 - -#ifndef __s390x__ -#define SAVE_AREA_SIZE sizeof(struct save_area_s390) -#define SAVE_AREA_BASE SAVE_AREA_BASE_S390 -#else -#define SAVE_AREA_SIZE sizeof(struct save_area_s390x) -#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X -#endif - -#ifndef __s390x__ -#define LC_ORDER 0 -#else -#define LC_ORDER 1 -#endif - -#define LC_PAGES (1UL << LC_ORDER) - -struct _lowcore -{ -#ifndef __s390x__ - /* 0x0000 - 0x01ff: defined by architecture */ +struct _lowcore { psw_t restart_psw; /* 0x0000 */ - __u32 ccw2[4]; /* 0x0008 */ + psw_t restart_old_psw; /* 0x0008 */ + __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */ + __u32 ipl_parmblock_ptr; /* 0x0014 */ psw_t external_old_psw; /* 0x0018 */ psw_t svc_old_psw; /* 0x0020 */ psw_t program_old_psw; /* 0x0028 */ @@ -229,7 +68,9 @@ struct _lowcore __u32 monitor_code; /* 0x009c */ __u8 exc_access_id; /* 0x00a0 */ __u8 per_access_id; /* 0x00a1 */ - __u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ + __u8 op_access_id; /* 0x00a2 */ + __u8 ar_access_id; /* 0x00a3 */ + __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ __u16 subchannel_id; /* 0x00b8 */ __u16 subchannel_nr; /* 0x00ba */ __u32 io_int_parm; /* 0x00bc */ @@ -245,8 +86,9 @@ struct _lowcore __u32 external_damage_code; /* 0x00f4 */ __u32 failing_storage_address; /* 0x00f8 */ __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ - __u32 st_status_fixed_logout[4]; /* 0x0100 */ - __u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ + psw_t psw_save_area; /* 0x0100 */ + 
__u32 prefixreg_save_area; /* 0x0108 */ + __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */ /* CPU register save area: defined by architecture */ __u32 access_regs_save_area[16]; /* 0x0120 */ @@ -310,10 +152,32 @@ struct _lowcore /* Align to the top 1k of prefix area */ __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ -#else /* !__s390x__ */ - /* 0x0000 - 0x01ff: defined by architecture */ - __u32 ccw1[2]; /* 0x0000 */ - __u32 ccw2[4]; /* 0x0008 */ +} __packed; + +#else /* CONFIG_32BIT */ + +#define LC_ORDER 1 +#define LC_PAGES 2 + +struct save_area { + u64 fp_regs[16]; + u64 gp_regs[16]; + u8 psw[16]; + u8 pad1[8]; + u32 pref_reg; + u32 fp_ctrl_reg; + u8 pad2[4]; + u32 tod_reg; + u64 timer; + u64 clk_cmp; + u8 pad3[8]; + u32 acc_regs[16]; + u64 ctrl_regs[16]; +} __packed; + +struct _lowcore { + __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ + __u32 ipl_parmblock_ptr; /* 0x0014 */ __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ __u32 ext_params; /* 0x0080 */ __u16 cpu_addr; /* 0x0084 */ @@ -344,7 +208,9 @@ struct _lowcore __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ __u32 external_damage_code; /* 0x00f4 */ addr_t failing_storage_address; /* 0x00f8 */ - __u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ + __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */ + __u64 breaking_event_addr; /* 0x0110 */ + __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */ psw_t restart_old_psw; /* 0x0120 */ psw_t external_old_psw; /* 0x0130 */ psw_t svc_old_psw; /* 0x0140 */ @@ -425,7 +291,7 @@ struct _lowcore /* CPU register save area: defined by architecture */ __u64 floating_pt_save_area[16]; /* 0x1200 */ __u64 gpregs_save_area[16]; /* 0x1280 */ - __u32 st_status_fixed_logout[4]; /* 0x1300 */ + psw_t psw_save_area; /* 0x1300 */ __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ __u32 prefixreg_save_area; /* 0x1318 */ __u32 fpt_creg_save_area; /* 0x131c */ @@ -439,10 +305,12 @@ struct _lowcore /* align to the top of the prefix area */ __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ -#endif /* !__s390x__ */ -} __attribute__((packed)); /* End structure*/ +} __packed; + +#endif /* CONFIG_32BIT */ #define S390_lowcore (*((struct _lowcore *) 0)) + extern struct _lowcore *lowcore_ptr[]; static inline void set_prefix(__u32 address) @@ -458,6 +326,4 @@ static inline __u32 store_prefix(void) return address; } -#endif - -#endif +#endif /* _ASM_S390_LOWCORE_H */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 5e9daf5d7f22..af650fb47206 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -107,9 +107,6 @@ typedef pte_t *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* default storage key used for all pages */ -extern unsigned int default_storage_key; - static inline void page_set_storage_key(unsigned long addr, unsigned int skey) { diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index b42715458312..73e259834e10 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -28,7 +28,7 @@ static inline void get_cpu_id(struct cpuid *ptr) { - asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); + asm volatile("stidp %0" : "=Q" (*ptr)); } extern void s390_adjust_jiffies(void); @@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key) static inline void __load_psw(psw_t psw) { #ifndef __s390x__ - asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); + asm volatile("lpsw %0" : : "Q" (psw) : "cc"); #else - asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); + asm volatile("lpswe 
%0" : : "Q" (psw) : "cc"); #endif } @@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask) asm volatile( " basr %0,0\n" "0: ahi %0,1f-0b\n" - " st %0,4(%1)\n" - " lpsw 0(%1)\n" + " st %0,%O1+4(%R1)\n" + " lpsw %1\n" "1:" - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); + : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); #else /* __s390x__ */ asm volatile( " larl %0,1f\n" - " stg %0,8(%1)\n" - " lpswe 0(%1)\n" + " stg %0,%O1+8(%R1)\n" + " lpswe %1\n" "1:" - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); + : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); #endif /* __s390x__ */ } diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 79d849f014f0..c666bfe5e984 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -13,7 +13,8 @@ #include <asm/cio.h> #include <asm/ccwdev.h> -#define QDIO_MAX_QUEUES_PER_IRQ 32 +/* only use 4 queues to save some cachelines */ +#define QDIO_MAX_QUEUES_PER_IRQ 4 #define QDIO_MAX_BUFFERS_PER_Q 128 #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) #define QDIO_MAX_ELEMENTS_PER_BUFFER 16 diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 9d2a17971805..423fdda2322d 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h @@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " ahi %1,%5\n" - " cs %0,%1,0(%3)\n" + " ahi %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " aghi %1,%5\n" - " csg %0,%1,0(%3)\n" + " aghi %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) + : "cc", "memory"); if (old < 0) rwsem_down_read_failed(sem); } @@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: ltr %1,%0\n" " jm 1f\n" - " ahi %1,%5\n" - " cs %0,%1,0(%3)\n" + " ahi %1,%4\n" + " cs %0,%1,%2\n" " jl 0b\n" "1:" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: ltgr %1,%0\n" " jm 1f\n" - " aghi %1,%5\n" - " csg %0,%1,0(%3)\n" + " aghi %1,%4\n" + " csg %0,%1,%2\n" " jl 0b\n" "1:" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) + : "cc", "memory"); return old >= 0 ? 
1 : 0; } @@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) tmp = RWSEM_ACTIVE_WRITE_BIAS; asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " a %1,%5\n" - " cs %0,%1,0(%3)\n" + " a %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " ag %1,%5\n" - " csg %0,%1,0(%3)\n" + " ag %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), "m" (tmp) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "m" (tmp) : "cc", "memory"); if (old != 0) rwsem_down_write_failed(sem); @@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%2)\n" + " l %0,%1\n" "0: ltr %0,%0\n" " jnz 1f\n" - " cs %0,%4,0(%2)\n" + " cs %0,%3,%1\n" " jl 0b\n" #else /* __s390x__ */ - " lg %0,0(%2)\n" + " lg %0,%1\n" "0: ltgr %0,%0\n" " jnz 1f\n" - " csg %0,%4,0(%2)\n" + " csg %0,%3,%1\n" " jl 0b\n" #endif /* __s390x__ */ "1:" - : "=&d" (old), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), - "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); + : "=&d" (old), "=Q" (sem->count) + : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) + : "cc", "memory"); return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; } @@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " ahi %1,%5\n" - " cs %0,%1,0(%3)\n" + " ahi %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " aghi %1,%5\n" - " csg %0,%1,0(%3)\n" + " aghi %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), - "i" (-RWSEM_ACTIVE_READ_BIAS) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); if (new < 0) if ((new & RWSEM_ACTIVE_MASK) == 0) @@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem) tmp = -RWSEM_ACTIVE_WRITE_BIAS; asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " a %1,%5\n" - " cs %0,%1,0(%3)\n" + " a %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " ag %1,%5\n" - " csg %0,%1,0(%3)\n" + " ag %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), "m" (tmp) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "m" (tmp) : "cc", "memory"); if (new < 0) if ((new & RWSEM_ACTIVE_MASK) == 0) @@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem) tmp = -RWSEM_WAITING_BIAS; asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " a %1,%5\n" - " cs %0,%1,0(%3)\n" + " a %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " ag %1,%5\n" - " csg %0,%1,0(%3)\n" + " ag %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), "m" (tmp) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "m" (tmp) : "cc", "memory"); if (new > 1) rwsem_downgrade_wake(sem); @@ -334,20 +333,20 @@ static inline 
void rwsem_atomic_add(long delta, struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " ar %1,%5\n" - " cs %0,%1,0(%3)\n" + " ar %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " agr %1,%5\n" - " csg %0,%1,0(%3)\n" + " agr %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), "d" (delta) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "d" (delta) : "cc", "memory"); } @@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) asm volatile( #ifndef __s390x__ - " l %0,0(%3)\n" + " l %0,%2\n" "0: lr %1,%0\n" - " ar %1,%5\n" - " cs %0,%1,0(%3)\n" + " ar %1,%4\n" + " cs %0,%1,%2\n" " jl 0b" #else /* __s390x__ */ - " lg %0,0(%3)\n" + " lg %0,%2\n" "0: lgr %1,%0\n" - " agr %1,%5\n" - " csg %0,%1,0(%3)\n" + " agr %1,%4\n" + " csg %0,%1,%2\n" " jl 0b" #endif /* __s390x__ */ - : "=&d" (old), "=&d" (new), "=m" (sem->count) - : "a" (&sem->count), "m" (sem->count), "d" (delta) + : "=&d" (old), "=&d" (new), "=Q" (sem->count) + : "Q" (sem->count), "d" (delta) : "cc", "memory"); return new; } diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 52a779c337e8..9ab6bd3a65d1 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -14,14 +14,14 @@ #ifdef __KERNEL__ -#include <asm/lowcore.h> -#include <asm/types.h> - #define PARMAREA 0x10400 #define MEMORY_CHUNKS 256 #ifndef __ASSEMBLY__ +#include <asm/lowcore.h> +#include <asm/types.h> + #ifndef __s390x__ #define IPL_DEVICE (*(unsigned long *) (0x10404)) #define INITRD_START (*(unsigned long *) (0x1040C)) @@ -71,9 +71,12 @@ extern unsigned int user_mode; #define MACHINE_FLAG_KVM (1UL << 9) #define MACHINE_FLAG_HPAGE (1UL << 10) #define MACHINE_FLAG_PFMF (1UL << 11) +#define MACHINE_FLAG_LPAR (1UL << 12) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) +#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) + #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) #ifndef __s390x__ diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index f72d611f7e13..e3bffd4e2d66 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -1,24 +1,19 @@ /* - * include/asm-s390/sigp.h + * Routines and structures for signalling other processors. * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) - * - * sigp.h by D.J. Barrow (c) IBM 1999 - * contains routines / structures for signalling other S/390 processors in an - * SMP configuration. + * Copyright IBM Corp. 1999,2010 + * Author(s): Denis Joseph Barrow, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, */ -#ifndef __SIGP__ -#define __SIGP__ +#ifndef __ASM_SIGP_H +#define __ASM_SIGP_H #include <asm/system.h> -/* get real cpu address from logical cpu number */ -extern int __cpu_logical_map[]; +/* Get real cpu address from logical cpu number. 
*/ +extern unsigned short __cpu_logical_map[]; static inline int cpu_logical_map(int cpu) { @@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu) #endif } -typedef enum -{ - sigp_unassigned=0x0, - sigp_sense, - sigp_external_call, - sigp_emergency_signal, - sigp_start, - sigp_stop, - sigp_restart, - sigp_unassigned1, - sigp_unassigned2, - sigp_stop_and_store_status, - sigp_unassigned3, - sigp_initial_cpu_reset, - sigp_cpu_reset, - sigp_set_prefix, - sigp_store_status_at_address, - sigp_store_extended_status_at_address -} sigp_order_code; - -typedef __u32 sigp_status_word; - -typedef enum -{ - sigp_order_code_accepted=0, - sigp_status_stored, - sigp_busy, - sigp_not_operational -} sigp_ccode; - +enum { + sigp_sense = 1, + sigp_external_call = 2, + sigp_emergency_signal = 3, + sigp_start = 4, + sigp_stop = 5, + sigp_restart = 6, + sigp_stop_and_store_status = 9, + sigp_initial_cpu_reset = 11, + sigp_cpu_reset = 12, + sigp_set_prefix = 13, + sigp_store_status_at_address = 14, + sigp_store_extended_status_at_address = 15, + sigp_set_architecture = 18, + sigp_conditional_emergency_signal = 19, + sigp_sense_running = 21, +}; + +enum { + sigp_order_code_accepted = 0, + sigp_status_stored = 1, + sigp_busy = 2, + sigp_not_operational = 3, +}; /* - * Definitions for the external call + * Definitions for external call. */ - -/* 'Bit' signals, asynchronous */ -typedef enum -{ - ec_schedule=0, +enum { + ec_schedule = 0, ec_call_function, ec_call_function_single, - ec_bit_last -} ec_bit_sig; +}; /* - * Signal processor + * Signal processor. */ -static inline sigp_ccode -signal_processor(__u16 cpu_addr, sigp_order_code order_code) +static inline int raw_sigp(u16 cpu, int order) { register unsigned long reg1 asm ("1") = 0; - sigp_ccode ccode; + int ccode; asm volatile( " sigp %1,%2,0(%3)\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode) - : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), - "a" (order_code) : "cc" , "memory"); + : "d" (reg1), "d" (cpu), + "a" (order) : "cc" , "memory"); return ccode; } /* - * Signal processor with parameter + * Signal processor with parameter. */ -static inline sigp_ccode -signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) +static inline int raw_sigp_p(u32 parameter, u16 cpu, int order) { register unsigned int reg1 asm ("1") = parameter; - sigp_ccode ccode; + int ccode; asm volatile( " sigp %1,%2,0(%3)\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode) - : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), - "a" (order_code) : "cc" , "memory"); + : "d" (reg1), "d" (cpu), + "a" (order) : "cc" , "memory"); return ccode; } /* - * Signal processor with parameter and return status + * Signal processor with parameter and return status. 
*/ -static inline sigp_ccode -signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, - sigp_order_code order_code) +static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order) { - register unsigned int reg1 asm ("1") = parameter; - sigp_ccode ccode; + register unsigned int reg1 asm ("1") = parm; + int ccode; asm volatile( " sigp %1,%2,0(%3)\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "+d" (reg1) - : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) + : "d" (cpu), "a" (order) : "cc" , "memory"); - *statusptr = reg1; + *status = reg1; return ccode; } -#endif /* __SIGP__ */ +static inline int sigp(int cpu, int order) +{ + return raw_sigp(cpu_logical_map(cpu), order); +} + +static inline int sigp_p(u32 parameter, int cpu, int order) +{ + return raw_sigp_p(parameter, cpu_logical_map(cpu), order); +} + +static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order) +{ + return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order); +} + +#endif /* __ASM_SIGP_H */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 2ab1141eeb50..edc03cb9cd79 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -29,7 +29,43 @@ extern int smp_cpu_polarization[]; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; + +extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *); +extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp, + int from, int to); +extern void smp_restart_cpu(void); + +/* + * returns 1 if (virtual) cpu is scheduled + * returns 0 otherwise + */ +static inline int smp_vcpu_scheduled(int cpu) +{ + u32 status; + + switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) { + case sigp_status_stored: + /* Check for running status */ + if (status & 0x400) + return 0; + break; + case sigp_not_operational: + return 0; + default: + break; + } + return 1; +} + +#else /* CONFIG_SMP */ + +static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) +{ + func(data); +} + +#define smp_vcpu_scheduled (1) #endif /* CONFIG_SMP */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index a587907d77f3..56612fc8186e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -13,8 +13,6 @@ #include <linux/smp.h> -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) - static inline int _raw_compare_and_swap(volatile unsigned int *lock, unsigned int old, unsigned int new) @@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock, return old; } -#else /* __GNUC__ */ - -static inline int -_raw_compare_and_swap(volatile unsigned int *lock, - unsigned int old, unsigned int new) -{ - asm volatile( - " cs %0,%3,0(%4)" - : "=d" (old), "=m" (*lock) - : "0" (old), "d" (new), "a" (lock), "m" (*lock) - : "cc", "memory" ); - return old; -} - -#endif /* __GNUC__ */ - /* * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. 
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h index eb18dc1f327b..6bdee21c077e 100644 --- a/arch/s390/include/asm/swab.h +++ b/arch/s390/include/asm/swab.h @@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x) asm volatile( #ifndef __s390x__ - " icm %0,8,3(%1)\n" - " icm %0,4,2(%1)\n" - " icm %0,2,1(%1)\n" - " ic %0,0(%1)" - : "=&d" (result) : "a" (x), "m" (*x) : "cc"); + " icm %0,8,%O1+3(%R1)\n" + " icm %0,4,%O1+2(%R1)\n" + " icm %0,2,%O1+1(%R1)\n" + " ic %0,%1" + : "=&d" (result) : "Q" (*x) : "cc"); #else /* __s390x__ */ " lrv %0,%1" : "=d" (result) : "m" (*x)); @@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x) asm volatile( #ifndef __s390x__ - " icm %0,2,1(%1)\n" - " ic %0,0(%1)\n" - : "=&d" (result) : "a" (x), "m" (*x) : "cc"); + " icm %0,2,%O+1(%R1)\n" + " ic %0,%1\n" + : "=&d" (result) : "Q" (*x) : "cc"); #else /* __s390x__ */ " lrvh %0,%1" : "=d" (result) : "m" (*x)); diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 9d70057d828c..22bdb2a0ee5f 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -87,7 +87,8 @@ struct sysinfo_2_2_2 { struct sysinfo_3_2_2 { char reserved_0[31]; - unsigned char count; + unsigned char :4; + unsigned char count:4; struct { char reserved_0[4]; unsigned short cpus_total; diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 379661d2f81a..67ee6c3c6bb3 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *); static inline void save_fp_regs(s390_fp_regs *fpregs) { asm volatile( - " std 0,8(%1)\n" - " std 2,24(%1)\n" - " std 4,40(%1)\n" - " std 6,56(%1)" - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); + " std 0,%O0+8(%R0)\n" + " std 2,%O0+24(%R0)\n" + " std 4,%O0+40(%R0)\n" + " std 6,%O0+56(%R0)" + : "=Q" (*fpregs) : "Q" (*fpregs)); if (!MACHINE_HAS_IEEE) return; asm volatile( - " stfpc 0(%1)\n" - " std 1,16(%1)\n" - " std 3,32(%1)\n" - " std 5,48(%1)\n" - " std 7,64(%1)\n" - " std 8,72(%1)\n" - " std 9,80(%1)\n" - " std 10,88(%1)\n" - " std 11,96(%1)\n" - " std 12,104(%1)\n" - " std 13,112(%1)\n" - " std 14,120(%1)\n" - " std 15,128(%1)\n" - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); + " stfpc %0\n" + " std 1,%O0+16(%R0)\n" + " std 3,%O0+32(%R0)\n" + " std 5,%O0+48(%R0)\n" + " std 7,%O0+64(%R0)\n" + " std 8,%O0+72(%R0)\n" + " std 9,%O0+80(%R0)\n" + " std 10,%O0+88(%R0)\n" + " std 11,%O0+96(%R0)\n" + " std 12,%O0+104(%R0)\n" + " std 13,%O0+112(%R0)\n" + " std 14,%O0+120(%R0)\n" + " std 15,%O0+128(%R0)\n" + : "=Q" (*fpregs) : "Q" (*fpregs)); } static inline void restore_fp_regs(s390_fp_regs *fpregs) { asm volatile( - " ld 0,8(%0)\n" - " ld 2,24(%0)\n" - " ld 4,40(%0)\n" - " ld 6,56(%0)" - : : "a" (fpregs), "m" (*fpregs)); + " ld 0,%O0+8(%R0)\n" + " ld 2,%O0+24(%R0)\n" + " ld 4,%O0+40(%R0)\n" + " ld 6,%O0+56(%R0)" + : : "Q" (*fpregs)); if (!MACHINE_HAS_IEEE) return; asm volatile( - " lfpc 0(%0)\n" - " ld 1,16(%0)\n" - " ld 3,32(%0)\n" - " ld 5,48(%0)\n" - " ld 7,64(%0)\n" - " ld 8,72(%0)\n" - " ld 9,80(%0)\n" - " ld 10,88(%0)\n" - " ld 11,96(%0)\n" - " ld 12,104(%0)\n" - " ld 13,112(%0)\n" - " ld 14,120(%0)\n" - " ld 15,128(%0)\n" - : : "a" (fpregs), "m" (*fpregs)); + " lfpc %0\n" + " ld 1,%O0+16(%R0)\n" + " ld 3,%O0+32(%R0)\n" + " ld 5,%O0+48(%R0)\n" + " ld 7,%O0+64(%R0)\n" + " ld 8,%O0+72(%R0)\n" + " ld 9,%O0+80(%R0)\n" + " ld 10,%O0+88(%R0)\n" + " ld 11,%O0+96(%R0)\n" + " 
ld 12,%O0+104(%R0)\n" + " ld 13,%O0+112(%R0)\n" + " ld 14,%O0+120(%R0)\n" + " ld 15,%O0+128(%R0)\n" + : : "Q" (*fpregs)); } static inline void save_access_regs(unsigned int *acrs) { - asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); + asm volatile("stam 0,15,%0" : "=Q" (*acrs)); } static inline void restore_access_regs(unsigned int *acrs) { - asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); + asm volatile("lam 0,15,%0" : : "Q" (*acrs)); } #define switch_to(prev,next,last) do { \ @@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size) shift = (3 ^ (addr & 3)) << 3; addr ^= addr & 3; asm volatile( - " l %0,0(%4)\n" + " l %0,%4\n" "0: lr 0,%0\n" " nr 0,%3\n" " or 0,%2\n" - " cs %0,0,0(%4)\n" + " cs %0,0,%4\n" " jl 0b\n" - : "=&d" (old), "=m" (*(int *) addr) - : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), - "m" (*(int *) addr) : "memory", "cc", "0"); + : "=&d" (old), "=Q" (*(int *) addr) + : "d" (x << shift), "d" (~(255 << shift)), + "Q" (*(int *) addr) : "memory", "cc", "0"); return old >> shift; case 2: addr = (unsigned long) ptr; shift = (2 ^ (addr & 2)) << 3; addr ^= addr & 2; asm volatile( - " l %0,0(%4)\n" + " l %0,%4\n" "0: lr 0,%0\n" " nr 0,%3\n" " or 0,%2\n" - " cs %0,0,0(%4)\n" + " cs %0,0,%4\n" " jl 0b\n" - : "=&d" (old), "=m" (*(int *) addr) - : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), - "m" (*(int *) addr) : "memory", "cc", "0"); + : "=&d" (old), "=Q" (*(int *) addr) + : "d" (x << shift), "d" (~(65535 << shift)), + "Q" (*(int *) addr) : "memory", "cc", "0"); return old >> shift; case 4: asm volatile( - " l %0,0(%3)\n" - "0: cs %0,%2,0(%3)\n" + " l %0,%3\n" + "0: cs %0,%2,%3\n" " jl 0b\n" - : "=&d" (old), "=m" (*(int *) ptr) - : "d" (x), "a" (ptr), "m" (*(int *) ptr) + : "=&d" (old), "=Q" (*(int *) ptr) + : "d" (x), "Q" (*(int *) ptr) : "memory", "cc"); return old; #ifdef __s390x__ case 8: asm volatile( - " lg %0,0(%3)\n" - "0: csg %0,%2,0(%3)\n" + " lg %0,%3\n" + "0: csg %0,%2,%3\n" " jl 0b\n" : "=&d" (old), "=m" (*(long *) ptr) - : "d" (x), "a" (ptr), "m" (*(long *) ptr) + : "d" (x), "Q" (*(long *) ptr) : "memory", "cc"); return old; #endif /* __s390x__ */ @@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) shift = (3 ^ (addr & 3)) << 3; addr ^= addr & 3; asm volatile( - " l %0,0(%4)\n" + " l %0,%2\n" "0: nr %0,%5\n" " lr %1,%0\n" " or %0,%2\n" " or %1,%3\n" - " cs %0,%1,0(%4)\n" + " cs %0,%1,%2\n" " jnl 1f\n" " xr %1,%0\n" " nr %1,%5\n" " jnz 0b\n" "1:" - : "=&d" (prev), "=&d" (tmp) - : "d" (old << shift), "d" (new << shift), "a" (ptr), - "d" (~(255 << shift)) + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) + : "d" (old << shift), "d" (new << shift), + "d" (~(255 << shift)), "Q" (*(int *) ptr) : "memory", "cc"); return prev >> shift; case 2: @@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) shift = (2 ^ (addr & 2)) << 3; addr ^= addr & 2; asm volatile( - " l %0,0(%4)\n" + " l %0,%2\n" "0: nr %0,%5\n" " lr %1,%0\n" " or %0,%2\n" " or %1,%3\n" - " cs %0,%1,0(%4)\n" + " cs %0,%1,%2\n" " jnl 1f\n" " xr %1,%0\n" " nr %1,%5\n" " jnz 0b\n" "1:" - : "=&d" (prev), "=&d" (tmp) - : "d" (old << shift), "d" (new << shift), "a" (ptr), - "d" (~(65535 << shift)) + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) + : "d" (old << shift), "d" (new << shift), + "d" (~(65535 << shift)), "Q" (*(int *) ptr) : "memory", "cc"); return prev >> shift; case 4: asm volatile( - " cs %0,%2,0(%3)\n" - : "=&d" (prev) : "0" (old), "d" (new), 
"a" (ptr) + " cs %0,%3,%1\n" + : "=&d" (prev), "=Q" (*(int *) ptr) + : "0" (old), "d" (new), "Q" (*(int *) ptr) : "memory", "cc"); return prev; #ifdef __s390x__ case 8: asm volatile( - " csg %0,%2,0(%3)\n" - : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) + " csg %0,%3,%1\n" + : "=&d" (prev), "=Q" (*(long *) ptr) + : "0" (old), "d" (new), "Q" (*(long *) ptr) : "memory", "cc"); return prev; #endif /* __s390x__ */ @@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ asm volatile( \ - " lctlg %1,%2,0(%0)\n" \ - : : "a" (&array), "i" (low), "i" (high), \ - "m" (*(addrtype *)(&array))); \ + " lctlg %1,%2,%0\n" \ + : : "Q" (*(addrtype *)(&array)), \ + "i" (low), "i" (high)); \ }) #define __ctl_store(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ asm volatile( \ - " stctg %2,%3,0(%1)\n" \ - : "=m" (*(addrtype *)(&array)) \ - : "a" (&array), "i" (low), "i" (high)); \ + " stctg %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ }) #else /* __s390x__ */ @@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ asm volatile( \ - " lctl %1,%2,0(%0)\n" \ - : : "a" (&array), "i" (low), "i" (high), \ - "m" (*(addrtype *)(&array))); \ + " lctl %1,%2,%0\n" \ + : : "Q" (*(addrtype *)(&array)), \ + "i" (low), "i" (high)); \ }) #define __ctl_store(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ asm volatile( \ - " stctl %2,%3,0(%1)\n" \ - : "=m" (*(addrtype *)(&array)) \ - : "a" (&array), "i" (low), "i" (high)); \ + " stctl %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ }) #endif /* __s390x__ */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 66069e736842..34f0873d6525 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -73,7 +73,7 @@ struct thread_info { /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { - return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); + return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE); } #define THREAD_SIZE_ORDER THREAD_ORDER diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 68d9fea34b4b..f174bdaa6b59 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -20,10 +20,10 @@ static inline int set_clock(__u64 time) int cc; asm volatile( - " sck 0(%2)\n" + " sck %1\n" " ipm %0\n" " srl %0,28\n" - : "=d" (cc) : "m" (time), "a" (&time) : "cc"); + : "=d" (cc) : "Q" (time) : "cc"); return cc; } @@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time) int cc; asm volatile( - " stck 0(%2)\n" + " stck %1\n" " ipm %0\n" " srl %0,28\n" - : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); + : "=d" (cc), "=Q" (*time) : : "cc"); return cc; } static inline void set_clock_comparator(__u64 time) { - asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); + asm volatile("sckc %0" : : "Q" (time)); } static inline void store_clock_comparator(__u64 *time) { - asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); + asm volatile("stckc %0" : "=Q" (*time)); } #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ @@ -57,11 +57,7 @@ static inline unsigned 
long long get_clock (void) { unsigned long long clk; -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) asm volatile("stck %0" : "=Q" (clk) : : "cc"); -#else /* __GNUC__ */ - asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); -#endif /* __GNUC__ */ return clk; } @@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void) { unsigned char clk[16]; -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) asm volatile("stcke %0" : "=Q" (clk) : : "cc"); -#else /* __GNUC__ */ - asm volatile("stcke 0(%1)" : "=m" (clk) - : "a" (clk) : "cc"); -#endif /* __GNUC__ */ - return *((unsigned long long *)&clk[1]); } diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cbf0a8745bf4..d6b1ed0ec52b 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) return uaccess.copy_from_user(n, from, to); } +extern void copy_from_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +__compiletime_warning("copy_from_user() buffer size is not provably correct") +#endif +; + /** * copy_from_user: - Copy a block of data from user space. * @to: Destination address, in kernel space. @@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned int sz = __compiletime_object_size(to); + might_fault(); + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return n; + } if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 7bdd7c8ebc91..4a76d9480cce 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -7,7 +7,7 @@ #define VDSO32_LBASE 0 #define VDSO64_LBASE 0 -#define VDSO_VERSION_STRING LINUX_2.6.26 +#define VDSO_VERSION_STRING LINUX_2.6.29 #ifndef __ASSEMBLY__ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 683f6381cc59..64230bc392fa 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) extra-y += head.o init_task.o vmlinux.lds +extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_SMP) += smp.o topology.o +obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \ + switch_cpu.o) obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 63e46433e81d..08db736dded0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -4,18 +4,27 @@ * and format the required data. */ -#include <linux/sched.h> +#define ASM_OFFSETS_C + #include <linux/kbuild.h> +#include <linux/sched.h> #include <asm/vdso.h> #include <asm/sigp.h> +/* + * Make sure that the compiler is new enough. We want a compiler that + * is known to work with the "Q" assembler constraint. 
+ */ +#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) +#error Your compiler is too old; please use version 3.3.3 or newer +#endif + int main(void) { DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); - DEFINE(__THREAD_mm_segment, - offsetof(struct task_struct, thread.mm_segment)); + DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); BLANK(); DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); BLANK(); @@ -52,18 +61,94 @@ int main(void) DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); - DEFINE(__VDSO_ECTG_BASE, - offsetof(struct vdso_per_cpu_data, ectg_timer_base)); - DEFINE(__VDSO_ECTG_USER, - offsetof(struct vdso_per_cpu_data, ectg_user_time)); + DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); + DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); /* constants used by the vdso */ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); + BLANK(); /* constants for SIGP */ DEFINE(__SIGP_STOP, sigp_stop); DEFINE(__SIGP_RESTART, sigp_restart); DEFINE(__SIGP_SENSE, sigp_sense); DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); + BLANK(); + /* lowcore offsets */ + DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); + DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr)); + DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); + DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); + DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); + DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); + DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); + DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); + DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); + DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); + DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); + DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); + DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); + DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); + DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); + DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); + DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); + DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); + BLANK(); + DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); + DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); + DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); + DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); + DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); + DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); + DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); + DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); + DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); + DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); + DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); + 
DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); + DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area)); + DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); + DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); + DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); + DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); + DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); + DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); + DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); + DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer)); + DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); + DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); + DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); + DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); + DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); + DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); + DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); + DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); + DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); + DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce)); + DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id)); + DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); + DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); + DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); + DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); + DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); + DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); + DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); + DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area)); + DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area)); + DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); + DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); + DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); +#ifdef CONFIG_32BIT + DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params)); + DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); +#else /* CONFIG_32BIT */ + DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2)); + DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); + DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); + DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); + DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); + DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); + DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); +#endif /* CONFIG_32BIT */ return 0; } diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index dc7e5259770f..15e46ca94335 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -6,8 +6,8 @@ * Michael Holzheu <holzheu@de.ibm.com> */ +#include <asm/asm-offsets.h> #include <asm/ptrace.h> -#include <asm/lowcore.h> #ifdef CONFIG_64BIT diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index db943a7ec513..b39b27d68b45 100644 --- a/arch/s390/kernel/dis.c +++ 
b/arch/s390/kernel/dis.c @@ -86,10 +86,17 @@ enum { U4_12, /* 4 bit unsigned value starting at 12 */ U4_16, /* 4 bit unsigned value starting at 16 */ U4_20, /* 4 bit unsigned value starting at 20 */ + U4_32, /* 4 bit unsigned value starting at 32 */ U8_8, /* 8 bit unsigned value starting at 8 */ U8_16, /* 8 bit unsigned value starting at 16 */ + U8_24, /* 8 bit unsigned value starting at 24 */ + U8_32, /* 8 bit unsigned value starting at 32 */ + I8_8, /* 8 bit signed value starting at 8 */ + I8_32, /* 8 bit signed value starting at 32 */ I16_16, /* 16 bit signed value starting at 16 */ + I16_32, /* 32 bit signed value starting at 16 */ U16_16, /* 16 bit unsigned value starting at 16 */ + U16_32, /* 32 bit unsigned value starting at 16 */ J16_16, /* PC relative jump offset at 16 */ J32_16, /* PC relative long offset at 16 */ I32_16, /* 32 bit signed value starting at 16 */ @@ -104,21 +111,37 @@ enum { */ enum { INSTR_INVALID, - INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, - INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, + INSTR_E, + INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, + INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, + INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, + INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, + INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, - INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, - INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, - INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, + INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, + INSTR_RRE_RR, INSTR_RRE_RR_OPT, + INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, + INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, + INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, + INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, - INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, - INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, - INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, - INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, - INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, - INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, - INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, - INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, + INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, + INSTR_RSI_RRP, + INSTR_RSL_R0RD, + INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, + INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, + INSTR_RS_RURD, + INSTR_RXE_FRRD, INSTR_RXE_RRRD, + INSTR_RXF_FRRDF, + INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, + INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, + INSTR_SIL_RDI, INSTR_SIL_RDU, + INSTR_SIY_IRD, INSTR_SIY_URD, + INSTR_SI_URD, + INSTR_SSE_RDRD, + INSTR_SSF_RRDRD, + INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, + INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, INSTR_S_00, INSTR_S_RD, }; @@ -129,7 +152,7 @@ struct operand { }; struct insn { - const char name[5]; + const char name[6]; unsigned char opfrag; unsigned char format; }; @@ -170,11 +193,16 @@ static const struct operand operands[] = [U4_12] = { 4, 12, 0 }, [U4_16] = { 4, 16, 0 }, [U4_20] = { 4, 20, 0 }, + [U4_32] = { 4, 32, 0 }, [U8_8] = { 
8, 8, 0 }, [U8_16] = { 8, 16, 0 }, + [U8_24] = { 8, 24, 0 }, + [U8_32] = { 8, 32, 0 }, [I16_16] = { 16, 16, OPERAND_SIGNED }, [U16_16] = { 16, 16, 0 }, + [U16_32] = { 16, 32, 0 }, [J16_16] = { 16, 16, OPERAND_PCREL }, + [I16_32] = { 16, 32, OPERAND_SIGNED }, [J32_16] = { 32, 16, OPERAND_PCREL }, [I32_16] = { 32, 16, OPERAND_SIGNED }, [U32_16] = { 32, 16, 0 }, @@ -183,82 +211,93 @@ static const struct operand operands[] = }; static const unsigned char formats[][7] = { - [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ - [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ - [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ - [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ - [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ - [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ - [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ - [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ - [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ - [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ - [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ - [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ - [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ - [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ - [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ - [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ - [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ - [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ - [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ - [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ - [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ - [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ - [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ - [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ - [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */ - [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ - [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ - [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ - [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ - [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ - [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ - [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ - [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ - [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ - [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ - [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ - [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ - [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ - [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. 
stmy */ + [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, + [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, + [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, + [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, + [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, + [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, + [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, + [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, + [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, + [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, + [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 }, + [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 }, + [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 }, + [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, + [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, + [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, + [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, + [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, + [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, + [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, + [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, + [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, + [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, + [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 }, + [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, + [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, + [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, + [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, + [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, + [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 }, + [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 }, + [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, + [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, + [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, + [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, + [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, + [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, + [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, + [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, + [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, + [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, + [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, + [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, + [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, + [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, + [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, + [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, + [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, + [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, + [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, + [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, + [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, + [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, + [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, + [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, + [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, + [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, - /* e.g. icmh */ - [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ - [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ - [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ - [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ - [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ - [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ - [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. 
icm */ - [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ - [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ + [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, + [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, + [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, + [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, + [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, + [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, + [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, - /* e.g. madb */ - [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ - [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ - [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ - [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ - [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ - [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ - [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ - [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ + [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, + [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, + [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 }, + [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, + [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, + [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, + [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 }, + [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 }, + [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, + [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, + [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, + [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, + [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, - /* e.g. mvc */ [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, - /* e.g. srp */ [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, - /* e.g. pack */ - [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, - /* e.g. mvck */ [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, - /* e.g. plo */ [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, - /* e.g. lmd */ - [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ - [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ - [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, - /* e.g. 
mvcos */ + [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, + [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, + [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, }; static struct insn opcode[] = { @@ -454,6 +493,8 @@ static struct insn opcode[] = { static struct insn opcode_01[] = { #ifdef CONFIG_64BIT { "sam64", 0x0e, INSTR_E }, + { "pfpo", 0x0a, INSTR_E }, + { "ptff", 0x04, INSTR_E }, #endif { "pr", 0x01, INSTR_E }, { "upt", 0x02, INSTR_E }, @@ -519,6 +560,8 @@ static struct insn opcode_b2[] = { { "cutfu", 0xa7, INSTR_RRF_M0RR }, { "stfle", 0xb0, INSTR_S_RD }, { "lpswe", 0xb2, INSTR_S_RD }, + { "srnmt", 0xb9, INSTR_S_RD }, + { "lfas", 0xbd, INSTR_S_RD }, #endif { "stidp", 0x02, INSTR_S_RD }, { "sck", 0x04, INSTR_S_RD }, @@ -589,7 +632,6 @@ static struct insn opcode_b2[] = { { "clst", 0x5d, INSTR_RRE_RR }, { "srst", 0x5e, INSTR_RRE_RR }, { "cmpsc", 0x63, INSTR_RRE_RR }, - { "cmpsc", 0x63, INSTR_RRE_RR }, { "siga", 0x74, INSTR_S_RD }, { "xsch", 0x76, INSTR_S_00 }, { "rp", 0x77, INSTR_S_RD }, @@ -630,6 +672,57 @@ static struct insn opcode_b3[] = { { "cger", 0xc8, INSTR_RRF_U0RF }, { "cgdr", 0xc9, INSTR_RRF_U0RF }, { "cgxr", 0xca, INSTR_RRF_U0RF }, + { "lpdfr", 0x70, INSTR_RRE_FF }, + { "lndfr", 0x71, INSTR_RRE_FF }, + { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, + { "lcdfr", 0x73, INSTR_RRE_FF }, + { "ldgr", 0xc1, INSTR_RRE_FR }, + { "lgdr", 0xcd, INSTR_RRE_RF }, + { "adtr", 0xd2, INSTR_RRR_F0FF }, + { "axtr", 0xda, INSTR_RRR_F0FF }, + { "cdtr", 0xe4, INSTR_RRE_FF }, + { "cxtr", 0xec, INSTR_RRE_FF }, + { "kdtr", 0xe0, INSTR_RRE_FF }, + { "kxtr", 0xe8, INSTR_RRE_FF }, + { "cedtr", 0xf4, INSTR_RRE_FF }, + { "cextr", 0xfc, INSTR_RRE_FF }, + { "cdgtr", 0xf1, INSTR_RRE_FR }, + { "cxgtr", 0xf9, INSTR_RRE_FR }, + { "cdstr", 0xf3, INSTR_RRE_FR }, + { "cxstr", 0xfb, INSTR_RRE_FR }, + { "cdutr", 0xf2, INSTR_RRE_FR }, + { "cxutr", 0xfa, INSTR_RRE_FR }, + { "cgdtr", 0xe1, INSTR_RRF_U0RF }, + { "cgxtr", 0xe9, INSTR_RRF_U0RF }, + { "csdtr", 0xe3, INSTR_RRE_RF }, + { "csxtr", 0xeb, INSTR_RRE_RF }, + { "cudtr", 0xe2, INSTR_RRE_RF }, + { "cuxtr", 0xea, INSTR_RRE_RF }, + { "ddtr", 0xd1, INSTR_RRR_F0FF }, + { "dxtr", 0xd9, INSTR_RRR_F0FF }, + { "eedtr", 0xe5, INSTR_RRE_RF }, + { "eextr", 0xed, INSTR_RRE_RF }, + { "esdtr", 0xe7, INSTR_RRE_RF }, + { "esxtr", 0xef, INSTR_RRE_RF }, + { "iedtr", 0xf6, INSTR_RRF_F0FR }, + { "iextr", 0xfe, INSTR_RRF_F0FR }, + { "ltdtr", 0xd6, INSTR_RRE_FF }, + { "ltxtr", 0xde, INSTR_RRE_FF }, + { "fidtr", 0xd7, INSTR_RRF_UUFF }, + { "fixtr", 0xdf, INSTR_RRF_UUFF }, + { "ldetr", 0xd4, INSTR_RRF_0UFF }, + { "lxdtr", 0xdc, INSTR_RRF_0UFF }, + { "ledtr", 0xd5, INSTR_RRF_UUFF }, + { "ldxtr", 0xdd, INSTR_RRF_UUFF }, + { "mdtr", 0xd0, INSTR_RRR_F0FF }, + { "mxtr", 0xd8, INSTR_RRR_F0FF }, + { "qadtr", 0xf5, INSTR_RRF_FUFF }, + { "qaxtr", 0xfd, INSTR_RRF_FUFF }, + { "rrdtr", 0xf7, INSTR_RRF_FFRU }, + { "rrxtr", 0xff, INSTR_RRF_FFRU }, + { "sfasr", 0x85, INSTR_RRE_R0 }, + { "sdtr", 0xd3, INSTR_RRR_F0FF }, + { "sxtr", 0xdb, INSTR_RRR_F0FF }, #endif { "lpebr", 0x00, INSTR_RRE_FF }, { "lnebr", 0x01, INSTR_RRE_FF }, @@ -780,6 +873,14 @@ static struct insn opcode_b9[] = { { "cu24", 0xb1, INSTR_RRF_M0RR }, { "cu41", 0xb2, INSTR_RRF_M0RR }, { "cu42", 0xb3, INSTR_RRF_M0RR }, + { "crt", 0x72, INSTR_RRF_U0RR }, + { "cgrt", 0x60, INSTR_RRF_U0RR }, + { "clrt", 0x73, INSTR_RRF_U0RR }, + { "clgrt", 0x61, INSTR_RRF_U0RR }, + { "ptf", 0xa2, INSTR_RRE_R0 }, + { "pfmf", 0xaf, INSTR_RRE_RR }, + { "trte", 0xbf, INSTR_RRF_M0RR }, + { "trtre", 0xbd, INSTR_RRF_M0RR }, #endif { "kmac", 0x1e, INSTR_RRE_RR }, { "lrvr", 0x1f, 
INSTR_RRE_RR }, @@ -835,6 +936,43 @@ static struct insn opcode_c2[] = { { "cfi", 0x0d, INSTR_RIL_RI }, { "clgfi", 0x0e, INSTR_RIL_RU }, { "clfi", 0x0f, INSTR_RIL_RU }, + { "msfi", 0x01, INSTR_RIL_RI }, + { "msgfi", 0x00, INSTR_RIL_RI }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c4[] = { +#ifdef CONFIG_64BIT + { "lrl", 0x0d, INSTR_RIL_RP }, + { "lgrl", 0x08, INSTR_RIL_RP }, + { "lgfrl", 0x0c, INSTR_RIL_RP }, + { "lhrl", 0x05, INSTR_RIL_RP }, + { "lghrl", 0x04, INSTR_RIL_RP }, + { "llgfrl", 0x0e, INSTR_RIL_RP }, + { "llhrl", 0x02, INSTR_RIL_RP }, + { "llghrl", 0x06, INSTR_RIL_RP }, + { "strl", 0x0f, INSTR_RIL_RP }, + { "stgrl", 0x0b, INSTR_RIL_RP }, + { "sthrl", 0x07, INSTR_RIL_RP }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c6[] = { +#ifdef CONFIG_64BIT + { "crl", 0x0d, INSTR_RIL_RP }, + { "cgrl", 0x08, INSTR_RIL_RP }, + { "cgfrl", 0x0c, INSTR_RIL_RP }, + { "chrl", 0x05, INSTR_RIL_RP }, + { "cghrl", 0x04, INSTR_RIL_RP }, + { "clrl", 0x0f, INSTR_RIL_RP }, + { "clgrl", 0x0a, INSTR_RIL_RP }, + { "clgfrl", 0x0e, INSTR_RIL_RP }, + { "clhrl", 0x07, INSTR_RIL_RP }, + { "clghrl", 0x06, INSTR_RIL_RP }, + { "pfdrl", 0x02, INSTR_RIL_UP }, + { "exrl", 0x00, INSTR_RIL_RP }, #endif { "", 0, INSTR_INVALID } }; @@ -842,6 +980,8 @@ static struct insn opcode_c2[] = { static struct insn opcode_c8[] = { #ifdef CONFIG_64BIT { "mvcos", 0x00, INSTR_SSF_RRDRD }, + { "ectg", 0x01, INSTR_SSF_RRDRD }, + { "csst", 0x02, INSTR_SSF_RRDRD }, #endif { "", 0, INSTR_INVALID } }; @@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = { { "llgh", 0x91, INSTR_RXY_RRRD }, { "llc", 0x94, INSTR_RXY_RRRD }, { "llh", 0x95, INSTR_RXY_RRRD }, + { "cgh", 0x34, INSTR_RXY_RRRD }, + { "laey", 0x75, INSTR_RXY_RRRD }, + { "ltgf", 0x32, INSTR_RXY_RRRD }, + { "mfy", 0x5c, INSTR_RXY_RRRD }, + { "mhy", 0x7c, INSTR_RXY_RRRD }, + { "pfd", 0x36, INSTR_RXY_URRD }, #endif { "lrv", 0x1e, INSTR_RXY_RRRD }, { "lrvh", 0x1f, INSTR_RXY_RRRD }, @@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = { static struct insn opcode_e5[] = { #ifdef CONFIG_64BIT { "strag", 0x02, INSTR_SSE_RDRD }, + { "chhsi", 0x54, INSTR_SIL_RDI }, + { "chsi", 0x5c, INSTR_SIL_RDI }, + { "cghsi", 0x58, INSTR_SIL_RDI }, + { "clhhsi", 0x55, INSTR_SIL_RDU }, + { "clfhsi", 0x5d, INSTR_SIL_RDU }, + { "clghsi", 0x59, INSTR_SIL_RDU }, + { "mvhhi", 0x44, INSTR_SIL_RDI }, + { "mvhi", 0x4c, INSTR_SIL_RDI }, + { "mvghi", 0x48, INSTR_SIL_RDI }, #endif { "lasp", 0x00, INSTR_SSE_RDRD }, { "tprot", 0x01, INSTR_SSE_RDRD }, @@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = { { "lmy", 0x98, INSTR_RSY_RRRD }, { "lamy", 0x9a, INSTR_RSY_AARD }, { "stamy", 0x9b, INSTR_RSY_AARD }, + { "asi", 0x6a, INSTR_SIY_IRD }, + { "agsi", 0x7a, INSTR_SIY_IRD }, + { "alsi", 0x6e, INSTR_SIY_IRD }, + { "algsi", 0x7e, INSTR_SIY_IRD }, + { "ecag", 0x4c, INSTR_RSY_RRRD }, #endif { "rll", 0x1d, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD }, @@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = { #ifdef CONFIG_64BIT { "brxhg", 0x44, INSTR_RIE_RRP }, { "brxlg", 0x45, INSTR_RIE_RRP }, + { "crb", 0xf6, INSTR_RRS_RRRDU }, + { "cgrb", 0xe4, INSTR_RRS_RRRDU }, + { "crj", 0x76, INSTR_RIE_RRPU }, + { "cgrj", 0x64, INSTR_RIE_RRPU }, + { "cib", 0xfe, INSTR_RIS_RURDI }, + { "cgib", 0xfc, INSTR_RIS_RURDI }, + { "cij", 0x7e, INSTR_RIE_RUPI }, + { "cgij", 0x7c, INSTR_RIE_RUPI }, + { "cit", 0x72, INSTR_RIE_R0IU }, + { "cgit", 0x70, INSTR_RIE_R0IU }, + { "clrb", 0xf7, INSTR_RRS_RRRDU }, + { "clgrb", 0xe5, INSTR_RRS_RRRDU }, + { "clrj", 0x77, INSTR_RIE_RRPU }, + { "clgrj", 
0x65, INSTR_RIE_RRPU }, + { "clib", 0xff, INSTR_RIS_RURDU }, + { "clgib", 0xfd, INSTR_RIS_RURDU }, + { "clij", 0x7f, INSTR_RIE_RUPU }, + { "clgij", 0x7d, INSTR_RIE_RUPU }, + { "clfit", 0x73, INSTR_RIE_R0UU }, + { "clgit", 0x71, INSTR_RIE_R0UU }, + { "rnsbg", 0x54, INSTR_RIE_RRUUU }, + { "rxsbg", 0x57, INSTR_RIE_RRUUU }, + { "rosbg", 0x56, INSTR_RIE_RRUUU }, + { "risbg", 0x55, INSTR_RIE_RRUUU }, #endif { "", 0, INSTR_INVALID } }; @@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = { { "ldy", 0x65, INSTR_RXY_FRRD }, { "stey", 0x66, INSTR_RXY_FRRD }, { "stdy", 0x67, INSTR_RXY_FRRD }, + { "sldt", 0x40, INSTR_RXF_FRRDF }, + { "slxt", 0x48, INSTR_RXF_FRRDF }, + { "srdt", 0x41, INSTR_RXF_FRRDF }, + { "srxt", 0x49, INSTR_RXF_FRRDF }, + { "tdcet", 0x50, INSTR_RXE_FRRD }, + { "tdcdt", 0x54, INSTR_RXE_FRRD }, + { "tdcxt", 0x58, INSTR_RXE_FRRD }, + { "tdget", 0x51, INSTR_RXE_FRRD }, + { "tdgdt", 0x55, INSTR_RXE_FRRD }, + { "tdgxt", 0x59, INSTR_RXE_FRRD }, #endif { "ldeb", 0x04, INSTR_RXE_FRRD }, { "lxdb", 0x05, INSTR_RXE_FRRD }, @@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = { { "mae", 0x2e, INSTR_RXF_FRRDF }, { "mse", 0x2f, INSTR_RXF_FRRDF }, { "sqe", 0x34, INSTR_RXE_FRRD }, + { "sqd", 0x35, INSTR_RXE_FRRD }, { "mee", 0x37, INSTR_RXE_FRRD }, { "mad", 0x3e, INSTR_RXF_FRRDF }, { "msd", 0x3f, INSTR_RXF_FRRDF }, @@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code) case 0xc2: table = opcode_c2; break; + case 0xc4: + table = opcode_c4; + break; + case 0xc6: + table = opcode_c6; + break; case 0xc8: table = opcode_c8; break; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index e49e9e0c69fd..31d618a443af 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -214,10 +214,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); static noinline __init void detect_machine_type(void) { - /* No VM information? Looks like LPAR */ - if (stsi(&vmms, 3, 2, 2) == -ENOSYS) + /* Check current-configuration-level */ + if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) { + S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; return; - if (!vmms.count) + } + /* Get virtual-machine cpu information. */ + if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count) return; /* Running under KVM? 
If not we assume z/VM */ @@ -402,8 +405,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) static void __init setup_boot_command_line(void) { + int i; + + /* convert arch command line to ascii */ + for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++) + if (COMMAND_LINE[i] & 0x80) + break; + if (i < ARCH_COMMAND_LINE_SIZE) + EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); + COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0; + /* copy arch command line */ - strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); + strlcpy(boot_command_line, strstrip(COMMAND_LINE), + ARCH_COMMAND_LINE_SIZE); /* append IPL PARM data to the boot command line */ if (MACHINE_IS_VM) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index e8ef21c51bbe..4348f9bc5393 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -13,7 +13,6 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/cache.h> -#include <asm/lowcore.h> #include <asm/errno.h> #include <asm/ptrace.h> #include <asm/thread_info.h> diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f33658f09dd7..29fd0f1e6ec4 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -9,11 +9,9 @@ * Heiko Carstens <heiko.carstens@de.ibm.com> */ -#include <linux/sys.h> #include <linux/linkage.h> #include <linux/init.h> #include <asm/cache.h> -#include <asm/lowcore.h> #include <asm/errno.h> #include <asm/ptrace.h> #include <asm/thread_info.h> diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 5a82bc68193e..314d8f09cf31 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -13,7 +13,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <trace/syscall.h> -#include <asm/lowcore.h> +#include <asm/asm-offsets.h> #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index c52b4f7742fa..ca4a62bd862f 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 1999,2009 + * Copyright IBM Corp. 1999,2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -22,12 +22,9 @@ */ #include <linux/init.h> -#include <asm/setup.h> -#include <asm/lowcore.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> -#include <asm/cpu.h> #ifdef CONFIG_64BIT #define ARCH_OFFSET 4 @@ -288,19 +285,7 @@ iplstart: bz .Lagain1 # skip dateset trailer la %r5,0(%r4,%r2) lr %r3,%r2 -.Lidebc: - tm 0(%r5),0x80 # high order bit set ? - bo .Ldocv # yes -> convert from EBCDIC - ahi %r5,-1 - bct %r3,.Lidebc - b .Lnocv -.Ldocv: - l %r3,.Lcvtab - tr 0(256,%r4),0(%r3) # convert parameters to ascii - tr 256(256,%r4),0(%r3) - tr 512(256,%r4),0(%r3) - tr 768(122,%r4),0(%r3) -.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line + la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. 
of command line mvc 0(256,%r3),0(%r4) mvc 256(256,%r3),256(%r4) mvc 512(256,%r3),512(%r4) @@ -384,7 +369,6 @@ iplstart: .Linitrd:.long _end + 0x400000 # default address of initrd .Lparm: .long PARMAREA .Lstartup: .long startup -.Lcvtab:.long _ebcasc # ebcdic to ascii table .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" @@ -417,13 +401,10 @@ start: .sk8x8: mvc 0(240,%r8),0(%r9) # copy iplparms into buffer .gotr: - l %r10,.tbl # EBCDIC to ASCII table - tr 0(240,%r8),0(%r10) slr %r0,%r0 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) j startup # continue with startup -.tbl: .long _ebcasc # translate table .cmd: .long COMMAND_LINE # address of command line buffer .parm: .long PARMAREA .lowcase: @@ -467,16 +448,15 @@ start: # or linload or SALIPL # .org 0x10000 -startup:basr %r13,0 # get base + .globl startup +startup: + basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 - l %r1,5f-.LPG0(%r13) - stck 0(%r1) - spt 6f-.LPG0(%r13) - mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1) - mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) - mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) + stck __LC_LAST_UPDATE_CLOCK + spt 5f-.LPG0(%r13) + mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) #ifndef CONFIG_MARCH_G5 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST @@ -494,7 +474,6 @@ startup:basr %r13,0 # get base cl %r0,2f+12-.LPG0(%r13) je 3f 1: l %r15,.Lstack-.LPG0(%r13) - ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE ahi %r15,-96 la %r2,.Lals_string-.LPG0(%r13) l %r3,.Lsclp_print-.LPG0(%r13) @@ -505,7 +484,7 @@ startup:basr %r13,0 # get base .Lsclp_print: .long _sclp_print_early .Lstack: - .long init_thread_union + .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) .align 16 2: .long 0x000a0000,0x8badcccc #if defined(CONFIG_64BIT) @@ -532,13 +511,22 @@ startup:basr %r13,0 # get base 3: #endif +#ifdef CONFIG_64BIT + mvi __LC_AR_MODE_ID,1 # set esame flag + slr %r0,%r0 # set cpuid to zero + lhi %r1,2 # mode 2 = esame (dump) + sigp %r1,%r0,0x12 # switch to esame mode + sam64 # switch to 64 bit mode + jg startup_continue +#else + mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) l %r13,4f-.LPG0(%r13) b 0(%r13) - .align 4 + .align 8 4: .long startup_continue -5: .long sched_clock_base_cc +#endif .align 8 -6: .long 0x7fffffff,0xffffffff +5: .long 0x7fffffff,0xffffffff # # params at 10400 (setup.h) @@ -552,8 +540,4 @@ startup:basr %r13,0 # get base .byte "root=/dev/ram0 ro" .byte 0 -#ifdef CONFIG_64BIT -#include "head64.S" -#else -#include "head31.S" -#endif + .org 0x11000 diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 602b508cd4c4..1bbcc499d455 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -1,7 +1,7 @@ /* * arch/s390/kernel/head31.S * - * Copyright (C) IBM Corp. 2005,2006 + * Copyright (C) IBM Corp. 
2005,2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -10,13 +10,19 @@ * */ - .org 0x11000 +#include <linux/init.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/page.h> +__HEAD + .globl startup_continue startup_continue: basr %r13,0 # get base .LPG1: - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) + l %r1,.Lbase_cc-.LPG1(%r13) + mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore @@ -69,10 +75,12 @@ startup_continue: .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr +.Lbase_cc: + .long sched_clock_base_cc - .org 0x12000 .globl _ehead _ehead: + #ifdef CONFIG_SHARED_KERNEL .org 0x100000 #endif diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index d984a2a380c3..39580e768658 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -1,7 +1,7 @@ /* * arch/s390/kernel/head64.S * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright (C) IBM Corp. 1999,2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -10,80 +10,17 @@ * */ - .org 0x11000 +#include <linux/init.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/page.h> +__HEAD + .globl startup_continue startup_continue: - basr %r13,0 # get base -.LPG1: sll %r13,1 # remove high order bit - srl %r13,1 - -#ifdef CONFIG_ZFCPDUMP - - # check if we have been ipled using zfcp dump: - - tm 0xb9,0x01 # test if subchannel is enabled - jno .nodump # subchannel disabled - l %r1,0xb8 - la %r5,.Lipl_schib-.LPG1(%r13) - stsch 0(%r5) # get schib of subchannel - jne .nodump # schib not available - tm 5(%r5),0x01 # devno valid? - jno .nodump - tm 4(%r5),0x80 # qdio capable device? - jno .nodump - l %r2,20(%r0) # address of ipl parameter block - lhi %r3,0 - ic %r3,0x148(%r2) # get opt field - chi %r3,0x20 # load with dump? - jne .nodump - - # store all prefix registers in case of load with dump: - - la %r7,0 # base register for 0 page - la %r8,0 # first cpu - l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array - ahi %r11,4 # skip boot cpu - lr %r12,%r11 - ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array - stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr -1: - cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? - je 4f # if yes get next cpu -2: - lr %r9,%r7 - sigp %r9,%r8,0x9 # stop & store status of cpu - brc 8,3f # accepted - brc 4,4f # status stored: next cpu - brc 2,2b # busy: try again - brc 1,4f # not op: next cpu -3: - mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array - ahi %r11,4 # next element in prefix array - clr %r11,%r12 - je 5f # no more space in prefix array -4: - ahi %r8,1 # next cpu (r8 += 1) - chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? 
- jle 1b # jump if not last cpu -5: - lhi %r1,2 # mode 2 = esame (dump) - j 6f - .align 4 -.Lipl_schib: - .rept 13 - .long 0 - .endr -.nodump: - lhi %r1,1 # mode 1 = esame (normal ipl) -6: -#else - lhi %r1,1 # mode 1 = esame (normal ipl) -#endif /* CONFIG_ZFCPDUMP */ - mvi __LC_AR_MODE_ID,1 # set esame flag - slr %r0,%r0 # set cpuid to zero - sigp %r1,%r0,0x12 # switch to esame mode - sam64 # switch to 64 bit mode - llgfr %r13,%r13 # clear high-order half of base reg + larl %r1,sched_clock_base_cc + mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK + larl %r13,.LPG1 # get base lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area @@ -108,6 +45,7 @@ startup_continue: lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, # virtual and never return ... .align 16 +.LPG1: .Lentry:.quad 0x0000000180000000,_stext .Lctl: .quad 0x04350002 # cr0: various things .quad 0 # cr1: primary space segment table @@ -130,12 +68,6 @@ startup_continue: .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 .Lzero64:.fill 16,4,0x0 -#ifdef CONFIG_ZFCPDUMP -.Lcurrent_cpu: - .long 0x0 -.Lpref_arr_ptr: - .long zfcpdump_prefix_array -#endif /* CONFIG_ZFCPDUMP */ .Lparmaddr: .quad PARMAREA .align 64 @@ -146,9 +78,9 @@ startup_continue: .long 0x80000000,0,0,0 # invalid access-list entries .endr - .org 0x12000 .globl _ehead _ehead: + #ifdef CONFIG_SHARED_KERNEL .org 0x100000 #endif diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 4d73296fed74..7eedbbcb54aa 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -553,7 +553,7 @@ out: return rc; } -static void ipl_run(struct shutdown_trigger *trigger) +static void __ipl_run(void *unused) { diag308(DIAG308_IPL, NULL); if (MACHINE_IS_VM) @@ -562,6 +562,11 @@ static void ipl_run(struct shutdown_trigger *trigger) reipl_ccw_dev(&ipl_info.data.ccw.dev_id); } +static void ipl_run(struct shutdown_trigger *trigger) +{ + smp_switch_to_ipl_cpu(__ipl_run, NULL); +} + static int __init ipl_init(void) { int rc; @@ -1039,7 +1044,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, sprintf(dst + pos, " PARM %s", vmparm); } -static void reipl_run(struct shutdown_trigger *trigger) +static void __reipl_run(void *unused) { struct ccw_dev_id devid; static char buf[128]; @@ -1087,6 +1092,11 @@ static void reipl_run(struct shutdown_trigger *trigger) disabled_wait((unsigned long) __builtin_return_address(0)); } +static void reipl_run(struct shutdown_trigger *trigger) +{ + smp_switch_to_ipl_cpu(__reipl_run, NULL); +} + static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) { ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; @@ -1369,20 +1379,18 @@ static struct kobj_attribute dump_type_attr = static struct kset *dump_kset; -static void dump_run(struct shutdown_trigger *trigger) +static void __dump_run(void *unused) { struct ccw_dev_id devid; static char buf[100]; switch (dump_method) { case DUMP_METHOD_CCW_CIO: - smp_send_stop(); devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.ssid = 0; reipl_ccw_dev(&devid); break; case DUMP_METHOD_CCW_VM: - smp_send_stop(); sprintf(buf, "STORE STATUS"); __cpcmd(buf, NULL, 0, NULL); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); @@ -1396,10 +1404,17 @@ static void dump_run(struct shutdown_trigger *trigger) diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_DUMP, NULL); break; - case DUMP_METHOD_NONE: - return; + default: + break; } - printk(KERN_EMERG 
"Dump failed!\n"); +} + +static void dump_run(struct shutdown_trigger *trigger) +{ + if (dump_method == DUMP_METHOD_NONE) + return; + smp_send_stop(); + smp_switch_to_ipl_cpu(__dump_run, NULL); } static int __init dump_ccw_init(void) @@ -1577,7 +1592,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger) static int vmcmd_init(void) { if (!MACHINE_IS_VM) - return -ENOTSUPP; + return -EOPNOTSUPP; vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); if (!vmcmd_kset) return -ENOMEM; @@ -1595,7 +1610,7 @@ static void stop_run(struct shutdown_trigger *trigger) { if (strcmp(trigger->name, ON_PANIC_STR) == 0) disabled_wait((unsigned long) __builtin_return_address(0)); - while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) + while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) cpu_relax(); for (;;); } @@ -1902,7 +1917,6 @@ void __init ipl_update_parameters(void) void __init ipl_save_parameters(void) { struct cio_iplinfo iplinfo; - unsigned int *ipl_ptr; void *src, *dst; if (cio_get_iplinfo(&iplinfo)) @@ -1913,11 +1927,10 @@ void __init ipl_save_parameters(void) if (!iplinfo.is_qdio) return; ipl_flags |= IPL_PARMBLOCK_VALID; - ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; - src = (void *)(unsigned long)*ipl_ptr; + src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr; dst = (void *)IPL_PARMBLOCK_ORIGIN; memmove(dst, src, PAGE_SIZE); - *ipl_ptr = IPL_PARMBLOCK_ORIGIN; + S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; } static LIST_HEAD(rcall); diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 131d7ee8b416..a922d51df6bf 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -54,11 +54,11 @@ void machine_shutdown(void) { } -void machine_kexec(struct kimage *image) +static void __machine_kexec(void *data) { relocate_kernel_t data_mover; + struct kimage *image = data; - smp_send_stop(); pfault_fini(); s390_reset_system(); @@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image) (*data_mover)(&image->head, image->start); for (;;); } + +void machine_kexec(struct kimage *image) +{ + smp_send_stop(); + smp_switch_to_ipl_cpu(__machine_kexec, image); +} diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 2f481cc3d1c9..cb899d9f8505 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -6,7 +6,7 @@ * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) */ -#include <asm/lowcore.h> +#include <asm/asm-offsets.h> # # do_reipl_asm diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 774147824c3d..5e73dee63baa 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -4,7 +4,7 @@ * Denis Joseph Barrow, */ -#include <asm/lowcore.h> +#include <asm/asm-offsets.h> # # do_reipl_asm diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index e27ca63076d1..27af3bf3a009 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -9,8 +9,10 @@ */ LC_EXT_NEW_PSW = 0x58 # addr of ext int handler +LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter LC_EXT_INT_CODE = 0x86 # addr of ext int code +LC_AR_MODE_ID = 0xa3 # # Subroutine which waits synchronously until either an external interruption @@ -30,8 +32,16 @@ _sclp_wait_int: .LbaseS1: ahi %r15,-96 # create stack frame la %r8,LC_EXT_NEW_PSW # register int handler - mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) - mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) + la %r9,.LextpswS1-.LbaseS1(%r13) +#ifdef 
CONFIG_64BIT + tm LC_AR_MODE_ID,1 + jno .Lesa1 + la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit + la %r9,.LextpswS1_64-.LbaseS1(%r13) +.Lesa1: +#endif + mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) + mvc 0(16,%r8),0(%r9) lhi %r6,0x0200 # cr mask for ext int (cr0.54) ltr %r2,%r2 jz .LsetctS1 @@ -64,15 +74,19 @@ _sclp_wait_int: .LtimeoutS1: lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting # restore old handler - mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) + mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13) lm %r6,%r15,120(%r15) # restore registers br %r14 # return to caller .align 8 .LoldpswS1: - .long 0, 0 # old ext int PSW + .long 0, 0, 0, 0 # old ext int PSW .LextpswS1: .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int +#ifdef CONFIG_64BIT +.LextpswS1_64: + .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit +#endif .LwaitpswS1: .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int .LtimeS1: @@ -250,6 +264,13 @@ _sclp_print: _sclp_print_early: stm %r6,%r15,24(%r15) # save registers ahi %r15,-96 # create stack frame +#ifdef CONFIG_64BIT + tm LC_AR_MODE_ID,1 + jno .Lesa2 + ahi %r15,-80 + stmh %r6,%r15,96(%r15) # store upper register halves +.Lesa2: +#endif lr %r10,%r2 # save string pointer lhi %r2,0 bras %r14,_sclp_setup # enable console @@ -262,6 +283,13 @@ _sclp_print_early: lhi %r2,1 bras %r14,_sclp_setup # disable console .LendS5: +#ifdef CONFIG_64BIT + tm LC_AR_MODE_ID,1 + jno .Lesa3 + lmh %r6,%r15,96(%r15) # store upper register halves + ahi %r15,80 +.Lesa3: +#endif lm %r6,%r15,120(%r15) # restore registers br %r14 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 8d8957b38ab3..77a63ae419f0 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -396,15 +396,12 @@ static void __init setup_lowcore(void) { struct _lowcore *lc; - int lc_pages; /* * Setup lowcore for boot cpu */ - lc_pages = sizeof(void *) == 8 ? 
2 : 1; - lc = (struct _lowcore *) - __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); - memset(lc, 0, lc_pages * PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); + lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; @@ -804,7 +801,7 @@ setup_arch(char **cmdline_p) if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 31-bit mode\n"); - else + else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 31-bit mode\n"); if (MACHINE_HAS_IEEE) pr_info("The hardware system has IEEE compatible " @@ -818,7 +815,7 @@ setup_arch(char **cmdline_p) "guest operating system in 64-bit mode\n"); else if (MACHINE_IS_KVM) pr_info("Linux is running under KVM in 64-bit mode\n"); - else + else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 76a6fdd46c45..8b10127c00ad 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -36,6 +36,7 @@ #include <linux/cpu.h> #include <linux/timex.h> #include <linux/bootmem.h> +#include <asm/asm-offsets.h> #include <asm/ipl.h> #include <asm/setup.h> #include <asm/sigp.h> @@ -53,7 +54,7 @@ #include "entry.h" /* logical cpu to cpu address */ -int __cpu_logical_map[NR_CPUS]; +unsigned short __cpu_logical_map[NR_CPUS]; static struct task_struct *current_set[NR_CPUS]; @@ -72,13 +73,13 @@ static int cpu_management; static DEFINE_PER_CPU(struct cpu, cpu_devices); -static void smp_ext_bitcall(int, ec_bit_sig); +static void smp_ext_bitcall(int, int); -static int cpu_stopped(int cpu) +static int raw_cpu_stopped(int cpu) { - __u32 status; + u32 status; - switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { + switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { case sigp_status_stored: /* Check for stopped and check stop state */ if (status & 0x50) @@ -90,6 +91,44 @@ static int cpu_stopped(int cpu) return 0; } +static inline int cpu_stopped(int cpu) +{ + return raw_cpu_stopped(cpu_logical_map(cpu)); +} + +void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) +{ + struct _lowcore *lc, *current_lc; + struct stack_frame *sf; + struct pt_regs *regs; + unsigned long sp; + + if (smp_processor_id() == 0) + func(data); + __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); + /* Disable lowcore protection */ + __ctl_clear_bit(0, 28); + current_lc = lowcore_ptr[smp_processor_id()]; + lc = lowcore_ptr[0]; + if (!lc) + lc = current_lc; + lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; + if (!cpu_online(0)) + smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); + while (sigp(0, sigp_stop_and_store_status) == sigp_busy) + cpu_relax(); + sp = lc->panic_stack; + sp -= sizeof(struct pt_regs); + regs = (struct pt_regs *) sp; + memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); + regs->psw = lc->psw_save_area; + sp -= STACK_FRAME_OVERHEAD; + sf = (struct stack_frame *) sp; + sf->back_chain = regs->gprs[15]; + smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); +} + void smp_send_stop(void) { int cpu, rc; @@ -103,7 +142,7 @@ void smp_send_stop(void) if (cpu == smp_processor_id()) continue; do { - rc = signal_processor(cpu, sigp_stop); + rc = sigp(cpu, sigp_stop); } while (rc == sigp_busy); while (!cpu_stopped(cpu)) @@ -139,13 +178,13 
@@ static void do_ext_call_interrupt(__u16 code) * Send an external call sigp to another cpu and return without waiting * for its completion. */ -static void smp_ext_bitcall(int cpu, ec_bit_sig sig) +static void smp_ext_bitcall(int cpu, int sig) { /* * Set signaling bit in lowcore of target cpu and kick it */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (sigp(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } @@ -239,24 +278,8 @@ void smp_ctl_clear_bit(int cr, int bit) } EXPORT_SYMBOL(smp_ctl_clear_bit); -/* - * In early ipl state a temp. logically cpu number is needed, so the sigp - * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on - * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. - */ -#define CPU_INIT_NO 1 - #ifdef CONFIG_ZFCPDUMP -/* - * zfcpdump_prefix_array holds prefix registers for the following scenario: - * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to - * save its prefix registers, since they get lost, when switching from 31 bit - * to 64 bit. - */ -unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ - __attribute__((__section__(".data"))); - static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { if (ipl_info.type != IPL_TYPE_FCP_DUMP) @@ -266,21 +289,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) "the dump\n", cpu, NR_CPUS - 1); return; } - zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); - __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; - while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == - sigp_busy) + zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); + while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) cpu_relax(); memcpy(zfcpdump_save_areas[cpu], (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, - SAVE_AREA_SIZE); -#ifdef CONFIG_64BIT - /* copy original prefix register */ - zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; -#endif + sizeof(struct save_area)); } -union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; EXPORT_SYMBOL_GPL(zfcpdump_save_areas); #else @@ -389,8 +406,7 @@ static void __init smp_detect_cpus(void) for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { if (cpu == boot_cpu_addr) continue; - __cpu_logical_map[CPU_INIT_NO] = cpu; - if (!cpu_stopped(CPU_INIT_NO)) + if (!raw_cpu_stopped(cpu)) continue; smp_get_save_area(c_cpus, cpu); c_cpus++; @@ -413,8 +429,7 @@ static void __init smp_detect_cpus(void) cpu_addr = info->cpu[cpu].address; if (cpu_addr == boot_cpu_addr) continue; - __cpu_logical_map[CPU_INIT_NO] = cpu_addr; - if (!cpu_stopped(CPU_INIT_NO)) { + if (!raw_cpu_stopped(cpu_addr)) { s_cpus++; continue; } @@ -533,18 +548,18 @@ static void smp_free_lowcore(int cpu) /* Upping and downing of CPUs */ int __cpuinit __cpu_up(unsigned int cpu) { - struct task_struct *idle; struct _lowcore *cpu_lowcore; + struct task_struct *idle; struct stack_frame *sf; - sigp_ccode ccode; u32 lowcore; + int ccode; if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) return -EIO; if (smp_alloc_lowcore(cpu)) return -ENOMEM; do { - ccode = signal_processor(cpu, sigp_initial_cpu_reset); + ccode = sigp(cpu, sigp_initial_cpu_reset); if (ccode == sigp_busy) udelay(10); if (ccode == sigp_not_operational) @@ -552,7 +567,7 @@ int __cpuinit __cpu_up(unsigned int cpu) } while (ccode == sigp_busy); lowcore = 
(u32)(unsigned long)lowcore_ptr[cpu]; - while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) + while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) udelay(10); idle = current_set[cpu]; @@ -578,7 +593,7 @@ int __cpuinit __cpu_up(unsigned int cpu) cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; eieio(); - while (signal_processor(cpu, sigp_restart) == sigp_busy) + while (sigp(cpu, sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -640,7 +655,7 @@ void __cpu_die(unsigned int cpu) /* Wait until target cpu is down */ while (!cpu_stopped(cpu)) cpu_relax(); - while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) + while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) udelay(10); smp_free_lowcore(cpu); pr_info("Processor %d stopped\n", cpu); @@ -649,7 +664,7 @@ void __cpu_die(unsigned int cpu) void cpu_die(void) { idle_task_exit(); - while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) + while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) cpu_relax(); for (;;); } @@ -765,7 +780,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev, get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); rc = -EBUSY; - if (cpu_online(cpu)) + /* disallow configuration changes of online cpus and cpu 0 */ + if (cpu_online(cpu) || cpu == 0) goto out; rc = 0; switch (val) { diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S new file mode 100644 index 000000000000..469f11b574fa --- /dev/null +++ b/arch/s390/kernel/switch_cpu.S @@ -0,0 +1,58 @@ +/* + * 31-bit switch cpu code + * + * Copyright IBM Corp. 2009 + * + */ + +#include <asm/asm-offsets.h> +#include <asm/ptrace.h> + +# smp_switch_to_cpu switches to destination cpu and executes the passed function +# Parameter: %r2 - function to call +# %r3 - function parameter +# %r4 - stack poiner +# %r5 - current cpu +# %r6 - destination cpu + + .section .text + .align 4 + .globl smp_switch_to_cpu +smp_switch_to_cpu: + stm %r6,%r15,__SF_GPRS(%r15) + lr %r1,%r15 + ahi %r15,-STACK_FRAME_OVERHEAD + st %r1,__SF_BACKCHAIN(%r15) + basr %r13,0 +0: la %r1,.gprregs_addr-0b(%r13) + l %r1,0(%r1) + stm %r0,%r15,0(%r1) +1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ + brc 2,1b /* busy, try again */ +2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ + brc 2,2b /* busy, try again */ +3: j 3b + + .globl smp_restart_cpu +smp_restart_cpu: + basr %r13,0 +0: la %r1,.gprregs_addr-0b(%r13) + l %r1,0(%r1) + lm %r0,%r15,0(%r1) +1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ + brc 10,1b /* busy, accepted (status 0), running */ + tmll %r0,0x40 /* Test if calling CPU is stopped */ + jz 1b + ltr %r4,%r4 /* New stack ? */ + jz 1f + lr %r15,%r4 +1: basr %r14,%r2 + +.gprregs_addr: + .long .gprregs + + .section .data,"aw",@progbits +.gprregs: + .rept 16 + .long 0 + .endr diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S new file mode 100644 index 000000000000..d94aacc898cb --- /dev/null +++ b/arch/s390/kernel/switch_cpu64.S @@ -0,0 +1,51 @@ +/* + * 64-bit switch cpu code + * + * Copyright IBM Corp. 
2009 + * + */ + +#include <asm/asm-offsets.h> +#include <asm/ptrace.h> + +# smp_switch_to_cpu switches to destination cpu and executes the passed function +# Parameter: %r2 - function to call +# %r3 - function parameter +# %r4 - stack poiner +# %r5 - current cpu +# %r6 - destination cpu + + .section .text + .align 4 + .globl smp_switch_to_cpu +smp_switch_to_cpu: + stmg %r6,%r15,__SF_GPRS(%r15) + lgr %r1,%r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r1,__SF_BACKCHAIN(%r15) + larl %r1,.gprregs + stmg %r0,%r15,0(%r1) +1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ + brc 2,1b /* busy, try again */ +2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ + brc 2,2b /* busy, try again */ +3: j 3b + + .globl smp_restart_cpu +smp_restart_cpu: + larl %r1,.gprregs + lmg %r0,%r15,0(%r1) +1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ + brc 10,1b /* busy, accepted (status 0), running */ + tmll %r0,0x40 /* Test if calling CPU is stopped */ + jz 1b + ltgr %r4,%r4 /* New stack ? */ + jz 1f + lgr %r15,%r4 +1: basr %r14,%r2 + + .section .data,"aw",@progbits +.gprregs: + .rept 16 + .quad 0 + .endr diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 0c26cc1898ec..b354427e03b7 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -176,7 +176,7 @@ pgm_check_entry: cgr %r1,%r2 je restore_registers /* r1 = r2 -> nothing to do */ larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ - mvc __LC_RESTART_PSW(16,%r0),0(%r4) + mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 3: sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET brc 8,4f /* accepted */ diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 65065ac48ed3..a8f93f1705ad 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -51,14 +51,6 @@ #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) -/* - * Create a small time difference between the timer interrupts - * on the different cpus to avoid lock contention. - */ -#define CPU_DEVIATION (smp_processor_id() << 12) - -#define TICK_SIZE tick - u64 sched_clock_base_cc = -1; /* Force to data section. 
*/ EXPORT_SYMBOL_GPL(sched_clock_base_cc); diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 5f99e66c51c3..6bc9c197aa91 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -23,6 +23,7 @@ #include <linux/security.h> #include <linux/bootmem.h> #include <linux/compat.h> +#include <asm/asm-offsets.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/processor.h> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 8300309698fa..9e4c84187cf5 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -39,7 +39,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) vcpu->run->s390_reset_flags = 0; break; default: - return -ENOTSUPP; + return -EOPNOTSUPP; } atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -62,6 +62,6 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) case 0x308: return __diag_ipl_functions(vcpu); default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index b40096494e46..3ddc30895e31 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -32,7 +32,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) vcpu->stat.instruction_lctlg++; if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) - return -ENOTSUPP; + return -EOPNOTSUPP; useraddr = disp2; if (base2) @@ -138,7 +138,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) rc = __kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_NOADDR); if (rc >= 0) - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; } if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { @@ -150,7 +150,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; } spin_unlock_bh(&vcpu->arch.local_int.lock); @@ -171,9 +171,9 @@ static int handle_validity(struct kvm_vcpu *vcpu) 2*PAGE_SIZE); if (rc) /* user will receive sigsegv, exit to user */ - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; } else - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; if (rc) VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", @@ -189,7 +189,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu) handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; if (handler) return handler(vcpu); - return -ENOTSUPP; + return -EOPNOTSUPP; } static int handle_prog(struct kvm_vcpu *vcpu) @@ -206,7 +206,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) rc = handle_instruction(vcpu); rc2 = handle_prog(vcpu); - if (rc == -ENOTSUPP) + if (rc == -EOPNOTSUPP) vcpu->arch.sie_block->icptcode = 0x04; if (rc) return rc; @@ -231,9 +231,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) u8 code = vcpu->arch.sie_block->icptcode; if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) - return -ENOTSUPP; + return -EOPNOTSUPP; func = intercept_funcs[code >> 2]; if (func) return func(vcpu); - return -ENOTSUPP; + return -EOPNOTSUPP; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 43486c2408e1..834774d8d5f3 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -10,12 +10,12 @@ * Author(s): Carsten Otte <cotte@de.ibm.com> */ -#include <asm/lowcore.h> -#include <asm/uaccess.h> -#include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/kvm_host.h> +#include <linux/hrtimer.h> #include <linux/signal.h> +#include <asm/asm-offsets.h> +#include <asm/uaccess.h> #include "kvm-s390.h" 
#include "gaccess.h" @@ -187,8 +187,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, if (rc == -EFAULT) exception = 1; - rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM, - inti->ext.ext_params2); + rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2, + inti->ext.ext_params2); if (rc == -EFAULT) exception = 1; break; @@ -342,7 +342,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) if (psw_interrupts_disabled(vcpu)) { VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); __unset_cpu_idle(vcpu); - return -ENOTSUPP; /* disabled wait */ + return -EOPNOTSUPP; /* disabled wait */ } if (psw_extint_disabled(vcpu) || diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f8bcaefd7d34..3fa0a10e4668 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/timer.h> +#include <asm/asm-offsets.h> #include <asm/lowcore.h> #include <asm/pgtable.h> #include <asm/nmi.h> @@ -543,7 +544,7 @@ rerun_vcpu: rc = -EINTR; } - if (rc == -ENOTSUPP) { + if (rc == -EOPNOTSUPP) { /* intercept cannot be handled in-kernel, prepare kvm-run */ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; @@ -603,45 +604,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) } else prefix = 0; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), vcpu->arch.guest_gprs, 128, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), &vcpu->arch.sie_block->gpsw, 16, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), &vcpu->arch.sie_block->prefix, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, - addr + offsetof(struct save_area_s390x, fp_ctrl_reg), + addr + offsetof(struct save_area, fp_ctrl_reg), &vcpu->arch.guest_fpregs.fpc, 4, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), &vcpu->arch.sie_block->todpr, 4, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), &vcpu->arch.sie_block->cputm, 8, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), &vcpu->arch.sie_block->ckc, 8, prefix)) return -EFAULT; - if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs), + if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), &vcpu->arch.guest_acrs, 64, prefix)) return -EFAULT; if (__guestcopy(vcpu, - addr + offsetof(struct save_area_s390x, ctrl_regs), + addr + offsetof(struct save_area, ctrl_regs), &vcpu->arch.sie_block->gcr, 128, prefix)) return -EFAULT; return 0; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d426aac8095d..28c55677eb39 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -323,5 +323,5 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) else return 
handler(vcpu); } - return -ENOTSUPP; + return -EOPNOTSUPP; } diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 15ee1111de58..241a48459b66 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -172,7 +172,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) rc = 0; /* order accepted */ break; default: - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; } return rc; } @@ -293,7 +293,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) vcpu->stat.instruction_sigp_restart++; /* user space must know about restart */ default: - return -ENOTSUPP; + return -EOPNOTSUPP; } if (rc < 0) diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 97975ec7a274..cd54a1c352af 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -2,7 +2,7 @@ # Makefile for s390-specific library files.. # -lib-y += delay.o string.o uaccess_std.o uaccess_pt.o +lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o lib-$(CONFIG_64BIT) += uaccess_mvcos.o lib-$(CONFIG_SMP) += spinlock.o diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index cff327f109a8..91754ffb9203 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); + unsigned int owner; while (1) { - if (count-- <= 0) { - unsigned int owner = lp->owner_cpu; - if (owner != 0) - _raw_yield_cpu(~owner); - count = spin_retry; + owner = lp->owner_cpu; + if (!owner || smp_vcpu_scheduled(~owner)) { + for (count = spin_retry; count > 0; count--) { + if (arch_spin_is_locked(lp)) + continue; + if (_raw_compare_and_swap(&lp->owner_cpu, 0, + cpu) == 0) + return; + } + if (MACHINE_IS_LPAR) + continue; } - if (arch_spin_is_locked(lp)) - continue; + owner = lp->owner_cpu; + if (owner) + _raw_yield_cpu(~owner); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } @@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); + unsigned int owner; local_irq_restore(flags); while (1) { - if (count-- <= 0) { - unsigned int owner = lp->owner_cpu; - if (owner != 0) - _raw_yield_cpu(~owner); - count = spin_retry; + owner = lp->owner_cpu; + if (!owner || smp_vcpu_scheduled(~owner)) { + for (count = spin_retry; count > 0; count--) { + if (arch_spin_is_locked(lp)) + continue; + local_irq_disable(); + if (_raw_compare_and_swap(&lp->owner_cpu, 0, + cpu) == 0) + return; + local_irq_restore(flags); + } + if (MACHINE_IS_LPAR) + continue; } - if (arch_spin_is_locked(lp)) - continue; + owner = lp->owner_cpu; + if (owner) + _raw_yield_cpu(~owner); local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; @@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry); void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; - if (cpu != 0) - _raw_yield_cpu(~cpu); + if (cpu != 0) { + if (MACHINE_IS_VM || MACHINE_IS_KVM || + !smp_vcpu_scheduled(~cpu)) + _raw_yield_cpu(~cpu); + } } EXPORT_SYMBOL(arch_spin_relax); diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c new file mode 100644 index 000000000000..14b363fec8a2 --- /dev/null +++ b/arch/s390/lib/usercopy.c @@ -0,0 +1,8 @@ +#include <linux/module.h> +#include <linux/bug.h> + +void copy_from_user_overflow(void) +{ + WARN(1, "Buffer overflow detected!\n"); +} +EXPORT_SYMBOL(copy_from_user_overflow); diff --git 
a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 5c8457129603..6409fd57eb04 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -309,7 +309,7 @@ query_segment_type (struct dcss_segment *seg) } #endif if (qout->segcnt > 6) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out_free; } @@ -324,11 +324,11 @@ query_segment_type (struct dcss_segment *seg) for (i=0; i<qout->segcnt; i++) { if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out_free; } if (start != qout->range[i].start >> PAGE_SHIFT) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out_free; } start = (qout->range[i].end >> PAGE_SHIFT) + 1; @@ -357,7 +357,7 @@ query_segment_type (struct dcss_segment *seg) * -ENOSYS : we are not running on VM * -EIO : could not perform query diagnose * -ENOENT : no such segment - * -ENOTSUPP: multi-part segment cannot be used with linux + * -EOPNOTSUPP: multi-part segment cannot be used with linux * -ENOMEM : out of memory * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h */ @@ -515,7 +515,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long * -ENOSYS : we are not running on VM * -EIO : could not perform query or load diagnose * -ENOENT : no such segment - * -ENOTSUPP: multi-part segment cannot be used with linux + * -EOPNOTSUPP: multi-part segment cannot be used with linux * -ENOSPC : segment cannot be used (overlaps with storage) * -EBUSY : segment can temporarily not be used (overlaps with dcss) * -ERANGE : segment cannot be used (exceeds kernel mapping range) @@ -742,7 +742,7 @@ void segment_warning(int rc, char *seg_name) pr_err("Loading or querying DCSS %s resulted in a " "hardware error\n", seg_name); break; - case -ENOTSUPP: + case -EOPNOTSUPP: pr_err("DCSS %s has multiple page ranges and cannot be " "loaded or queried\n", seg_name); break; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fc102e70d9c2..3040d7c78fe0 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -30,6 +30,7 @@ #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> +#include <asm/asm-offsets.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/s390_ext.h> @@ -59,15 +60,13 @@ static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; -#ifdef CONFIG_KPROBES /* kprobe_running() needs smp_processor_id() */ - if (!user_mode(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; preempt_enable(); } -#endif return ret; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 765647952221..d5865e4024ce 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -143,33 +143,34 @@ void kernel_map_pages(struct page *page, int numpages, int enable) } #endif -void free_initmem(void) +void free_init_pages(char *what, unsigned long begin, unsigned long end) { - unsigned long addr; + unsigned long addr = begin; - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + if (begin >= end) + return; + for (; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); + memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, + PAGE_SIZE); free_page(addr); totalram_pages++; - } - printk ("Freeing unused kernel memory: %ldk freed\n", - ((unsigned long)&__init_end - 
(unsigned long)&__init_begin) >> 10); + } + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); +} + +void free_initmem(void) +{ + free_init_pages("unused kernel memory", + (unsigned long)&__init_begin, + (unsigned long)&__init_end); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start < end) - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_init_pages("initrd memory", start, end); } #endif diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 5905936c7c60..9ab1ae40565f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -20,6 +20,7 @@ #include <linux/buffer_head.h> #include <linux/hdreg.h> #include <linux/async.h> +#include <linux/mutex.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> @@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void) INIT_WORK(&device->restore_device, do_restore_device); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; + mutex_init(&device->state_mutex); return device; } @@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) device->state = DASD_STATE_READY; return rc; } - dasd_destroy_partitions(block); dasd_flush_request_queue(block); + dasd_destroy_partitions(block); block->blocks = 0; block->bp_block = 0; block->s2b_shift = 0; @@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device) if (rc) device->target = device->state; - if (device->state == device->target) { + if (device->state == device->target) wake_up(&dasd_init_waitq); - dasd_put_device(device); - } /* let user-space know that the device status changed */ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); @@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device) static void do_kick_device(struct work_struct *work) { struct dasd_device *device = container_of(work, struct dasd_device, kick_work); + mutex_lock(&device->state_mutex); dasd_change_state(device); + mutex_unlock(&device->state_mutex); dasd_schedule_device_bh(device); dasd_put_device(device); } @@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device) void dasd_set_target_state(struct dasd_device *device, int target) { dasd_get_device(device); + mutex_lock(&device->state_mutex); /* If we are in probeonly mode stop at DASD_STATE_READY. 
*/ if (dasd_probeonly && target > DASD_STATE_READY) target = DASD_STATE_READY; if (device->target != target) { - if (device->state == target) { + if (device->state == target) wake_up(&dasd_init_waitq); - dasd_put_device(device); - } device->target = target; } if (device->state != device->target) dasd_change_state(device); + mutex_unlock(&device->state_mutex); + dasd_put_device(device); } /* @@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, return; } - device = (struct dasd_device *) cqr->startdev; - if (device == NULL || - device != dasd_device_from_cdev_locked(cdev) || - strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { + device = dasd_device_from_cdev_locked(cdev); + if (IS_ERR(device)) { + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", + "unable to get device from cdev"); + return; + } + + if (!cqr->startdev || + device != cqr->startdev || + strncmp(cqr->startdev->discipline->ebcname, + (char *) &cqr->magic, 4)) { DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", "invalid device in request"); + dasd_put_device(device); return; } @@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) cqr, rc); } else { cqr->stopclk = get_clock(); - rc = 1; } break; default: /* already finished or clear pending - do nothing */ @@ -2170,9 +2180,13 @@ static void dasd_flush_request_queue(struct dasd_block *block) static int dasd_open(struct block_device *bdev, fmode_t mode) { struct dasd_block *block = bdev->bd_disk->private_data; - struct dasd_device *base = block->base; + struct dasd_device *base; int rc; + if (!block) + return -ENODEV; + + base = block->base; atomic_inc(&block->open_count); if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { rc = -ENODEV; @@ -2285,11 +2299,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) if (ret) pr_warning("%s: Setting the DASD online failed with rc=%d\n", dev_name(&cdev->dev), ret); - else { - struct dasd_device *device = dasd_device_from_cdev(cdev); - wait_event(dasd_init_waitq, _wait_for_device(device)); - dasd_put_device(device); - } } /* @@ -2424,6 +2433,9 @@ int dasd_generic_set_online(struct ccw_device *cdev, } else pr_debug("dasd_generic device %s found\n", dev_name(&cdev->dev)); + + wait_event(dasd_init_waitq, _wait_for_device(device)); + dasd_put_device(device); return rc; } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 4cac5b54f26a..d49766f3b940 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, ssize_t len; device = dasd_device_from_cdev(to_ccwdev(dev)); - if (!IS_ERR(device) && device->discipline) { + if (IS_ERR(device)) + goto out; + else if (!device->discipline) { + dasd_put_device(device); + goto out; + } else { len = snprintf(buf, PAGE_SIZE, "%s\n", device->discipline->name); dasd_put_device(device); - } else - len = snprintf(buf, PAGE_SIZE, "none\n"); + return len; + } +out: + len = snprintf(buf, PAGE_SIZE, "none\n"); return len; } diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d3198303b93c..94f92a1247f2 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block) if (block->gdp) { del_gendisk(block->gdp); block->gdp->queue = NULL; + block->gdp->private_data = NULL; put_disk(block->gdp); block->gdp = NULL; } diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 
e4c2143dabf6..ed73ce550822 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -368,6 +368,7 @@ struct dasd_device { /* Device state and target state. */ int state, target; + struct mutex state_mutex; int stopped; /* device (ccw_device_start) was stopped */ /* reference count. */ diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 71f95f54866f..f13a0bdd148c 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = { .release = seq_release, }; -static int -dasd_calc_metrics(char *page, char **start, off_t off, - int count, int *eof, int len) -{ - len = (len > off) ? len - off : 0; - if (len > count) - len = count; - if (len < count) - *eof = 1; - *start = page + off; - return len; -} - #ifdef CONFIG_DASD_PROFILE -static char * -dasd_statistics_array(char *str, unsigned int *array, int factor) +static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor) { int i; for (i = 0; i < 32; i++) { - str += sprintf(str, "%7d ", array[i] / factor); + seq_printf(m, "%7d ", array[i] / factor); if (i == 15) - str += sprintf(str, "\n"); + seq_putc(m, '\n'); } - str += sprintf(str,"\n"); - return str; + seq_putc(m, '\n'); } #endif /* CONFIG_DASD_PROFILE */ -static int -dasd_statistics_read(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int dasd_stats_proc_show(struct seq_file *m, void *v) { - unsigned long len; #ifdef CONFIG_DASD_PROFILE struct dasd_profile_info_t *prof; - char *str; int factor; /* check for active profiling */ if (dasd_profile_level == DASD_PROFILE_OFF) { - len = sprintf(page, "Statistics are off - they might be " + seq_printf(m, "Statistics are off - they might be " "switched on using 'echo set on > " "/proc/dasd/statistics'\n"); - return dasd_calc_metrics(page, start, off, count, eof, len); + return 0; } prof = &dasd_global_profile; @@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off, for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; factor *= 10); - str = page; - str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); - str += sprintf(str, "with %u sectors(512B each)\n", + seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs); + seq_printf(m, "with %u sectors(512B each)\n", prof->dasd_io_sects); - str += sprintf(str, "Scale Factor is %d\n", factor); - str += sprintf(str, + seq_printf(m, "Scale Factor is %d\n", factor); + seq_printf(m, " __<4 ___8 __16 __32 __64 _128 " " _256 _512 __1k __2k __4k __8k " " _16k _32k _64k 128k\n"); - str += sprintf(str, + seq_printf(m, " _256 _512 __1M __2M __4M __8M " " _16M _32M _64M 128M 256M 512M " " __1G __2G __4G " " _>4G\n"); - str += sprintf(str, "Histogram of sizes (512B secs)\n"); - str = dasd_statistics_array(str, prof->dasd_io_secs, factor); - str += sprintf(str, "Histogram of I/O times (microseconds)\n"); - str = dasd_statistics_array(str, prof->dasd_io_times, factor); - str += sprintf(str, "Histogram of I/O times per sector\n"); - str = dasd_statistics_array(str, prof->dasd_io_timps, factor); - str += sprintf(str, "Histogram of I/O time till ssch\n"); - str = dasd_statistics_array(str, prof->dasd_io_time1, factor); - str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); - str = dasd_statistics_array(str, prof->dasd_io_time2, factor); - str += sprintf(str, "Histogram of I/O time between ssch " + seq_printf(m, "Histogram of sizes (512B secs)\n"); + 
dasd_statistics_array(m, prof->dasd_io_secs, factor); + seq_printf(m, "Histogram of I/O times (microseconds)\n"); + dasd_statistics_array(m, prof->dasd_io_times, factor); + seq_printf(m, "Histogram of I/O times per sector\n"); + dasd_statistics_array(m, prof->dasd_io_timps, factor); + seq_printf(m, "Histogram of I/O time till ssch\n"); + dasd_statistics_array(m, prof->dasd_io_time1, factor); + seq_printf(m, "Histogram of I/O time between ssch and irq\n"); + dasd_statistics_array(m, prof->dasd_io_time2, factor); + seq_printf(m, "Histogram of I/O time between ssch " "and irq per sector\n"); - str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); - str += sprintf(str, "Histogram of I/O time between irq and end\n"); - str = dasd_statistics_array(str, prof->dasd_io_time3, factor); - str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); - str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); - len = str - page; + dasd_statistics_array(m, prof->dasd_io_time2ps, factor); + seq_printf(m, "Histogram of I/O time between irq and end\n"); + dasd_statistics_array(m, prof->dasd_io_time3, factor); + seq_printf(m, "# of req in chanq at enqueuing (1..32) \n"); + dasd_statistics_array(m, prof->dasd_io_nr_req, factor); #else - len = sprintf(page, "Statistics are not activated in this kernel\n"); + seq_printf(m, "Statistics are not activated in this kernel\n"); #endif - return dasd_calc_metrics(page, start, off, count, eof, len); + return 0; } -static int -dasd_statistics_write(struct file *file, const char __user *user_buf, - unsigned long user_len, void *data) +static int dasd_stats_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, dasd_stats_proc_show, NULL); +} + +static ssize_t dasd_stats_proc_write(struct file *file, + const char __user *user_buf, size_t user_len, loff_t *pos) { #ifdef CONFIG_DASD_PROFILE char *buffer, *str; @@ -308,6 +291,15 @@ out_error: #endif /* CONFIG_DASD_PROFILE */ } +static const struct file_operations dasd_stats_proc_fops = { + .owner = THIS_MODULE, + .open = dasd_stats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dasd_stats_proc_write, +}; + /* * Create dasd proc-fs entries. * In case creation failed, cleanup and return -ENOENT. 
@@ -324,13 +316,12 @@ dasd_proc_init(void) &dasd_devices_file_ops); if (!dasd_devices_entry) goto out_nodevices; - dasd_statistics_entry = create_proc_entry("statistics", - S_IFREG | S_IRUGO | S_IWUSR, - dasd_proc_root_entry); + dasd_statistics_entry = proc_create("statistics", + S_IFREG | S_IRUGO | S_IWUSR, + dasd_proc_root_entry, + &dasd_stats_proc_fops); if (!dasd_statistics_entry) goto out_nostatistics; - dasd_statistics_entry->read_proc = dasd_statistics_read; - dasd_statistics_entry->write_proc = dasd_statistics_write; return 0; out_nostatistics: diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 82daa3c1dc9c..3438658b66b7 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> +#include <asm/asm-offsets.h> #include <asm/ipl.h> #include <asm/sclp.h> #include <asm/setup.h> @@ -40,12 +41,12 @@ enum arch_id { /* dump system info */ struct sys_info { - enum arch_id arch; - unsigned long sa_base; - u32 sa_size; - int cpu_map[NR_CPUS]; - unsigned long mem_size; - union save_area lc_mask; + enum arch_id arch; + unsigned long sa_base; + u32 sa_size; + int cpu_map[NR_CPUS]; + unsigned long mem_size; + struct save_area lc_mask; }; struct ipib_info { @@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) return 0; } -#ifdef __s390x__ -/* - * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info - */ -static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, - int cpu) -{ - int i; - - for (i = 0; i < 16; i++) { - out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; - out->s390.acc_regs[i] = in->s390x.acc_regs[i]; - out->s390.ctrl_regs[i] = - in->s390x.ctrl_regs[i] & 0x00000000ffffffff; - } - /* locore for 31 bit has only space for fpregs 0,2,4,6 */ - out->s390.fp_regs[0] = in->s390x.fp_regs[0]; - out->s390.fp_regs[1] = in->s390x.fp_regs[2]; - out->s390.fp_regs[2] = in->s390x.fp_regs[4]; - out->s390.fp_regs[3] = in->s390x.fp_regs[6]; - memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); - out->s390.psw[1] |= 0x8; /* set bit 12 */ - memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); - out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ - out->s390.pref_reg = in->s390x.pref_reg; - out->s390.timer = in->s390x.timer; - out->s390.clk_cmp = in->s390x.clk_cmp; -} - -static void __init s390x_to_s390_save_areas(void) -{ - int i = 1; - static union save_area tmp; - - while (zfcpdump_save_areas[i]) { - s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); - memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); - i++; - } -} - -#endif /* __s390x__ */ - static int __init init_cpu_info(enum arch_id arch) { - union save_area *sa; + struct save_area *sa; /* get info for boot cpu from lowcore, stored in the HSA */ @@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch) return -EIO; } zfcpdump_save_areas[0] = sa; - -#ifdef __s390x__ - /* convert s390x regs to s390, if we are dumping an s390 Linux */ - - if (arch == ARCH_S390) - s390x_to_s390_save_areas(); -#endif - return 0; } static DEFINE_MUTEX(zcore_mutex); -#define DUMP_VERSION 0x3 +#define DUMP_VERSION 0x5 #define DUMP_MAGIC 0xa8190173618f23fdULL #define DUMP_ARCH_S390X 2 #define DUMP_ARCH_S390 1 @@ -279,7 +229,14 @@ struct zcore_header { u32 volnr; u32 build_arch; u64 rmem_size; - char pad2[4016]; + u8 mvdump; + u16 cpu_cnt; + u16 real_cpu_cnt; + u8 end_pad1[0x200-0x061]; + u64 mvdump_sign; + u64 mvdump_zipl_time; + 
u8 end_pad2[0x800-0x210]; + u32 lc_vec[512]; } __attribute__((packed,__aligned__(16))); static struct zcore_header zcore_header = { @@ -289,7 +246,7 @@ static struct zcore_header zcore_header = { .dump_level = 0, .page_size = PAGE_SIZE, .mem_start = 0, -#ifdef __s390x__ +#ifdef CONFIG_64BIT .build_arch = DUMP_ARCH_S390X, #else .build_arch = DUMP_ARCH_S390, @@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) unsigned long prefix; unsigned long sa_off, len, buf_off; - if (sys_info.arch == ARCH_S390) - prefix = zfcpdump_save_areas[i]->s390.pref_reg; - else - prefix = zfcpdump_save_areas[i]->s390x.pref_reg; - + prefix = zfcpdump_save_areas[i]->pref_reg; sa_start = prefix + sys_info.sa_base; sa_end = prefix + sys_info.sa_base + sys_info.sa_size; @@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = { .release = zcore_reipl_release, }; +#ifdef CONFIG_32BIT -static void __init set_s390_lc_mask(union save_area *map) +static void __init set_lc_mask(struct save_area *map) { - memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); - memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); - memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); - memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); - memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); - memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); - memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); - memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); - memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); + memset(&map->ext_save, 0xff, sizeof(map->ext_save)); + memset(&map->timer, 0xff, sizeof(map->timer)); + memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); + memset(&map->psw, 0xff, sizeof(map->psw)); + memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); + memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); + memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); + memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); + memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); } -static void __init set_s390x_lc_mask(union save_area *map) +#else /* CONFIG_32BIT */ + +static void __init set_lc_mask(struct save_area *map) { - memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); - memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); - memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); - memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); - memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); - memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); - memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); - memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); - memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); - memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); + memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); + memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); + memset(&map->psw, 0xff, sizeof(map->psw)); + memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); + memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg)); + memset(&map->tod_reg, 0xff, sizeof(map->tod_reg)); + memset(&map->timer, 0xff, sizeof(map->timer)); + memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); + memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); + memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); } +#endif /* CONFIG_32BIT */ + /* * Initialize dump globals for a given architecture */ @@ -599,21 +557,18 @@ static int __init 
sys_info_init(enum arch_id arch) switch (arch) { case ARCH_S390X: pr_alert("DETECTED 'S390X (64 bit) OS'\n"); - sys_info.sa_base = SAVE_AREA_BASE_S390X; - sys_info.sa_size = sizeof(struct save_area_s390x); - set_s390x_lc_mask(&sys_info.lc_mask); break; case ARCH_S390: pr_alert("DETECTED 'S390 (32 bit) OS'\n"); - sys_info.sa_base = SAVE_AREA_BASE_S390; - sys_info.sa_size = sizeof(struct save_area_s390); - set_s390_lc_mask(&sys_info.lc_mask); break; default: pr_alert("0x%x is an unknown architecture.\n",arch); return -EINVAL; } + sys_info.sa_base = SAVE_AREA_BASE; + sys_info.sa_size = sizeof(struct save_area); sys_info.arch = arch; + set_lc_mask(&sys_info.lc_mask); rc = init_cpu_info(arch); if (rc) return rc; @@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem) static int __init zcore_header_init(int arch, struct zcore_header *hdr) { - int rc; + int rc, i; unsigned long memory = 0; + u32 prefix; if (arch == ARCH_S390X) hdr->arch_id = DUMP_ARCH_S390X; @@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr) hdr->num_pages = memory / PAGE_SIZE; hdr->tod = get_clock(); get_cpu_id(&hdr->cpu_id); + for (i = 0; zfcpdump_save_areas[i]; i++) { + prefix = zfcpdump_save_areas[i]->pref_reg; + hdr->real_cpu_cnt++; + if (!prefix) + continue; + hdr->lc_vec[hdr->cpu_cnt] = prefix; + hdr->cpu_cnt++; + } return 0; } @@ -741,14 +705,21 @@ static int __init zcore_init(void) if (rc) goto fail; -#ifndef __s390x__ +#ifdef CONFIG_64BIT + if (arch == ARCH_S390) { + pr_alert("The 64-bit dump tool cannot be used for a " + "32-bit system\n"); + rc = -EINVAL; + goto fail; + } +#else /* CONFIG_64BIT */ if (arch == ARCH_S390X) { pr_alert("The 32-bit dump tool cannot be used for a " "64-bit system\n"); rc = -EINVAL; goto fail; } -#endif +#endif /* CONFIG_64BIT */ rc = sys_info_init(arch); if (rc) diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 7a28a3029a3f..37df42af05ec 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) */ void ccw_request_handler(struct ccw_device *cdev) { + struct irb *irb = (struct irb *)&S390_lowcore.irb; struct ccw_request *req = &cdev->private->req; - struct irb *irb = (struct irb *) __LC_IRB; enum io_status status; int rc = -EOPNOTSUPP; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 1ecd3e567648..4038f5b4f144 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) secm_area->request.length = 0x0050; secm_area->request.code = 0x0016; - secm_area->key = PAGE_DEFAULT_KEY; + secm_area->key = PAGE_DEFAULT_KEY >> 4; secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index c84ac9443079..852612f5dba0 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) { struct chsc_private *private = sch->private; struct chsc_request *request = private->request; - struct irb *irb = (struct irb *)__LC_IRB; + struct irb *irb = (struct irb *)&S390_lowcore.irb; CHSC_LOG(4, "irb"); CHSC_LOG_HEX(4, irb, sizeof(*irb)); @@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, int ret = -ENODEV; char dbf[10]; - chsc_area->header.key = PAGE_DEFAULT_KEY; + 
chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; while ((sch = chsc_get_next_subchannel(sch))) { spin_lock(sch->lock); private = sch->private; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 126f240715a4..f736cdcf08ad 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs) /* * Get interrupt information from lowcore */ - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; - irb = (struct irb *) __LC_IRB; + tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; + irb = (struct irb *)&S390_lowcore.irb; do { kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; /* @@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs) * We don't do this for VM because a tpi drops the cpu * out of the sie which costs more cycles than it saves. */ - } while (!MACHINE_IS_VM && tpi (NULL) != 0); + } while (MACHINE_IS_LPAR && tpi(NULL) != 0); irq_exit(); set_irq_regs(old_regs); } @@ -682,10 +682,10 @@ static int cio_tpi(void) struct irb *irb; int irq_context; - tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; + tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; if (tpi(NULL) != 1) return 0; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; /* Store interrupt response block to lowcore. */ if (tsch(tpi_info->schid, irb) != 0) /* Not status pending or not operational. */ @@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(ti.schid, (struct irb *)__LC_IRB); + tsch(ti.schid, (struct irb *)&S390_lowcore.irb); if (schid_equal(&ti.schid, &schid)) return 0; } @@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) struct subchannel_id schid; struct schib schib; - schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; + schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; if (!schid.one) return -ENODEV; if (stsch(schid, &schib)) diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c index d157665d0e76..425f741a280c 100644 --- a/drivers/s390/cio/crw.c +++ b/drivers/s390/cio/crw.c @@ -8,15 +8,16 @@ * Heiko Carstens <heiko.carstens@de.ibm.com>, */ -#include <linux/semaphore.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/init.h> +#include <linux/wait.h> #include <asm/crw.h> -static struct semaphore crw_semaphore; static DEFINE_MUTEX(crw_handler_mutex); static crw_handler_t crw_handlers[NR_RSCS]; +static atomic_t crw_nr_req = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q); /** * crw_register_handler() - register a channel report word handler @@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc) static int crw_collect_info(void *unused) { struct crw crw[2]; - int ccode; + int ccode, signal; unsigned int chain; - int ignore; repeat: - ignore = down_interruptible(&crw_semaphore); + signal = wait_event_interruptible(crw_handler_wait_q, + atomic_read(&crw_nr_req) > 0); + if (unlikely(signal)) + atomic_inc(&crw_nr_req); chain = 0; while (1) { crw_handler_t handler; @@ -122,25 +125,23 @@ repeat: /* chain is always 0 or 1 here. */ chain = crw[chain].chn ? chain + 1 : 0; } + if (atomic_dec_and_test(&crw_nr_req)) + wake_up(&crw_handler_wait_q); goto repeat; return 0; } void crw_handle_channel_report(void) { - up(&crw_semaphore); + atomic_inc(&crw_nr_req); + wake_up(&crw_handler_wait_q); } -/* - * Separate initcall needed for semaphore initialization since - * crw_handle_channel_report might be called before crw_machine_check_init. 
- */ -static int __init crw_init_semaphore(void) +void crw_wait_for_channel_report(void) { - init_MUTEX_LOCKED(&crw_semaphore); - return 0; + crw_handle_channel_report(); + wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0); } -pure_initcall(crw_init_semaphore); /* * Machine checks for the channel subsystem must be enabled diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 7679aee6fa14..2769da54f2b9 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/reboot.h> #include <linux/suspend.h> +#include <linux/proc_fs.h> #include <asm/isc.h> #include <asm/crw.h> @@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) if (!get_device(&sch->dev)) return; sch->todo = todo; - if (!queue_work(slow_path_wq, &sch->todo_work)) { + if (!queue_work(cio_work_q, &sch->todo_work)) { /* Already queued, release workqueue ref. */ put_device(&sch->dev); } @@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused) } static DECLARE_WORK(slow_path_work, css_slow_path_func); -struct workqueue_struct *slow_path_wq; +struct workqueue_struct *cio_work_q; void css_schedule_eval(struct subchannel_id schid) { @@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_sch_add(slow_subchannel_set, schid); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -563,7 +564,7 @@ void css_schedule_eval_all(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_fill(slow_subchannel_set); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_add_set(slow_subchannel_set, unreg_set); atomic_set(&css_eval_scheduled, 1); - queue_work(slow_path_wq, &slow_path_work); + queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); idset_free(unreg_set); } void css_wait_for_slow_path(void) { - flush_workqueue(slow_path_wq); + flush_workqueue(cio_work_q); } /* Schedule reprobing of all unregistered subchannels. */ @@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void) ret = css_bus_init(); if (ret) return ret; - + cio_work_q = create_singlethread_workqueue("cio"); + if (!cio_work_q) { + ret = -ENOMEM; + goto out_bus; + } ret = io_subchannel_init(); if (ret) - css_bus_cleanup(); + goto out_wq; return ret; +out_wq: + destroy_workqueue(cio_work_q); +out_bus: + css_bus_cleanup(); + return ret; } subsys_initcall(channel_subsystem_init); @@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused) struct css_driver *cssdrv = to_cssdriver(drv); if (cssdrv->settle) - cssdrv->settle(); + return cssdrv->settle(); return 0; } +int css_complete_work(void) +{ + int ret; + + /* Wait for the evaluation of subchannels to finish. 
*/ + ret = wait_event_interruptible(css_eval_wq, + atomic_read(&css_eval_scheduled) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + /* Wait for the subchannel type specific initialization to finish */ + return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); +} + + /* * Wait for the initialization of devices to finish, to make sure we are * done with our setup if the search for the root device starts. @@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void) { /* Start initial subchannel evaluation. */ css_schedule_eval_all(); - /* Wait for the evaluation of subchannels to finish. */ - wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); - /* Wait for the subchannel type specific initialization to finish */ - return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); + css_complete_work(); + return 0; } subsys_initcall_sync(channel_subsystem_init_sync); +#ifdef CONFIG_PROC_FS +static ssize_t cio_settle_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret; + + /* Handle pending CRW's. */ + crw_wait_for_channel_report(); + ret = css_complete_work(); + + return ret ? ret : count; +} + +static const struct file_operations cio_settle_proc_fops = { + .write = cio_settle_write, +}; + +static int __init cio_settle_init(void) +{ + struct proc_dir_entry *entry; + + entry = proc_create("cio_settle", S_IWUSR, NULL, + &cio_settle_proc_fops); + if (!entry) + return -ENOMEM; + return 0; +} +device_initcall(cio_settle_init); +#endif /*CONFIG_PROC_FS*/ + int sch_is_pseudo_sch(struct subchannel *sch) { return sch == to_css(sch->dev.parent)->pseudo_subchannel; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index fe84b92cde60..7e37886de231 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -95,7 +95,7 @@ struct css_driver { int (*freeze)(struct subchannel *); int (*thaw) (struct subchannel *); int (*restore)(struct subchannel *); - void (*settle)(void); + int (*settle)(void); const char *name; }; @@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[]; /* Helper functions to build lists for the slow path. 
*/ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); +int css_complete_work(void); int sch_is_pseudo_sch(struct subchannel *); struct schib; int css_sch_is_valid(struct schib *); -extern struct workqueue_struct *slow_path_wq; +extern struct workqueue_struct *cio_work_q; void css_wait_for_slow_path(void); void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index a6c7d5426fb2..c6abb75c4615 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, int); static void recovery_func(unsigned long data); -struct workqueue_struct *ccw_device_work; wait_queue_head_t ccw_device_init_wq; atomic_t ccw_device_init_count; @@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch) return 0; } -static void io_subchannel_settle(void) +static int io_subchannel_settle(void) { - wait_event(ccw_device_init_wq, - atomic_read(&ccw_device_init_count) == 0); - flush_workqueue(ccw_device_work); + int ret; + + ret = wait_event_interruptible(ccw_device_init_wq, + atomic_read(&ccw_device_init_count) == 0); + if (ret) + return -EINTR; + flush_workqueue(cio_work_q); + return 0; } static struct css_driver io_subchannel_driver = { @@ -188,27 +192,13 @@ int __init io_subchannel_init(void) atomic_set(&ccw_device_init_count, 0); setup_timer(&recovery_timer, recovery_func, 0); - ccw_device_work = create_singlethread_workqueue("cio"); - if (!ccw_device_work) - return -ENOMEM; - slow_path_wq = create_singlethread_workqueue("kslowcrw"); - if (!slow_path_wq) { - ret = -ENOMEM; - goto out_err; - } - if ((ret = bus_register (&ccw_bus_type))) - goto out_err; - + ret = bus_register(&ccw_bus_type); + if (ret) + return ret; ret = css_driver_register(&io_subchannel_driver); if (ret) - goto out_err; + bus_unregister(&ccw_bus_type); - return 0; -out_err: - if (ccw_device_work) - destroy_workqueue(ccw_device_work); - if (slow_path_wq) - destroy_workqueue(slow_path_wq); return ret; } @@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) /* Not operational. */ if (!cdev) return IO_SCH_UNREG; - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } @@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) if (!cdev) return IO_SCH_ATTACH; if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG_ATTACH; return IO_SCH_ORPH_ATTACH; } if ((sch->schib.pmcw.pam & sch->opm) == 0) { - if (!ccw_device_notify(cdev, CIO_NO_PATH)) + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_DISC; } @@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_VERIFY: + if (cdev->private->flags.resuming == 1) { + if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { + ccw_device_set_notoper(cdev); + break; + } + } /* Trigger path verification. */ io_subchannel_verify(sch); rc = 0; @@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) break; case IO_SCH_UNREG_ATTACH: /* Unregister ccw device. 
*/ - ccw_device_unregister(cdev); + if (!cdev->private->flags.resuming) + ccw_device_unregister(cdev); break; default: break; @@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_UNREG: - css_sch_device_unregister(sch); + if (!cdev || !cdev->private->flags.resuming) + css_sch_device_unregister(sch); break; case IO_SCH_ORPH_ATTACH: case IO_SCH_UNREG_ATTACH: @@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); - if (cio_is_console(sch->schid)) - goto out; + spin_lock_irq(sch->lock); + if (cio_is_console(sch->schid)) { + cio_enable_subchannel(sch, (u32)(addr_t)sch); + goto out_unlock; + } /* * While we were sleeping, devices may have gone or become * available again. Kick re-detection. */ - spin_lock_irq(sch->lock); cdev->private->flags.resuming = 1; + css_schedule_eval(sch->schid); + spin_unlock_irq(sch->lock); + css_complete_work(); + + /* cdev may have been moved to a different subchannel. */ + sch = to_subchannel(cdev->dev.parent); + spin_lock_irq(sch->lock); + if (cdev->private->state != DEV_STATE_ONLINE && + cdev->private->state != DEV_STATE_OFFLINE) + goto out_unlock; + ccw_device_recognition(cdev); spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED); -out: + spin_lock_irq(sch->lock); + +out_unlock: cdev->private->flags.resuming = 0; + spin_unlock_irq(sch->lock); } static int resume_handle_boxed(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_BOXED; - if (ccw_device_notify(cdev, CIO_BOXED)) + if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; @@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev) static int resume_handle_disc(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_DISCONNECTED; - if (ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; @@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev) static int ccw_device_pm_restore(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); - struct subchannel *sch = to_subchannel(cdev->dev.parent); - int ret = 0, cm_enabled; + struct subchannel *sch; + int ret = 0; __ccw_device_pm_restore(cdev); + sch = to_subchannel(cdev->dev.parent); spin_lock_irq(sch->lock); - if (cio_is_console(sch->schid)) { - cio_enable_subchannel(sch, (u32)(addr_t)sch); - spin_unlock_irq(sch->lock); + if (cio_is_console(sch->schid)) goto out_restore; - } - cdev->private->flags.donotify = 0; + /* check recognition results */ switch (cdev->private->state) { case DEV_STATE_OFFLINE: + case DEV_STATE_ONLINE: + cdev->private->flags.donotify = 0; break; case DEV_STATE_BOXED: ret = resume_handle_boxed(cdev); - spin_unlock_irq(sch->lock); if (ret) - goto out; + goto out_unlock; goto out_restore; - case DEV_STATE_DISCONNECTED: - goto out_disc_unlock; default: - goto out_unreg_unlock; - } - /* check if the device id has changed */ - if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { - CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno " - "changed from %04x to %04x)\n", - sch->schid.ssid, sch->schid.sch_no, - cdev->private->dev_id.devno, - sch->schib.pmcw.dev); - goto out_unreg_unlock; + ret = resume_handle_disc(cdev); + if (ret) + goto 
out_unlock; + goto out_restore; } /* check if the device type has changed */ if (!ccw_device_test_sense_data(cdev)) { @@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev) ret = -ENODEV; goto out_unlock; } - if (!cdev->online) { - ret = 0; + if (!cdev->online) goto out_unlock; - } - ret = ccw_device_online(cdev); - if (ret) - goto out_disc_unlock; - cm_enabled = cdev->private->cmb != NULL; + if (ccw_device_online(cdev)) { + ret = resume_handle_disc(cdev); + if (ret) + goto out_unlock; + goto out_restore; + } spin_unlock_irq(sch->lock); - wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); - if (cdev->private->state != DEV_STATE_ONLINE) { - spin_lock_irq(sch->lock); - goto out_disc_unlock; + spin_lock_irq(sch->lock); + + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { + ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + ret = -ENODEV; + goto out_unlock; } - if (cm_enabled) { + + /* reenable cmf, if needed */ + if (cdev->private->cmb) { + spin_unlock_irq(sch->lock); ret = ccw_set_cmf(cdev, 1); + spin_lock_irq(sch->lock); if (ret) { CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " "(rc=%d)\n", cdev->private->dev_id.ssid, @@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev) } out_restore: + spin_unlock_irq(sch->lock); if (cdev->online && cdev->drv && cdev->drv->restore) ret = cdev->drv->restore(cdev); -out: return ret; -out_disc_unlock: - ret = resume_handle_disc(cdev); - spin_unlock_irq(sch->lock); - if (ret) - return ret; - goto out_restore; - -out_unreg_unlock: - ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); - ret = -ENODEV; out_unlock: spin_unlock_irq(sch->lock); return ret; @@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) /* Get workqueue ref. */ if (!get_device(&cdev->dev)) return; - if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { + if (!queue_work(cio_work_q, &cdev->private->todo_work)) { /* Already queued, release workqueue ref. */ put_device(&cdev->dev); } @@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register); EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(get_ccwdev_by_busid); EXPORT_SYMBOL(ccw_bus_type); -EXPORT_SYMBOL(ccw_device_work); EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index bcfe13e42638..379de2d1ec49 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -4,7 +4,7 @@ #include <asm/ccwdev.h> #include <asm/atomic.h> #include <linux/wait.h> - +#include <linux/notifier.h> #include "io_sch.h" /* @@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev) cdev->private->state == DEV_STATE_BOXED); } -extern struct workqueue_struct *ccw_device_work; extern wait_queue_head_t ccw_device_init_wq; extern atomic_t ccw_device_init_count; int __init io_subchannel_init(void); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ae760658a131..c56ab94612f9 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } } +/** + * ccw_device_notify() - inform the device's driver about an event + * @cdev: device for which an event occured + * @event: event that occurred + * + * Returns: + * -%EINVAL if the device is offline or has no driver. + * -%EOPNOTSUPP if the device's driver has no notifier registered. + * %NOTIFY_OK if the driver wants to keep the device. + * %NOTIFY_BAD if the driver doesn't want to keep the device. 
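The hunk that follows reworks ccw_device_notify() so it can return negative errno values (-EINVAL, -EOPNOTSUPP) in addition to NOTIFY_OK and NOTIFY_BAD, which is why callers throughout this patch switch from testing for a non-zero return to comparing explicitly against NOTIFY_OK. The standalone sketch below illustrates the pitfall; the constants and names are stand-ins, not the kernel's notifier.h values.

/*
 * Standalone sketch (stand-in values, not kernel code): once a notify
 * routine can return a negative errno as well as NOTIFY_OK/NOTIFY_BAD,
 * "if (!ret)" no longer distinguishes "keep the device" from "no driver
 * bound" -- callers must compare against NOTIFY_OK, as this patch does.
 */
#include <stdio.h>

#define NOTIFY_OK   1           /* stand-ins for the notifier.h values */
#define NOTIFY_BAD  2
#define MY_EINVAL   22

/* toy notifier: no driver bound -> -EINVAL, otherwise OK or BAD */
static int toy_notify(int has_driver, int keep)
{
        if (!has_driver)
                return -MY_EINVAL;
        return keep ? NOTIFY_OK : NOTIFY_BAD;
}

int main(void)
{
        int ret = toy_notify(0, 1);     /* offline / no driver case */

        /* old check: any non-zero value (-EINVAL here) looks like "keep" */
        printf("old check keeps device: %s\n", ret ? "yes" : "no");
        /* new check: only an explicit NOTIFY_OK keeps the device */
        printf("new check keeps device: %s\n",
               ret == NOTIFY_OK ? "yes" : "no");
        return 0;
}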
+ */ int ccw_device_notify(struct ccw_device *cdev, int event) { + int ret = -EINVAL; + if (!cdev->drv) - return 0; + goto out; if (!cdev->online) - return 0; + goto out; CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, event); - return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; + if (!cdev->drv->notify) { + ret = -EOPNOTSUPP; + goto out; + } + if (cdev->drv->notify(cdev, event)) + ret = NOTIFY_OK; + else + ret = NOTIFY_BAD; +out: + return ret; } static void ccw_device_oper_notify(struct ccw_device *cdev) { - if (ccw_device_notify(cdev, CIO_OPER)) { + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { /* Reenable channel measurements, if needed. */ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); return; @@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state) case DEV_STATE_BOXED: CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) + if (cdev->online && + ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); cdev->private->flags.donotify = 0; break; case DEV_STATE_NOT_OPER: CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state) CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " "%04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (!ccw_device_notify(cdev, CIO_NO_PATH)) + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev) static void ccw_device_generic_notoper(struct ccw_device *cdev, enum dev_event dev_event) { - if (!ccw_device_notify(cdev, CIO_GONE)) + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); else ccw_device_set_disconnected(cdev); @@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) struct irb *irb; int is_cmd; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ if (!scsw_is_solicited(&irb->scsw)) { @@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; - irb = (struct irb *) __LC_IRB; + irb = (struct irb *)&S390_lowcore.irb; /* Check for unsolicited interrupt. */ if (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 44f2f6a97f33..48aa0647432b 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -208,18 +208,27 @@ struct qdio_dev_perf_stat { unsigned int eqbs_partial; unsigned int sqbs; unsigned int sqbs_partial; +} ____cacheline_aligned; + +struct qdio_queue_perf_stat { + /* + * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. + * Since max. 127 SBALs are scanned reuse entry for 128 as queue full + * aka 127 SBALs found. 
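The comment above describes the per-queue nr_sbals[] histogram added to struct qdio_queue_perf_stat; the account_sbals() helper added to qdio_main.c further down fills it by taking the integer log2 of the number of SBALs found and folding a full scan of 127 buffers into the last bucket. A standalone sketch of that bucketing follows; the 127 constant is assumed from the 128-buffers-per-queue mask used in the hunk.

/*
 * Sketch of the order-2 bucketing used by account_sbals(): counts
 * 1, 2-3, 4-7, ..., 64-126 land in buckets 0..6 via integer log2,
 * and a full scan of 127 SBALs (assumed QDIO_MAX_BUFFERS_MASK) is
 * special-cased into bucket 7, the otherwise unused "128" entry.
 */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_MASK 127       /* assumed: 128 buffers per queue - 1 */

static int sbal_bucket(int count)
{
        int pos = 0;

        if (count == QDIO_MAX_BUFFERS_MASK)
                return 7;               /* "queue full" bucket */
        while (count >>= 1)
                pos++;                  /* integer log2 */
        return pos;
}

int main(void)
{
        int samples[] = { 1, 3, 4, 16, 100, 127 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("count %3d -> bucket %d\n", samples[i],
                       sbal_bucket(samples[i]));
        return 0;
}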
+ */ + unsigned int nr_sbals[8]; + unsigned int nr_sbal_error; + unsigned int nr_sbal_nop; + unsigned int nr_sbal_total; }; struct qdio_input_q { /* input buffer acknowledgement flag */ int polling; - /* first ACK'ed buffer */ int ack_start; - /* how much sbals are acknowledged with qebsm */ int ack_count; - /* last time of noticing incoming data */ u64 timestamp; }; @@ -227,40 +236,27 @@ struct qdio_input_q { struct qdio_output_q { /* PCIs are enabled for the queue */ int pci_out_enabled; - /* IQDIO: output multiple buffers (enhanced SIGA) */ int use_enh_siga; - /* timer to check for more outbound work */ struct timer_list timer; }; +/* + * Note on cache alignment: grouped slsb and write mostly data at the beginning + * sbal[] is read-only and starts on a new cacheline followed by read mostly. + */ struct qdio_q { struct slsb slsb; + union { struct qdio_input_q in; struct qdio_output_q out; } u; - /* queue number */ - int nr; - - /* bitmask of queue number */ - int mask; - - /* input or output queue */ - int is_input_q; - - /* list of thinint input queues */ - struct list_head entry; - - /* upper-layer program handler */ - qdio_handler_t (*handler); - /* * inbound: next buffer the program should check for - * outbound: next buffer to check for having been processed - * by the card + * outbound: next buffer to check if adapter processed it */ int first_to_check; @@ -273,16 +269,32 @@ struct qdio_q { /* number of buffers in use by the adapter */ atomic_t nr_buf_used; - struct qdio_irq *irq_ptr; - struct dentry *debugfs_q; - struct tasklet_struct tasklet; - /* error condition during a data transfer */ unsigned int qdio_error; - struct sl *sl; - struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; + struct tasklet_struct tasklet; + struct qdio_queue_perf_stat q_stats; + + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned; + + /* queue number */ + int nr; + + /* bitmask of queue number */ + int mask; + + /* input or output queue */ + int is_input_q; + + /* list of thinint input queues */ + struct list_head entry; + /* upper-layer program handler */ + qdio_handler_t (*handler); + + struct dentry *debugfs_q; + struct qdio_irq *irq_ptr; + struct sl *sl; /* * Warning: Leave this member at the end so it won't be cleared in * qdio_fill_qs. A page is allocated under this pointer and used for @@ -317,12 +329,8 @@ struct qdio_irq { struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); - struct qdio_dev_perf_stat perf_stat; int perf_stat_enabled; - /* - * Warning: Leave these members together at the end so they won't be - * cleared in qdio_setup_irq. 
- */ + struct qdr *qdr; unsigned long chsc_page; @@ -331,6 +339,7 @@ struct qdio_irq { debug_info_t *debug_area; struct mutex setup_mutex; + struct qdio_dev_perf_stat perf_stat; }; /* helper functions */ @@ -341,9 +350,20 @@ struct qdio_irq { (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ css_general_characteristics.aif_osa) -#define qperf(qdev,attr) qdev->perf_stat.attr -#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ - q->irq_ptr->perf_stat.attr++ +#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) + +#define qperf_inc(__q, __attr) \ +({ \ + struct qdio_irq *qdev = (__q)->irq_ptr; \ + if (qdev->perf_stat_enabled) \ + (qdev->perf_stat.__attr)++; \ +}) + +static inline void account_sbals_error(struct qdio_q *q, int count) +{ + q->q_stats.nr_sbal_error += count; + q->q_stats.nr_sbal_total += count; +} /* the highest iqdio queue is used for multicast */ static inline int multicast_outbound(struct qdio_q *q) diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f49761ff9a00..c94eb2a0fa2e 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); seq_printf(m, "polling: %d ack start: %d ack count: %d\n", q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); - seq_printf(m, "slsb buffer states:\n"); + seq_printf(m, "SBAL states:\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { @@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v) } seq_printf(m, "\n"); seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); + + seq_printf(m, "\nSBAL statistics:"); + if (!q->irq_ptr->perf_stat_enabled) { + seq_printf(m, " disabled\n"); + return 0; + } + + seq_printf(m, "\n1 2.. 4.. 8.. " + "16.. 32.. 64.. 
127\n"); + for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++) + seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]); + seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n", + q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop, + q->q_stats.nr_sbal_total); return 0; } @@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, { struct seq_file *seq = file->private_data; struct qdio_irq *irq_ptr = seq->private; + struct qdio_q *q; unsigned long val; char buf[8]; - int ret; + int ret, i; if (!irq_ptr) return 0; @@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, case 0: irq_ptr->perf_stat_enabled = 0; memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + for_each_input_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, sizeof(q->q_stats)); + for_each_output_queue(irq_ptr, q, i) + memset(&q->q_stats, 0, sizeof(q->q_stats)); break; case 1: irq_ptr->perf_stat_enabled = 1; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 62b654af9237..232ef047ba34 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q) set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } +static inline void account_sbals(struct qdio_q *q, int count) +{ + int pos = 0; + + q->q_stats.nr_sbal_total += count; + if (count == QDIO_MAX_BUFFERS_MASK) { + q->q_stats.nr_sbals[7]++; + return; + } + while (count >>= 1) + pos++; + q->q_stats.nr_sbals[pos]++; +} + static void announce_buffer_error(struct qdio_q *q, int count) { q->qdio_error |= QDIO_ERROR_SLSB_STATE; @@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) q->first_to_check = add_buf(q->first_to_check, count); if (atomic_sub(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); break; case SLSB_P_INPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); break; case SLSB_CU_INPUT_EMPTY: case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); break; default: @@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q) if ((bufnr != q->last_move) || q->qdio_error) { q->last_move = bufnr; - if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) + if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) q->u.in.timestamp = get_usecs(); return 1; } else @@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); + if (q->irq_ptr->perf_stat_enabled) + account_sbals(q, count); break; case SLSB_P_OUTPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); + if (q->irq_ptr->perf_stat_enabled) + account_sbals_error(q, count); break; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ + if (q->irq_ptr->perf_stat_enabled) + q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: diff --git a/drivers/s390/cio/qdio_setup.c 
b/drivers/s390/cio/qdio_setup.c index 8c2dea5fa2b4..7f4a75465140 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->qdf0[i + nr].slsba = (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; - irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; - irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4; + irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4; } static void setup_qdr(struct qdio_irq *irq_ptr, @@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; for (i = 0; i < qdio_init->no_input_qs; i++) __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); @@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data) struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; int rc; - memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); + memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); + memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); + memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw)); + memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); + memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); + + irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; + irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; + /* wipes qib.ac, required by ar7063 */ memset(irq_ptr->qdr, 0, sizeof(struct qdr)); diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 091d904d3182..9942c1031b25 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) .code = 0x0021, }; scssc_area->operation_code = 0; - scssc_area->ks = PAGE_DEFAULT_KEY; - scssc_area->kc = PAGE_DEFAULT_KEY; + scssc_area->ks = PAGE_DEFAULT_KEY >> 4; + scssc_area->kc = PAGE_DEFAULT_KEY >> 4; scssc_area->isc = QDIO_AIRQ_ISC; scssc_area->schid = irq_ptr->schid; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index c68be24e27d9..ba50fe02e572 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -33,6 +33,7 @@ #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/compat.h> #include <linux/smp_lock.h> #include <asm/atomic.h> @@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = { */ static struct proc_dir_entry *zcrypt_entry; -static int sprintcl(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) { - int hl, i; + int i; - hl = 0; for (i = 0; i < len; i++) - hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); - hl += sprintf(outaddr+hl, " "); - return hl; + seq_printf(m, "%01x", (unsigned int) addr[i]); + seq_putc(m, ' '); } -static int sprintrw(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static void 
sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) { - int hl, inl, c, cx; + int inl, c, cx; - hl = sprintf(outaddr, " "); + seq_printf(m, " "); inl = 0; for (c = 0; c < (len / 16); c++) { - hl += sprintcl(outaddr+hl, addr+inl, 16); + sprintcl(m, addr+inl, 16); inl += 16; } cx = len%16; if (cx) { - hl += sprintcl(outaddr+hl, addr+inl, cx); + sprintcl(m, addr+inl, cx); inl += cx; } - hl += sprintf(outaddr+hl, "\n"); - return hl; + seq_putc(m, '\n'); } -static int sprinthx(unsigned char *title, unsigned char *outaddr, - unsigned char *addr, unsigned int len) +static void sprinthx(unsigned char *title, struct seq_file *m, + unsigned char *addr, unsigned int len) { - int hl, inl, r, rx; + int inl, r, rx; - hl = sprintf(outaddr, "\n%s\n", title); + seq_printf(m, "\n%s\n", title); inl = 0; for (r = 0; r < (len / 64); r++) { - hl += sprintrw(outaddr+hl, addr+inl, 64); + sprintrw(m, addr+inl, 64); inl += 64; } rx = len % 64; if (rx) { - hl += sprintrw(outaddr+hl, addr+inl, rx); + sprintrw(m, addr+inl, rx); inl += rx; } - hl += sprintf(outaddr+hl, "\n"); - return hl; + seq_putc(m, '\n'); } -static int sprinthx4(unsigned char *title, unsigned char *outaddr, - unsigned int *array, unsigned int len) +static void sprinthx4(unsigned char *title, struct seq_file *m, + unsigned int *array, unsigned int len) { - int hl, r; + int r; - hl = sprintf(outaddr, "\n%s\n", title); + seq_printf(m, "\n%s\n", title); for (r = 0; r < len; r++) { if ((r % 8) == 0) - hl += sprintf(outaddr+hl, " "); - hl += sprintf(outaddr+hl, "%08X ", array[r]); + seq_printf(m, " "); + seq_printf(m, "%08X ", array[r]); if ((r % 8) == 7) - hl += sprintf(outaddr+hl, "\n"); + seq_putc(m, '\n'); } - hl += sprintf(outaddr+hl, "\n"); - return hl; + seq_putc(m, '\n'); } -static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, - int count, int *eof, void *data) +static int zcrypt_proc_show(struct seq_file *m, void *v) { - unsigned char *workarea; - int len; - - len = 0; - - /* resp_buff is a page. 
Use the right half for a work area */ - workarea = resp_buff + 2000; - len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", - ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); - len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", - ap_domain_index); - len += sprintf(resp_buff + len, "Total device count: %d\n", - zcrypt_device_count); - len += sprintf(resp_buff + len, "PCICA count: %d\n", - zcrypt_count_type(ZCRYPT_PCICA)); - len += sprintf(resp_buff + len, "PCICC count: %d\n", - zcrypt_count_type(ZCRYPT_PCICC)); - len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", - zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); - len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", - zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); - len += sprintf(resp_buff + len, "CEX2C count: %d\n", - zcrypt_count_type(ZCRYPT_CEX2C)); - len += sprintf(resp_buff + len, "CEX2A count: %d\n", - zcrypt_count_type(ZCRYPT_CEX2A)); - len += sprintf(resp_buff + len, "CEX3C count: %d\n", - zcrypt_count_type(ZCRYPT_CEX3C)); - len += sprintf(resp_buff + len, "CEX3A count: %d\n", - zcrypt_count_type(ZCRYPT_CEX3A)); - len += sprintf(resp_buff + len, "requestq count: %d\n", - zcrypt_requestq_count()); - len += sprintf(resp_buff + len, "pendingq count: %d\n", - zcrypt_pendingq_count()); - len += sprintf(resp_buff + len, "Total open handles: %d\n\n", - atomic_read(&zcrypt_open_count)); + char workarea[sizeof(int) * AP_DEVICES]; + + seq_printf(m, "\nzcrypt version: %d.%d.%d\n", + ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); + seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); + seq_printf(m, "Total device count: %d\n", zcrypt_device_count); + seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); + seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); + seq_printf(m, "PCIXCC MCL2 count: %d\n", + zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); + seq_printf(m, "PCIXCC MCL3 count: %d\n", + zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); + seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); + seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); + seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); + seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); + seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); + seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); + seq_printf(m, "Total open handles: %d\n\n", + atomic_read(&zcrypt_open_count)); zcrypt_status_mask(workarea); - len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " - "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", - resp_buff+len, workarea, AP_DEVICES); + sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " + "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", + m, workarea, AP_DEVICES); zcrypt_qdepth_mask(workarea); - len += sprinthx("Waiting work element counts", - resp_buff+len, workarea, AP_DEVICES); + sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); zcrypt_perdev_reqcnt((int *) workarea); - len += sprinthx4("Per-device successfully completed request counts", - resp_buff+len,(unsigned int *) workarea, AP_DEVICES); - *eof = 1; - memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); - return len; + sprinthx4("Per-device successfully completed request counts", + m, (unsigned int *) workarea, AP_DEVICES); + return 0; +} + +static int zcrypt_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, zcrypt_proc_show, NULL); } static void zcrypt_disable_card(int index) @@ -1061,11 
+1041,11 @@ static void zcrypt_enable_card(int index) spin_unlock_bh(&zcrypt_device_lock); } -static int zcrypt_status_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) { unsigned char *lbuf, *ptr; - unsigned long local_count; + size_t local_count; int j; if (count <= 0) @@ -1115,6 +1095,15 @@ out: return count; } +static const struct file_operations zcrypt_proc_fops = { + .owner = THIS_MODULE, + .open = zcrypt_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = zcrypt_proc_write, +}; + static int zcrypt_rng_device_count; static u32 *zcrypt_rng_buffer; static int zcrypt_rng_buffer_index; @@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void) goto out; /* Set up the proc file system */ - zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); + zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); if (!zcrypt_entry) { rc = -ENOMEM; goto out_misc; } - zcrypt_entry->data = NULL; - zcrypt_entry->read_proc = zcrypt_status_read; - zcrypt_entry->write_proc = zcrypt_status_write; return 0; diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 2930fc763ac5..b2fc4fd63f7f 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code) return; /* The LSB might be overloaded, we have to mask it */ - vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL); + vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); /* We use the LSB of extparam, to decide, if this interrupt is a config * change or a "standard" interrupt */ - config_changed = (*(int *) __LC_EXT_PARAMS & 1); + config_changed = S390_lowcore.ext_params & 1; if (config_changed) { struct virtio_driver *drv; diff --git a/include/linux/elf.h b/include/linux/elf.h index 0cc4d55151b7..39ad4b230a4a 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -362,6 +362,11 @@ typedef struct elf64_shdr { #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ #define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ +#define NT_S390_TIMER 0x301 /* s390 timer register */ +#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ +#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ +#define NT_S390_CTRS 0x304 /* s390 control registers */ +#define NT_S390_PREFIX 0x305 /* s390 prefix register */ /* Note header in a PT_NOTE section */ |
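The zcrypt_api.c hunks above convert the /proc/driver/z90crypt entry from the old create_proc_entry()/read_proc interface to seq_file: a show() routine prints through seq_printf(), single_open() binds it to the open file, and proc_create() registers a file_operations built from seq_read, seq_lseek and single_release. Below is a minimal, self-contained module sketch of the same pattern; the module and entry names are invented, while the calls mirror the 2.6.33-era API used in the patch.

/*
 * Minimal sketch (hypothetical module, not part of the patch) of the
 * seq_file-backed /proc pattern used by the zcrypt conversion above.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
        /* dummy content; the real driver prints device counts here */
        seq_printf(m, "example version: %d.%d\n", 1, 0);
        seq_printf(m, "open count: %d\n", 42);
        return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = example_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init example_proc_init(void)
{
        if (!proc_create("driver/example", 0444, NULL, &example_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit example_proc_exit(void)
{
        remove_proc_entry("driver/example", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");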
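Earlier in this diff, qdio.h turns qperf_inc() from a bare if-statement macro into a GCC statement expression. The old form is hazardous when the caller wraps it in if/else, because the macro's own if captures the caller's else. A small userspace illustration with made-up names (gcc or clang statement expressions assumed):

/*
 * Illustration (hypothetical names) of why the patch replaces the bare
 * "if (...)" macro with a ({ ... }) statement expression: the bare-if
 * version silently steals a caller's "else" branch.
 */
#include <stdio.h>

struct stats { int enabled; int hits; };

/* old style: expands to an if-statement, unsafe under if/else */
#define inc_unsafe(s, field)    if ((s)->enabled) (s)->field++

/* new style: a statement expression, behaves like a function call */
#define inc_safe(s, field)                      \
({                                              \
        if ((s)->enabled)                       \
                (s)->field++;                   \
})

int main(void)
{
        struct stats s = { .enabled = 0, .hits = 0 };
        int took_else;

        took_else = 0;
        if (s.enabled)
                inc_unsafe(&s, hits);
        else            /* binds to the macro's hidden inner "if" */
                took_else = 1;
        printf("unsafe macro: else taken = %d (should be 1)\n", took_else);

        took_else = 0;
        if (s.enabled)
                inc_safe(&s, hits);
        else
                took_else = 1;
        printf("safe macro:   else taken = %d\n", took_else);
        return 0;
}

A do { } while (0) wrapper would avoid the same hazard; the statement-expression form used in the patch does so as well while keeping the local variable it introduces scoped to the macro body.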