From 0fa963553a5c28d8f8aabd8878326d3f782045fc Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 12 May 2016 18:10:48 +0200 Subject: s390/bpf: reduce maximum program size to 64 KB The s390 BPF compiler currently uses relative branch instructions that only support jumps up to 64 KB. Examples are "j", "jnz", "cgrj", etc. Currently the maximum size of s390 BPF programs is set to 0x7ffff. If branches over 64 KB are generated, the kernel can crash due to incorrect code. So fix this and reduce the maximum size to 64 KB. Programs larger than that will be interpreted. Fixes: ce2b6ad9c185 ("s390/bpf: increase BPF_SIZE_MAX") Cc: stable@vger.kernel.org # 4.3+ Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/net/bpf_jit_comp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9133b0ec000b..b36c74f4c937 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -45,7 +45,7 @@ struct bpf_jit { int labels[1]; /* Labels for local jumps */ }; -#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ +#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ #define SEEN_SKB 1 /* skb access */ #define SEEN_MEM 2 /* use mem[] for temporary storage */ -- cgit v1.2.3-58-ga151 From 6edf0aa4f8bbdfbb4d6d786892fa02728d05dc36 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 11 May 2016 21:13:13 +0200 Subject: s390/bpf: fix recache skb->data/hlen for skb_vlan_push/pop When skb_vlan_push/pop is used, we store the SKB pointer on the stack in the prologue and restore it after the BPF_JMP_CALL to skb_vlan_push/pop. Unfortunately there are currently two bugs in the code: 1) The wrong stack slot (offset 170 instead of 176) is used 2) The wrong register (W1 instead of B1) is saved So fix this and use the correct stack slot and register. 
Fixes: 9db7f2b81880 ("s390/bpf: recache skb->data/hlen for skb_vlan_push/pop") Cc: stable@vger.kernel.org # 4.3+ Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/net/bpf_jit.h | 4 ++-- arch/s390/net/bpf_jit_comp.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index f010c93a88b1..fda605dbc1b4 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | | | * +---------------+ | * | 8 byte skbp | | - * R15+170 -> +---------------+ | + * R15+176 -> +---------------+ | * | 8 byte hlen | | * R15+168 -> +---------------+ | * | 4 byte align | | @@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; #define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ -#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ +#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index b36c74f4c937..bee281f3163d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit) emit_load_skb_data_hlen(jit); if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, + EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); } -- cgit v1.2.3-58-ga151 From e9bc15f28e5f0db44aba609780d6850eabefbf11 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2016 10:45:44 +0100 Subject: s390/config: update default configuration Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/configs/default_defconfig | 44 ++++++++++++++++++++++----------- arch/s390/configs/gcov_defconfig | 34 ++++++++++++++----------- arch/s390/configs/performance_defconfig | 36 ++++++++++++++++----------- arch/s390/configs/zfcpdump_defconfig | 4 +-- arch/s390/defconfig | 44 +++++++++++++++++++-------------- 5 files changed, 98 insertions(+), 64 deletions(-) (limited to 'arch') diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0ac42cc4f880..d5ec71b2ed02 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_PERF=y +CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y 
CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y @@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_PCI_DEBUG=y CONFIG_HOTPLUG_PCI=y @@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m @@ -537,6 +545,8 @@ CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 CONFIG_READABLE_ASM=y CONFIG_UNUSED_SYMBOLS=y @@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y CONFIG_SLUB_STATS=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_VM=y +CONFIG_DEBUG_VM_VMACACHE=y CONFIG_DEBUG_VM_RB=y +CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y +CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_ON_OOPS=y +CONFIG_DEBUG_TIMEKEEPING=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y @@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_KPROBES_SANITY_TEST=y @@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y CONFIG_TEST_KSTRTOX=y CONFIG_DMA_API_DEBUG=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m @@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index a31dcd56f7c0..f46a35115d2d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_BLK_CGROUP=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y 
CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y @@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_KPROBE_EVENT is not set +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_RBTREE_TEST=m CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m @@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 7b73bf353345..ba0f2a58b8cd 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_BLK_CGROUP=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 CONFIG_NUMA=y @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m @@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y @@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y 
+CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m @@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1719843a55a2..4366a3e3e754 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -1,5 +1,5 @@ # CONFIG_SWAP is not set -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_DEFAULT_DEADLINE=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y # CONFIG_COMPAT is not set CONFIG_NR_CPUS=2 @@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set -# CONFIG_STRICT_DEVMEM is not set # CONFIG_PFAULT is not set # CONFIG_S390_HYPFS_FS is not set # CONFIG_VIRTUALIZATION is not set diff --git a/arch/s390/defconfig b/arch/s390/defconfig index e24f2af4c73b..3f571ea89509 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,8 +1,8 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y +CONFIG_USELIB=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y @@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_PERF=y +CONFIG_BLK_CGROUP=y CONFIG_CGROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y @@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y CONFIG_HZ_100=y @@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_CRASH_DUMP=y CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -61,7 +68,6 @@ CONFIG_UNIX=y CONFIG_NET_KEY=y CONFIG_INET=y CONFIG_IP_MULTICAST=y -# CONFIG_INET_LRO is not set CONFIG_L2TP=m CONFIG_L2TP_DEBUGFS=m CONFIG_VLAN_8021Q=y @@ -144,6 +150,9 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y @@ -158,20 +167,21 
@@ CONFIG_LOCK_STAT=y CONFIG_DEBUG_LOCKDEP=y CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_TRACE=y CONFIG_LATENCYTOP=y CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y -CONFIG_TRACER_SNAPSHOT=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m @@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_ZLIB=m -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_ANSI_CPRNG=m -- cgit v1.2.3-58-ga151 From cf0d44d513f275be9ce42158079d4257e8973889 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Mon, 23 May 2016 15:35:51 +0200 Subject: s390: fix info leak in do_sigsegv Aleksa has reported an incorrect si_errno value when stracing a task which received SIGSEGV: [pid 20799] --- SIGSEGV {si_signo=SIGSEGV, si_code=SEGV_MAPERR, si_errno=2510266, si_addr=0x100000000000000} The reason seems to be that do_sigsegv is not completely initializing the siginfo structure defined on the stack, so it will leak 4 bytes of the previous stack content. Fix it simply by initializing si_errno to 0 (same as do_sigbus does already). Cc: stable # introduced pre-git times Reported-by: Aleksa Sarai Signed-off-by: Michal Hocko Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7a3144017301..19288c1b36d3 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) report_user_fault(regs, SIGSEGV, 1); si.si_signo = SIGSEGV; + si.si_errno = 0; si.si_code = si_code; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGSEGV, &si, current); -- cgit v1.2.3-58-ga151 From 9ea46abe22550e3366ff7cee2f8391b35b12f730 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 25 May 2016 12:51:20 -0700 Subject: sparc64: Take ctx_alloc_lock properly in hugetlb_setup(). On cheetahplus chips we take the ctx_alloc_lock in order to modify the TLB lookup parameters for the indexed TLBs, which are stored in the context register. This is called with interrupts disabled; however, ctx_alloc_lock is an IRQ safe lock, therefore we must acquire/release it properly with spin_{lock,unlock}_irq(). Reported-by: Meelis Roos Tested-by: Meelis Roos Signed-off-by: David S. Miller --- arch/sparc/mm/init_64.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 652683cb4b4b..14bb0d5ed3c6 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs) * the Data-TLB for huge pages. */ if (tlb_type == cheetah_plus) { + bool need_context_reload = false; unsigned long ctx; - spin_lock(&ctx_alloc_lock); + spin_lock_irq(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; @@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs) * also executing in this address space. 
*/ mm->context.sparc64_ctx_val = ctx; - on_each_cpu(context_reload, mm, 0); + need_context_reload = true; } - spin_unlock(&ctx_alloc_lock); + spin_unlock_irq(&ctx_alloc_lock); + + if (need_context_reload) + on_each_cpu(context_reload, mm, 0); } } #endif -- cgit v1.2.3-58-ga151 From d11c2a0de2824395656cf8ed15811580c9dd38aa Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 28 May 2016 21:21:31 -0700 Subject: sparc: Harden signal return frame checks. All signal frames must be at least 16-byte aligned, because that is the alignment we explicitly create when we build signal return stack frames. All stack pointers must be at least 8-byte aligned. Signed-off-by: David S. Miller --- arch/sparc/kernel/signal32.c | 46 +++++++++++++++++++++++++++--------------- arch/sparc/kernel/signal_32.c | 41 +++++++++++++++++++++++-------------- arch/sparc/kernel/signal_64.c | 31 ++++++++++++++++++---------- arch/sparc/kernel/sigutil_32.c | 9 ++++++++- arch/sparc/kernel/sigutil_64.c | 10 +++++++-- 5 files changed, 92 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 3c25241fa5cb..91cc2f4ae4d9 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || + ((unsigned long)fp) > 0x100000000ULL - fplen) + return true; + return false; +} + void do_sigreturn32(struct pt_regs *regs) { struct signal_frame32 __user *sf; compat_uptr_t fpu_save; compat_uptr_t rwin_save; - unsigned int psr; + unsigned int psr, ufp; unsigned int pc, npc; sigset_t set; compat_sigset_t seta; @@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs) sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) goto segv; - if (get_user(pc, &sf->info.si_regs.pc) || + if (__get_user(pc, &sf->info.si_regs.pc) || __get_user(npc, &sf->info.si_regs.npc)) goto segv; @@ -227,7 +244,7 @@ segv: asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) { struct rt_signal_frame32 __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; compat_uptr_t fpu_save; compat_uptr_t rwin_save; sigset_t set; @@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; - if (get_user(pc, &sf->regs.pc) || + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; + + if (__get_user(pc, &sf->regs.pc) || __get_user(npc, &sf->regs.npc)) goto segv; @@ -307,14 +329,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) - return 1; - return 0; -} - static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 52aa5e4ce5e7..c3c12efe0bc0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -60,10 +60,22 @@ struct rt_signal_frame { #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static inline bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) + return true; + + return false; +} + asmlinkage void do_sigreturn(struct pt_regs *regs) { + unsigned long up_psr, pc, npc, ufp; struct signal_frame __user *sf; - unsigned long up_psr, pc, npc; sigset_t set; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; @@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv_and_exit; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) goto segv_and_exit; - if (((unsigned long) sf) & 3) + if (ufp & 0x7) goto segv_and_exit; err = __get_user(pc, &sf->info.si_regs.pc); @@ -127,7 +142,7 @@ segv_and_exit: asmlinkage void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 0x03) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; err = __get_user(pc, &sf->regs.pc); @@ -178,15 +198,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static inline int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) - return 1; - - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 39aaec173f66..5ee930c48f4c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -234,6 +234,17 @@ do_sigsegv: goto out; } +/* Checks if the fp is valid. We always build rt signal frames which + * are 16-byte aligned, therefore we can always enforce that the + * restore frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp) +{ + if (((unsigned long) fp) & 15) + return true; + return false; +} + struct rt_signal_frame { struct sparc_stackf ss; siginfo_t info; @@ -246,8 +257,8 @@ struct rt_signal_frame { void do_rt_sigreturn(struct pt_regs *regs) { + unsigned long tpc, tnpc, tstate, ufp; struct rt_signal_frame __user *sf; - unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs) (regs->u_regs [UREG_FP] + STACK_BIAS); /* 1. 
Make sure we are not getting garbage from the user */ - if (((unsigned long) sf) & 3) + if (invalid_frame_pointer(sf)) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) goto segv; - err = get_user(tpc, &sf->regs.tpc); + if ((ufp + STACK_BIAS) & 0x7) + goto segv; + + err = __get_user(tpc, &sf->regs.tpc); err |= __get_user(tnpc, &sf->regs.tnpc); if (test_thread_flag(TIF_32BIT)) { tpc &= 0xffffffff; @@ -308,14 +325,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp) -{ - if (((unsigned long) fp) & 15) - return 1; - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 0f6eebe71e6c..e5fe8cef9a69 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c @@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { int err; + + if (((unsigned long) fpu) & 3) + return -EFAULT; + #ifdef CONFIG_SMP if (test_tsk_thread_flag(current, TIF_USEDFPU)) regs->psr &= ~PSR_EF; @@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 3) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index 387834a9c56a..36aadcbeac69 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c @@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) unsigned long fprs; int err; - err = __get_user(fprs, &fpu->si_fprs); + if (((unsigned long) fpu) & 7) + return -EFAULT; + + err = get_user(fprs, &fpu->si_fprs); fprs_write(0); regs->tstate &= ~TSTATE_PEF; if (fprs & FPRS_DL) @@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 7) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; -- cgit v1.2.3-58-ga151 From 7cafc0b8bf130f038b0ec2dcdd6a9de6dc59b65a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 28 May 2016 20:41:12 -0700 Subject: sparc64: Fix return from trap window fill crashes. We must handle data access exception as well as memory address unaligned exceptions from return from trap window fill faults, not just normal TLB misses. 
Otherwise we can get an OOPS that looks like this: ld-linux.so.2(36808): Kernel bad sw trap 5 [#1] CPU: 1 PID: 36808 Comm: ld-linux.so.2 Not tainted 4.6.0 #34 task: fff8000303be5c60 ti: fff8000301344000 task.ti: fff8000301344000 TSTATE: 0000004410001601 TPC: 0000000000a1a784 TNPC: 0000000000a1a788 Y: 00000002 Not tainted TPC: g0: fff8000024fc8248 g1: 0000000000db04dc g2: 0000000000000000 g3: 0000000000000001 g4: fff8000303be5c60 g5: fff800030e672000 g6: fff8000301344000 g7: 0000000000000001 o0: 0000000000b95ee8 o1: 000000000000012b o2: 0000000000000000 o3: 0000000200b9b358 o4: 0000000000000000 o5: fff8000301344040 sp: fff80003013475c1 ret_pc: 0000000000a1a77c RPC: l0: 00000000000007ff l1: 0000000000000000 l2: 000000000000005f l3: 0000000000000000 l4: fff8000301347e98 l5: fff8000024ff3060 l6: 0000000000000000 l7: 0000000000000000 i0: fff8000301347f60 i1: 0000000000102400 i2: 0000000000000000 i3: 0000000000000000 i4: 0000000000000000 i5: 0000000000000000 i6: fff80003013476a1 i7: 0000000000404d4c I7: Call Trace: [0000000000404d4c] user_rtt_fill_fixup+0x6c/0x7c The window trap handlers are slightly clever: the trap table entries for them are composed of two pieces of code. First comes the code that actually performs the window fill or spill trap handling, and then there are three instructions at the end which are for exception processing. The userland register window fill handler is: add %sp, STACK_BIAS + 0x00, %g1; \ ldxa [%g1 + %g0] ASI, %l0; \ mov 0x08, %g2; \ mov 0x10, %g3; \ ldxa [%g1 + %g2] ASI, %l1; \ mov 0x18, %g5; \ ldxa [%g1 + %g3] ASI, %l2; \ ldxa [%g1 + %g5] ASI, %l3; \ add %g1, 0x20, %g1; \ ldxa [%g1 + %g0] ASI, %l4; \ ldxa [%g1 + %g2] ASI, %l5; \ ldxa [%g1 + %g3] ASI, %l6; \ ldxa [%g1 + %g5] ASI, %l7; \ add %g1, 0x20, %g1; \ ldxa [%g1 + %g0] ASI, %i0; \ ldxa [%g1 + %g2] ASI, %i1; \ ldxa [%g1 + %g3] ASI, %i2; \ ldxa [%g1 + %g5] ASI, %i3; \ add %g1, 0x20, %g1; \ ldxa [%g1 + %g0] ASI, %i4; \ ldxa [%g1 + %g2] ASI, %i5; \ ldxa [%g1 + %g3] ASI, %i6; \ ldxa [%g1 + %g5] ASI, %i7; \ restored; \ retry; nop; nop; nop; nop; \ b,a,pt %xcc, fill_fixup_dax; \ b,a,pt %xcc, fill_fixup_mna; \ b,a,pt %xcc, fill_fixup; And the way this works is that if any of those memory accesses generate an exception, the exception handler can revector to one of those final three branch instructions depending upon which kind of exception the memory access took. In this way, the fault handler doesn't have to know if it was a spill or a fill that it's handling the fault for. It just always branches to the last instruction in the parent trap's handler. For example, for a regular fault, the code goes: winfix_trampoline: rdpr %tpc, %g3 or %g3, 0x7c, %g3 wrpr %g3, %tnpc done All window trap handlers are 0x80 aligned, so if we "or" 0x7c into the trap time program counter, we'll get that final instruction in the trap handler. On return from trap, we have to pull the register window in, but we do this by hand instead of just executing a "restore" instruction for several reasons, the largest being that from Niagara and onward we simply don't have enough levels in the trap stack to fully resolve all possible exception cases of a window fault when we are already at trap level 1 (which we enter to get ready to return from the original trap). This is executed inline via the FILL_*_RTRAP handlers. rtrap_64.S's code branches directly to these to do the window fill by hand if necessary. 
Now if you look at them, we'll see at the end: ba,a,pt %xcc, user_rtt_fill_fixup; ba,a,pt %xcc, user_rtt_fill_fixup; ba,a,pt %xcc, user_rtt_fill_fixup; And oops, all three cases are handled like a fault. This doesn't work because each of these trap types (data access exception, memory address unaligned, and faults) store their auxiliary info in different registers to pass on to the C handler which does the real work. So in the case where the stack was unaligned, the unaligned trap handler sets up the arg registers one way, and then we branched to the fault handler which expects them setup another way. So the FAULT_TYPE_* value ends up basically being garbage, and randomly would generate the backtrace seen above. Reported-by: Nick Alcock Signed-off-by: David S. Miller --- arch/sparc/include/asm/head_64.h | 4 ++ arch/sparc/include/asm/ttable.h | 8 ++-- arch/sparc/kernel/Makefile | 1 + arch/sparc/kernel/rtrap_64.S | 57 ++++------------------- arch/sparc/kernel/urtt_fill.S | 98 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 116 insertions(+), 52 deletions(-) create mode 100644 arch/sparc/kernel/urtt_fill.S (limited to 'arch') diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 10e9dabc4c41..f0700cfeedd7 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h @@ -15,6 +15,10 @@ #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) +#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) +#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) + #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 #define __SERRANO_ID 0x003e0022 diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h index 71b5a67522ab..781b9f1dbdc2 100644 --- a/arch/sparc/include/asm/ttable.h +++ b/arch/sparc/include/asm/ttable.h @@ -589,8 +589,8 @@ user_rtt_fill_64bit: \ restored; \ nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; @@ -652,8 +652,8 @@ user_rtt_fill_32bit: \ restored; \ nop; nop; nop; nop; nop; \ nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 7cf9c6ea3f1f..fdb13327fded 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg CFLAGS_REMOVE_pcr.o := -pg endif +obj-$(CONFIG_SPARC64) += urtt_fill.o obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o obj-$(CONFIG_SPARC32) += etrap_32.o obj-$(CONFIG_SPARC32) += rtrap_32.o diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index d08bdaffdbfc..216948ca4382 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -14,10 +14,6 @@ #include #include -#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) -#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) -#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) - #ifdef CONFIG_CONTEXT_TRACKING # define SCHEDULE_USER schedule_user #else @@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 wrpr %g1, %cwp ba,a,pt 
%xcc, user_rtt_fill_64bit -user_rtt_fill_fixup: - rdpr %cwp, %g1 - add %g1, 1, %g1 - wrpr %g1, 0x0, %cwp - - rdpr %wstate, %g2 - sll %g2, 3, %g2 - wrpr %g2, 0x0, %wstate - - /* We know %canrestore and %otherwin are both zero. */ - - sethi %hi(sparc64_kern_pri_context), %g2 - ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 - mov PRIMARY_CONTEXT, %g1 - -661: stxa %g2, [%g1] ASI_DMMU - .section .sun4v_1insn_patch, "ax" - .word 661b - stxa %g2, [%g1] ASI_MMU - .previous - - sethi %hi(KERNBASE), %g1 - flush %g1 +user_rtt_fill_fixup_dax: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 1, %g3 - or %g4, FAULT_CODE_WINFIXUP, %g4 - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] +user_rtt_fill_fixup_mna: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 2, %g3 - mov %g6, %l1 - wrpr %g0, 0x0, %tl - -661: nop - .section .sun4v_1insn_patch, "ax" - .word 661b - SET_GL(0) - .previous - - wrpr %g0, RTRAP_PSTATE, %pstate - - mov %l1, %g6 - ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - call do_sparc64_fault - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop +user_rtt_fill_fixup: + ba,pt %xcc, user_rtt_fill_fixup_common + clr %g3 user_rtt_pre_restore: add %g1, 1, %g1 diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000000..5604a2b051d4 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S @@ -0,0 +1,98 @@ +#include +#include +#include +#include +#include + + .text + .align 8 + .globl user_rtt_fill_fixup_common +user_rtt_fill_fixup_common: + rdpr %cwp, %g1 + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + + rdpr %wstate, %g2 + sll %g2, 3, %g2 + wrpr %g2, 0x0, %wstate + + /* We know %canrestore and %otherwin are both zero. */ + + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + sethi %hi(KERNBASE), %g1 + flush %g1 + + mov %g4, %l4 + mov %g5, %l5 + brnz,pn %g3, 1f + mov %g3, %l3 + + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] +1: + mov %g6, %l1 + wrpr %g0, 0x0, %tl + +661: nop + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + + wrpr %g0, RTRAP_PSTATE, %pstate + + mov %l1, %g6 + ldx [%g6 + TI_TASK], %g4 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) + + brnz,pn %l3, 1f + nop + + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +1: cmp %g3, 2 + bne,pn %xcc, 2f + nop + + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 + call sun4v_do_mna + mov %l5, %o1 + ba,a,pt %xcc, rtrap +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + nop + ba,a,pt %xcc, rtrap + +2: sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap + +1: call spitfire_data_access_exception + nop + ba,a,pt %xcc, rtrap -- cgit v1.2.3-58-ga151 From 871e178e0f2c4fa788f694721a10b4758d494ce1 Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Thu, 7 Apr 2016 16:28:26 +1000 Subject: powerpc/pseries/eeh: Handle RTAS delay requests in configure_bridge In the "ibm,configure-pe" and "ibm,configure-bridge" RTAS calls, the spec states that values of 9900-9905 can be returned, indicating that software should delay for 10^x (where x is the last 
digit, i.e. 990x) milliseconds and attempt the call again. Currently, the kernel doesn't know about this, and respecting it fixes some PCI failures when the hypervisor is busy. The delay is capped at 0.2 seconds. Cc: # 3.10+ Signed-off-by: Russell Currey Acked-by: Gavin Shan Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/eeh_pseries.c | 51 ++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ac3ffd97e059..405baaf96864 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) { int config_addr; int ret; + /* Waiting 0.2s maximum before skipping configuration */ + int max_wait = 200; /* Figure out the PE address */ config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; - /* Use new configure-pe function, if supported */ - if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_pe, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else { - return -EFAULT; - } + while (max_wait > 0) { + /* Use new configure-pe function, if supported */ + if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { + ret = rtas_call(ibm_configure_pe, 3, 1, NULL, + config_addr, BUID_HI(pe->phb->buid), + BUID_LO(pe->phb->buid)); + } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { + ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, + config_addr, BUID_HI(pe->phb->buid), + BUID_LO(pe->phb->buid)); + } else { + return -EFAULT; + } - if (ret) - pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", - __func__, pe->phb->global_number, pe->addr, ret); + if (!ret) + return ret; + + /* + * If RTAS returns a delay value that's above 100ms, cut it + * down to 100ms in case firmware made a mistake. For more + * on how these delay values work see rtas_busy_delay_time + */ + if (ret > RTAS_EXTENDED_DELAY_MIN+2 && + ret <= RTAS_EXTENDED_DELAY_MAX) + ret = RTAS_EXTENDED_DELAY_MIN+2; + + max_wait -= rtas_busy_delay_time(ret); + + if (max_wait < 0) + break; + + rtas_busy_delay(ret); + } + pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", + __func__, pe->phb->global_number, pe->addr, ret); return ret; } -- cgit v1.2.3-58-ga151 From bd000b82e86503d5e8b9e6d40a3257bc9dddb96d Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Thu, 7 Apr 2016 16:28:27 +1000 Subject: powerpc/pseries/eeh: Refactor the configure_bridge RTAS tokens The RTAS calls "ibm,configure-pe" and "ibm,configure-bridge" perform the same actions, however the former can skip configuration if unnecessary. The existing code treats them as different tokens even though only one will ever be called. Refactor this by making a single token that is assigned during init. 
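Both configure-bridge patches lean on the RTAS extended-delay convention described above: a 990x status asks the caller to wait 10^x milliseconds and then retry. A minimal decoding sketch follows; rtas_extended_delay_ms() is a hypothetical helper for illustration only, not part of either patch (the kernel's real helpers are rtas_busy_delay_time() and rtas_busy_delay()):

	/* Hypothetical decoder for RTAS extended delay statuses 9900..9905.
	 * Status 990x requests a wait of 10^x ms (1 ms up to 100 s); the
	 * patch above additionally clamps anything beyond 9902 to 100 ms.
	 */
	static unsigned int rtas_extended_delay_ms(int status)
	{
		unsigned int ms = 1;

		while (status-- > 9900)
			ms *= 10;	/* 9900 -> 1 ms, ..., 9905 -> 100000 ms */
		return ms;
	}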
Signed-off-by: Russell Currey Acked-by: Gavin Shan Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/eeh_pseries.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 405baaf96864..3998e0f9a03b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2; static int ibm_slot_error_detail; static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; -static int ibm_configure_bridge; static int ibm_configure_pe; /* @@ -81,7 +80,14 @@ static int pseries_eeh_init(void) ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); ibm_configure_pe = rtas_token("ibm,configure-pe"); - ibm_configure_bridge = rtas_token("ibm,configure-bridge"); + + /* + * ibm,configure-pe and ibm,configure-bridge have the same semantics, + * however ibm,configure-pe can be faster. If we can't find + * ibm,configure-pe then fall back to using ibm,configure-bridge. + */ + if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) + ibm_configure_pe = rtas_token("ibm,configure-bridge"); /* * Necessary sanity check. We needn't check "get-config-addr-info" @@ -93,8 +99,7 @@ static int pseries_eeh_init(void) (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || - (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && - ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) { + ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { pr_info("EEH functionality not supported\n"); return -EINVAL; } @@ -624,18 +629,9 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) config_addr = pe->addr; while (max_wait > 0) { - /* Use new configure-pe function, if supported */ - if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_pe, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else { - return -EFAULT; - } + ret = rtas_call(ibm_configure_pe, 3, 1, NULL, + config_addr, BUID_HI(pe->phb->buid), + BUID_LO(pe->phb->buid)); if (!ret) return ret; -- cgit v1.2.3-58-ga151 From ab2e1b89230fa80328262c91d2d0a539a2790d6f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 31 May 2016 11:00:09 +0100 Subject: Revert "arm64: hugetlb: partial revert of 66b3923a1a0f" This reverts commit ff7925848b50050732ac0401e0acf27e8b241d7b. Now that the contiguous-hint hugetlb regression has been debugged and fixed upstream by 66ee95d16a7f ("mm: exclude HugeTLB pages from THP page_mapped() logic"), we can revert the previous partial revert of this feature. 
Signed-off-by: Will Deacon --- arch/arm64/mm/hugetlbpage.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index aa8aee7d6929..2e49bd252fe7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt) hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); } else if (ps == PUD_SIZE) { hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + } else if (ps == (PAGE_SIZE * CONT_PTES)) { + hugetlb_add_hstate(CONT_PTE_SHIFT); + } else if (ps == (PMD_SIZE * CONT_PMDS)) { + hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); } else { hugetlb_bad_size(); pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); @@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); + +#ifdef CONFIG_ARM64_64K_PAGES +static __init int add_default_hugepagesz(void) +{ + if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) + hugetlb_add_hstate(CONT_PMD_SHIFT); + return 0; +} +arch_initcall(add_default_hugepagesz); +#endif -- cgit v1.2.3-58-ga151 From d23fac2b27d94aeb7b65536a50d32bfdc21fe01e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 12 May 2016 13:26:44 +0200 Subject: powerpc: Fix definition of SIAR and SDAR registers The SIAR and SDAR registers are available twice, one time as SPRs 780 / 781 (unprivileged, but read-only), and one time as the SPRs 796 / 797 (privileged, but read and write). The Linux kernel code currently uses the unprivileged SPRs - while this is OK for reading, writing to that register of course does not work. Since the KVM code tries to write to this register, too (see the mtspr in book3s_hv_rmhandlers.S), the contents of this register sometimes get lost for the guests, e.g. during migration of a VM. To fix this issue, simply switch to the privileged SPR numbers instead. Cc: stable@vger.kernel.org Signed-off-by: Thomas Huth Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c1e82e968506..c58a3e76caf1 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -754,13 +754,13 @@ #define SPRN_PMC6 792 #define SPRN_PMC7 793 #define SPRN_PMC8 794 -#define SPRN_SIAR 780 -#define SPRN_SDAR 781 #define SPRN_SIER 784 #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ +#define SPRN_SIAR 796 +#define SPRN_SDAR 797 #define SPRN_TACR 888 #define SPRN_TCSCR 889 #define SPRN_CSIGR 890 -- cgit v1.2.3-58-ga151 From 8dd75ccb571f3c92c48014b3dabd3d51a115ab41 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 12 May 2016 13:29:11 +0200 Subject: powerpc: Use privileged SPR number for MMCR2 We are already using the privileged versions of MMCR0, MMCR1 and MMCRA in the kernel, so for MMCR2, we should better use the privileged versions, too, to be consistent. 
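To make the failure mode behind both of these SPR changes concrete, here is a sketch only, assuming the mfspr()/mtspr() accessors from <asm/reg.h>; restore_guest_siar() is a hypothetical helper, not code from either patch:

	/* SPRs 780/781 are the problem-state, read-only aliases of
	 * SIAR/SDAR; SPRs 796/797 are the privileged, writable registers.
	 * Only the privileged numbers can restore a guest's sampled state.
	 */
	static inline void restore_guest_siar(unsigned long guest_siar)
	{
		mtspr(796, guest_siar);	/* privileged SIAR: write takes effect */
		/* mtspr(780, guest_siar) would be silently dropped, losing
		 * the guest's SIAR value: SPR 780 is read-only.
		 */
	}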
Fixes: 240686c13687 ("powerpc: Initialise PMU related regs on Power8") Cc: stable@vger.kernel.org # v3.10+ Suggested-by: Paul Mackerras Signed-off-by: Thomas Huth Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c58a3e76caf1..a0948f40bc7b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -717,7 +717,7 @@ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 -#define SPRN_MMCR2 769 +#define SPRN_MMCR2 785 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL -- cgit v1.2.3-58-ga151 From db413b51c0d023b336634ffbf5eb29128245df8d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 24 May 2016 18:55:40 +0100 Subject: arm64: Remove orphaned __addr_ok() definition Since commit 12a0ef7b0ac3 ("arm64: use generic strnlen_user and strncpy_from_user functions"), the definition of __addr_ok() has been languishing unused; eradicate the sucker. CC: Catalin Marinas Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/include/asm/uaccess.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0685d74572af..9e397a542756 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs) #define segment_eq(a, b) ((a) == (b)) -/* - * Return 1 if addr < current->addr_limit, 0 otherwise. - */ -#define __addr_ok(addr) \ -({ \ - unsigned long flag; \ - asm("cmp %1, %0; cset %0, lo" \ - : "=&r" (flag) \ - : "r" (addr), "0" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) - /* * Test whether a block of memory is a valid user space address. * Returns 1 if the range is valid, 0 otherwise. -- cgit v1.2.3-58-ga151 From 604c8e676e609da9f17a2abb36f2b2067bb86561 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 May 2016 12:20:36 +0100 Subject: arm64: enable CONFIG_SET_MODULE_RONX by default The SET_MODULE_RONX protections are effectively the same as the DEBUG_RODATA protections we enabled by default back in commit 57efac2f7108e325 ("arm64: enable CONFIG_DEBUG_RODATA by default"). It seems unusual to have one but not the other. As evidenced by the help text, the rationale appears to be that SET_MODULE_RONX interacts poorly with tracing and patching, but both of these make use of the insn framework, which takes SET_MODULE_RONX into account. Any remaining issues are bugs which should be fixed regardless of the default state of the option. This patch enables DEBUG_SET_MODULE_RONX by default, and replaces the help text with a new wording derived from the DEBUG_RODATA help text, which better describes the functionality. Previously, the DEBUG_RODATA entry was inconsistently indented with spaces, which are replaced with tabs as with the other Kconfig entries. Additionally, the wording of recommended defaults is made consistent for all options. These are placed in a new paragraph, unquoted, as a full sentence (with a period/full stop) as this appears to be the most common form per $(git grep 'in doubt'). 
Cc: Catalin Marinas Cc: Laura Abbott Acked-by: Kees Cook Acked-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/Kconfig.debug | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 710fde4ad0f0..0cc758cdd0dc 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -12,7 +12,8 @@ config ARM64_PTDUMP who are working in architecture specific areas of the kernel. It is probably not a good idea to enable this feature in a production kernel. - If in doubt, say "N" + + If in doubt, say N. config PID_IN_CONTEXTIDR bool "Write the current PID to the CONTEXTIDR register" @@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET value. config DEBUG_SET_MODULE_RONX - bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES - help - This option helps catch unintended modifications to loadable - kernel module's text and read-only data. It also prevents execution - of module data. Such protection may interfere with run-time code - patching and dynamic kernel tracing - and they might also protect - against certain classes of kernel exploits. - If in doubt, say "N". + bool "Set loadable kernel module data as NX and text as RO" + depends on MODULES + default y + help + If this is set, kernel module text and rodata will be made read-only. + This is to help catch accidental or malicious attempts to change the + kernel's executable code. + + If in doubt, say Y. config DEBUG_RODATA bool "Make kernel text and rodata read-only" @@ -56,7 +57,7 @@ config DEBUG_RODATA is to help catch accidental or malicious attempts to change the kernel's executable code. - If in doubt, say Y + If in doubt, say Y. config DEBUG_ALIGN_RODATA depends on DEBUG_RODATA @@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA alignment and potentially wasted space. Turn on this option if performance is more important than memory pressure. - If in doubt, say N + If in doubt, say N. source "drivers/hwtracing/coresight/Kconfig" -- cgit v1.2.3-58-ga151 From fa89c77e891917b5913f9be080f9131a9457bb3e Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 25 May 2016 15:26:34 +0100 Subject: KVM: arm/arm64: vgic-v3: Clear all dirty LRs When saving the state of the list registers, it is critical to reset them to zero, as we could otherwise leave unexpected EOI interrupts pending for virtual level interrupts. Cc: stable@vger.kernel.org # v4.6+ Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index fff7cd42b3a3..3129df9d3a73 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -190,12 +190,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) continue; - if (cpu_if->vgic_elrsr & (1 << i)) { + if (cpu_if->vgic_elrsr & (1 << i)) cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; - continue; - } + else + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); - cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); __gic_v3_set_lr(0, i); } -- cgit v1.2.3-58-ga151 From b34f2bcbf59fe2d27c37d6553c33611754677103 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 25 May 2016 15:26:37 +0100 Subject: arm64: KVM: Make ICC_SRE_EL1 access return the configured SRE value When we trap ICC_SRE_EL1, we handle it as RAZ/WI. 
It would be more correct to actually make it RO, and return the configured value when read. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/kvm/sys_regs.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7bbe3ff02602..a57d650f552c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, return true; } +static bool access_gic_sre(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; + return true; +} + static bool trap_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_gic_sgi }, /* ICC_SRE_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), - trap_raz_wi }, + access_gic_sre }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), -- cgit v1.2.3-58-ga151 From a057001e9e446f2195c34bc55c57e5cf353c99d6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 25 May 2016 15:26:38 +0100 Subject: arm64: KVM: vgic-v3: Prevent the guest from messing with ICC_SRE_EL1 Both our GIC emulations are "strict", in the sense that we either emulate a GICv2 or a GICv3, and not a GICv3 with GICv2 legacy support. But when running on a GICv3 host, we still allow the guest to tinker with the ICC_SRE_EL1 register during its time slice: it can switch SRE off, observe that it is off, and yet on the next world switch, find the SRE bit to be set again. Not very nice. An obvious solution is to always trap accesses to ICC_SRE_EL1 (by clearing ICC_SRE_EL2.Enable), and to let the handler return the programmed value on a read, or ignore the write. That way, the guest can always observe that our GICv3 is SRE==1 only. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 3129df9d3a73..40c3b4c3d125 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -313,10 +313,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) * Prevent the guest from touching the GIC system registers if * SRE isn't enabled for GICv3 emulation. */ - if (!cpu_if->vgic_sre) { - write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, - ICC_SRE_EL2); - } + write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, + ICC_SRE_EL2); } void __hyp_text __vgic_v3_init_lrs(void) -- cgit v1.2.3-58-ga151 From c58513284029229842844929ddeaca44d013c128 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 25 May 2016 15:26:39 +0100 Subject: arm64: KVM: vgic-v3: Relax synchronization when SRE==1 The GICv3 backend of the vgic is quite barrier heavy, in order to ensure synchronization of the system registers and the memory mapped view for a potential GICv2 guest. But when the guest is using a GICv3 model, there is absolutely no need to execute all these heavy barriers, and it is actually beneficial to avoid them altogether. This patch makes the synchronization conditional, and ensures that we do not change the EL1 SRE settings if we do not need to. 
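Taken together, these three ICC_SRE_EL1 patches give the guest a simple contract, sketched below under the assumption that vgic_sre holds the emulated SRE value (as in access_gic_sre() above); the handler shape is simplified, not the literal kernel code:

	/* With ICC_SRE_EL2.Enable kept clear, every guest access to
	 * ICC_SRE_EL1 traps to EL2 and the register behaves as read-only:
	 * writes are dropped, reads return the configured value.
	 */
	static bool emulate_icc_sre_el1(struct kvm_vcpu *vcpu, u64 *regval,
					bool is_write)
	{
		if (is_write)
			return true;	/* ignored: the guest cannot clear SRE */
		*regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; /* always 1 here */
		return true;
	}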
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 40c3b4c3d125..5f8f80b4a224 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) * Make sure stores to the GIC via the memory mapped interface * are now visible to the system register interface. */ - dsb(st); + if (!cpu_if->vgic_sre) + dsb(st); cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); @@ -235,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) val = read_gicreg(ICC_SRE_EL2); write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); - isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ - write_gicreg(1, ICC_SRE_EL1); + + if (!cpu_if->vgic_sre) { + /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ + isb(); + write_gicreg(1, ICC_SRE_EL1); + } } void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) @@ -255,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) * been actually programmed with the value we want before * starting to mess with the rest of the GIC. */ - write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); - isb(); + if (!cpu_if->vgic_sre) { + write_gicreg(0, ICC_SRE_EL1); + isb(); + } val = read_gicreg(ICH_VTR_EL2); max_lr_idx = vtr_to_max_lr_idx(val); @@ -305,8 +312,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) * (re)distributors. This ensure the guest will read the * correct values from the memory-mapped interface. */ - isb(); - dsb(sy); + if (!cpu_if->vgic_sre) { + isb(); + dsb(sy); + } vcpu->arch.vgic_cpu.live_lrs = live_lrs; /* -- cgit v1.2.3-58-ga151 From e47b020a323d1b2a7b1e9aac86e99eae19463630 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 31 May 2016 15:55:03 +0100 Subject: arm64: Provide "model name" in /proc/cpuinfo for PER_LINUX32 tasks This patch brings the PER_LINUX32 /proc/cpuinfo format more in line with the 32-bit ARM one by providing an additional line: model name : ARMv8 Processor rev X (v8l) Cc: Acked-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/elf.h | 4 ++-- arch/arm64/kernel/cpuinfo.c | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 7a09c48c0475..579b6e654f2d 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) #endif -#ifdef CONFIG_COMPAT - #ifdef __AARCH64EB__ #define COMPAT_ELF_PLATFORM ("v8b") #else #define COMPAT_ELF_PLATFORM ("v8l") #endif +#ifdef CONFIG_COMPAT + #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. 
*/ diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 3808470486f3..c173d329397f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -22,6 +22,8 @@ #include #include +#include +#include #include #include #include @@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = { static int c_show(struct seq_file *m, void *v) { int i, j; + bool compat = personality(current->personality) == PER_LINUX32; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); @@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v) * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); + if (compat) + seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", + MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000UL/HZ), @@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v) * software which does already (at least for 32-bit). */ seq_puts(m, "Features\t:"); - if (personality(current->personality) == PER_LINUX32) { + if (compat) { #ifdef CONFIG_COMPAT for (j = 0; compat_hwcap_str[j]; j++) if (compat_elf_hwcap & (1 << j)) -- cgit v1.2.3-58-ga151 From d6c886006c948141f24e84acebfb757d3200f20c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 31 May 2016 11:56:29 +0530 Subject: powerpc/mm/radix: Update LPCR only if it is powernv LPCR cannot be updated when running in guest mode. Fixes: 2bfd65e45e87 ("powerpc/mm/radix: Add radix callbacks for early init routines") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 18b2c11604fa..c939e6e57a9e 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -296,11 +296,6 @@ found: void __init radix__early_init_mmu(void) { unsigned long lpcr; - /* - * setup LPCR UPRT based on mmu_features - */ - lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); #ifdef CONFIG_PPC_64K_PAGES /* PAGE_SIZE mappings */ @@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void) __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; radix_init_page_sizes(); - if (!firmware_has_feature(FW_FEATURE_LPAR)) + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); radix_init_partition_table(); + } radix_init_pgtable(); } @@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void) { unsigned long lpcr; /* - * setup LPCR UPRT based on mmu_features + * update partition table control register and UPRT */ - lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); - /* - * update partition table control register, 64 K size. - */ - if (!firmware_has_feature(FW_FEATURE_LPAR)) + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); + mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); + } } void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, -- cgit v1.2.3-58-ga151 From dc47c0c1f8099fccb2c1e2f3775855066a9e4484 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 31 May 2016 11:56:30 +0530 Subject: powerpc/mm/hash: Fix the reference bit update when handling hash fault When we converted the asm routines to C functions, we missed updating HPTE_R_R based on _PAGE_ACCESSED. 
The asm code used to copy over the lower bits from the pte via: andi. r3,r30,0x1fe /* Get basic set of flags */ We also update the code such that we won't always update the Change bit ('C' bit). This was added by commit c5cf0e30bf3d8 ("powerpc: Fix buglet with MMU hash management"). With hash64, we need to make sure that hardware doesn't do a pte update directly. This is because we do end up with entries in the TLB with no hash page table entry. This happens because when we find a hash bucket full, we "evict" a more or less random entry from it. When we do that we don't invalidate the TLB (hpte_remove) because we assume the old translation is still technically "valid". For more info look at commit 0608d692463 ("powerpc/mm: Always invalidate tlb on hpte invalidate and update"). Thus it's critical that valid hash PTEs always have the reference bit set and writeable ones have the change bit set. We do this by hashing a non-dirty Linux PTE as read-only and always setting _PAGE_ACCESSED (and thus R) when hashing anything else in. Any attempt by Linux at clearing those bits also removes the corresponding hash entry. Commit c5cf0e30bf3d8 did that for the 'C' bit by enabling the 'C' bit always. We don't really need to do that, because we never map a RW pte entry without setting the 'C' bit. On a read fault on a RW pte entry, we still map it read-only, hence a store update in the page will still cause a hash pte fault. This patch reverts the part of commit c5cf0e30bf3d8 ("[PATCH] powerpc: Fix buglet with MMU hash management") and retains the updatepp part. - If we hit the updatepp path on native, the old code without that commit would fail to set C because native_hpte_updatepp() was implemented to filter the same bits as H_PROTECT and not let C through; thus we would "upgrade" a RO HPTE to RW without setting C, causing the bug. So the real fix in that commit was the change to native_hpte_updatepp(). Fixes: 89ff725051d1 ("powerpc/mm: Convert __hash_page_64K to C") Cc: stable@vger.kernel.org # v4.5+ Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/hash_utils_64.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 59268969a0bc..b2740c67e172 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { }, }; +/* + * 'R' and 'C' update notes: + * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* + * create writeable HPTEs without C set, because the hcall H_PROTECT + * that we use in that case will not update C + * - The above is however not a problem, because we also don't do that + * fancy "no flush" variant of eviction and we use H_REMOVE which will + * do the right thing and thus we don't have the race I described earlier + * + * - Under bare metal, we do have the race, so we need R and C set + * - We make sure R is always set and never lost + * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping + */ unsigned long htab_convert_pte_flags(unsigned long pteflags) { unsigned long rflags = 0; @@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= 0x1; } /* - * Always add "C" bit for perf. Memory coherence is always enabled + * We can't allow hardware to update hpte bits.
Hence always + * set 'R' bit and set 'C' if it is a write fault + * Memory coherence is always enabled */ - rflags |= HPTE_R_C | HPTE_R_M; + rflags |= HPTE_R_R | HPTE_R_M; + + if (pteflags & _PAGE_DIRTY) + rflags |= HPTE_R_C; /* * Add in WIG bits */ -- cgit v1.2.3-58-ga151 From 157d4d0620879b7d89ca1e3cd7bf0be1e29be198 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 31 May 2016 11:56:31 +0530 Subject: powerpc/mm/radix: Add missing tlb flush This should not have any impact on hash, because hash does tlb invalidate with every pte update and we don't implement flush_tlb_* functions for hash. With radix we should make an explicit call to flush tlb outside pte update. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-book3s64.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index eb4451144746..670318766545 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, changed = !pmd_same(*(pmdp), entry); if (changed) { __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); - /* - * Since we are not supporting SW TLB systems, we don't - * have any thing similar to flush_tlb_page_nohash() - */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; } -- cgit v1.2.3-58-ga151 From 7cc851039d643a2ee7df4d18177150f2c3a484f5 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 31 May 2016 07:51:17 +0200 Subject: powerpc/pseries: Add POWER8NVL support to ibm,client-architecture-support call If we do not provide the PVR for POWER8NVL, a guest on this system currently ends up in PowerISA 2.06 compatibility mode on KVM, since QEMU does not provide a generic PowerISA 2.07 mode yet. So some new instructions from POWER8 (like "mtvsrd") get disabled for the guest, resulting in crashes when using code compiled explicitly for POWER8 (e.g. with the "-mcpu=power8" option of GCC). Fixes: ddee09c099c3 ("powerpc: Add PVR for POWER8NVL processor") Cc: stable@vger.kernel.org # v4.0+ Signed-off-by: Thomas Huth Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index da5192590c44..ccd2037c797f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = { W(0xffff0000), W(0x003e0000), /* POWER6 */ W(0xffff0000), W(0x003f0000), /* POWER7 */ W(0xffff0000), W(0x004b0000), /* POWER8E */ + W(0xffff0000), W(0x004c0000), /* POWER8NVL */ W(0xffff0000), W(0x004d0000), /* POWER8 */ W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ -- cgit v1.2.3-58-ga151 From 10fdf8513f776c8b2588bb7b924fb243ae2462d6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 1 Jun 2016 18:48:20 +0100 Subject: arm64: unistd32.h: wire up missing syscalls for compat tasks We're missing entries for mlock2, copy_file_range, preadv2 and pwritev2 in our compat syscall table, so hook them up. Only the last two need compat wrappers. 
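For background, a sketch of why the last two need wrappers (illustrative only; the exact names and argument order here are assumptions, not part of this patch): a 32-bit task passes the 64-bit file position of preadv2/pwritev2 split across two registers, so the compat entry point has to merge the halves before the common code runs, roughly:

	COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
			       const struct compat_iovec __user *, vec,
			       compat_ulong_t, vlen, u32, pos_low,
			       u32, pos_high, int, flags)
	{
		/* reassemble the loff_t that arrived in two registers */
		loff_t pos = ((loff_t)pos_high << 32) | pos_low;
		/* ...then continue as the native syscall would... */
	}

mlock2 and copy_file_range only take register-sized arguments, so the native entry points can be wired up directly.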
Signed-off-by: Will Deacon --- arch/arm64/include/asm/unistd.h | 2 +- arch/arm64/include/asm/unistd32.h | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 41e58fe3c041..e78ac26324bd 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 390 +#define __NR_compat_syscalls 394 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 5b925b761a2a..b7e8ef16ff0d 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat) __SYSCALL(__NR_userfaultfd, sys_userfaultfd) #define __NR_membarrier 389 __SYSCALL(__NR_membarrier, sys_membarrier) +#define __NR_mlock2 390 +__SYSCALL(__NR_mlock2, sys_mlock2) +#define __NR_copy_file_range 391 +__SYSCALL(__NR_copy_file_range, sys_copy_file_range) +#define __NR_preadv2 392 +__SYSCALL(__NR_preadv2, compat_sys_preadv2) +#define __NR_pwritev2 393 +__SYSCALL(__NR_pwritev2, compat_sys_pwritev2) /* * Please add new compat syscalls above this comment and update -- cgit v1.2.3-58-ga151 From e2dfb4b880146bfd4b6aa8e138c0205407cebbaf Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 30 May 2016 23:14:56 +0100 Subject: ARM: fix PTRACE_SETVFPREGS on SMP systems PTRACE_SETVFPREGS fails to properly mark the VFP register set to be reloaded, because it undoes one of the effects of vfp_flush_hwstate(). Specifically vfp_flush_hwstate() sets thread->vfpstate.hard.cpu to an invalid CPU number, but vfp_set() overwrites this with the original CPU number, thereby rendering the hardware state as apparently "valid", even though the software state is more recent. Fix this by reverting the previous change. Cc: Fixes: 8130b9d7b9d8 ("ARM: 7308/1: vfp: flush thread hwstate before copying ptrace registers") Acked-by: Will Deacon Tested-by: Simon Marchi Signed-off-by: Russell King --- arch/arm/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ef9119f7462e..4d9375814b53 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - vfp_flush_hwstate(thread); thread->vfpstate.hard = new_vfp; + vfp_flush_hwstate(thread); return 0; } -- cgit v1.2.3-58-ga151 From b19ee2ff3b287fea48a2896a381e31319394fe58 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Wed, 11 May 2016 08:04:29 -0700 Subject: KVM: x86: avoid write-tearing of TDP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In theory, nothing prevents the compiler from write-tearing PTEs, i.e. splitting PTE writes. These partially-modified PTEs can be fetched by other cores and cause mayhem. I have not really encountered such a case in real life, but it does seem possible. For example, the compiler may try to do something creative for kvm_set_pte_rmapp() and perform multiple writes to the PTE.
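To illustrate the hazard (a sketch, not code from this patch): on a 64-bit build the compiler may legally turn a plain PTE store into two narrower stores, conceptually:

	*sptep = spte;				/* may be compiled as: */
	((u32 *)sptep)[0] = (u32)spte;		/* low half lands first... */
	((u32 *)sptep)[1] = (u32)(spte >> 32);	/* ...torn value visible here */

Another core walking the TDP between the two stores fetches a half-written PTE. WRITE_ONCE(*sptep, spte) forces a single volatile access and forbids this transformation.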
Signed-off-by: Nadav Amit Signed-off-by: Radim Krčmář --- arch/x86/kvm/mmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24e800116ab4..def97b3a392b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte) #ifdef CONFIG_X86_64 static void __set_spte(u64 *sptep, u64 spte) { - *sptep = spte; + WRITE_ONCE(*sptep, spte); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) { - *sptep = spte; + WRITE_ONCE(*sptep, spte); } static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) @@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte) */ smp_wmb(); - ssptep->spte_low = sspte.spte_low; + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) @@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte) ssptep = (union split_spte *)sptep; sspte = (union split_spte)spte; - ssptep->spte_low = sspte.spte_low; + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); /* * If we map the spte from present to nonpresent, we should clear -- cgit v1.2.3-58-ga151 From 0c2df2a1affd183ba9c114915f42a2d464b4f58f Mon Sep 17 00:00:00 2001 From: Dmitry Bilunov Date: Tue, 31 May 2016 17:38:24 +0300 Subject: KVM: Handle MSR_IA32_PERF_CTL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Intel CPUs with the Turbo Boost feature implement an MSR to provide a control interface via rdmsr/wrmsr instructions. One could detect the presence of this feature by issuing one of these instructions and handling the #GP exception which is generated in case the referenced MSR is not implemented by the CPU. KVM's vCPU model behaves exactly as a real CPU in this case by injecting a fault when MSR_IA32_PERF_CTL is accessed (which KVM does not support). However, some operating systems use this register during an early boot stage in which their kernel is not capable of handling #GP correctly, causing a double fault (#DF) and finally a triple fault, effectively resetting the vCPU. This patch implements a dummy handler for MSR_IA32_PERF_CTL to avoid the crashes. Signed-off-by: Dmitry Bilunov Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c805cf494154..d0a5b4b4e64d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: + case MSR_IA32_PERF_CTL: msr_info->data = 0; break; case MSR_K7_EVNTSEL0 ...
MSR_K7_EVNTSEL3: -- cgit v1.2.3-58-ga151 From b21629da120dd6145d14dbd6d028e1bba680a92b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 1 Jun 2016 14:09:18 +0200 Subject: kvm: x86: avoid warning on repeated KVM_SET_TSS_ADDR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Found by syzkaller: WARNING: CPU: 3 PID: 15175 at arch/x86/kvm/x86.c:7705 __x86_set_memory_region+0x1dc/0x1f0 [kvm]() CPU: 3 PID: 15175 Comm: a.out Tainted: G W 4.4.6-300.fc23.x86_64 #1 Hardware name: LENOVO 2325F51/2325F51, BIOS G2ET32WW (1.12 ) 05/30/2012 0000000000000286 00000000950899a7 ffff88011ab3fbf0 ffffffff813b542e 0000000000000000 ffffffffa0966496 ffff88011ab3fc28 ffffffff810a40f2 00000000000001fd 0000000000003000 ffff88014fc50000 0000000000000000 Call Trace: [] dump_stack+0x63/0x85 [] warn_slowpath_common+0x82/0xc0 [] warn_slowpath_null+0x1a/0x20 [] __x86_set_memory_region+0x1dc/0x1f0 [kvm] [] x86_set_memory_region+0x3b/0x60 [kvm] [] vmx_set_tss_addr+0x3c/0x150 [kvm_intel] [] kvm_arch_vm_ioctl+0x654/0xbc0 [kvm] [] kvm_vm_ioctl+0x9a/0x6f0 [kvm] [] do_vfs_ioctl+0x298/0x480 [] SyS_ioctl+0x79/0x90 [] entry_SYSCALL_64_fastpath+0x12/0x71 Testcase: #include #include #include #include #include long r[8]; int main() { memset(r, -1, sizeof(r)); r[2] = open("/dev/kvm", O_RDONLY|O_TRUNC); r[3] = ioctl(r[2], KVM_CREATE_VM, 0x0ul); r[5] = ioctl(r[3], KVM_SET_TSS_ADDR, 0x20000000ul); r[7] = ioctl(r[3], KVM_SET_TSS_ADDR, 0x20000000ul); return 0; } Reported-by: Dmitry Vyukov Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d0a5b4b4e64d..990929bbeb50 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7816,7 +7816,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) slot = id_to_memslot(slots, id); if (size) { - if (WARN_ON(slot->npages)) + if (slot->npages) return -EEXIST; /* -- cgit v1.2.3-58-ga151 From 83676e923895adf2af392cfd36a05709950aaeef Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 1 Jun 2016 14:09:19 +0200 Subject: KVM: x86: avoid vmalloc(0) in the KVM_SET_CPUID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This causes an ugly dmesg splat. 
Beautified syzkaller testcase: #include #include #include #include #include long r[8]; int main() { struct kvm_cpuid2 c = { 0 }; r[2] = open("/dev/kvm", O_RDWR); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[4] = ioctl(r[3], KVM_CREATE_VCPU, 0x8); r[7] = ioctl(r[4], KVM_SET_CPUID, &c); return 0; } Reported-by: Dmitry Vyukov Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/cpuid.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 769af907f824..7597b42a8a88 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry __user *entries) { int r, i; - struct kvm_cpuid_entry *cpuid_entries; + struct kvm_cpuid_entry *cpuid_entries = NULL; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -ENOMEM; - cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); - if (!cpuid_entries) - goto out; - r = -EFAULT; - if (copy_from_user(cpuid_entries, entries, - cpuid->nent * sizeof(struct kvm_cpuid_entry))) - goto out_free; + if (cpuid->nent) { + cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * + cpuid->nent); + if (!cpuid_entries) + goto out; + r = -EFAULT; + if (copy_from_user(cpuid_entries, entries, + cpuid->nent * sizeof(struct kvm_cpuid_entry))) + goto out; + } for (i = 0; i < cpuid->nent; i++) { vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; @@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, kvm_x86_ops->cpuid_update(vcpu); r = kvm_update_cpuid(vcpu); -out_free: - vfree(cpuid_entries); out: + vfree(cpuid_entries); return r; } -- cgit v1.2.3-58-ga151 From 78e546c824fa8f96d323b7edd6f5cad5b74af057 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 1 Jun 2016 14:09:20 +0200 Subject: KVM: fail KVM_SET_VCPU_EVENTS with invalid exception number MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This cannot be returned by KVM_GET_VCPU_EVENTS, so it is okay to return EINVAL. 
It causes a WARN from exception_type: WARNING: CPU: 3 PID: 16732 at arch/x86/kvm/x86.c:345 exception_type+0x49/0x50 [kvm]() CPU: 3 PID: 16732 Comm: a.out Tainted: G W 4.4.6-300.fc23.x86_64 #1 Hardware name: LENOVO 2325F51/2325F51, BIOS G2ET32WW (1.12 ) 05/30/2012 0000000000000286 000000006308a48b ffff8800bec7fcf8 ffffffff813b542e 0000000000000000 ffffffffa0966496 ffff8800bec7fd30 ffffffff810a40f2 ffff8800552a8000 0000000000000000 00000000002c267c 0000000000000001 Call Trace: [] dump_stack+0x63/0x85 [] warn_slowpath_common+0x82/0xc0 [] warn_slowpath_null+0x1a/0x20 [] exception_type+0x49/0x50 [kvm] [] kvm_arch_vcpu_ioctl_run+0x10a2/0x14e0 [kvm] [] kvm_vcpu_ioctl+0x33d/0x620 [kvm] [] do_vfs_ioctl+0x298/0x480 [] SyS_ioctl+0x79/0x90 [] entry_SYSCALL_64_fastpath+0x12/0x71 ---[ end trace b1a0391266848f50 ]--- Testcase (beautified/reduced from syzkaller output): #include #include #include #include #include #include #include long r[31]; int main() { memset(r, -1, sizeof(r)); r[2] = open("/dev/kvm", O_RDONLY); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[7] = ioctl(r[3], KVM_CREATE_VCPU, 0); struct kvm_vcpu_events ve = { .exception.injected = 1, .exception.nr = 0xd4 }; r[27] = ioctl(r[7], KVM_SET_VCPU_EVENTS, &ve); r[30] = ioctl(r[7], KVM_RUN, 0); return 0; } Reported-by: Dmitry Vyukov Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 990929bbeb50..d9db2a486377 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2973,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | KVM_VCPUEVENT_VALID_SMM)) return -EINVAL; + if (events->exception.injected && + (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) + return -EINVAL; + process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; -- cgit v1.2.3-58-ga151 From d14bdb553f9196169f003058ae1cdabe514470e6 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 1 Jun 2016 14:09:23 +0200 Subject: KVM: x86: fix OOPS after invalid KVM_SET_DEBUGREGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MOV to DR6 or DR7 causes a #GP if an attempt is made to write a 1 to any of bits 63:32. However, this is not detected at KVM_SET_DEBUGREGS time, and the next KVM_RUN oopses: general protection fault: 0000 [#1] SMP CPU: 2 PID: 14987 Comm: a.out Not tainted 4.4.9-300.fc23.x86_64 #1 Hardware name: LENOVO 2325F51/2325F51, BIOS G2ET32WW (1.12 ) 05/30/2012 [...] 
Call Trace: [] kvm_arch_vcpu_ioctl_run+0x141d/0x14e0 [kvm] [] kvm_vcpu_ioctl+0x33d/0x620 [kvm] [] do_vfs_ioctl+0x298/0x480 [] SyS_ioctl+0x79/0x90 [] entry_SYSCALL_64_fastpath+0x12/0x71 Code: 55 83 ff 07 48 89 e5 77 27 89 ff ff 24 fd 90 87 80 81 0f 23 fe 5d c3 0f 23 c6 5d c3 0f 23 ce 5d c3 0f 23 d6 5d c3 0f 23 de 5d c3 <0f> 23 f6 5d c3 0f 0b 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 RIP [] native_set_debugreg+0x2b/0x40 RSP Testcase (beautified/reduced from syzkaller output): #include #include #include #include #include #include #include long r[8]; int main() { struct kvm_debugregs dr = { 0 }; r[2] = open("/dev/kvm", O_RDONLY); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[4] = ioctl(r[3], KVM_CREATE_VCPU, 7); memcpy(&dr, "\x5d\x6a\x6b\xe8\x57\x3b\x4b\x7e\xcf\x0d\xa1\x72" "\xa3\x4a\x29\x0c\xfc\x6d\x44\x00\xa7\x52\xc7\xd8" "\x00\xdb\x89\x9d\x78\xb5\x54\x6b\x6b\x13\x1c\xe9" "\x5e\xd3\x0e\x40\x6f\xb4\x66\xf7\x5b\xe3\x36\xcb", 48); r[7] = ioctl(r[4], KVM_SET_DEBUGREGS, &dr); r[6] = ioctl(r[4], KVM_RUN, 0); } Reported-by: Dmitry Vyukov Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d9db2a486377..902d9da12392 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3041,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (dbgregs->flags) return -EINVAL; + if (dbgregs->dr6 & ~0xffffffffull) + return -EINVAL; + if (dbgregs->dr7 & ~0xffffffffull) + return -EINVAL; + memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); vcpu->arch.dr6 = dbgregs->dr6; -- cgit v1.2.3-58-ga151 From fbf8f40e1658cb2f17452dbd3c708e329c5d27e0 Mon Sep 17 00:00:00 2001 From: Ganapatrao Kulkarni Date: Wed, 25 May 2016 15:29:20 +0200 Subject: irqchip/gicv3-its: numa: Enable workaround for Cavium thunderx erratum 23144 The erratum fixes the hang of ITS SYNC command by avoiding inter node io and collections/cpu mapping on thunderx dual-socket platform. This fix is only applicable for Cavium's ThunderX dual-socket platform. Reviewed-by: Robert Richter Signed-off-by: Ganapatrao Kulkarni Signed-off-by: Robert Richter Signed-off-by: Marc Zyngier --- Documentation/arm64/silicon-errata.txt | 1 + arch/arm64/Kconfig | 9 +++++++ drivers/irqchip/irq-gic-v3-its.c | 49 ++++++++++++++++++++++++++++++++-- 3 files changed, 57 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index c6938e50e71f..4da60b463995 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -56,6 +56,7 @@ stable kernels. | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | +| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 76747d92bc72..57a9f67971d3 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -426,6 +426,15 @@ config CAVIUM_ERRATUM_22375 If unsure, say Y. +config CAVIUM_ERRATUM_23144 + bool "Cavium erratum 23144: ITS SYNC hang on dual socket system" + depends on NUMA + default y + help + ITS SYNC command hang for cross node io and collections/cpu mapping. + + If unsure, say Y. 
+ config CAVIUM_ERRATUM_23154 bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" default y diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6bd881be24ea..5eb1f9e17a98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -41,6 +41,7 @@ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) @@ -82,6 +83,7 @@ struct its_node { u64 flags; u32 ite_size; u32 device_ids; + int numa_node; }; #define ITS_ITT_ALIGN SZ_256 @@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + /* lpi cannot be routed to a redistributor that is on a foreign node */ + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + if (its_dev->its->numa_node >= 0) { + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + if (!cpumask_intersects(mask_val, cpu_mask)) + return -EINVAL; + } + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + if (cpu >= nr_cpu_ids) return -EINVAL; @@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) list_for_each_entry(its, &its_nodes, entry) { u64 target; + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + continue; + } + /* * We now have to bind each collection to its target * redistributor. 
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); @@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; } +static void __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1451,6 +1487,14 @@ static const struct gic_quirk its_quirks[] = { .mask = 0xffff0fff, .init = its_enable_quirk_cavium_22375, }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, #endif { } @@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, its->base = its_base; its->phys_base = res.start; its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + its->numa_node = of_node_to_nid(node); its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); if (!its->cmd_base) { -- cgit v1.2.3-58-ga151 From 8051f4d16ef1d037e7b12abab79c3e0b960f4d36 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 May 2016 12:07:47 +0100 Subject: arm64: report CPU number in bad_mode If we take an exception we don't expect (e.g. SError), we report this in the bad_mode handler with pr_crit. Depending on the configured log level, we may or may not log additional information in functions called subsequently. Notably, the messages in dump_stack (including the CPU number) are printed with KERN_DEFAULT and may not appear. Some exceptions have an IMPLEMENTATION DEFINED ESR_ELx.ISS encoding, and knowing the CPU number is crucial to correctly decode them. To ensure that this is always possible, we should log the CPU number along with the ESR_ELx value, so we are not reliant on subsequent logs or additional printk configuration options. This patch logs the CPU number in bad_mode such that it is possible for a developer to decode these exceptions, provided access to sufficient documentation. 
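With this change the message looks roughly like the following (CPU number, ESR value and class string are made up for illustration):

	Bad mode in Error handler detected on CPU1, code 0xbf000002 -- SError

so decoding such an exception no longer depends on the dump_stack() output actually being printed.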
Signed-off-by: Mark Rutland Reported-by: Al Grant Cc: Catalin Marinas Cc: Dave Martin Cc: Robin Murphy Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c5392081b49b..f7cf463107df 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) void __user *pc = (void __user *)instruction_pointer(regs); console_verbose(); - pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", - handler[reason], esr, esr_get_class_string(esr)); + pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", + handler[reason], smp_processor_id(), esr, + esr_get_class_string(esr)); __show_regs(regs); info.si_signo = SIGILL; -- cgit v1.2.3-58-ga151 From a13e3a5b54e59ffdeba738ea6cb57a7856425206 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 1 Jun 2016 12:07:17 +0100 Subject: arm64: update stale PAGE_OFFSET comment Commit ab893fb9f1b17f02 ("arm64: introduce KIMAGE_VADDR as the virtual base of the kernel region") logically split KIMAGE_VADDR from PAGE_OFFSET, and since commit f9040773b7bbbd9e ("arm64: move kernel image to base of vmalloc area") the two have been distinct values. Unfortunately, neither commit updated the comment above these definitions, which now erroneously states that PAGE_OFFSET is the start of the kernel image rather than the start of the linear mapping. This patch fixes said comment, and introduces an explanation of KIMAGE_VADDR. Signed-off-by: Mark Rutland Cc: Will Deacon Cc: Catalin Marinas Cc: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 72a3025bb583..31b73227b41f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -55,8 +55,9 @@ #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) /* - * PAGE_OFFSET - the virtual address of the start of the kernel image (top + * PAGE_OFFSET - the virtual address of the start of the linear map (top * (VA_BITS - 1)) + * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. * TASK_SIZE - the maximum size of a user space task. -- cgit v1.2.3-58-ga151 From 48dd73c55d45d60037ea8d73eab60f2033d90721 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 May 2016 14:49:02 +0100 Subject: arm64: mm: dump: log span level The page table dump code logs spans of entries at the same level (pgd/pud/pmd/pte) which have the same attributes. While we log the (decoded) attributes, we don't log the level, which leaves the output ambiguous and/or confusing in some cases. For example: 0xffff800800000000-0xffff800980000000 6G RW NX SHD AF BLK UXN MEM/NORMAL If using 4K pages, this may describe a span of 6 1G block entries at the PGD/PUD level, or 3072 2M block entries at the PMD level. This patch adds the page table level to each output line, removing this ambiguity. 
For the example above, this will produce: 0xffffffc800000000-0xffffffc980000000 6G PUD RW NX SHD AF BLK UXN MEM/NORMAL When 3 level tables are in use, and we use the asm-generic/nopud.h definitions, the dump code treats each entry in the PGD as a 1 element table at the PUD level, and logs spans as being PUDs, which can be confusing. To counteract this, the "PUD" mnemonic is replaced with "PGD" when CONFIG_PGTABLE_LEVELS <= 3. Likewise for "PMD" when CONFIG_PGTABLE_LEVELS <= 2. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Huang Shijie Cc: Laura Abbott Cc: Steve Capper Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/mm/dump.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 8404190fe2bd..ccfde237d6e6 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = { struct pg_level { const struct prot_bits *bits; + const char *name; size_t num; u64 mask; }; @@ -157,15 +158,19 @@ struct pg_level { static struct pg_level pg_level[] = { { }, { /* pgd */ + .name = "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pud */ + .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pmd */ + .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pte */ + .name = "PTE", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, @@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, delta >>= 10; unit++; } - seq_printf(st->seq, "%9lu%c", delta, *unit); + seq_printf(st->seq, "%9lu%c %s", delta, *unit, + pg_level[st->level].name); if (pg_level[st->level].bits) dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num); -- cgit v1.2.3-58-ga151 From 030c4d24447cbf2bd612baea5695952e5f62c042 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 May 2016 15:57:59 +0100 Subject: arm64: move {PAGE,CONT}_SHIFT into Kconfig In some cases (e.g. the awk for CONFIG_RANDOMIZE_TEXT_OFFSET) we would like to make use of PAGE_SHIFT outside of code that can include the usual header files. Add a new CONFIG_ARM64_PAGE_SHIFT for this, likewise with ARM64_CONT_SHIFT for consistency. 
Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Sudeep Holla Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 12 ++++++++++++ arch/arm64/include/asm/page.h | 12 ++---------- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 76747d92bc72..5fe320899eb4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT config MMU def_bool y +config ARM64_PAGE_SHIFT + int + default 16 if ARM64_64K_PAGES + default 14 if ARM64_16K_PAGES + default 12 + +config ARM64_CONT_SHIFT + int + default 5 if ARM64_64K_PAGES + default 7 if ARM64_16K_PAGES + default 4 + config ARCH_MMAP_RND_BITS_MIN default 14 if ARM64_64K_PAGES default 16 if ARM64_16K_PAGES diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 17b45f7d96d3..8472c6def5ef 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -23,16 +23,8 @@ /* PAGE_SHIFT determines the page size */ /* CONT_SHIFT determines the number of pages which can be tracked together */ -#ifdef CONFIG_ARM64_64K_PAGES -#define PAGE_SHIFT 16 -#define CONT_SHIFT 5 -#elif defined(CONFIG_ARM64_16K_PAGES) -#define PAGE_SHIFT 14 -#define CONT_SHIFT 7 -#else -#define PAGE_SHIFT 12 -#define CONT_SHIFT 4 -#endif +#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT +#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -- cgit v1.2.3-58-ga151 From aed7eb8367939244ba19445292ffdfc398e0d66a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 May 2016 15:58:00 +0100 Subject: arm64: fix alignment when RANDOMIZE_TEXT_OFFSET is enabled With ARM64_64K_PAGES and RANDOMIZE_TEXT_OFFSET enabled, we hit the following issue on the boot: kernel BUG at arch/arm64/mm/mmu.c:480! Internal error: Oops - BUG: 0 [#1] PREEMPT SMP Modules linked in: CPU: 0 PID: 0 Comm: swapper Not tainted 4.6.0 #310 Hardware name: ARM Juno development board (r2) (DT) task: ffff000008d58a80 ti: ffff000008d30000 task.ti: ffff000008d30000 PC is at map_kernel_segment+0x44/0xb0 LR is at paging_init+0x84/0x5b0 pc : [] lr : [] pstate: 600002c5 Call trace: [] map_kernel_segment+0x44/0xb0 [] paging_init+0x84/0x5b0 [] setup_arch+0x198/0x534 [] start_kernel+0x70/0x388 [] __primary_switched+0x30/0x74 Commit 7eb90f2ff7e3 ("arm64: cover the .head.text section in the .text segment mapping") removed the alignment between the .head.text and .text sections, and used the _text rather than the _stext interval for mapping the .text segment. Prior to this commit _stext was always section aligned and didn't cause any issue even when RANDOMIZE_TEXT_OFFSET was enabled. Since that alignment has been removed and _text is used to map the .text segment, we need ensure _text is always page aligned when RANDOMIZE_TEXT_OFFSET is enabled. This patch adds logic to TEXT_OFFSET fuzzing to ensure that the offset is always aligned to the kernel page size. To ensure this, we rely on the PAGE_SHIFT being available via Kconfig. 
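As a worked example of the awk expression in the hunk below: with 64K pages, CONFIG_ARM64_PAGE_SHIFT is 16, so the offset becomes int(2M / 64K * rand()) * 64K, i.e. one of the 32 multiples of 0x10000 below 0x200000 and therefore always page aligned, whereas the old int(512 * rand()) form with the "0x%03x000" format was only ever guaranteed to be 4K aligned.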
Signed-off-by: Mark Rutland Reported-by: Sudeep Holla Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Will Deacon Fixes: 7eb90f2ff7e3 ("arm64: cover the .head.text section in the .text segment mapping") Signed-off-by: Will Deacon --- arch/arm64/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 354d75402ace..7085e322dc42 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o # The byte offset of the kernel image in RAM from the start of RAM. ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) -TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') +TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \ + int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \ + rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}") else TEXT_OFFSET := 0x00080000 endif -- cgit v1.2.3-58-ga151 From be24a89700eef61bedaba40f3b05ef07f5806e38 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 29 Jun 2011 00:48:19 +0200 Subject: parisc: Fix backtrace on PA-RISC This patch fixes the backtrace on PA-RISC. There were several problems: 1) The code that decodes instructions handles instructions that subtract from the stack pointer incorrectly. If the instruction subtracts the number X from the stack pointer, the code increases the frame size by (0x100000000-X). This results in invalid accesses to memory and recursive page faults. 2) Because gcc reorders blocks, handling instructions that subtract from the frame pointer is incorrect. For example, this function int f(int a) { if (__builtin_expect(a, 1)) return a; g(); return a; } is compiled in such a way that the code that decreases the stack pointer for the first "return a" is placed before the code for the "g" call. If we recognize this decrement, we mistakenly believe that the frame size for the "g" call is zero. To fix problems 1) and 2), the patch doesn't recognize instructions that decrease the stack pointer at all. To further safeguard the unwind code against nonsense values, we don't allow a frame size larger than Total_frame_size. 3) The backtrace is not locked. If a stack dump races with module unload, an invalid table can be accessed. This patch adds a spinlock when processing module tables. Note that for a correct backtrace, you need recent binutils. Binutils 2.18 from Debian 5 produces garbage unwind tables. Binutils 2.21 works better (it sometimes forgets function frames, but at least it doesn't generate garbage).
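A worked example of problem 1 (numbers chosen for illustration): for an instruction that subtracts 64 from the stack pointer, the old decoder added the sign-extended displacement to the frame size, effectively increasing it by 0x100000000 - 64 = 0xffffffc0 and sending prev_sp into unmapped memory. The extra low bit in the new instruction masks (e.g. 0xffffc001 instead of 0xffffc000) requires the displacement's sign bit to be clear, so such decrements are simply no longer matched.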
Signed-off-by: Mikulas Patocka Signed-off-by: Helge Deller --- arch/parisc/kernel/unwind.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index ddd988b267a9..e278a87f43cc 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr) if (addr >= kernel_unwind_table.start && addr <= kernel_unwind_table.end) e = find_unwind_entry_in_table(&kernel_unwind_table, addr); - else + else { + unsigned long flags; + + spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->start && addr <= table->end) @@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr) break; } } + spin_unlock_irqrestore(&unwind_lock, flags); + } return e; } @@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info) insn = *(unsigned int *)npc; - if ((insn & 0xffffc000) == 0x37de0000 || - (insn & 0xffe00000) == 0x6fc00000) { + if ((insn & 0xffffc001) == 0x37de0000 || + (insn & 0xffe00001) == 0x6fc00000) { /* ldo X(sp), sp, or stwm X,D(sp) */ - frame_size += (insn & 0x1 ? -1 << 13 : 0) | - ((insn & 0x3fff) >> 1); + frame_size += (insn & 0x3fff) >> 1; dbg("analyzing func @ %lx, insn=%08x @ " "%lx, frame_size = %ld\n", info->ip, insn, npc, frame_size); - } else if ((insn & 0xffe00008) == 0x73c00008) { + } else if ((insn & 0xffe00009) == 0x73c00008) { /* std,ma X,D(sp) */ - frame_size += (insn & 0x1 ? -1 << 13 : 0) | - (((insn >> 4) & 0x3ff) << 3); + frame_size += ((insn >> 4) & 0x3ff) << 3; dbg("analyzing func @ %lx, insn=%08x @ " "%lx, frame_size = %ld\n", info->ip, insn, npc, frame_size); @@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info) } } + if (frame_size > e->Total_frame_size << 3) + frame_size = e->Total_frame_size << 3; + if (!unwind_special(info, e->region_start, frame_size)) { info->prev_sp = info->sp - frame_size; if (e->Millicode) -- cgit v1.2.3-58-ga151 From 0032c08833ab7c7861d12eb35da26dce85f3e229 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 3 Jun 2016 19:22:31 +0200 Subject: parisc: Fix printk time during boot Avoid showing invalid printk time stamps during boot. 
Signed-off-by: Helge Deller Reviewed-by: Aaro Koskinen --- arch/parisc/kernel/processor.c | 5 +++-- arch/parisc/kernel/time.c | 5 ----- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index e81ccf1716e9..5adc339eb7c8 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -324,8 +324,9 @@ int init_per_cpu(int cpunum) per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; - printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", - cpunum, coproc_cfg.revision, coproc_cfg.model); + if (cpunum == 0) + printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", + cpunum, coproc_cfg.revision, coproc_cfg.model); /* ** store status register to stack (hopefully aligned) diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 58dd6801f5be..31ec99a5f119 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -309,11 +309,6 @@ void __init time_init(void) clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, NSEC_PER_MSEC, 0); -#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT) - /* At bootup only one 64bit CPU is online and cr16 is "stable" */ - set_sched_clock_stable(); -#endif - start_cpu_itimer(); /* get CPU 0 started */ /* register at clocksource framework */ -- cgit v1.2.3-58-ga151 From 8b78f260887df532da529f225c49195d18fef36b Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 4 Jun 2016 17:21:33 +0200 Subject: parisc: Fix pagefault crash in unaligned __get_user() call One of the debian buildd servers had this crash in the syslog without any other information: Unaligned handler failed, ret = -2 clock_adjtime (pid 22578): Unaligned data reference (code 28) CPU: 1 PID: 22578 Comm: clock_adjtime Tainted: G E 4.5.0-2-parisc64-smp #1 Debian 4.5.4-1 task: 000000007d9960f8 ti: 00000001bde7c000 task.ti: 00000001bde7c000 YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI PSW: 00001000000001001111100000001111 Tainted: G E r00-03 000000ff0804f80f 00000001bde7c2b0 00000000402d2be8 00000001bde7c2b0 r04-07 00000000409e1fd0 00000000fa6f7fff 00000001bde7c148 00000000fa6f7fff r08-11 0000000000000000 00000000ffffffff 00000000fac9bb7b 000000000002b4d4 r12-15 000000000015241c 000000000015242c 000000000000002d 00000000fac9bb7b r16-19 0000000000028800 0000000000000001 0000000000000070 00000001bde7c218 r20-23 0000000000000000 00000001bde7c210 0000000000000002 0000000000000000 r24-27 0000000000000000 0000000000000000 00000001bde7c148 00000000409e1fd0 r28-31 0000000000000001 00000001bde7c320 00000001bde7c350 00000001bde7c218 sr00-03 0000000001200000 0000000001200000 0000000000000000 0000000001200000 sr04-07 0000000000000000 0000000000000000 0000000000000000 0000000000000000 IASQ: 0000000000000000 0000000000000000 IAOQ: 00000000402d2e84 00000000402d2e88 IIR: 0ca0d089 ISR: 0000000001200000 IOR: 00000000fa6f7fff CPU: 1 CR30: 00000001bde7c000 CR31: ffffffffffffffff ORIG_R28: 00000002369fe628 IAOQ[0]: compat_get_timex+0x2dc/0x3c0 IAOQ[1]: compat_get_timex+0x2e0/0x3c0 RP(r2): compat_get_timex+0x40/0x3c0 Backtrace: [<00000000402d4608>] compat_SyS_clock_adjtime+0x40/0xc0 [<0000000040205024>] syscall_exit+0x0/0x14 This means the userspace program clock_adjtime called the clock_adjtime() syscall and then crashed inside the compat_get_timex() function. Syscalls should never crash programs, but instead return EFAULT. 
The IIR register contains the executed instruction, which disassembles into "ldw 0(sr3,r5),r9". This load-word instruction is part of __get_user(), which tried to read the word at %r5/IOR (0xfa6f7fff). This means the unaligned handler jumped in. The unaligned handler is able to emulate all ldw instructions, but it fails when it cannot read the source, e.g. because of a page fault. The following program reproduces the problem: #define _GNU_SOURCE #include #include #include int main(void) { /* allocate 8k */ char *ptr = mmap(NULL, 2*4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); /* free second half (upper 4k) and make it invalid. */ munmap(ptr+4096, 4096); /* syscall where first int is unaligned and clobbers into invalid memory region */ /* syscall should return EFAULT */ return syscall(__NR_clock_adjtime, 0, ptr+4095); } To fix this issue we simply need to check if the faulting instruction address is in the exception fixup table when the unaligned handler failed. If it is, call the fixup routine instead of crashing. While looking at the unaligned handler I found another issue as well: the target register should not be modified if the handler was unsuccessful. Signed-off-by: Helge Deller Cc: stable@vger.kernel.org --- arch/parisc/kernel/unaligned.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index d7c0acb35ec2..8d49614d600d 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs) break; } - if (modify && R1(regs->iir)) + if (ret == 0 && modify && R1(regs->iir)) regs->gr[R1(regs->iir)] = newbase; @@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs) if (ret) { + /* + * The unaligned handler failed. + * If we were called by __get_user() or __put_user() jump + * to it's exception fixup handler instead of crashing.
+ */ + if (!user_mode(regs) && fixup_exception(regs)) + return; + printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); -- cgit v1.2.3-58-ga151 From 58f1c654d13a42575d507ea61f6de0332a761e75 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 4 Jun 2016 17:38:09 +0200 Subject: parisc: Move die_if_kernel() prototype into traps.h header Signed-off-by: Helge Deller --- arch/parisc/include/asm/traps.h | 2 ++ arch/parisc/kernel/unaligned.c | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 4736020ba5ea..5e953ab4530d 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -8,6 +8,8 @@ struct pt_regs; void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset) __noreturn __cold; +void die_if_kernel(char *str, struct pt_regs *regs, long err); + /* mm/fault.c */ void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 8d49614d600d..2b65c0177778 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -28,6 +28,7 @@ #include #include #include +#include /* #define DEBUG_UNALIGNED 1 */ @@ -130,8 +131,6 @@ int unaligned_enabled __read_mostly = 1; -void die_if_kernel (char *str, struct pt_regs *regs, long err); - static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; -- cgit v1.2.3-58-ga151 From 624531886987f0f1b5d01fb598034d039198e090 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 7 Jun 2016 17:57:54 +0100 Subject: ARM: 8578/1: mm: ensure pmd_present only checks the valid bit In a subsequent patch, pmd_mknotpresent will clear the valid bit of the pmd entry, resulting in a not-present entry from the hardware's perspective. Unfortunately, pmd_present simply checks for a non-zero pmd value and will therefore continue to return true even after a pmd_mknotpresent operation. Since pmd_mknotpresent is only used for managing huge entries, this is only an issue for the 3-level case. This patch fixes the 3-level pmd_present implementation to take into account the valid bit. For bisectability, the change is made before the fix to pmd_mknotpresent. 
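To illustrate the problem (sketch only, mirroring the hunks below): with the old generic definition

	#define pmd_present(pmd)	(pmd_val(pmd))

an entry whose valid bit has been cleared by pmd_mknotpresent() still has non-zero bits, so pmd_present() keeps returning true; testing L_PMD_SECT_VALID instead reports it as not present, matching what the hardware sees.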
[catalin.marinas@arm.com: comment update regarding pmd_mknotpresent patch] Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.") Cc: # 3.11+ Cc: Russell King Cc: Steve Capper Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/pgtable-2level.h | 1 + arch/arm/include/asm/pgtable-3level.h | 1 + arch/arm/include/asm/pgtable.h | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index aeddd28b3595..92fd2c8a9af0 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pmd_large(pmd) (pmd_val(pmd) & 2) #define pmd_bad(pmd) (pmd_val(pmd) & 2) +#define pmd_present(pmd) (pmd_val(pmd)) #define copy_pmd(pmdpd,pmdps) \ do { \ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index fa70db7c714b..4dce1580bc15 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) : !!(pmd_val(pmd) & (val))) #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) +#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID)) #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) static inline pte_t pte_mkspecial(pte_t pte) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 348caabb7625..d62204060cbe 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_present(pmd) (pmd_val(pmd)) static inline pte_t *pmd_page_vaddr(pmd_t pmd) { -- cgit v1.2.3-58-ga151 From 56530f5d2ddc9b9fade7ef8db9cb886e9dc689b5 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Tue, 7 Jun 2016 17:58:06 +0100 Subject: ARM: 8579/1: mm: Fix definition of pmd_mknotpresent Currently pmd_mknotpresent will use a zero entry to represent an invalidated pmd. Unfortunately this definition clashes with pmd_none, so a race condition can occur if zap_pmd_range sees pmd_none whilst __split_huge_pmd_locked is running concurrently, having just called pmdp_invalidate. This patch fixes the race condition by modifying pmd_mknotpresent to create non-zero faulting entries (as is done in other architectures), removing the ambiguity with pmd_none. [catalin.marinas@arm.com: using L_PMD_SECT_VALID instead of PMD_TYPE_SECT] Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.") Cc: # 3.11+ Reported-by: Kirill A.
Shutemov Acked-by: Will Deacon Cc: Russell King Signed-off-by: Steve Capper Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/pgtable-3level.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 4dce1580bc15..2a029bceaf2f 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -250,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) -/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ +/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */ static inline pmd_t pmd_mknotpresent(pmd_t pmd) { - return __pmd(0); + return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) -- cgit v1.2.3-58-ga151 From 5745eef6b813194b4dd3e2aee1dd712d8512bf91 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 May 2016 16:34:27 +0100 Subject: ARM: rename S_FRAME_SIZE to PT_REGS_SIZE S_FRAME_SIZE is no longer the size of the kernel stack frame, so this name is misleading. It is the size of the kernel pt_regs structure. Name it so. Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 4 ++-- arch/arm/kernel/asm-offsets.c | 2 +- arch/arm/kernel/entry-armv.S | 12 ++++++------ arch/arm/kernel/entry-common.S | 2 +- arch/arm/kernel/entry-header.S | 8 ++++---- arch/arm/kernel/entry-v7m.S | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index b2bc8e11471d..60d9c5471eb6 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -480,13 +480,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .macro uaccess_save, tmp #ifdef CONFIG_CPU_SW_DOMAIN_PAN mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #S_FRAME_SIZE] + str \tmp, [sp, #PT_REGS_SIZE] #endif .endm .macro uaccess_restore #ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #S_FRAME_SIZE] + ldr r0, [sp, #PT_REGS_SIZE] mcr p15, 0, r0, c3, c0, 0 #endif .endm diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 27d05813ff09..2841c9980bcd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -107,7 +107,7 @@ int main(void) DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc)); DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr)); DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); - DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); BLANK(); #ifdef CONFIG_CACHE_L2X0 DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index e2550500486d..a6cb019d9920 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -92,7 +92,7 @@ * Invalid mode handlers */ .macro inv_entry, reason - sub sp, sp, #S_FRAME_SIZE + sub sp, sp, #PT_REGS_SIZE ARM( stmib sp, {r1 - lr} ) THUMB( stmia sp, {r0 - r12} ) THUMB( str sp, [sp, #S_SP] ) @@ -152,7 +152,7 @@ ENDPROC(__und_invalid) .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) + sub sp, sp, #(PT_REGS_SIZE + 8 + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL 
SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( mov r0, sp ) @@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" - add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4) + add r2, sp, #(PT_REGS_SIZE + 8 + \stack_hole - 4) SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! @ save the "real" r0 copied @ from the exception stack @@ -366,17 +366,17 @@ ENDPROC(__fiq_abt) /* * User mode handlers * - * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE + * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE */ -#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7) +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7) #error "sizeof(struct pt_regs) must be a multiple of 8" #endif .macro usr_entry, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space - sub sp, sp, #S_FRAME_SIZE + sub sp, sp, #PT_REGS_SIZE ARM( stmib sp, {r1 - r12} ) THUMB( stmia sp, {r0 - r12} ) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 30a7228eaceb..10c3283d6c19 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -145,7 +145,7 @@ ENTRY(vector_swi) #ifdef CONFIG_CPU_V7M v7m_exception_entry #else - sub sp, sp, #S_FRAME_SIZE + sub sp, sp, #PT_REGS_SIZE stmia sp, {r0 - r12} @ Calling r0 - r12 ARM( add r8, sp, #S_PC ) ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 0d22ad206d52..5e1d029147cb 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -90,7 +90,7 @@ @ Linux expects to have irqs off. Do it here before taking stack space cpsid i - sub sp, #S_FRAME_SIZE-S_IP + sub sp, #PT_REGS_SIZE-S_IP stmdb sp!, {r0-r11} @ load saved r12, lr, return address and xPSR. @@ -160,7 +160,7 @@ ldmia sp!, {r0-r11} @ restore main sp - add sp, sp, #S_FRAME_SIZE-S_IP + add sp, sp, #PT_REGS_SIZE-S_IP cpsie i bx lr @@ -307,7 +307,7 @@ .endif mov r0, r0 @ ARMv5T and earlier require a nop @ after ldm {}^ - add sp, sp, #\offset + S_FRAME_SIZE + add sp, sp, #\offset + PT_REGS_SIZE movs pc, lr @ return & move spsr_svc into cpsr #elif defined(CONFIG_CPU_V7M) @ V7M restore. @@ -334,7 +334,7 @@ .else ldmdb sp, {r0 - r12} @ get calling r0 - r12 .endif - add sp, sp, #S_FRAME_SIZE - S_SP + add sp, sp, #PT_REGS_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr #endif /* !CONFIG_THUMB2_KERNEL */ .endm diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index 907534f97053..abcf47848525 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -73,7 +73,7 @@ __irq_entry: @ correctness they don't need to be restored. So only r8-r11 must be @ restored here. The easiest way to do so is to restore r0-r7, too. ldmia sp!, {r0-r11} - add sp, #S_FRAME_SIZE-S_IP + add sp, #PT_REGS_SIZE-S_IP cpsie i bx lr ENDPROC(__irq_entry) -- cgit v1.2.3-58-ga151 From e6a9dc6129d23cd3025e841c4e13a70910a37135 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 13 May 2016 10:22:38 +0100 Subject: ARM: introduce svc_pt_regs structure Since the privileged mode pt_regs are an extended version of the saved userland pt_regs, introduce a new svc_pt_regs structure to describe this layout. 
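The idiom is easiest to see in a stripped-down sketch (illustrative only; the real definition is in the diff below):

/* The base pt_regs must remain the first member, so a struct pt_regs *
 * pointing at a kernel-mode frame can be widened to its container. */
struct pt_regs { unsigned long uregs[18]; };

struct svc_pt_regs {
	struct pt_regs regs;  /* the saved userland-style registers, first on purpose */
	unsigned int dacr;    /* extra privileged-mode state */
	unsigned int unused;  /* padding keeps the frame 64-bit aligned */
};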
Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 4 ++-- arch/arm/include/asm/ptrace.h | 8 ++++++++ arch/arm/kernel/asm-offsets.c | 2 ++ arch/arm/kernel/entry-armv.S | 4 ++-- 4 files changed, 14 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 60d9c5471eb6..4eaea2173bf8 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -480,13 +480,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .macro uaccess_save, tmp #ifdef CONFIG_CPU_SW_DOMAIN_PAN mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #PT_REGS_SIZE] + str \tmp, [sp, #SVC_DACR] #endif .endm .macro uaccess_restore #ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #PT_REGS_SIZE] + ldr r0, [sp, #SVC_DACR] mcr p15, 0, r0, c3, c0, 0 #endif .endm diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 51622ba7c4a6..5194cf79c90f 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -13,10 +13,18 @@ #include #ifndef __ASSEMBLY__ +#include + struct pt_regs { unsigned long uregs[18]; }; +struct svc_pt_regs { + struct pt_regs regs; + u32 dacr; + u32 unused; +}; + #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 2841c9980bcd..9a8ce342cd82 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -108,6 +108,8 @@ int main(void) DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr)); DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr)); + DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); BLANK(); #ifdef CONFIG_CACHE_L2X0 DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index a6cb019d9920..0d6f5413be18 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -152,7 +152,7 @@ ENDPROC(__und_invalid) .macro svc_entry, stack_hole=0, trace=1, uaccess=1 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(PT_REGS_SIZE + 8 + \stack_hole - 4) + sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4) #ifdef CONFIG_THUMB2_KERNEL SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( mov r0, sp ) @@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ldmia r0, {r3 - r5} add r7, sp, #S_SP - 4 @ here for interlock avoidance mov r6, #-1 @ "" "" "" "" - add r2, sp, #(PT_REGS_SIZE + 8 + \stack_hole - 4) + add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4) SPFIX( addeq r2, r2, #4 ) str r3, [sp, #-4]! @ save the "real" r0 copied @ from the exception stack -- cgit v1.2.3-58-ga151 From 5fa9da5043a81b9eea5d4522d1371455bf64894a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 13 May 2016 10:26:10 +0100 Subject: ARM: get rid of horrible *(unsigned int *)(regs + 1) Get rid of the horrible "*(unsigned int *)(regs + 1)" to get at the parent context domain access register value, instead using the newly introduced svc_pt_regs structure. 
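A minimal sketch of why the container_of() form is more robust than pointer arithmetic (simplified definitions; the kernel's container_of adds type checking on top of the same offsetof trick):

#include <stddef.h>

struct pt_regs { unsigned long uregs[18]; };
struct svc_pt_regs { struct pt_regs regs; unsigned int dacr; unsigned int unused; };

#define to_svc_pt_regs(r) \
	((struct svc_pt_regs *)((char *)(r) - offsetof(struct svc_pt_regs, regs)))

unsigned int read_parent_dacr(struct pt_regs *regs)
{
	/* was: *(unsigned int *)(regs + 1), which silently breaks as soon
	 * as the layout after pt_regs changes; this tracks the struct */
	return to_svc_pt_regs(regs)->dacr;
}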
Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/ptrace.h | 2 ++ arch/arm/kernel/process.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 5194cf79c90f..0ef0093800f2 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -25,6 +25,8 @@ struct svc_pt_regs { u32 unused; }; +#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs) + #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 4a803c5a1ff7..f1c720c0d568 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -106,7 +106,7 @@ void __show_regs(struct pt_regs *regs) if (user_mode(regs)) domain = DACR_UACCESS_ENABLE; else - domain = *(unsigned int *)(regs + 1); + domain = to_svc_pt_regs(regs)->dacr; #else domain = get_domain(); #endif -- cgit v1.2.3-58-ga151 From 9f85eae622cd9b63dc44b7dcfa958b3881a09cbb Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 9 Jun 2016 11:21:20 +0100 Subject: ARM: 8580/1: Remove orphaned __addr_ok() definition Since commit 8c56cc8be5b3 ("ARM: 7449/1: use generic strnlen_user and strncpy_from_user functions"), the definition of __addr_ok() has been languishing unused; eradicate the sucker. Cc: Russell King Signed-off-by: Robin Murphy Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 35c9db857ebe..7badc3e55109 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -104,14 +104,6 @@ static inline void set_fs(mm_segment_t fs) #define segment_eq(a, b) ((a) == (b)) -#define __addr_ok(addr) ({ \ - unsigned long flag; \ - __asm__("cmp %2, %0; movlo %0, #0" \ - : "=&r" (flag) \ - : "0" (current_thread_info()->addr_limit), "r" (addr) \ - : "cc"); \ - (flag == 0); }) - /* We use 33-bit arithmetic here... */ #define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ -- cgit v1.2.3-58-ga151 From 9f73bd8bb445e0cbe4bcef6d4cfc788f1e184007 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Feb 2016 16:54:45 +0000 Subject: ARM: uaccess: remove put_user() code duplication Remove the code duplication between put_user() and __put_user(). The code which selected the implementation based upon the pointer size, and declared the local variable to hold the value to be put are common to both implementations. 
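The shape of the deduplication can be shown with a hedged userspace sketch (names loosely follow the patch; the per-size operations here are plain stores rather than the kernel's exception-handling helpers):

#include <stdio.h>

/* One size dispatcher, parameterised by the "put" operation... */
#define put_user_switch(x, ptr, err, fn)                   \
	do {                                               \
		switch (sizeof(*(ptr))) {                  \
		case 1: fn((x), (ptr), (err)); break;      \
		case 4: fn((x), (ptr), (err)); break;      \
		default: (err) = -14; break; /* -EFAULT */ \
		}                                          \
	} while (0)

/* ...so the checked and unchecked variants can share it instead of
 * each carrying its own copy of the switch. */
#define put_plain(x, ptr, err) ((void)(*(ptr) = (x)), (err) = 0)

int main(void)
{
	int v = 0, err = -1;

	put_user_switch(42, &v, err, put_plain);
	printf("v=%d err=%d\n", v, err); /* v=42 err=0 */
	return 0;
}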
Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 106 +++++++++++++++++++---------------------- 1 file changed, 49 insertions(+), 57 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 7badc3e55109..62a6f65029e6 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -230,49 +230,23 @@ extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2, __p, __e, __l, __s) \ - __asm__ __volatile__ ( \ - __asmeq("%0", "r0") __asmeq("%2", "r2") \ - __asmeq("%3", "r1") \ - "bl __put_user_" #__s \ - : "=&r" (__e) \ - : "0" (__p), "r" (__r2), "r" (__l) \ - : "ip", "lr", "cc") - -#define __put_user_check(x, p) \ +#define __put_user_check(__pu_val, __ptr, __err, __s) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ - const typeof(*(p)) __user *__tmp_p = (p); \ - register const typeof(*(p)) __r2 asm("r2") = (x); \ - register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ + register typeof(__pu_val) __r2 asm("r2") = __pu_val; \ + register const void __user *__p asm("r0") = __ptr; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ - unsigned int __ua_flags = uaccess_save_and_enable(); \ - switch (sizeof(*(__p))) { \ - case 1: \ - __put_user_x(__r2, __p, __e, __l, 1); \ - break; \ - case 2: \ - __put_user_x(__r2, __p, __e, __l, 2); \ - break; \ - case 4: \ - __put_user_x(__r2, __p, __e, __l, 4); \ - break; \ - case 8: \ - __put_user_x(__r2, __p, __e, __l, 8); \ - break; \ - default: __e = __put_user_bad(); break; \ - } \ - uaccess_restore(__ua_flags); \ - __e; \ + __asm__ __volatile__ ( \ + __asmeq("%0", "r0") __asmeq("%2", "r2") \ + __asmeq("%3", "r1") \ + "bl __put_user_" #__s \ + : "=&r" (__e) \ + : "0" (__p), "r" (__r2), "r" (__l) \ + : "ip", "lr", "cc"); \ + __err = __e; \ }) -#define put_user(x, p) \ - ({ \ - might_fault(); \ - __put_user_check(x, p); \ - }) - #else /* CONFIG_MMU */ /* @@ -290,7 +264,7 @@ static inline void set_fs(mm_segment_t fs) } #define get_user(x, p) __get_user(x, p) -#define put_user(x, p) __put_user(x, p) +#define __put_user_check __put_user_nocheck #endif /* CONFIG_MMU */ @@ -381,36 +355,54 @@ do { \ #define __get_user_asm_word(x, addr, err) \ __get_user_asm(x, addr, err, ldr) + +#define __put_user_switch(x, ptr, __err, __fn) \ + do { \ + const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + unsigned int __ua_flags; \ + might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ + switch (sizeof(*(ptr))) { \ + case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \ + case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \ + case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \ + case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \ + default: __err = __put_user_bad(); break; \ + } \ + uaccess_restore(__ua_flags); \ + } while (0) + +#define put_user(x, ptr) \ +({ \ + int __pu_err = 0; \ + __put_user_switch((x), (ptr), __pu_err, __put_user_check); \ + __pu_err; \ +}) + #define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ + __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \ __pu_err; \ }) #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), err); \ + __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ (void) 0; \ }) -#define __put_user_err(x, ptr, err) \ -do { \ - unsigned long __pu_addr 
= (unsigned long)(ptr); \ - unsigned int __ua_flags; \ - __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - might_fault(); \ - __ua_flags = uaccess_save_and_enable(); \ - switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ - case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ - case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \ - case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ - default: __put_user_bad(); \ - } \ - uaccess_restore(__ua_flags); \ -} while (0) +#define __put_user_nocheck(x, __pu_ptr, __err, __size) \ + do { \ + unsigned long __pu_addr = (unsigned long)__pu_ptr; \ + __put_user_nocheck_##__size(x, __pu_addr, __err); \ + } while (0) + +#define __put_user_nocheck_1 __put_user_asm_byte +#define __put_user_nocheck_2 __put_user_asm_half +#define __put_user_nocheck_4 __put_user_asm_word +#define __put_user_nocheck_8 __put_user_asm_dword #define __put_user_asm(x, __pu_addr, err, instr) \ __asm__ __volatile__( \ -- cgit v1.2.3-58-ga151 From cb6f8344f8780d75929c6a20f1f094d2585003f4 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 4 Apr 2016 09:22:28 +0100 Subject: ARM: 8556/1: on a generic DT system: do not touch l2x0 Set no bits, mask all bits in the AUX l2x0 register for the default DT ARM system: if anything needs to be modified, it should be done using DT bindings. Suggested-by: Arnd Bergmann Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/devtree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 2e26016a91a5..f8966bb25f9d 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -213,6 +213,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) #if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M) DT_MACHINE_START(GENERIC_DT, "Generic DT based system") + .l2c_aux_val = 0x0, + .l2c_aux_mask = ~0x0, MACHINE_END mdesc_best = &__mach_desc_GENERIC_DT; -- cgit v1.2.3-58-ga151 From c6bbfbb729e88188b7c9113d58647766db03f1d4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 30 May 2016 03:01:23 +0100 Subject: ARM: 8576/1: avoid duplicating "Kernel: arch/arm/boot/*Image is ready" Commit 3939f3345050 ("ARM: 8418/1: add boot image dependencies to not generate invalid images") fixed bad image generation for the parallel building, but as its side effect, Kbuild now descends into arch/arm/boot/ again and again, duplicating the log messages. It looks clumsy, so let's display the same message only once. This commit moves the log rules from arch/arm/boot/Makefile to arch/arm/Makefile. I did not delete them completely because *Image are the final targets that users are interested in. 
Without this commit, the log of an incremental build looks like this: $ make ARCH=arm UIMAGE_LOADADDR=0x80208000 uImage CHK include/config/kernel.release CHK include/generated/uapi/linux/version.h CHK include/generated/utsrelease.h CHK include/generated/bounds.h CHK include/generated/timeconst.h CHK include/generated/asm-offsets.h CALL scripts/checksyscalls.sh CHK include/generated/compile.h Kernel: arch/arm/boot/Image is ready Kernel: arch/arm/boot/Image is ready Kernel: arch/arm/boot/zImage is ready Kernel: arch/arm/boot/Image is ready Kernel: arch/arm/boot/zImage is ready Image arch/arm/boot/uImage is ready With this commit, it will look like this: $ make ARCH=arm UIMAGE_LOADADDR=0x80208000 uImage CHK include/config/kernel.release CHK include/generated/uapi/linux/version.h CHK include/generated/utsrelease.h CHK include/generated/bounds.h CHK include/generated/timeconst.h CHK include/generated/asm-offsets.h CALL scripts/checksyscalls.sh CHK include/generated/compile.h Kernel: arch/arm/boot/Image is ready Kernel: arch/arm/boot/zImage is ready Kernel: arch/arm/boot/uImage is ready Signed-off-by: Masahiro Yamada Signed-off-by: Russell King --- arch/arm/Makefile | 1 + arch/arm/boot/Makefile | 6 +----- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 274e8a6582f1..229afaf2058b 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -327,6 +327,7 @@ zImage: Image $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ + @$(kecho) ' Kernel: $(boot)/$@ is ready' $(INSTALL_TARGETS): $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 5be33a2d59a9..bdc1d5af03d2 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -31,7 +31,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y) $(obj)/xipImage: vmlinux FORCE $(call if_changed,objcopy) - @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' + @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' $(obj)/Image $(obj)/zImage: FORCE @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' @@ -46,14 +46,12 @@ $(obj)/xipImage: FORCE $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) - @$(kecho) ' Kernel: $@ is ready' $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) - @$(kecho) ' Kernel: $@ is ready' endif @@ -78,14 +76,12 @@ fi $(obj)/uImage: $(obj)/zImage FORCE @$(check_for_multiple_loadaddr) $(call if_changed,uimage) - @$(kecho) ' Image $@ is ready' $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(Q)$(MAKE) $(build)=$(obj)/bootp $@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE $(call if_changed,objcopy) - @$(kecho) ' Kernel: $@ is ready' PHONY += initrd install zinstall uinstall initrd: -- cgit v1.2.3-58-ga151 From 2374b063c3dfbb68a40b52067eaff81510f384b1 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Fri, 17 Jun 2016 18:21:07 +0100 Subject: ARM: 8581/1: add missing to arch/arm/kernel/devtree.c Fix the following warnings by including declarations from : arch/arm/kernel/devtree.c:69:13: warning: symbol 'arm_dt_init_cpu_maps' was not declared. Should it be static? arch/arm/kernel/devtree.c:210:27: warning: symbol 'setup_machine_fdt' was not declared. Should it be static?
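The general pattern behind the fix, as a two-file sketch with hypothetical names:

/* foo.h */
int foo(void);

/* foo.c -- without the #include, sparse cannot see any prototype for
 * the external definition and reports:
 *   warning: symbol 'foo' was not declared. Should it be static? */
#include "foo.h"

int foo(void) { return 42; }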
Signed-off-by: Ben Dooks Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/kernel/devtree.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index f8966bb25f9d..40ecd5f514a2 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-58-ga151 From 215e362dafede7cc691d8a573d2b31bc7138a770 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 25 Feb 2015 22:50:39 +0100 Subject: ARM: 8306/1: loop_udelay: remove bogomips value limitation Now that we don't support ARMv3 anymore, the loop based delay code can convert microsecs into number of loops using a 64-bit multiplication and more precision. This allows us to lift the hard limit of 3355 on the bogomips value as loops_per_jiffy may now safely span the full 32-bit range. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/delay.h | 6 +++--- arch/arm/lib/Makefile | 5 ++++- arch/arm/lib/delay-loop.S | 15 +++++---------- 3 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index dff714d886d5..b7a428154355 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -10,8 +10,8 @@ #include /* HZ */ #define MAX_UDELAY_MS 2 -#define UDELAY_MULT ((UL(2199023) * HZ) >> 11) -#define UDELAY_SHIFT 30 +#define UDELAY_MULT UL(2047 * HZ + 483648 * HZ / 1000000) +#define UDELAY_SHIFT 31 #ifndef __ASSEMBLY__ @@ -34,7 +34,7 @@ extern struct arm_delay_ops { * it, it means that you're calling udelay() with an out of range value. * * With currently imposed limits, this means that we support a max delay - * of 2000us. Further limits: HZ<=1000 and bogomips<=3355 + * of 2000us. Further limits: HZ<=1000 */ extern void __bad_udelay(void); diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index d8a780799506..27f4d96258a2 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -29,7 +29,10 @@ else lib-y += io-readsw-armv4.o io-writesw-armv4.o endif -lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o +ifeq ($(CONFIG_ARCH_RPC),y) + lib-y += ecard.o io-acorn.o floppydma.o + AFLAGS_delay-loop.o += -march=armv4 +endif $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S index 518bf6e93f78..792c59d885bc 100644 --- a/arch/arm/lib/delay-loop.S +++ b/arch/arm/lib/delay-loop.S @@ -10,6 +10,7 @@ #include #include #include + .text .LC0: .word loops_per_jiffy @@ -17,7 +18,6 @@ /* * r0 <= 2000 - * lpj <= 0x01ffffff (max. 
3355 bogomips) * HZ <= 1000 */ @@ -25,16 +25,11 @@ ENTRY(__loop_udelay) ldr r2, .LC1 mul r0, r2, r0 ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06 - mov r1, #-1 ldr r2, .LC0 - ldr r2, [r2] @ max = 0x01ffffff - add r0, r0, r1, lsr #32-14 - mov r0, r0, lsr #14 @ max = 0x0001ffff - add r2, r2, r1, lsr #32-10 - mov r2, r2, lsr #10 @ max = 0x00007fff - mul r0, r2, r0 @ max = 2^32-1 - add r0, r0, r1, lsr #32-6 - movs r0, r0, lsr #6 + ldr r2, [r2] + umull r1, r0, r2, r0 + adds r1, r1, #0xffffffff + adcs r0, r0, r0 reteq lr /* -- cgit v1.2.3-58-ga151 From 520319de0ced43c21c0aaef3c2392fb607d05419 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 21 Jun 2016 02:32:25 +0100 Subject: ARM: 8582/1: remove unused CONFIG_ARCH_HAS_BARRIERS Since commit 2b749cb3a515 ("ARM: realview: remove private barrier implementation"), this config is not used by any platform. Signed-off-by: Masahiro Yamada Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/barrier.h | 4 +--- arch/arm/mm/Kconfig | 6 ------ 2 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 112cc1a5d47f..f5d698182d50 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -44,9 +44,7 @@ extern void arm_heavy_mb(void); #define __arm_heavy_mb(x...) dsb(x) #endif -#ifdef CONFIG_ARCH_HAS_BARRIERS -#include -#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() __arm_heavy_mb() #define rmb() dsb() #define wmb() __arm_heavy_mb(st) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index cb569b65a54d..d15a7fe51618 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1025,12 +1025,6 @@ config ARM_DMA_MEM_BUFFERABLE You are recommended say 'Y' here and debug any affected drivers. -config ARCH_HAS_BARRIERS - bool - help - This option allows the use of custom mandatory barriers - included via the mach/barriers.h file. - config ARM_HEAVY_MB bool -- cgit v1.2.3-58-ga151 From 14c4a533e0996f95a0a64dfd0b6252d788cebc74 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jun 2016 21:28:47 +0100 Subject: ARM: 8583/1: mm: fix location of _etext The _etext position is defined to be the end of the kernel text code, and should not include any part of the data segments. This interferes with things that might check memory ranges and expect executable code up to _etext. Just to be conservative, leave the kernel resource as it was, using __init_begin instead of _etext as the end mark. 
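Schematically, the resource setup after this change looks like the sketch below (linker-provided symbols; virt_to_phys and the resource plumbing are elided):

extern char _text[], __init_begin[], _sdata[], _end[];

struct span { unsigned long start, end; };

void kernel_resource_spans(struct span *code, struct span *data)
{
	code->start = (unsigned long)_text;
	code->end   = (unsigned long)__init_begin - 1; /* conservative end mark, not _etext */
	data->start = (unsigned long)_sdata;
	data->end   = (unsigned long)_end - 1;
}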
Signed-off-by: Kees Cook Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 2 +- arch/arm/kernel/vmlinux.lds.S | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 7b5350060612..dd84f03dc2d4 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -844,7 +844,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) struct resource *res; kernel_code.start = virt_to_phys(_text); - kernel_code.end = virt_to_phys(_etext - 1); + kernel_code.end = virt_to_phys(__init_begin - 1); kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index e2c6da096cef..99420fc1f066 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -125,6 +125,8 @@ SECTIONS #ifdef CONFIG_DEBUG_ALIGN_RODATA . = ALIGN(1<<SECTION_SHIFT); #endif [...] -- cgit v1.2.3-58-ga151 From: Arnd Bergmann Date: Fri, 1 Jul 2016 18:02:22 +0100 Subject: ARM: 8584/1: floppy: avoid gcc-6 warning gcc-6.0 warns about comparisons between two identical expressions, which is what we get in the floppy driver when writing to the FD_DOR register: drivers/block/floppy.c: In function 'set_dor': drivers/block/floppy.c:810:44: error: self-comparison always evaluates to true [-Werror=tautological-compare] fd_outb(newdor, FD_DOR); It would be nice to use a static inline function instead of the macro, to avoid the warning, but we cannot do that because the FD_DOR definition is incomplete at this point. Adding a cast to (u32) is a harmless way to shut up the warning, just not very nice. Signed-off-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/floppy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h index f4882553fbb0..85a34cc8316a 100644 --- a/arch/arm/include/asm/floppy.h +++ b/arch/arm/include/asm/floppy.h @@ -17,7 +17,7 @@ #define fd_outb(val,port) \ do { \ - if ((port) == FD_DOR) \ + if ((port) == (u32)FD_DOR) \ fd_setdor((val)); \ else \ outb((val),(port)); \ -- cgit v1.2.3-58-ga151 From f6492164ecb19d43687875fdb456845acc504590 Mon Sep 17 00:00:00 2001 From: Matija Glavinic Pecotic Date: Tue, 7 Jun 2016 16:19:16 +0100 Subject: ARM: 8577/1: Fix Cortex-A15 798181 errata initialization The current errata initialization doesn't properly take the revision and REVIDR into account. Depending on the core revision, some REVIDR bits should not be taken into account. The errata notice misleadingly declares r3p3 to be error-free, but this is not the case. Include r3p3 in the errata initialization. Here are the possible fixes defined in the REVIDR register for r2 and r3 [1,2]: r0p0-r2p1: No fixes applied r2p2,r2p3: REVIDR[4]: 798181 Moving a virtual page that is being accessed by an active process can lead to unexpected behavior REVIDR[9]: Not defined r2p4,r3p0,r3p1,r3p2: REVIDR[4]: 798181 Moving a virtual page that is being accessed by an active process can lead to unexpected behavior REVIDR[9]: 798181 Moving a virtual page that is being accessed by an active process can lead to unexpected behavior - This is an update to a previously released ECO. r3p3: REVIDR[4]: Reserved REVIDR[9]: 798181 Moving a virtual page that is being accessed by an active process can lead to unexpected behavior - This is an update to a previously released ECO. And here is the proposed handling from the same document: * In r3p2 and earlier versions with REVIDR[4]=0, the full workaround is required.
* In r3p2 and earlier versions with REVIDR[4]=1, REVIDR[9]=0, only the portion of the workaround up to the end of step 6 is required. * In r3p2 and earlier versions with REVIDR[4]=1, REVIDR[9]=1, no workaround is required. * In r3p3, if REVIDR[9]=0, only the portion of the workaround up to the end of step 6 is required. * In r3p3, if REVIDR[9]=1, no workaround is required. These imply the following: REVIDR[9] set -> No WA REVIDR[4] set, REVIDR[9] cleared -> Partial WA Both cleared -> Full WA Certain bits should or should not be taken into account depending on whether they are defined for the revision. Although not explicitly mentioned in the errata note, REVIDR[9] set with REVIDR[4] cleared is a valid combination which requires no WA. This is confirmed by ARM support and the errata notice will be updated. [1] ARM Cortex-A15 MPCore - NEON Product revision r3 Software Developers Errata Notice ARM-EPM-028093 v20.0 Released [2] ARM Cortex-A15 MPCore - NEON Product Revision r2 Software Developers Errata Notice ARM-EPM-028090 v19.3 Released Signed-off-by: Matija Glavinic Pecotic Reviewed-by: Alexander Sverdlin Reviewed-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/kernel/smp_tlb.c | 44 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 2e72be4f623e..22313cb53362 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -93,17 +93,53 @@ void erratum_a15_798181_init(void) unsigned int revidr = read_cpuid(CPUID_REVIDR); /* Brahma-B15 r0p0..r0p2 affected - * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */ - if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) + * Cortex-A15 r0p0..r3p3 w/o ECO fix affected + * Fixes applied to A15 with respect to the revision and revidr are: + * + * r0p0-r2p1: No fixes applied + * r2p2,r2p3: + * REVIDR[4]: 798181 Moving a virtual page that is being accessed + * by an active process can lead to unexpected behavior + * REVIDR[9]: Not defined + * r2p4,r3p0,r3p1,r3p2: + * REVIDR[4]: 798181 Moving a virtual page that is being accessed + * by an active process can lead to unexpected behavior + * REVIDR[9]: 798181 Moving a virtual page that is being accessed + * by an active process can lead to unexpected behavior + * - This is an update to a previously released ECO.
+ * + * Handling: + * REVIDR[9] set -> No WA + * REVIDR[4] set, REVIDR[9] cleared -> Partial WA + * Both cleared -> Full WA + */ + if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) { erratum_a15_798181_handler = erratum_a15_798181_broadcast; - else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 && - (revidr & 0x210) != 0x210) { + } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) { + erratum_a15_798181_handler = erratum_a15_798181_broadcast; + } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) { if (revidr & 0x10) erratum_a15_798181_handler = erratum_a15_798181_partial; else erratum_a15_798181_handler = erratum_a15_798181_broadcast; + } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) { + if ((revidr & 0x210) == 0) + erratum_a15_798181_handler = + erratum_a15_798181_broadcast; + else if (revidr & 0x10) + erratum_a15_798181_handler = + erratum_a15_798181_partial; + } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) { + if ((revidr & 0x200) == 0) + erratum_a15_798181_handler = + erratum_a15_798181_partial; } } #endif -- cgit v1.2.3-58-ga151 From e6978e4bf181fb3b5f8cb6f71b4fe30fbf1b655c Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 13 May 2016 11:40:20 +0100 Subject: ARM: save and reset the address limit when entering an exception When we enter an exception, the current address limit should not apply to the exception context: if the exception context wishes to access kernel space via the user accessors (eg, perf code), it must explicitly request such access. Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/ptrace.h | 2 +- arch/arm/kernel/asm-offsets.c | 1 + arch/arm/kernel/entry-armv.S | 7 ++++++- arch/arm/kernel/entry-header.S | 4 ++++ arch/arm/kernel/process.c | 12 ++++++++---- 5 files changed, 20 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 0ef0093800f2..e9c9a117bd25 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -22,7 +22,7 @@ struct pt_regs { struct svc_pt_regs { struct pt_regs regs; u32 dacr; - u32 unused; + u32 addr_limit; }; #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 9a8ce342cd82..608008229c7d 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -109,6 +109,7 @@ int main(void) DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr)); + DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit)); DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); BLANK(); #ifdef CONFIG_CACHE_L2X0 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 0d6f5413be18..bc5f50799d75 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -185,6 +185,12 @@ ENDPROC(__und_invalid) @ stmia r7, {r2 - r6} + get_thread_info tsk + ldr r0, [tsk, #TI_ADDR_LIMIT] + mov r1, #TASK_SIZE + str r1, [tsk, #TI_ADDR_LIMIT] + str r0, [sp, #SVC_ADDR_LIMIT] + uaccess_save r0 .if \uaccess uaccess_disable r0 @@ -213,7 +219,6 @@ __irq_svc: irq_handler #ifdef CONFIG_PREEMPT - get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 
5e1d029147cb..6391728c8f03 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -215,7 +215,9 @@ blne trace_hardirqs_off #endif .endif + ldr r1, [sp, #SVC_ADDR_LIMIT] uaccess_restore + str r1, [tsk, #TI_ADDR_LIMIT] #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -259,7 +261,9 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq + ldr r1, [sp, #SVC_ADDR_LIMIT] uaccess_restore + str r1, [tsk, #TI_ADDR_LIMIT] #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index f1c720c0d568..612eb530f33f 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -96,19 +96,23 @@ void __show_regs(struct pt_regs *regs) unsigned long flags; char buf[64]; #ifndef CONFIG_CPU_V7M - unsigned int domain; + unsigned int domain, fs; #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* * Get the domain register for the parent context. In user * mode, we don't save the DACR, so lets use what it should * be. For other modes, we place it after the pt_regs struct. */ - if (user_mode(regs)) + if (user_mode(regs)) { domain = DACR_UACCESS_ENABLE; - else + fs = get_fs(); + } else { domain = to_svc_pt_regs(regs)->dacr; + fs = to_svc_pt_regs(regs)->addr_limit; + } #else domain = get_domain(); + fs = get_fs(); #endif #endif @@ -144,7 +148,7 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (get_fs() == get_ds()) + else if (fs == get_ds()) segment = "kernel"; else segment = "user"; -- cgit v1.2.3-58-ga151 From 62c0f4a53447bc298c337713d2ede1e6bdec6fdf Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Thu, 7 Apr 2016 00:25:00 +0100 Subject: ARM: 8558/1: errata: Workaround errata A12 818325/852422 A17 852423 There are several similar errata on Cortex A12 and A17 that all have the same workaround: setting bit[12] of the Feature Register. Technically, the list of errata is: - A12 818325: Execution of an UNPREDICTABLE STR or STM instruction might deadlock. Fixed in r0p1. - A12 852422: Execution of a sequence of instructions might lead to either a data corruption or a CPU deadlock. Not fixed in any A12s yet. - A17 852423: Execution of a sequence of instructions might lead to either a data corruption or a CPU deadlock. Not fixed in any A17s yet. Since A12 got renamed to A17 it seems likely that there won't be any future Cortex-A12 cores, so we'll enable it for all Cortex-A12. For Cortex-A17 I believe that all known revisions are affected and that all known revisions means <= r1p2. Presumably if a new A17 was released it would have this problem fixed. Note that folks previously expressed opposition to this change because: A) It was thought to only apply to r0p0 and there were no known r0p0 boards supported in mainline. B) It was argued that such a workaround belonged in firmware. Now that this same fix solves other errata on real boards (like rk3288) point A) is addressed. Point B) is impossible to address on boards like rk3288. On rk3288 the firmware doesn't stay resident in RAM and isn't involved at all in the suspend/resume process nor in the SMP bringup process. That means that the most the firmware could do would be to set the bit on "core 0" and this bit would be lost at suspend/resume time. It is true that we could write a "generic" solution that saved the boot-time "core 0" value of this register and applied it at SMP bringup / resume time.
However, since this register (described as the "Feature Register" in errata) appears to be undocumented (as far as I can tell) and is only modified for these errata, that "generic" solution seems questionably cleaner. The generic solution also won't fix existing users that haven't happened to do a FW update. Note that in ARM64 presumably PSCI will be universal and fixes like this will end up in ATF. Hopefully we are nearing the end of this style of errata workaround. Signed-off-by: Douglas Anderson Signed-off-by: Huang Tao Signed-off-by: Kever Yang Signed-off-by: Russell King --- arch/arm/Kconfig | 26 ++++++++++++++++++++++++++ arch/arm/mm/proc-v7.S | 27 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 90542db1220d..39cd37f29aaf 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1186,6 +1186,32 @@ config ARM_ERRATA_773022 loop buffer may deliver incorrect instructions. This workaround disables the loop buffer to avoid the erratum. +config ARM_ERRATA_818325_852422 + bool "ARM errata: A12: some seqs of opposed cond code instrs => deadlock or corruption" + depends on CPU_V7 + help + This option enables the workaround for: + - Cortex-A12 818325: Execution of an UNPREDICTABLE STR or STM + instruction might deadlock. Fixed in r0p1. + - Cortex-A12 852422: Execution of a sequence of instructions might + lead to either a data corruption or a CPU deadlock. Not fixed in + any Cortex-A12 cores yet. + This workaround for all both errata involves setting bit[12] of the + Feature Register. This bit disables an optimisation applied to a + sequence of 2 instructions that use opposing condition codes. + +config ARM_ERRATA_852423 + bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption" + depends on CPU_V7 + help + This option enables the workaround for: + - Cortex-A17 852423: Execution of a sequence of instructions might + lead to either a data corruption or a CPU deadlock. Not fixed in + any Cortex-A17 cores yet. + This is identical to Cortex-A12 erratum 852422. It is a separate + config option from the A12 erratum due to the way errata are checked + for and handled. 
+ endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6fcaac8e200f..b20b02e0b727 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -362,6 +362,23 @@ __ca15_errata: #endif b __errata_finish +__ca12_errata: +#ifdef CONFIG_ARM_ERRATA_818325_852422 + mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register + orr r10, r10, #1 << 12 @ set bit #12 + mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif + b __errata_finish + +__ca17_errata: +#ifdef CONFIG_ARM_ERRATA_852423 + cmp r6, #0x12 @ only present up to r1p2 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register + orrle r10, r10, #1 << 12 @ set bit #12 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif + b __errata_finish + __v7_pj4b_setup: #ifdef CONFIG_CPU_PJ4B @@ -443,6 +460,16 @@ __v7_setup_cont: teq r0, r10 beq __ca9_errata + /* Cortex-A12 Errata */ + ldr r10, =0x00000c0d @ Cortex-A12 primary part number + teq r0, r10 + beq __ca12_errata + + /* Cortex-A17 Errata */ + ldr r10, =0x00000c0e @ Cortex-A17 primary part number + teq r0, r10 + beq __ca17_errata + /* Cortex-A15 Errata */ ldr r10, =0x00000c0f @ Cortex-A15 primary part number teq r0, r10 -- cgit v1.2.3-58-ga151 From 416bcf21591850cd12066b2f11655695118e6908 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Thu, 7 Apr 2016 00:26:05 +0100 Subject: ARM: 8559/1: errata: Workaround erratum A12 821420 This erratum has a very simple workaround (set a bit in a register), so let's apply it. Apparently the workaround's downside is a very slight power impact. Note that applying this erratum's workaround fixes deadlocks that are easy to reproduce with real-world applications. The arguments for why this needs to be in the kernel are similar to the arguments made in the patch "Workaround errata A12 818325/852422 A17 852423". Signed-off-by: Douglas Anderson Signed-off-by: Russell King --- arch/arm/Kconfig | 10 ++++++++++ arch/arm/mm/proc-v7.S | 5 +++++ 2 files changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 39cd37f29aaf..ed275aa293e5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1200,6 +1200,16 @@ config ARM_ERRATA_818325_852422 Feature Register. This bit disables an optimisation applied to a sequence of 2 instructions that use opposing condition codes. +config ARM_ERRATA_821420 + bool "ARM errata: A12: sequence of VMOV to core registers might lead to a dead lock" + depends on CPU_V7 + help + This option enables the workaround for the 821420 Cortex-A12 + (all revs) erratum. In very rare timing conditions, a sequence + of VMOV to Core registers instructions, for which the second + one is in the shadow of a branch or abort, can lead to a + deadlock when the VMOV instructions are issued out-of-order.
+ config ARM_ERRATA_852423 bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption" depends on CPU_V7 diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b20b02e0b727..eefc10ff8e7e 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -367,6 +367,11 @@ __ca12_errata: mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register orr r10, r10, #1 << 12 @ set bit #12 mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif +#ifdef CONFIG_ARM_ERRATA_821420 + mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg + orr r10, r10, #1 << 1 @ set bit #1 + mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg #endif b __errata_finish -- cgit v1.2.3-58-ga151 From 9f6f93543d473d656bdc5c94f567c7684e956e52 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Thu, 7 Apr 2016 00:27:26 +0100 Subject: ARM: 8560/1: errata: Workaround errata A12 825619 / A17 852421 The workaround for both errata is to set bit 24 in the diagnostic register. There are no known end-user bugs solved by fixing these errata, but the fix is trivial and it seems sane to apply it. The arguments for why this needs to be in the kernel are similar to the arguments made in the patch "Workaround errata A12 818325/852422 A17 852423". Signed-off-by: Douglas Anderson Signed-off-by: Russell King --- arch/arm/Kconfig | 18 ++++++++++++++++++ arch/arm/mm/proc-v7.S | 11 +++++++++++ 2 files changed, 29 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ed275aa293e5..1be1022c21e8 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1210,6 +1210,24 @@ config ARM_ERRATA_821420 one is in the shadow of a branch or abort, can lead to a deadlock when the VMOV instructions are issued out-of-order. +config ARM_ERRATA_825619 + bool "ARM errata: A12: DMB NSHST/ISHST mixed ... might cause deadlock" + depends on CPU_V7 + help + This option enables the workaround for the 825619 Cortex-A12 + (all revs) erratum. Within rare timing constraints, executing a + DMB NSHST or DMB ISHST instruction followed by a mix of Cacheable + and Device/Strongly-Ordered loads and stores might cause deadlock +config ARM_ERRATA_852421 + bool "ARM errata: A17: DMB ST might fail to create order between stores" + depends on CPU_V7 + help + This option enables the workaround for the 852421 Cortex-A17 + (r1p0, r1p1, r1p2) erratum. Under very rare timing conditions, + execution of a DMB ST instruction might fail to properly order + stores from GroupA and stores from GroupB.
+ config ARM_ERRATA_852423 bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption" depends on CPU_V7 diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index eefc10ff8e7e..a7123b4e129d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -372,10 +372,21 @@ __ca12_errata: mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg orr r10, r10, #1 << 1 @ set bit #1 mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg +#endif +#ifdef CONFIG_ARM_ERRATA_825619 + mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register + orr r10, r10, #1 << 24 @ set bit #24 + mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif b __errata_finish __ca17_errata: +#ifdef CONFIG_ARM_ERRATA_852421 + cmp r6, #0x12 @ only present up to r1p2 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register + orrle r10, r10, #1 << 24 @ set bit #24 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif #ifdef CONFIG_ARM_ERRATA_852423 cmp r6, #0x12 @ only present up to r1p2 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register -- cgit v1.2.3-58-ga151 From f12708965069410691e47d1d216ec7ad1516bfd2 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Fri, 15 Apr 2016 11:15:18 +0100 Subject: ARM: 8561/3: dma-mapping: Don't use outer_flush_range when the L2C is coherent When an L2 cache controller is used in a system that provides hardware coherency, the outer cache operations are entirely useless and can be skipped. Moreover, on some systems, they are harmful as they cause deadlocks between the Marvell coherency mechanism, the Marvell PCIe controller and the Cortex-A9. In the current kernel implementation, the outer cache flush range operation is triggered by the dma_alloc function. This operation can take place at runtime and in some circumstances may lead to the PCIe/PL310 deadlock on Armada 375/38x SoCs. This patch extends the __dma_clear_buffer() function to receive a boolean argument related to the coherency of the system. The same thing is done for the calling functions.
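The control flow the patch introduces, as a compact sketch (the stub helpers stand in for the kernel's dmac_flush_range()/outer_flush_range(); only the guard logic is the point):

#include <string.h>

#define NORMAL   0
#define COHERENT 1

static void dmac_flush_range(void *s, void *e) { (void)s; (void)e; }                 /* stub */
static void outer_flush_range(unsigned long s, unsigned long e) { (void)s; (void)e; } /* stub */

void dma_clear_buffer(void *ptr, size_t size, int coherent_flag)
{
	memset(ptr, 0, size);  /* zeroing is needed either way */
	if (coherent_flag == COHERENT)
		return;        /* hardware keeps the caches coherent: no maintenance */
	dmac_flush_range(ptr, (char *)ptr + size);                        /* inner cache */
	outer_flush_range((unsigned long)ptr, (unsigned long)ptr + size); /* outer (L2C) */
}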
Reported-by: Nadav Haklai Signed-off-by: Gregory CLEMENT Cc: # v3.16+ Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 62 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ff7ed5697d3e..d2485c749ad5 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -49,6 +49,7 @@ struct arm_dma_alloc_args { pgprot_t prot; const void *caller; bool want_vaddr; + int coherent_flag; }; struct arm_dma_free_args { @@ -59,6 +60,9 @@ struct arm_dma_free_args { bool want_vaddr; }; +#define NORMAL 0 +#define COHERENT 1 + struct arm_dma_allocator { void *(*alloc)(struct arm_dma_alloc_args *args, struct page **ret_page); @@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev) return mask; } -static void __dma_clear_buffer(struct page *page, size_t size) +static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) { /* * Ensure that the allocated pages are zeroed, and that any data @@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size) while (size > 0) { void *ptr = kmap_atomic(page); memset(ptr, 0, PAGE_SIZE); - dmac_flush_range(ptr, ptr + PAGE_SIZE); + if (coherent_flag != COHERENT) + dmac_flush_range(ptr, ptr + PAGE_SIZE); kunmap_atomic(ptr); page++; size -= PAGE_SIZE; } - outer_flush_range(base, end); + if (coherent_flag != COHERENT) + outer_flush_range(base, end); } else { void *ptr = page_address(page); memset(ptr, 0, size); - dmac_flush_range(ptr, ptr + size); - outer_flush_range(__pa(ptr), __pa(ptr) + size); + if (coherent_flag != COHERENT) { + dmac_flush_range(ptr, ptr + size); + outer_flush_range(__pa(ptr), __pa(ptr) + size); + } } } @@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size) * Allocate a DMA buffer for 'dev' of size 'size' using the * specified gfp mask. Note that 'size' must be page aligned. */ -static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) +static struct page *__dma_alloc_buffer(struct device *dev, size_t size, + gfp_t gfp, int coherent_flag) { unsigned long order = get_order(size); struct page *page, *p, *e; @@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) __free_page(p); - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); return page; } @@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller, bool want_vaddr); + const void *caller, bool want_vaddr, + int coherent_flag); static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, struct page **ret_page, @@ -405,10 +415,13 @@ static int __init atomic_pool_init(void) atomic_pool = gen_pool_create(PAGE_SHIFT, -1); if (!atomic_pool) goto out; - + /* + * The atomic pool is only used for non-coherent allocations + * so we must pass NORMAL for coherent_flag. 
+ */ if (dev_get_cma_area(NULL)) ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, - &page, atomic_pool_init, true); + &page, atomic_pool_init, true, NORMAL); else ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, &page, atomic_pool_init, true); @@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, { struct page *page; void *ptr = NULL; - page = __dma_alloc_buffer(dev, size, gfp); + /* + * __alloc_remap_buffer is only called when the device is + * non-coherent + */ + page = __dma_alloc_buffer(dev, size, gfp, NORMAL); if (!page) return NULL; if (!want_vaddr) @@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller, bool want_vaddr) + const void *caller, bool want_vaddr, + int coherent_flag) { unsigned long order = get_order(size); size_t count = size >> PAGE_SHIFT; @@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, if (!page) return NULL; - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); if (!want_vaddr) goto out; @@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) #define __get_dma_pgprot(attrs, prot) __pgprot(0) #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL #define __alloc_from_pool(size, ret_page) NULL -#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL +#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL #define __free_from_pool(cpu_addr, size) do { } while (0) #define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) #define __dma_free_remap(cpu_addr, size) do { } while (0) @@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, struct page **ret_page) { struct page *page; - page = __dma_alloc_buffer(dev, size, gfp); + /* __alloc_simple_buffer is only called when the device is coherent */ + page = __dma_alloc_buffer(dev, size, gfp, COHERENT); if (!page) return NULL; @@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args, { return __alloc_from_contiguous(args->dev, args->size, args->prot, ret_page, args->caller, - args->want_vaddr); + args->want_vaddr, args->coherent_flag); } static void cma_allocator_free(struct arm_dma_free_args *args) @@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, .prot = prot, .caller = caller, .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), + .coherent_flag = is_coherent ? 
COHERENT : NORMAL, }; #ifdef CONFIG_DMA_API_DEBUG @@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, static const int iommu_order_array[] = { 9, 8, 4, 0 }; static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, struct dma_attrs *attrs, + int coherent_flag) { struct page **pages; int count = size >> PAGE_SHIFT; @@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!page) goto error; - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); for (i = 0; i < count; i++) pages[i] = page + i; @@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, pages[i + j] = pages[i] + j; } - __dma_clear_buffer(pages[i], PAGE_SIZE << order); + __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); i += 1 << order; count -= 1 << order; } @@ -1505,7 +1526,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, */ gfp &= ~(__GFP_COMP); - pages = __iommu_alloc_buffer(dev, size, gfp, attrs); + /* For now always consider we are in a non-coherent case */ + pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL); if (!pages) return NULL; -- cgit v1.2.3-58-ga151 From 565068221b90573d0a4ac3e73f1aa960f5571b84 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Fri, 15 Apr 2016 11:20:13 +0100 Subject: ARM: 8561/4: dma-mapping: Fix the coherent case when iommu is used When doing DMA allocation with an IOMMU, __iommu_alloc_atomic() was used even when the system was coherent. However, this function allocates from a non-cacheable pool, which is fine when the device is not cache coherent but won't work as expected if the device is cache coherent. Indeed, the CPU and device must access the memory using the same cacheability attributes. Moreover, when the devices are coherent, the mmap call must not change the pg_prot flags in the vma struct. The arm_coherent_iommu_mmap_attrs has been updated in the same way that it was done for the arm_dma_mmap in commit 55af8a91640d ("ARM: 8387/1: arm/mm/dma-mapping.c: Add arm_coherent_dma_mmap").
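The resulting call structure can be sketched as follows (signatures heavily simplified; malloc/calloc stand in for the cacheable buffer and the non-cacheable atomic pool):

#include <stdlib.h>

#define NORMAL   0
#define COHERENT 1

/* one worker decides what kind of memory to hand out... */
static void *iommu_alloc(size_t size, int coherent_flag)
{
	if (coherent_flag == COHERENT)
		return malloc(size); /* plain cacheable memory is fine */
	return calloc(1, size);      /* otherwise: the non-cacheable pool */
}

/* ...and the entry points merely pick the flag */
void *arm_iommu_alloc(size_t size)          { return iommu_alloc(size, NORMAL); }
void *arm_coherent_iommu_alloc(size_t size) { return iommu_alloc(size, COHERENT); }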
Suggested-by: Catalin Marinas
Signed-off-by: Gregory CLEMENT
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 86 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 65 insertions(+), 21 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d2485c749ad5..b7eed75960fe 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1476,13 +1476,16 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle)
+static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
+				  dma_addr_t *handle, int coherent_flag)
 {
 	struct page *page;
 	void *addr;
 
-	addr = __alloc_from_pool(size, &page);
+	if (coherent_flag == COHERENT)
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else
+		addr = __alloc_from_pool(size, &page);
 	if (!addr)
 		return NULL;
@@ -1498,14 +1501,18 @@ err_mapping:
 }
 
 static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
-				dma_addr_t handle, size_t size)
+				dma_addr_t handle, size_t size, int coherent_flag)
 {
 	__iommu_remove_mapping(dev, handle, size);
-	__free_from_pool(cpu_addr, size);
+	if (coherent_flag == COHERENT)
+		__dma_free_buffer(virt_to_page(cpu_addr), size);
+	else
+		__free_from_pool(cpu_addr, size);
 }
 
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+	    int coherent_flag)
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
@@ -1514,8 +1521,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (!gfpflags_allow_blocking(gfp))
-		return __iommu_alloc_atomic(dev, size, handle);
+	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
+		return __iommu_alloc_simple(dev, size, gfp, handle,
+					    coherent_flag);
 
 	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1526,8 +1534,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	 */
 	gfp &= ~(__GFP_COMP);
 
-	/* For now always consider we are in a non-coherent case */
-	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
 	if (!pages)
 		return NULL;
 
@@ -1552,7 +1559,19 @@ err_buffer:
 	return NULL;
 }
 
-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
+}
+
+static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
+}
+
+static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    struct dma_attrs *attrs)
 {
@@ -1562,8 +1581,6 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
 	if (!pages)
 		return -ENXIO;
 
@@ -1584,19 +1601,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	return 0;
 }
 
+static int arm_iommu_mmap_attrs(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static int arm_coherent_iommu_mmap_attrs(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
 /*
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-			  dma_addr_t handle, struct dma_attrs *attrs)
+void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
 {
 	struct page **pages;
 	size = PAGE_ALIGN(size);
 
-	if (__in_atomic_pool(cpu_addr, size)) {
-		__iommu_free_atomic(dev, cpu_addr, handle, size);
+	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
 		return;
 	}
 
@@ -1615,6 +1647,18 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
+void arm_iommu_free_attrs(struct device *dev, size_t size,
+		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+}
+
+void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
+}
+
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t dma_addr,
 		 size_t size, struct dma_attrs *attrs)
@@ -2019,9 +2063,9 @@ struct dma_map_ops iommu_ops = {
 };
 
 struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
+	.alloc		= arm_coherent_iommu_alloc_attrs,
+	.free		= arm_coherent_iommu_free_attrs,
+	.mmap		= arm_coherent_iommu_mmap_attrs,
 	.get_sgtable	= arm_iommu_get_sgtable,
 
 	.map_page	= arm_coherent_iommu_map_page,
-- cgit v1.2.3-58-ga151
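A note on how these two ops tables get selected: on kernels of this era
the per-device choice is made when the DMA ops are set up, from a
coherency flag supplied by firmware (for device tree, the "dma-coherent"
property). The following sketch is simplified from the same file,
arch/arm/mm/dma-mapping.c, with the CONFIG_ARM_DMA_USE_IOMMU guards and
mapping setup omitted:

	/* Pick the coherent ops table only for devices that firmware
	 * marked as DMA-coherent; everything else gets the default
	 * (non-coherent) IOMMU ops. */
	static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
	{
		return coherent ? &iommu_coherent_ops : &iommu_ops;
	}

Whichever table is chosen here determines which of the alloc/free/mmap
variants from the patches above a driver's dma_alloc_coherent() call
ultimately reaches.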