From f6cc0c50164994afd538911603b4a8f8029aeba7 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 16 Aug 2018 11:45:50 +0100
Subject: arm64: Avoid calling stop_machine() when patching jump labels

Patching a jump label involves patching a single instruction at a time,
swizzling between a branch and a NOP. The architecture treats these
instructions specially, so a concurrently executing CPU is guaranteed to
see either the NOP or the branch, rather than an amalgamation of the two
instruction encodings.

However, in order to guarantee that the new instruction is visible, it
is necessary to send an IPI to the concurrently executing CPU so that it
discards any previously fetched instructions from its pipeline. This
operation therefore cannot be completed from a context with IRQs
disabled, but this is exactly what happens on the jump label path where
the hotplug lock is held and irqs are subsequently disabled by
stop_machine_cpuslocked(). This results in a deadlock during boot on
Hikey-960.

Due to the architectural guarantees around patching NOPs and branches,
we don't actually need to stop_machine() at all on the jump label path,
so we can avoid the deadlock by using the "nosync" variant of our
instruction patching routine.

Fixes: 693350a79980 ("arm64: insn: Don't fallback on nosync path for general insn patching")
Reported-by: Tuomas Tynkkynen
Reported-by: John Stultz
Tested-by: Valentin Schneider
Tested-by: Tuomas Tynkkynen
Tested-by: John Stultz
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/jump_label.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index c2dd1ad3e648..e0756416e567 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -36,7 +36,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 		insn = aarch64_insn_gen_nop();
 	}
 
-	aarch64_insn_patch_text(&addr, &insn, 1);
+	aarch64_insn_patch_text_nosync(addr, insn);
 }
 
 void arch_jump_label_transform_static(struct jump_entry *entry,
--
cgit v1.2.3-58-ga151
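For orientation, the function reads roughly as below once the patch is
applied. Only the final call comes from the hunk above; the branch/NOP
selection is reconstructed from the surrounding kernel source of this
era, so treat it as a sketch rather than verbatim:

    /*
     * Sketch of arch_jump_label_transform() after this patch. The
     * branch-generation path is reconstructed for context and is not
     * part of the diff itself.
     */
    #include <linux/jump_label.h>
    #include <asm/insn.h>

    void arch_jump_label_transform(struct jump_entry *entry,
                                   enum jump_label_type type)
    {
            void *addr = (void *)entry->code;
            u32 insn;

            if (type == JUMP_LABEL_JMP) {
                    /* Branch from the patch site to the static-key target. */
                    insn = aarch64_insn_gen_branch_imm(entry->code,
                                                       entry->target,
                                                       AARCH64_INSN_BRANCH_NOLINK);
            } else {
                    insn = aarch64_insn_gen_nop();
            }

            /*
             * A NOP <-> branch swap is one of the few concurrent
             * modifications the architecture permits, so the "nosync"
             * variant suffices: no stop_machine(), and hence no deadlock
             * with IRQs disabled under the hotplug lock.
             */
            aarch64_insn_patch_text_nosync(addr, insn);
    }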
From 5ad356eabc47d26a92140a0c4b20eba471c10de3 Mon Sep 17 00:00:00 2001
From: Greg Hackmann
Date: Wed, 15 Aug 2018 12:51:21 -0700
Subject: arm64: mm: check for upper PAGE_SHIFT bits in pfn_valid()

ARM64's pfn_valid() shifts away the upper PAGE_SHIFT bits of the input
before seeing if the PFN is valid. This leads to false positives when
some of the upper bits are set, but the lower bits match a valid PFN.

For example, the following userspace code looks up a bogus entry in
/proc/kpageflags:

    int pagemap = open("/proc/self/pagemap", O_RDONLY);
    int pageflags = open("/proc/kpageflags", O_RDONLY);
    uint64_t pfn, val;

    lseek64(pagemap, [...], SEEK_SET);
    read(pagemap, &pfn, sizeof(pfn));
    if (pfn & (1UL << 63)) { /* valid PFN */
        pfn &= ((1UL << 55) - 1); /* clear flag bits */
        pfn |= (1UL << 55);
        lseek64(pageflags, pfn * sizeof(uint64_t), SEEK_SET);
        read(pageflags, &val, sizeof(val));
    }

On ARM64 this causes the userspace process to crash with SIGSEGV rather
than reading (1 << KPF_NOPAGE). kpageflags_read() treats the offset as
valid, and stable_page_flags() will try to access an address between
the user and kernel address ranges.

Fixes: c1cc1552616d ("arm64: MMU initialisation")
Cc: stable@vger.kernel.org
Signed-off-by: Greg Hackmann
Signed-off-by: Will Deacon
---
 arch/arm64/mm/init.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..811f9f8b3bb0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_map_memory(pfn << PAGE_SHIFT);
+	phys_addr_t addr = pfn << PAGE_SHIFT;
+
+	if ((addr >> PAGE_SHIFT) != pfn)
+		return 0;
+	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
--
cgit v1.2.3-58-ga151
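The arithmetic behind the fix is easy to check from userspace. Below is
a minimal standalone sketch (the helper name is ours, not the kernel's,
and it assumes 4K pages, i.e. PAGE_SHIFT == 12): with bit 55 set, the
shift pushes the upper bits off the top of the 64-bit value, so the old
pfn_valid() would have tested the truncated address, which matches a
genuine PFN, and returned a false positive. The round-trip comparison
rejects it.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumption: 4K pages */

    /*
     * Mirrors the round-trip check added to pfn_valid() above; returns 1
     * only if no upper bits of the PFN were shifted out.
     */
    static int pfn_shift_is_lossless(uint64_t pfn)
    {
            uint64_t addr = pfn << PAGE_SHIFT;

            return (addr >> PAGE_SHIFT) == pfn;
    }

    int main(void)
    {
            uint64_t good  = 0x80000;             /* a plausible PFN */
            uint64_t bogus = good | (1ULL << 55); /* flag bit set, as above */

            /* The old code mapped both PFNs to the same physical address. */
            printf("good:  %d\n", pfn_shift_is_lossless(good));  /* prints 1 */
            printf("bogus: %d\n", pfn_shift_is_lossless(bogus)); /* prints 0 */
            return 0;
    }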