author		Linus Torvalds <torvalds@linux-foundation.org>	2020-12-18 10:57:27 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-18 10:57:27 -0800
commit		5ba836eb9fdb07843cfa004e511f333745adb76e (patch)
tree		cc20246cd657225c494bfdd8d3e24f6b7887ff71
parent		e2ae634014d3a8839a99f8897b3f6346a133a33b (diff)
parent		9fd339a45be5c06e239d45a042eab9d25de87882 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull more arm64 updates from Catalin Marinas:
 "These are some trivial updates that mostly fix/clean-up code pushed
  during the merging window:

   - Work around broken GCC 4.9 handling of "S" asm constraint

   - Suppress W=1 missing prototype warnings

   - Warn the user when a small VA_BITS value cannot map the available
     memory

   - Drop the useless update to per-cpu cycles"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Work around broken GCC 4.9 handling of "S" constraint
  arm64: Warn the user when a small VA_BITS value wastes memory
  arm64: entry: suppress W=1 prototype warnings
  arm64: topology: Drop the useless update to per-cpu cycles
-rw-r--r--	arch/arm64/include/asm/exception.h	|  4
-rw-r--r--	arch/arm64/include/asm/kvm_asm.h	|  8
-rw-r--r--	arch/arm64/kernel/topology.c		|  6
-rw-r--r--	arch/arm64/mm/init.c			|  3
4 files changed, 15 insertions, 6 deletions
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 78537393b650..6546158d2f2d 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,6 +31,10 @@ static inline u32 disr_to_esr(u64 disr)
 	return esr;
 }
 
+asmlinkage void el1_sync_handler(struct pt_regs *regs);
+asmlinkage void el0_sync_handler(struct pt_regs *regs);
+asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+
 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void enter_from_user_mode(void);
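
The prototypes above exist so that the el*_sync_handler definitions (which are only called from assembly) have a prior declaration, which is what silences -Wmissing-prototypes in W=1 builds. A minimal sketch of the same pattern outside the kernel; the file and function names below are illustrative, not taken from the patch:

/* demo.h -- with W=1 (-Wmissing-prototypes), a non-static definition warns
 * unless a prototype like this is visible before the definition. */
struct pt_regs;
void demo_sync_handler(struct pt_regs *regs);

/* demo.c */
void demo_sync_handler(struct pt_regs *regs)
{
	(void)regs;	/* the real handlers are entered from assembly, not C */
}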
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 54387ccd1ab2..8e5fa28b78c2 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -200,6 +200,12 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
+#if defined(GCC_VERSION) && GCC_VERSION < 50000
+#define SYM_CONSTRAINT "i"
+#else
+#define SYM_CONSTRAINT "S"
+#endif
+
 /*
  * Obtain the PC-relative address of a kernel symbol
  * s: symbol
@@ -216,7 +222,7 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 		typeof(s) *addr;					\
 		asm("adrp	%0, %1\n"				\
 		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : "S" (&s));				\
+		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
 		addr;							\
 	})
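
The constraint is chosen at preprocessing time: GCC 4.9 mishandles the "S" (symbol reference) constraint on arm64, so older compilers fall back to "i". A standalone sketch of the same pattern, assuming an AArch64 toolchain; GCC_VERSION is kernel build infrastructure, so the sketch tests __GNUC__ instead, and the symbol and function names are illustrative:

#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 5
#define SYM_CONSTRAINT	"i"	/* GCC 4.9: fall back to an immediate operand */
#else
#define SYM_CONSTRAINT	"S"	/* symbol reference, the intended constraint */
#endif

static int some_symbol;		/* illustrative symbol whose address we resolve */

static inline void *pc_relative_addr(void)
{
	void *addr;

	/* adrp + add :lo12: materialise the PC-relative address of the symbol */
	asm("adrp	%0, %1\n"
	    "add	%0, %0, :lo12:%1\n"
	    : "=r" (addr) : SYM_CONSTRAINT (&some_symbol));
	return addr;
}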
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index c8308befdb1e..f6faa697e83e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -314,7 +314,7 @@ void topology_scale_freq_tick(void)
 
 	if (unlikely(core_cnt <= prev_core_cnt ||
 		     const_cnt <= prev_const_cnt))
-		goto store_and_exit;
+		return;
 
 	/*
 	 *	    /\core    arch_max_freq_scale
@@ -331,10 +331,6 @@ void topology_scale_freq_tick(void)
 
 	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
 	this_cpu_write(freq_scale, (unsigned long)scale);
-
-store_and_exit:
-	this_cpu_write(arch_core_cycles_prev, core_cnt);
-	this_cpu_write(arch_const_cycles_prev, const_cnt);
 }
 
 #ifdef CONFIG_ACPI_CPPC_LIB
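
With the label gone, the remaining body of topology_scale_freq_tick() only turns the deltas of the AMU core and constant cycle counters into a capacity scale clamped to SCHED_CAPACITY_SCALE. A simplified userspace sketch of that arithmetic; it assumes the constant counter ticks at the CPU's maximum frequency, so the per-cpu arch_max_freq_scale factor used by the real code drops out:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* scale ~= (d_core / d_const) in SCHED_CAPACITY_SCALE fixed point,
 * clamped like the min_t() in the hunk above. */
static uint64_t amu_freq_scale(uint64_t d_core, uint64_t d_const)
{
	uint64_t scale = d_core * SCHED_CAPACITY_SCALE / d_const;

	return scale < SCHED_CAPACITY_SCALE ? scale : SCHED_CAPACITY_SCALE;
}

int main(void)
{
	/* Core counter advanced 800 ticks while the constant counter advanced
	 * 1000, i.e. the CPU ran at roughly 80% of its maximum frequency. */
	printf("freq_scale = %llu\n",
	       (unsigned long long)amu_freq_scale(800, 1000));	/* prints 819 */
	return 0;
}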
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 69d4251ee079..75addb36354a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -295,6 +295,9 @@ void __init arm64_memblock_init(void)
 	memstart_addr = round_down(memblock_start_of_DRAM(),
 				   ARM64_MEMSTART_ALIGN);
 
+	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
+		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
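
The new warning fires when the span of DRAM exceeds linear_region_size, i.e. the portion of the kernel virtual address space reserved for the linear map (half the VA space on arm64). A small standalone sketch of the check; the 39-bit VA configuration and the DRAM addresses below are made-up numbers, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 39-bit VA kernel: the linear map covers half the VA space. */
	unsigned int va_bits = 39;
	uint64_t linear_region_size = 1ULL << (va_bits - 1);	/* 256 GiB */

	/* Hypothetical DRAM layout spanning 320 GiB of physical address space. */
	uint64_t memstart = 0x0080000000ULL;
	uint64_t memend   = 0x5080000000ULL;

	if (memend - memstart > linear_region_size)
		printf("Memory doesn't fit in the linear mapping, VA_BITS too small\n");
	else
		printf("the linear mapping covers all of DRAM\n");

	return 0;
}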