From fde046e07d3343a0417eafc0533b0c9675b393e5 Mon Sep 17 00:00:00 2001
From: Jisheng Zhang
Date: Fri, 19 Nov 2021 12:46:08 +0800
Subject: arm64: extable: remove unused ex_handler_t definition

The ex_handler_t type was introduced in commit d6e2cc564775 ("arm64:
extable: add `type` and `data` fields"), but has never been used, and is
unnecessary. Remove it.

Signed-off-by: Jisheng Zhang
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20211119124608.3f03380b@xhacker
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/extable.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index c3d53811a15e..c0181e60cc98 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -10,9 +10,6 @@
 #include <asm/asm-extable.h>
 #include <asm/ptrace.h>
 
-typedef bool (*ex_handler_t)(const struct exception_table_entry *,
-			     struct pt_regs *);
-
 static inline unsigned long
 get_ex_fixup(const struct exception_table_entry *ex)
 {
--
cgit v1.2.3-58-ga151


From a3a5b763410c7bceacf41a52071134d9dc26202a Mon Sep 17 00:00:00 2001
From: Yunfeng Ye
Date: Thu, 9 Dec 2021 09:42:25 +0800
Subject: arm64: mm: Rename asid2idx() to ctxid2asid()

Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced
the asid2idx() and idx2asid() macros, but these macros have not been
really useful since commit f88f42f853a8 ("arm64: context: Free up kernel
ASIDs if KPTI is not in use").

The open-coded "(asid & ~ASID_MASK)" can be replaced by a macro that is
identical to asid2idx(), so rename asid2idx() to ctxid2asid() for a
better understanding. Also add an asid2ctxid() macro, which generates a
context id from an asid and a generation.

Signed-off-by: Yunfeng Ye
Reviewed-by: Kefeng Wang
Link: https://lore.kernel.org/r/c31516eb-6d15-94e0-421c-305fc010ea79@huawei.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/context.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cd72576ae2b7..bbc2708fe928 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map;
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
-#define asid2idx(asid)		((asid) & ~ASID_MASK)
-#define idx2asid(idx)		asid2idx(idx)
+#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid)	((asid) | (genid))
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -120,7 +120,7 @@ static void flush_context(void)
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid2idx(asid), asid_map);
+		__set_bit(ctxid2asid(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
-		u64 newasid = generation | (asid & ~ASID_MASK);
+		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
 
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
@@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		if (!__test_and_set_bit(asid2idx(asid), asid_map))
+		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
 			return newasid;
 	}
 
@@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return idx2asid(asid) | generation;
+	return asid2ctxid(asid, generation);
 }
 
 void check_and_switch_context(struct mm_struct *mm)
@@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	}
 
 	nr_pinned_asids++;
-	__set_bit(asid2idx(asid), pinned_asid_map);
+	__set_bit(ctxid2asid(asid), pinned_asid_map);
 	refcount_set(&mm->context.pinned, 1);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-	asid &= ~ASID_MASK;
+	asid = ctxid2asid(asid);
 
 	/* Set the equivalent of USER_ASID_BIT */
 	if (asid && arm64_kernel_unmapped_at_el0())
@@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm)
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 
 	if (refcount_dec_and_test(&mm->context.pinned)) {
-		__clear_bit(asid2idx(asid), pinned_asid_map);
+		__clear_bit(ctxid2asid(asid), pinned_asid_map);
 		nr_pinned_asids--;
 	}
--
cgit v1.2.3-58-ga151


From 386a74677be13175b5626f094ef37808c45f48b8 Mon Sep 17 00:00:00 2001
From: Yunfeng Ye
Date: Thu, 9 Dec 2021 09:46:03 +0800
Subject: arm64: mm: Use asid feature macros for cleanup

Commit 95b54c3e4c92 ("KVM: arm64: Add feature register flag definitions")
introduced the ID_AA64MMFR0_ASID_8 and ID_AA64MMFR0_ASID_16 macros. Use
these macros to clean up get_cpu_asid_bits().

No functional change.

Signed-off-by: Yunfeng Ye
Reviewed-by: Kefeng Wang
Link: https://lore.kernel.org/r/f71c75d3-735e-b32a-8414-b3e513c77240@huawei.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/context.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index bbc2708fe928..b8b4cf0bcf39 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void)
 		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
 					smp_processor_id(),  fld);
 		fallthrough;
-	case 0:
+	case ID_AA64MMFR0_ASID_8:
 		asid = 8;
 		break;
-	case 2:
+	case ID_AA64MMFR0_ASID_16:
 		asid = 16;
 	}
--
cgit v1.2.3-58-ga151


From 6f6cfa5867995c03959ce8c715e54b51cd5a1528 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 7 Dec 2021 18:32:25 +0000
Subject: arm64: mm: use die_kernel_fault() in do_mem_abort()

If we take an unhandled fault from EL1, either:

a) The xFSC handler calls die_kernel_fault() directly. In this case,
   die_kernel_fault() calls:

   pr_alert(..., msg, addr);
   mem_abort_decode(esr);
   show_pte(addr);
   die();
   bust_spinlocks(0);
   do_exit(SIGKILL);

b) The xFSC handler returns to do_mem_abort(), indicating failure. In
   this case, do_mem_abort() calls:

   pr_alert(..., addr);
   mem_abort_decode(esr);
   show_pte(addr);
   arm64_notify_die() {
     die();
   }

This inconsistency is unfortunate, and in theory in case (b) registered
notifiers can prevent us from terminating the faulting thread by
returning NOTIFY_STOP, whereupon we'll end up returning from the fault,
replaying, and almost certainly getting stuck in a livelock spewing
errors into dmesg.

We don't expect notifiers to fix things up, since we dump state to dmesg
before invoking them, so it would be more sensible to consistently
terminate the thread in this case.

This patch has do_mem_abort() call die_kernel_fault() for unhandled
faults taken from EL1. Where we would previously have logged a message
of the form:

| Unhandled fault at ${ADDR}
... we will now log a message of the form:

| Unable to handle kernel ${FAULT_NAME} at virtual address ${ADDR}

... and we will consistently terminate the thread from which the fault
was taken.

Signed-off-by: Mark Rutland
Cc: Will Deacon
Tested-by: Andrey Konovalov
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/20211207183226.834557-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9ae24e3b72be..b7b9caa41bc7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -813,11 +813,8 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
 	if (!inf->fn(far, esr, regs))
 		return;
 
-	if (!user_mode(regs)) {
-		pr_alert("Unhandled fault at 0x%016lx\n", addr);
-		mem_abort_decode(esr);
-		show_pte(addr);
-	}
+	if (!user_mode(regs))
+		die_kernel_fault(inf->name, addr, esr, regs);
 
 	/*
 	 * At this point we have an unrecognized fault type whose tag bits may
--
cgit v1.2.3-58-ga151


From 07b742a4d91260bdb61cd4cbe5ec3bba2ae7f6f9 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 7 Dec 2021 18:32:26 +0000
Subject: arm64: mm: log potential KASAN shadow alias

When the kernel is built with KASAN_GENERIC or KASAN_SW_TAGS, shadow
memory is allocated and mapped for all legitimate kernel addresses, and
prior to a regular memory access, instrumentation will read from the
corresponding shadow address.

Due to the way memory addresses are converted to shadow addresses, bogus
pointers (e.g. NULL) can generate shadow addresses out of the bounds of
allocated shadow memory. For example, with KASAN_GENERIC and 48-bit VAs,
NULL would have a shadow address of dfff800000000000, which falls
between the TTBR ranges.
To make such cases easier to debug, this patch makes die_kernel_fault()
dump the real memory address range for any potential KASAN shadow access
using kasan_non_canonical_hook(), which results in fault information as
below when KASAN is enabled:

| Unable to handle kernel paging request at virtual address dfff800000000017
| KASAN: null-ptr-deref in range [0x00000000000000b8-0x00000000000000bf]
| Mem abort info:
|   ESR = 0x96000004
|   EC = 0x25: DABT (current EL), IL = 32 bits
|   SET = 0, FnV = 0
|   EA = 0, S1PTW = 0
|   FSC = 0x04: level 0 translation fault
| Data abort info:
|   ISV = 0, ISS = 0x00000004
|   CM = 0, WnR = 0
| [dfff800000000017] address between user and kernel address ranges

Signed-off-by: Mark Rutland
Cc: Alexander Potapenko
Cc: Andrey Konovalov
Cc: Andrey Ryabinin
Cc: Dmitry Vyukov
Cc: Will Deacon
Tested-by: Andrey Konovalov
Acked-by: Will Deacon
Link: https://lore.kernel.org/r/20211207183226.834557-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b7b9caa41bc7..9a9e7675b187 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -297,6 +297,8 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
 	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
 		 addr);
 
+	kasan_non_canonical_hook(addr);
+
 	mem_abort_decode(esr);
 	show_pte(addr);
--
cgit v1.2.3-58-ga151


From c2c529b27ceb394ff4d3273ed1f552195fc4d555 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 6 Dec 2021 12:47:12 +0000
Subject: arm64: remove __dma_*_area() aliases

The __dma_inv_area() and __dma_clean_area() aliases make cache.S harder
to navigate, but don't gain us anything in practice. For clarity, let's
remove them along with their redundant comments.

The only users are __dma_map_area() and __dma_unmap_area(), which need
to be position independent, and can call __pi_dcache_inval_poc() and
__pi_dcache_clean_poc() directly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Ard Biesheuvel
Cc: Fuad Tabba
Cc: Marc Zyngier
Cc: Will Deacon
Acked-by: Catalin Marinas
Acked-by: Mark Brown
Acked-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20211206124715.4101571-4-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/cache.S | 24 +++---------------------
 1 file changed, 3 insertions(+), 21 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 5051b3c1a4f1..7d0563db4201 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -140,15 +140,7 @@ SYM_FUNC_END(dcache_clean_pou)
  *	- start   - kernel start address of region
  *	- end     - kernel end address of region
  */
-SYM_FUNC_START_LOCAL(__dma_inv_area)
 SYM_FUNC_START_PI(dcache_inval_poc)
-	/* FALLTHROUGH */
-
-/*
- *	__dma_inv_area(start, end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
- */
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -167,7 +159,6 @@ SYM_FUNC_START_PI(dcache_inval_poc)
 	dsb	sy
 	ret
 SYM_FUNC_END_PI(dcache_inval_poc)
-SYM_FUNC_END(__dma_inv_area)
 
 /*
  * dcache_clean_poc(start, end)
@@ -178,19 +169,10 @@ SYM_FUNC_END(__dma_inv_area)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-SYM_FUNC_START_LOCAL(__dma_clean_area)
 SYM_FUNC_START_PI(dcache_clean_poc)
-	/* FALLTHROUGH */
-
-/*
- *	__dma_clean_area(start, end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
- */
 	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END_PI(dcache_clean_poc)
-SYM_FUNC_END(__dma_clean_area)
 
 /*
  * dcache_clean_pop(start, end)
@@ -232,8 +214,8 @@ SYM_FUNC_END_PI(__dma_flush_area)
 SYM_FUNC_START_PI(__dma_map_area)
 	add	x1, x0, x1
 	cmp	w2, #DMA_FROM_DEVICE
-	b.eq	__dma_inv_area
-	b	__dma_clean_area
+	b.eq	__pi_dcache_inval_poc
+	b	__pi_dcache_clean_poc
 SYM_FUNC_END_PI(__dma_map_area)
 
 /*
@@ -245,6 +227,6 @@ SYM_FUNC_END_PI(__dma_map_area)
 SYM_FUNC_START_PI(__dma_unmap_area)
 	add	x1, x0, x1
 	cmp	w2, #DMA_TO_DEVICE
-	b.ne	__dma_inv_area
+	b.ne	__pi_dcache_inval_poc
 	ret
 SYM_FUNC_END_PI(__dma_unmap_area)
--
cgit v1.2.3-58-ga151
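
For reference, the "KASAN: null-ptr-deref in range [...]" line quoted in the
KASAN shadow-alias patch above is just the inverse of the generic KASAN shadow
mapping: one shadow byte covers an 8-byte granule, so the faulting shadow
address is mapped back to the original access range. The stand-alone sketch
below models that arithmetic; the KASAN_SHADOW_OFFSET value is assumed from
the example fault address in the commit message and is configuration-dependent,
and the helper names are illustrative rather than the kernel's own.

/*
 * Stand-alone model of the shadow arithmetic behind the example fault log
 * above. KASAN_SHADOW_OFFSET is an assumption taken from that example and
 * varies with the kernel configuration; the >> 3 / << 3 scaling is the
 * generic KASAN 8-byte granule.
 */
#include <stdio.h>

#define KASAN_SHADOW_OFFSET	0xdfff800000000000UL	/* assumed, config-dependent */
#define KASAN_GRANULE_SIZE	8UL

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> 3) + KASAN_SHADOW_OFFSET;	/* access address -> shadow byte */
}

static void report_non_canonical(unsigned long fault_addr)
{
	/* Invert the mapping to recover the 8-byte granule that was probed. */
	unsigned long orig = (fault_addr - KASAN_SHADOW_OFFSET) << 3;

	printf("bogus shadow access for range [0x%016lx-0x%016lx]\n",
	       orig, orig + KASAN_GRANULE_SIZE - 1);
}

int main(void)
{
	/* A NULL-based access at offset 0xb8, as in the example fault above. */
	unsigned long shadow = mem_to_shadow(0xb8);

	printf("shadow address: 0x%016lx\n", shadow);	/* 0xdfff800000000017 */
	report_non_canonical(shadow);			/* [0xb8-0xbf] */
	return 0;
}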