author     Linus Torvalds <torvalds@linux-foundation.org>  2019-08-02 15:23:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-08-02 15:23:27 -0700
commit     a507f25d1c2048c136f6834f10966510b62af987 (patch)
tree       775deabf9627d6b69c1d30b0323bf0727f0453ee /arch
parent     9100fc5ae83e64d99fd3300104893ef0e0b0aadb (diff)
parent     d8bb6718c4db9bcd075dde7ff55d46091ccfae15 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- Update the compat layer to allow single-byte watchpoints on all
  addresses (similar to the native support)

- arm_pmu: fix the restoration of the counters on the
  CPU_PM_ENTER_FAILED path

- Fix build regression with vDSO and Makefile not stripping
  CROSS_COMPILE_COMPAT

- Fix the CTR_EL0 (cache type register) sanitisation on heterogeneous
  machines (e.g. big.LITTLE); a sketch of the new comparison rule
  follows this list

- Fix the interrupt controller priority mask value when pseudo-NMIs are
  enabled

- arm64 kprobes fixes: recovering of the PSTATE.D flag in the
  single-step exception handler (modelled in the sketch after the diff
  below), NOKPROBE annotations for unwind_frame() and walk_stackframe(),
  remove unneeded rcu_read_lock/unlock from debug handlers

- Several gcc fall-through warnings

- Unused variable warnings
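
The CTR_EL0 fix replaces FTR_HIGHER_SAFE with FTR_HIGHER_OR_ZERO_SAFE for
the CWG and ERG fields, where a value of 0 means "not reported" and must
dominate the merge. A minimal stand-alone sketch of that selection rule
(an illustrative model in plain C, not the kernel code; the helper name
higher_or_zero_safe is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of FTR_HIGHER_OR_ZERO_SAFE: the higher of the two values is
     * safe, except that 0 ("not reported") must win over anything else. */
    static int64_t higher_or_zero_safe(int64_t cur, int64_t new)
    {
            if (!cur || !new)
                    return 0;
            return new > cur ? new : cur;
    }

    int main(void)
    {
            /* Two clusters reporting different CTR_EL0.CWG values: */
            printf("%lld\n", (long long)higher_or_zero_safe(4, 5)); /* 5 */
            /* If either cluster reports 0, the sanitised value is 0: */
            printf("%lld\n", (long long)higher_or_zero_safe(0, 5)); /* 0 */
            return 0;
    }

The cpufeature.c hunk below does the same thing: it bails out to the
0-initialised result when either value is zero, and otherwise falls
through to the existing FTR_HIGHER_SAFE case.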
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: Make debug exception handlers visible from RCU
arm64: kprobes: Recover pstate.D in single-step exception handler
arm64/mm: fix variable 'tag' set but not used
arm64/mm: fix variable 'pud' set but not used
arm64: Remove unneeded rcu_read_lock from debug handlers
arm64: unwind: Prohibit probing on return_address()
arm64: Lower priority mask for GIC_PRIO_IRQON
arm64/efi: fix variable 'si' set but not used
arm64: cpufeature: Fix feature comparison for CTR_EL0.{CWG,ERG}
arm64: vdso: Fix Makefile regression
arm64: module: Mark expected switch fall-through
arm64: smp: Mark expected switch fall-through
arm64: hw_breakpoint: Fix warnings about implicit fallthrough
drivers/perf: arm_pmu: Fix failure path in PM notifier
arm64: compat: Allow single-byte watchpoints on all addresses
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/Makefile                  |  2
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h  |  6
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  |  7
-rw-r--r--  arch/arm64/include/asm/daifflags.h   |  2
-rw-r--r--  arch/arm64/include/asm/efi.h         |  6
-rw-r--r--  arch/arm64/include/asm/memory.h      | 10
-rw-r--r--  arch/arm64/include/asm/pgtable.h     |  4
-rw-r--r--  arch/arm64/include/asm/ptrace.h      |  2
-rw-r--r--  arch/arm64/kernel/cpufeature.c       |  8
-rw-r--r--  arch/arm64/kernel/debug-monitors.c   | 14
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c    | 11
-rw-r--r--  arch/arm64/kernel/module.c           |  4
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c   | 40
-rw-r--r--  arch/arm64/kernel/return_address.c   |  3
-rw-r--r--  arch/arm64/kernel/smp.c              |  2
-rw-r--r--  arch/arm64/kernel/stacktrace.c       |  3
-rw-r--r--  arch/arm64/mm/fault.c                | 57
17 files changed, 116 insertions(+), 65 deletions(-)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bb1f1dbb34e8..61de992bbea3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
   ifeq ($(CONFIG_CC_IS_CLANG), y)
     $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
-  else ifeq ($(CROSS_COMPILE_COMPAT),)
+  else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
     $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
   else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
     $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 79155a8cfe7c..89e4c8b79349 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
 	BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
 					 GIC_PRIO_PSR_I_SET));
 	BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+	/*
+	 * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
+	 * and non-secure PMR accesses are not subject to the shifts that
+	 * are applied to IRQ priorities
+	 */
+	BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
 	gic_write_pmr(GIC_PRIO_IRQOFF);
 }
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 407e2bf23676..c96ffa4722d3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,9 +35,10 @@
  */

 enum ftr_type {
-	FTR_EXACT,	/* Use a predefined safe value */
-	FTR_LOWER_SAFE,	/* Smaller value is safe */
-	FTR_HIGHER_SAFE,/* Bigger value is safe */
+	FTR_EXACT,			/* Use a predefined safe value */
+	FTR_LOWER_SAFE,			/* Smaller value is safe */
+	FTR_HIGHER_SAFE,		/* Bigger value is safe */
+	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
 };

 #define FTR_STRICT	true	/* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 987926ed535e..063c964af705 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,6 +13,8 @@
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
 #define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+

 /* mask/save/unmask/restore all exceptions, including interrupts. */
 static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e79ce9c3f5c..76a144702586 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)

 #define alloc_screen_info(x...)		&screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+				    struct screen_info *si)
+{
+}

 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b7ba75809751..fb04f10a78ab 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -210,7 +210,11 @@ extern u64			vabits_user;
 #define __tag_reset(addr)	untagged_addr(addr)
 #define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
 #else
-#define __tag_set(addr, tag)	(addr)
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+	return addr;
+}
+
 #define __tag_reset(addr)	(addr)
 #define __tag_get(addr)		0
 #endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)	({					\
 	unsigned long __addr =						\
 		((__page_to_voff(page)) | PAGE_OFFSET);			\
-	unsigned long __addr_tag =					\
-		__tag_set(__addr, page_kasan_tag(page));		\
+	const void *__addr_tag =					\
+		__tag_set((void *)__addr, page_kasan_tag(page));	\
 	((void *)__addr_tag);						\
 })
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3f5461f7b560..5fdcfe237338 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -447,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				 PMD_TYPE_SECT)

 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)		(0)
-#define pud_table(pud)		(1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
 				 PUD_TYPE_SECT)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b1dd039023ef..1dcf63a9ac1f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,7 @@
  * in the priority mask, it indicates that PSR.I should be set and
  * interrupt disabling temporarily does not rely on IRQ priorities.
  */
-#define GIC_PRIO_IRQON			0xc0
+#define GIC_PRIO_IRQON			0xe0
 #define GIC_PRIO_IRQOFF			(GIC_PRIO_IRQON & ~0x80)
 #define GIC_PRIO_PSR_I_SET		(1 << 4)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f29f36a65175..d19d14ba9ae4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
 	case FTR_LOWER_SAFE:
 		ret = new < cur ? new : cur;
 		break;
+	case FTR_HIGHER_OR_ZERO_SAFE:
+		if (!cur || !new)
+			break;
+		/* Fallthrough */
 	case FTR_HIGHER_SAFE:
 		ret = new > cur ? new : cur;
 		break;
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f8719bd30850..48222a4760c2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)

 	list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;

-	rcu_read_lock();
-
+	/*
+	 * Since single-step exception disables interrupt, this function is
+	 * entirely not preemptible, and we can use rcu list safely here.
+	 */
 	list_for_each_entry_rcu(hook, list, node)	{
 		retval = hook->fn(regs, esr);
 		if (retval == DBG_HOOK_HANDLED)
 			break;
 	}

-	rcu_read_unlock();
-
 	return retval;
 }
 NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)

 	list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;

-	rcu_read_lock();
+	/*
+	 * Since brk exception disables interrupt, this function is
+	 * entirely not preemptible, and we can use rcu list safely here.
+	 */
 	list_for_each_entry_rcu(hook, list, node) {
 		unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

 		if ((comment & ~hook->mask) == hook->imm)
 			fn = hook->fn;
 	}
-	rcu_read_unlock();

 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index dceb84520948..38ee1514cd9c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 			/* Aligned */
 			break;
 		case 1:
-			/* Allow single byte watchpoint. */
-			if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
-				break;
 		case 2:
 			/* Allow halfword watchpoints and breakpoints. */
 			if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
 				break;
+
+			/* Fallthrough */
+		case 3:
+			/* Allow single byte watchpoint. */
+			if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+				break;
+
+			/* Fallthrough */
 		default:
 			return -EINVAL;
 		}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 46e643e30708..03ff15bffbb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* MOVW instruction relocations. */
 		case R_AARCH64_MOVW_UABS_G0_NC:
 			overflow_check = false;
+			/* Fall through */
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
+			/* Fall through */
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
+			/* Fall through */
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
 					      AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
+			/* Fall through */
 		case R_AARCH64_ADR_PREL_PG_HI21:
 			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
 			if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bd5dfffca272..c4452827419b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -21,6 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
 #include <asm/system_misc.h>
 #include <asm/insn.h>
 #include <linux/uaccess.h>
@@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
 }

 /*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
-	unsigned long spsr = regs->pstate;
-
-	if (mask)
-		spsr |= PSR_D_BIT;
-	else
-		spsr &= ~PSR_D_BIT;
-
-	regs->pstate = spsr;
-}
-
-/*
  * Interrupts need to be disabled before single-step mode is set, and not
  * reenabled until after single-step mode ends.
  * Without disabling interrupt on local CPU, there is a chance of
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
-	kcb->saved_irqflag = regs->pstate;
+	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
 	regs->pstate |= PSR_I_BIT;
+	/* Unmask PSTATE.D for enabling software step exceptions. */
+	regs->pstate &= ~PSR_D_BIT;
 }

 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
-	if (kcb->saved_irqflag & PSR_I_BIT)
-		regs->pstate |= PSR_I_BIT;
-	else
-		regs->pstate &= ~PSR_I_BIT;
+	regs->pstate &= ~DAIF_MASK;
+	regs->pstate |= kcb->saved_irqflag;
 }

 static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 		set_ss_context(kcb, slot);	/* mark pending ss */

-		spsr_set_debug_flag(regs, 0);
-
 		/* IRQs and single stepping do not mix well. */
 		kprobes_save_local_irqflag(kcb, regs);
 		kernel_enable_single_step(regs);
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index c4ae647d2306..a5e8b3b9d798 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -8,6 +8,7 @@

 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>

 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 		return 0;
 	}
 }
+NOKPROBE_SYMBOL(save_return_addr);

 void *return_address(unsigned int level)
 {
@@ -49,3 +51,4 @@ void *return_address(unsigned int level)
 		return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ea90d3bd9253..018a33e01b0e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 			pr_crit("CPU%u: died during early boot\n", cpu);
 			break;
 		}
-		/* Fall through */
 		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+		/* Fall through */
 	case CPU_STUCK_IN_KERNEL:
 		pr_crit("CPU%u: is stuck in kernel\n", cpu);
 		if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 2b160ae594eb..a336cb124320 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -111,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)

 	return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);

 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			     int (*fn)(struct stackframe *, void *), void *data)
@@ -125,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
+NOKPROBE_SYMBOL(walk_stackframe);

 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9568c116ac7f..cfd65b63f36f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
 	debug_fault_info[nr].name	= name;
 }

+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+	/*
+	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+	 * already disabled to preserve the last enabled/disabled addresses.
+	 */
+	if (interrupts_enabled(regs))
+		trace_hardirqs_off();
+
+	if (user_mode(regs)) {
+		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+	} else {
+		/*
+		 * We might have interrupted pretty much anything. In
+		 * fact, if we're a debug exception, we can even interrupt
+		 * NMI processing. We don't want this code to make in_nmi()
+		 * return true, but we need to notify RCU.
+		 */
+		rcu_nmi_enter();
+	}
+
+	preempt_disable();
+
+	/* This code is a bit fragile. Test it. */
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+	preempt_enable_no_resched();
+
+	if (!user_mode(regs))
+		rcu_nmi_exit();
+
+	if (interrupts_enabled(regs))
+		trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
 	if (cortex_a76_erratum_1463225_debug_handler(regs))
 		return;

-	/*
-	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
-	 * already disabled to preserve the last enabled/disabled addresses.
-	 */
-	if (interrupts_enabled(regs))
-		trace_hardirqs_off();
+	debug_exception_enter(regs);

 	if (user_mode(regs) && !is_ttbr0_addr(pc))
 		arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
 			 inf->sig, inf->code, (void __user *)pc, esr);
 	}

-	if (interrupts_enabled(regs))
-		trace_hardirqs_on();
+	debug_exception_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug_exception);
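
A closing note on the kprobes hunk above: kprobes_save_local_irqflag() now
saves all four DAIF bits instead of just PSR_I_BIT, and clears PSTATE.D so
the single-step exception can actually fire; the restore path then puts
every saved bit back. A small user-space model of that save/restore dance
(hypothetical helper names; the PSR_*_BIT values mirror the kernel's uapi
ptrace.h definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define PSR_F_BIT 0x00000040u
    #define PSR_I_BIT 0x00000080u
    #define PSR_A_BIT 0x00000100u
    #define PSR_D_BIT 0x00000200u
    #define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

    /* Save all DAIF bits, mask IRQs, unmask debug (single-step) exceptions. */
    static uint32_t save_irqflag(uint32_t pstate, uint32_t *saved)
    {
            *saved = pstate & DAIF_MASK;
            pstate |= PSR_I_BIT;
            pstate &= ~PSR_D_BIT;
            return pstate;
    }

    /* Restore every saved DAIF bit, including D, not just PSR_I_BIT. */
    static uint32_t restore_irqflag(uint32_t pstate, uint32_t saved)
    {
            pstate &= ~DAIF_MASK;
            return pstate | saved;
    }

    int main(void)
    {
            /* Entered with D set, as when a kprobe hits in debug context. */
            uint32_t saved, pstate = PSR_D_BIT | PSR_I_BIT;

            pstate = save_irqflag(pstate, &saved);
            printf("stepping: D=%d I=%d\n",
                   !!(pstate & PSR_D_BIT), !!(pstate & PSR_I_BIT));
            pstate = restore_irqflag(pstate, saved);
            printf("restored: D=%d I=%d\n",
                   !!(pstate & PSR_D_BIT), !!(pstate & PSR_I_BIT));
            return 0;
    }

Restoring the full mask is what lets the handler recover a D flag that was
set on entry, which the old PSR_I_BIT-only restore silently dropped.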