author		Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 20:05:19 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 20:05:19 -0700
commit		79ef0c00142519bc34e1341447f3797436cc48bf (patch)
tree		200c1ada8b4f31b79ec8a67939ca5fbcd0339958 /arch
parent		d54f486035fd89f14845a7f34a97a3f5da4e70f2 (diff)
parent		feea69ec121f067073868cebe0cb9d003e64ad80 (diff)
Merge tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
- kprobes: Restructured the stack unwinder so that a stack dump taken
  from a kretprobe callback shows correctly on x86 (see the unwinder
  sketch below).
- Fix to bootconfig parsing
- Have tracefs allow owner and group permissions by default (only
  denying others). There has been pressure to allow non-root access to
  tracefs in a controlled fashion, and using groups is probably the
  safest approach (see the remount sketch below).
- Bootconfig memory management updates.
- Bootconfig clean up to have the tools directory be less dependent on
changes in the kernel tree.
- Allow perf to be traced by the function tracer.
- Rewrite of the function graph tracer to be a callback from the
  function tracer instead of having its own trampoline (this change
  will happen on an arch-by-arch basis, and currently only x86_64
  implements it; see the callback sketch below).
- Allow multiple direct trampolines (bpf hooks to functions) to be
  batched together in one synchronization.
- Allow histogram triggers to add variables that can perform
  calculations against the event's fields (see the hist-trigger
  sketch below).
- Use the linker to determine architecture callbacks from the ftrace
trampoline to allow for proper parameter prototypes and prevent
warnings from the compiler.
- Extend histogram triggers to key off of variables.
- Have trace recursion use bit magic to determine preempt context
  instead of 'if' branches (see the context-level sketch below).
- Have trace recursion disable preemption as all use cases do anyway.
- Added testing for verification of tracing utilities.
- Various small clean ups and fixes.
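The kretprobe unwinder work follows one pattern across architectures,
visible in the ARM and arm64 hunks in the diff below: when the unwinder
finds the kretprobe trampoline as a frame's return address, it asks the
kretprobe bookkeeping for the real one. A minimal sketch of that
pattern, using the ARM `struct stackframe` fields from this merge
(illustrative only, not the exact code for every arch):

```c
#include <linux/kprobes.h>	/* is_kretprobe_trampoline(), kretprobe_find_ret_addr() */

/* Sketch: recover the real PC when a frame points at the kretprobe trampoline. */
static void unwind_fixup_kretprobe(struct stackframe *frame)
{
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(frame->pc))
		/* kr_cur is a cursor so repeated trampoline hits in one
		 * stack walk resolve to successive return addresses. */
		frame->pc = kretprobe_find_ret_addr(frame->tsk,
						    (void *)frame->fp,
						    &frame->kr_cur);
#endif
}
```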
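For the tracefs permission change, the intent is that an administrator
hands the tracing directory to a dedicated group; tracefs accepts
uid/gid/mode mount options, so a remount can assign that group. A
hypothetical sketch (the group ID 4242 stands in for a "tracing" group
and is made up):

```c
#include <stdio.h>
#include <sys/mount.h>

/* Sketch: remount tracefs so a hypothetical "tracing" group (gid 4242)
 * benefits from the group permissions tracefs now grants by default. */
int main(void)
{
	if (mount(NULL, "/sys/kernel/tracing", NULL, MS_REMOUNT, "gid=4242"))
		perror("remount tracefs");
	return 0;
}
```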
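The function graph rewrite shows up in the x86 hunks below: the
dedicated `ftrace_graph_caller` assembly trampoline is deleted, and the
graph tracer instead enters through an ordinary ftrace callback. The
shape of that callback, condensed from the `arch/x86/kernel/ftrace.c`
hunk in this diff:

```c
/* With HAVE_DYNAMIC_FTRACE_WITH_ARGS, graph tracing is just another
 * ftrace_ops callback rather than its own trampoline. */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	/* Hook the return address found on the stack, as the old
	 * trampoline-based path did via prepare_ftrace_return(). */
	prepare_ftrace_return(ip, stack, 0);
}
```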
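The histogram expression support is driven entirely through the trigger
file syntax: a variable can now be assigned arithmetic over event
fields and numeric literals, including `*` and `/` (division by a power
of two is optimized to a shift). A hypothetical example writing such a
trigger; the event, field, and path are common ones, not taken from
this merge:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical trigger: key on the allocation call site and define
	 * a variable "kb" as a field divided by a power-of-two literal. */
	const char *trig = "hist:keys=call_site:kb=bytes_req/1024";
	int fd = open("/sys/kernel/tracing/events/kmem/kmalloc/trigger",
		      O_WRONLY);

	if (fd < 0 || write(fd, trig, strlen(trig)) < 0)
		perror("hist trigger");
	if (fd >= 0)
		close(fd);
	return 0;
}
```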
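The "bit magic" in the recursion tracking replaces an if/else ladder
with three mask tests whose truth values simply add up: the wider the
set of preempt_count bits examined, the higher the context level. A
sketch of the idea (the helper name here is mine; the masks are the
kernel's preempt_count bits):

```c
/* Sketch: context level 0 (normal), 1 (softirq), 2 (hardirq), 3 (NMI)
 * computed from preempt_count() without conditional branches. */
static __always_inline unsigned char trace_ctx_level(void)
{
	unsigned long pc = preempt_count();
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}
```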
* tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (101 commits)
tracing/histogram: Fix semicolon.cocci warnings
tracing/histogram: Fix documentation inline emphasis warning
tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
tracing: Show size of requested perf buffer
bootconfig: Initialize ret in xbc_parse_tree()
ftrace: do CPU checking after preemption disabled
ftrace: disable preemption when recursion locked
tracing/histogram: Document expression arithmetic and constants
tracing/histogram: Optimize division by a power of 2
tracing/histogram: Covert expr to const if both operands are constants
tracing/histogram: Simplify handling of .sym-offset in expressions
tracing: Fix operator precedence for hist triggers expression
tracing: Add division and multiplication support for hist triggers
tracing: Add support for creating hist trigger variables from literal
selftests/ftrace: Stop tracing while reading the trace file by default
MAINTAINERS: Update KPROBES and TRACING entries
test_kprobes: Move it from kernel/ to lib/
docs, kprobes: Remove invalid URL and add new reference
samples/kretprobes: Fix return value if register_kretprobe() failed
lib/bootconfig: Fix the xbc_get_info kerneldoc
...
Diffstat (limited to 'arch')
66 files changed, 358 insertions, 303 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index dcd84e3ae7d4..26b8ed11639d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -191,6 +191,14 @@ config HAVE_OPTPROBES config HAVE_KPROBES_ON_FTRACE bool +config ARCH_CORRECT_STACKTRACE_ON_KRETPROBE + bool + help + Since kretprobes modifies return address on the stack, the + stacktrace may see the kretprobe trampoline address instead + of correct one. If the architecture stacktrace code and + unwinder can adjust such entries, select this configuration. + config HAVE_FUNCTION_ERROR_INJECTION bool diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 2134721dce44..de1566e32cb8 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -46,7 +46,7 @@ struct kprobe_ctlblk { }; int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); void trap_is_kprobe(unsigned long address, struct pt_regs *regs); #else #define trap_is_kprobe(address, regs) diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 4c3c9be5bd16..cca8d6583e31 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -149,6 +149,11 @@ static inline long regs_return_value(struct pt_regs *regs) return (long)regs->r0; } +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + instruction_pointer(regs) = val; +} #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PTRACE_H */ diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 5f0415fc7328..e71d64119d71 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -363,8 +363,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, static void __used kretprobe_trampoline_holder(void) { - __asm__ __volatile__(".global kretprobe_trampoline\n" - "kretprobe_trampoline:\n" "nop\n"); + __asm__ __volatile__(".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n" + "nop\n"); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, @@ -375,13 +376,13 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->blink = (unsigned long)&kretprobe_trampoline; + regs->blink = (unsigned long)&__kretprobe_trampoline; } static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + regs->ret = __kretprobe_trampoline_handler(regs, NULL); /* By returning a non zero value, we are telling the kprobe handler * that we don't want the post_handler to run @@ -390,7 +391,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, } static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; @@ -402,7 +403,7 @@ int __init arch_init_kprobes(void) int __kprobes arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline) return 1; return 0; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2c056c0e834e..5d3b030e78e3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -3,6 +3,7 @@ config ARM bool default y select ARCH_32BIT_OFF_T + select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND select ARCH_HAS_BINFMT_FLAT select 
ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h index 2d76a2e29f05..8f54f9ad8a9b 100644 --- a/arch/arm/include/asm/stacktrace.h +++ b/arch/arm/include/asm/stacktrace.h @@ -3,6 +3,7 @@ #define __ASM_STACKTRACE_H #include <asm/ptrace.h> +#include <linux/llist.h> struct stackframe { /* @@ -13,6 +14,10 @@ struct stackframe { unsigned long sp; unsigned long lr; unsigned long pc; +#ifdef CONFIG_KRETPROBES + struct llist_node *kr_cur; + struct task_struct *tsk; +#endif }; static __always_inline @@ -22,6 +27,10 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) frame->sp = regs->ARM_sp; frame->lr = regs->ARM_lr; frame->pc = regs->ARM_pc; +#ifdef CONFIG_KRETPROBES + frame->kr_cur = NULL; + frame->tsk = current; +#endif } extern int unwind_frame(struct stackframe *frame); diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 3c83b5d29697..a006585e1c09 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -193,11 +193,6 @@ int ftrace_make_nop(struct module *mod, return ret; } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 7b42ac010fdf..00c11579406c 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -42,6 +42,10 @@ void *return_address(unsigned int level) frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)return_address; +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = current; +#endif walk_stackframe(&frame, save_return_addr, &data); diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 76ea4178a55c..75e905508f27 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> +#include <linux/kprobes.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/stacktrace.h> @@ -54,8 +55,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = frame->fp; frame->fp = *(unsigned long *)(fp); - frame->pc = frame->lr; - frame->lr = *(unsigned long *)(fp + 4); + frame->pc = *(unsigned long *)(fp + 4); #else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) @@ -66,6 +66,11 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); #endif +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(frame->tsk, + (void *)frame->fp, &frame->kr_cur); +#endif return 0; } @@ -157,6 +162,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk, frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)__save_stack_trace; } +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = tsk; +#endif walk_stackframe(&frame, save_trace, &data); } @@ -174,6 +183,10 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; frame.pc = regs->ARM_pc; +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = current; +#endif walk_stackframe(&frame, save_trace, &data); } diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c 
index 9d8634e2f12f..9090c3a74dcc 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -11,6 +11,8 @@ * Copyright (C) 2007 Marvell Ltd. */ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> @@ -278,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) break; case KPROBE_REENTER: /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected.\n"); + pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); fallthrough; default: @@ -366,19 +368,41 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, /* * When a retprobed function returns, trampoline_handler() is called, * calling the kretprobe's handler. We construct a struct pt_regs to - * give a view of registers r0-r11 to the user return-handler. This is - * not a complete pt_regs structure, but that should be plenty sufficient - * for kretprobe handlers which should normally be interested in r0 only - * anyway. + * give a view of registers r0-r11, sp, lr, and pc to the user + * return-handler. This is not a complete pt_regs structure, but that + * should be enough for stacktrace from the return handler with or + * without pt_regs. */ -void __naked __kprobes kretprobe_trampoline(void) +void __naked __kprobes __kretprobe_trampoline(void) { __asm__ __volatile__ ( +#ifdef CONFIG_FRAME_POINTER + "ldr lr, =__kretprobe_trampoline \n\t" + /* __kretprobe_trampoline makes a framepointer on pt_regs. */ +#ifdef CONFIG_CC_IS_CLANG + "stmdb sp, {sp, lr, pc} \n\t" + "sub sp, sp, #12 \n\t" + /* In clang case, pt_regs->ip = lr. */ + "stmdb sp!, {r0 - r11, lr} \n\t" + /* fp points regs->r11 (fp) */ + "add fp, sp, #44 \n\t" +#else /* !CONFIG_CC_IS_CLANG */ + /* In gcc case, pt_regs->ip = fp. */ + "stmdb sp, {fp, sp, lr, pc} \n\t" + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" + /* fp points regs->r15 (pc) */ + "add fp, sp, #60 \n\t" +#endif /* CONFIG_CC_IS_CLANG */ +#else /* !CONFIG_FRAME_POINTER */ + "sub sp, sp, #16 \n\t" "stmdb sp!, {r0 - r11} \n\t" +#endif /* CONFIG_FRAME_POINTER */ "mov r0, sp \n\t" "bl trampoline_handler \n\t" "mov lr, r0 \n\t" "ldmia sp!, {r0 - r11} \n\t" + "add sp, sp, #16 \n\t" #ifdef CONFIG_THUMB2_KERNEL "bx lr \n\t" #else @@ -387,11 +411,10 @@ void __naked __kprobes kretprobe_trampoline(void) : : : "memory"); } -/* Called from kretprobe_trampoline */ +/* Called from __kretprobe_trampoline */ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { - return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, - (void *)regs->ARM_fp); + return (void *)kretprobe_trampoline_handler(regs, (void *)regs->ARM_fp); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, @@ -401,7 +424,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = (void *)regs->ARM_fp; /* Replace the return addr with trampoline addr. 
*/ - regs->ARM_lr = (unsigned long)&kretprobe_trampoline; + regs->ARM_lr = (unsigned long)&__kretprobe_trampoline; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index c78180172120..dbef34ed933f 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist, } int arch_within_optimized_kprobe(struct optimized_kprobe *op, - unsigned long addr) + kprobe_opcode_t *addr) { - return ((unsigned long)op->kp.addr <= addr && - (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); + return (op->kp.addr <= addr && + op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr); + } void arch_remove_optimized_kprobe(struct optimized_kprobe *op) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index dde7e9f28d0c..3a00dfb0711e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,7 @@ config ARM64 select ACPI_PPTT if ACPI select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE + select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 5d38ff4a4806..05cd82eeca13 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -39,7 +39,7 @@ void arch_remove_kprobe(struct kprobe *); int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); void __kprobes *trampoline_probe_handler(struct pt_regs *regs); #endif /* CONFIG_KPROBES */ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 8aebc00c1718..a4e046ef4568 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/types.h> +#include <linux/llist.h> #include <asm/memory.h> #include <asm/ptrace.h> @@ -59,6 +60,9 @@ struct stackframe { #ifdef CONFIG_FUNCTION_GRAPH_TRACER int graph; #endif +#ifdef CONFIG_KRETPROBES + struct llist_node *kr_cur; +#endif }; extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 7f467bd9db7a..fc62dfe73f93 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -236,11 +236,6 @@ void arch_ftrace_update_code(int command) command |= FTRACE_MAY_SLEEP; ftrace_modify_all_code(command); } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 6dbcc89f6662..d9dfa82c1f18 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -7,6 +7,9 @@ * Copyright (C) 2013 Linaro Limited. 
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org> */ + +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/extable.h> #include <linux/kasan.h> #include <linux/kernel.h> @@ -218,7 +221,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected.\n"); + pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); BUG(); break; @@ -398,18 +401,17 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, - (void *)kernel_stack_pointer(regs)); + return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; - ri->fp = (void *)kernel_stack_pointer(regs); + ri->fp = (void *)regs->regs[29]; /* replace return addr (x30) with trampoline */ - regs->regs[30] = (long)&kretprobe_trampoline; + regs->regs[30] = (long)&__kretprobe_trampoline; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S index 288a84e253cc..9a6499bed58b 100644 --- a/arch/arm64/kernel/probes/kprobes_trampoline.S +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S @@ -61,11 +61,14 @@ ldp x28, x29, [sp, #S_X28] .endm -SYM_CODE_START(kretprobe_trampoline) +SYM_CODE_START(__kretprobe_trampoline) sub sp, sp, #PT_REGS_SIZE save_all_base_regs + /* Setup a frame pointer. */ + add x29, sp, #S_FP + mov x0, sp bl trampoline_probe_handler /* @@ -74,9 +77,10 @@ SYM_CODE_START(kretprobe_trampoline) */ mov lr, x0 + /* The frame pointer (x29) is restored with other registers. */ restore_all_base_regs add sp, sp, #PT_REGS_SIZE ret -SYM_CODE_END(kretprobe_trampoline) +SYM_CODE_END(__kretprobe_trampoline) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 8982a2b78acf..c30624fff6ac 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -41,6 +41,9 @@ void start_backtrace(struct stackframe *frame, unsigned long fp, #ifdef CONFIG_FUNCTION_GRAPH_TRACER frame->graph = 0; #endif +#ifdef CONFIG_KRETPROBES + frame->kr_cur = NULL; +#endif /* * Prime the first unwind. 
@@ -129,6 +132,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ret_stack->ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); +#endif frame->pc = ptrauth_strip_insn_pac(frame->pc); diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h index b647bbde4d6d..55267cbf5204 100644 --- a/arch/csky/include/asm/kprobes.h +++ b/arch/csky/include/asm/kprobes.h @@ -41,7 +41,7 @@ void arch_remove_kprobe(struct kprobe *p); int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); int kprobe_breakpoint_handler(struct pt_regs *regs); int kprobe_single_step_handler(struct pt_regs *regs); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); void __kprobes *trampoline_probe_handler(struct pt_regs *regs); #endif /* CONFIG_KPROBES */ diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c index b4a7ec1517ff..50bfcf129078 100644 --- a/arch/csky/kernel/ftrace.c +++ b/arch/csky/kernel/ftrace.c @@ -133,11 +133,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) (unsigned long)func, true, true); return ret; } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c index ef2bb9bd9605..834cffcfbce3 100644 --- a/arch/csky/kernel/probes/ftrace.c +++ b/arch/csky/kernel/probes/ftrace.c @@ -2,13 +2,6 @@ #include <linux/kprobes.h> -int arch_check_ftrace_location(struct kprobe *p) -{ - if (ftrace_location((unsigned long)p->addr)) - p->flags |= KPROBE_FLAG_FTRACE; - return 0; -} - /* Ftrace callback handler for kprobes -- called under preepmt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) @@ -24,7 +17,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, return; regs = ftrace_get_regs(fregs); - preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (!p) { p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE)); @@ -64,7 +56,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, __this_cpu_write(current_kprobe, NULL); } out: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c index 8fffa34d4e1c..42920f25e73c 100644 --- a/arch/csky/kernel/probes/kprobes.c +++ b/arch/csky/kernel/probes/kprobes.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kprobes.h> #include <linux/extable.h> #include <linux/slab.h> @@ -77,10 +79,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long probe_addr = (unsigned long)p->addr; - if (probe_addr & 0x1) { - pr_warn("Address not aligned.\n"); - return -EINVAL; - } + if (probe_addr & 0x1) + return -EILSEQ; /* copy instruction */ p->opcode = le32_to_cpu(*p->addr); @@ -225,7 +225,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected.\n"); + pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); BUG(); break; @@ -386,7 +386,7 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs 
*regs) { - return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + return (void *)kretprobe_trampoline_handler(regs, NULL); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, @@ -394,7 +394,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, { ri->ret_addr = (kprobe_opcode_t *)regs->lr; ri->fp = NULL; - regs->lr = (unsigned long) &kretprobe_trampoline; + regs->lr = (unsigned long) &__kretprobe_trampoline; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S index b1fe3af24f03..ba48ad04a847 100644 --- a/arch/csky/kernel/probes/kprobes_trampoline.S +++ b/arch/csky/kernel/probes/kprobes_trampoline.S @@ -4,7 +4,7 @@ #include <abi/entry.h> -ENTRY(kretprobe_trampoline) +ENTRY(__kretprobe_trampoline) SAVE_REGS_FTRACE mov a0, sp /* pt_regs */ @@ -16,4 +16,4 @@ ENTRY(kretprobe_trampoline) RESTORE_REGS_FTRACE rts -ENDPROC(kretprobe_trampoline) +ENDPROC(__kretprobe_trampoline) diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 08179135905c..8a2d0f72b324 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -51,6 +51,11 @@ * the canonical representation by adding to instruction pointer. */ # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) +# define instruction_pointer_set(regs, val) \ +({ \ + ia64_psr(regs)->ri = (val & 0xf); \ + regs->cr_iip = (val & ~0xfULL); \ +}) static inline unsigned long user_stack_pointer(struct pt_regs *regs) { diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c index b2ab2d58fb30..d6360fd404ab 100644 --- a/arch/ia64/kernel/ftrace.c +++ b/arch/ia64/kernel/ftrace.c @@ -194,9 +194,3 @@ int ftrace_update_ftrace_func(ftrace_func_t func) flush_icache_range(addr, addr + 16); return 0; } - -/* run from kstop_machine */ -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 441ed04b1037..1a7bab1c5d7c 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, __this_cpu_write(current_kprobe, p); } -static void kretprobe_trampoline(void) +void __kretprobe_trampoline(void) { } int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); + regs->cr_iip = __kretprobe_trampoline_handler(regs, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -414,7 +414,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; + regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline); } /* Check the instruction in the slot is break */ @@ -890,11 +890,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -unsigned long arch_deref_entry_point(void *entry) -{ - return ((struct fnptr *)entry)->ip; -} - static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; @@ -902,14 +897,14 @@ static struct kprobe trampoline_p = { int __init arch_init_kprobes(void) { trampoline_p.addr = - (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; + 
dereference_function_descriptor(__kretprobe_trampoline); return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == - (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip) + dereference_function_descriptor(__kretprobe_trampoline)) return 1; return 0; diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index 224eea40e1ee..188749d62709 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c @@ -163,11 +163,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ret; } -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} - int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 75bff0f77319..6c7f3b143fdc 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -11,6 +11,8 @@ * Copyright (C) IBM Corporation, 2002, 2004 */ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kprobes.h> #include <linux/preempt.h> #include <linux/uaccess.h> @@ -80,8 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) insn = p->addr[0]; if (insn_has_ll_or_sc(insn)) { - pr_notice("Kprobes for ll and sc instructions are not" - "supported\n"); + pr_notice("Kprobes for ll and sc instructions are not supported\n"); ret = -EINVAL; goto out; } @@ -219,7 +220,7 @@ static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, return 0; unaligned: - pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); + pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm); force_sig(SIGBUS); return -EFAULT; @@ -238,10 +239,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, regs->cp0_epc = (unsigned long)p->addr; else if (insn_has_delayslot(p->opcode)) { ret = evaluate_branch_instruction(p, regs, kcb); - if (ret < 0) { - pr_notice("Kprobes: Error in evaluating branch\n"); + if (ret < 0) return; - } } regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; } @@ -461,14 +460,14 @@ static void __used kretprobe_trampoline_holder(void) /* Keep the assembler from reordering and placing JR here. 
*/ ".set noreorder\n\t" "nop\n\t" - ".global kretprobe_trampoline\n" - "kretprobe_trampoline:\n\t" + ".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" "nop\n\t" ".set pop" : : : "memory"); } -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) @@ -477,7 +476,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->regs[31] = (unsigned long)kretprobe_trampoline; + regs->regs[31] = (unsigned long)__kretprobe_trampoline; } /* @@ -486,8 +485,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, - kretprobe_trampoline, NULL); + instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -498,14 +496,14 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, int __kprobes arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) return 1; return 0; } static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *)kretprobe_trampoline, + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index d55b73b18149..711bc8cd186d 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -84,11 +84,6 @@ void _ftrace_caller(unsigned long parent_ip) /* restore all state needed by the compiler epilogue */ } -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} - static unsigned long gen_sethi_insn(unsigned long addr) { unsigned long opcode = 0x46000000; diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 2a1f826b3def..4d392e4ed358 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -93,12 +93,6 @@ int ftrace_disable_ftrace_graph_caller(void) #endif #ifdef CONFIG_DYNAMIC_FTRACE - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} - int ftrace_update_ftrace_func(ftrace_func_t func) { ftrace_func = func; @@ -217,7 +211,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, return; regs = ftrace_get_regs(fregs); - preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) goto out; @@ -246,7 +239,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, } __this_cpu_write(current_kprobe, NULL); out: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index 6d21a515eea5..e2bdb5a5f93e 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -175,7 +175,7 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs) return 1; } -static inline void kretprobe_trampoline(void) +void __kretprobe_trampoline(void) { asm volatile("nop"); asm volatile("nop"); @@ -193,7 +193,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, { unsigned long orig_ret_address; - orig_ret_address = __kretprobe_trampoline_handler(regs, trampoline_p.addr, NULL); + orig_ret_address = 
__kretprobe_trampoline_handler(regs, NULL); instruction_pointer_set(regs, orig_ret_address); return 1; @@ -217,6 +217,6 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) int __init arch_init_kprobes(void) { trampoline_p.addr = (kprobe_opcode_t *) - dereference_function_descriptor(kretprobe_trampoline); + dereference_function_descriptor(__kretprobe_trampoline); return register_kprobe(&trampoline_p); } diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 4fc0e15e23a5..bab364152b29 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -51,7 +51,7 @@ extern kprobe_opcode_t optprobe_template_end[]; #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); extern void arch_remove_kprobe(struct kprobe *p); /* Architecture specific copy of original instruction */ diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 7154d58338cc..072ebe7f290b 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -26,7 +26,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, return; regs = ftrace_get_regs(fregs); - preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) goto out; @@ -61,7 +60,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, __this_cpu_write(current_kprobe, NULL); } out: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 7a7cd6bda53e..86d77ff056a6 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -237,7 +237,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->link = (unsigned long)kretprobe_trampoline; + regs->link = (unsigned long)__kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_prepare_kretprobe); @@ -403,12 +403,12 @@ NOKPROBE_SYMBOL(kprobe_handler); * - When the probed function returns, this probe * causes the handlers to fire */ -asm(".global kretprobe_trampoline\n" - ".type kretprobe_trampoline, @function\n" - "kretprobe_trampoline:\n" +asm(".global __kretprobe_trampoline\n" + ".type __kretprobe_trampoline, @function\n" + "__kretprobe_trampoline:\n" "nop\n" "blr\n" - ".size kretprobe_trampoline, .-kretprobe_trampoline\n"); + ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"); /* * Called when the probe at kretprobe trampoline is hit @@ -417,7 +417,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { unsigned long orig_ret_address; - orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); /* * We get here through one of two paths: * 1. by taking a trap -> kprobe_handler() -> here @@ -427,7 +427,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) * as it is used to determine the return address from the trap. * For (2), since nip is not honoured with optprobes, we instead setup * the link register properly so that the subsequent 'blr' in - * kretprobe_trampoline jumps back to the right instruction. + * __kretprobe_trampoline jumps back to the right instruction. 
* * For nip, we should set the address to the previous instruction since * we end up emulating it in kprobe_handler(), which increments the nip @@ -542,19 +542,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) } NOKPROBE_SYMBOL(kprobe_fault_handler); -unsigned long arch_deref_entry_point(void *entry) -{ -#ifdef PPC64_ELF_ABI_v1 - if (!kernel_text_address((unsigned long)entry)) - return ppc_global_function_entry(entry); - else -#endif - return (unsigned long)entry; -} -NOKPROBE_SYMBOL(arch_deref_entry_point); - static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; @@ -565,7 +554,7 @@ int __init arch_init_kprobes(void) int arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) return 1; return 0; diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index c79899abcec8..ce1903064031 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p) * has a 'nop' instruction, which can be emulated. * So further checks can be skipped. */ - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) return addr + sizeof(kprobe_opcode_t); /* @@ -301,8 +301,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_li } } -int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) +int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr) { - return ((unsigned long)op->kp.addr <= addr && - (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); + return (op->kp.addr <= addr && + op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr); } diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 9e4a4a7af380..a2443d61728e 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -155,7 +155,7 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum * Mark stacktraces with kretprobed functions on them * as unreliable. 
*/ - if (ip == (unsigned long)kretprobe_trampoline) + if (ip == (unsigned long)__kretprobe_trampoline) return -EINVAL; #endif diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 9ea9b5ec3113..217ef89f22b9 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -40,7 +40,7 @@ void arch_remove_kprobe(struct kprobe *p); int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); bool kprobe_breakpoint_handler(struct pt_regs *regs); bool kprobe_single_step_handler(struct pt_regs *regs); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); void __kprobes *trampoline_probe_handler(struct pt_regs *regs); #endif /* CONFIG_KPROBES */ diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 7f1e5203de88..4716f4cdc038 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -154,11 +154,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ret; } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c index aab85a82f419..7142ec42e889 100644 --- a/arch/riscv/kernel/probes/ftrace.c +++ b/arch/riscv/kernel/probes/ftrace.c @@ -15,7 +15,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, if (bit < 0) return; - preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) goto out; @@ -52,7 +51,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, __this_cpu_write(current_kprobe, NULL); } out: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 00088dc6da4b..e6e950b7cf32 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kprobes.h> #include <linux/extable.h> #include <linux/slab.h> @@ -50,11 +52,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long probe_addr = (unsigned long)p->addr; - if (probe_addr & 0x1) { - pr_warn("Address not aligned.\n"); - - return -EINVAL; - } + if (probe_addr & 0x1) + return -EILSEQ; /* copy instruction */ p->opcode = *p->addr; @@ -191,7 +190,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected.\n"); + pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); BUG(); break; @@ -348,7 +347,7 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + return (void *)kretprobe_trampoline_handler(regs, NULL); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, @@ -356,7 +355,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, { ri->ret_addr = (kprobe_opcode_t *)regs->ra; ri->fp = NULL; - regs->ra = (unsigned long) &kretprobe_trampoline; + regs->ra = (unsigned long) &__kretprobe_trampoline; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/kprobes_trampoline.S index 6e85d021e2a2..7bdb09ded39b 100644 --- 
a/arch/riscv/kernel/probes/kprobes_trampoline.S +++ b/arch/riscv/kernel/probes/kprobes_trampoline.S @@ -75,7 +75,7 @@ REG_L x31, PT_T6(sp) .endm -ENTRY(kretprobe_trampoline) +ENTRY(__kretprobe_trampoline) addi sp, sp, -(PT_SIZE_ON_STACK) save_all_base_regs @@ -90,4 +90,4 @@ ENTRY(kretprobe_trampoline) addi sp, sp, PT_SIZE_ON_STACK ret -ENDPROC(kretprobe_trampoline) +ENDPROC(__kretprobe_trampoline) diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 09cdb632a490..5eb722c984e4 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -70,7 +70,7 @@ struct kprobe_ctlblk { }; void arch_remove_kprobe(struct kprobe *p); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); int kprobe_fault_handler(struct pt_regs *regs, int trapnr); int kprobe_exceptions_notify(struct notifier_block *self, diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 1d94ffdf347b..5165bf344f95 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -262,11 +262,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return 0; } -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} - void arch_ftrace_update_code(int command) { if (ftrace_shared_hotpatch_trampoline(NULL)) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 52d056a5f89f..c505c0ee5f47 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -7,6 +7,8 @@ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com> */ +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/moduleloader.h> #include <linux/kprobes.h> #include <linux/ptrace.h> @@ -240,7 +242,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->gprs[14] = (unsigned long) &kretprobe_trampoline; + regs->gprs[14] = (unsigned long) &__kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_prepare_kretprobe); @@ -259,7 +261,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) * is a BUG. The code path resides in the .kprobes.text * section and is executed with interrupts disabled. 
*/ - pr_err("Invalid kprobe detected.\n"); + pr_err("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); BUG(); } @@ -332,8 +334,8 @@ NOKPROBE_SYMBOL(kprobe_handler); */ static void __used kretprobe_trampoline_holder(void) { - asm volatile(".global kretprobe_trampoline\n" - "kretprobe_trampoline: bcr 0,0\n"); + asm volatile(".global __kretprobe_trampoline\n" + "__kretprobe_trampoline: bcr 0,0\n"); } /* @@ -341,7 +343,7 @@ static void __used kretprobe_trampoline_holder(void) */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + regs->psw.addr = __kretprobe_trampoline_handler(regs, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -507,7 +509,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, NOKPROBE_SYMBOL(kprobe_exceptions_notify); static struct kprobe trampoline = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; @@ -518,6 +520,6 @@ int __init arch_init_kprobes(void) int arch_trampoline_kprobe(struct kprobe *p) { - return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; + return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline; } NOKPROBE_SYMBOL(arch_trampoline_kprobe); diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 101477b3e263..b7bb1981e9ee 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -46,7 +46,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, * Mark stacktraces with kretprobed functions on them * as unreliable. */ - if (state.ip == (unsigned long)kretprobe_trampoline) + if (state.ip == (unsigned long)__kretprobe_trampoline) return -EINVAL; #endif diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index a03b6680a9d9..ca05c99a3d5b 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c @@ -115,6 +115,9 @@ void __stack_chk_fail(void) void ftrace_stub(void) { } +void arch_ftrace_ops_list_func(void) +{ +} #define stackalign 4 diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 6171682f7798..eeba83e0a7d2 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -26,7 +26,7 @@ typedef insn_size_t kprobe_opcode_t; struct kprobe; void arch_remove_kprobe(struct kprobe *); -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 295c43315bbe..930001bb8c6a 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -252,11 +252,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(rec->ip, old, new); } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 1c7f358ef0be..aed1ea8e2c2f 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -207,7 +207,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ri->fp = NULL; /* Replace the return addr with trampoline addr */ - regs->pr = (unsigned long)kretprobe_trampoline; + regs->pr = (unsigned long)__kretprobe_trampoline; } static int __kprobes 
kprobe_handler(struct pt_regs *regs) @@ -293,17 +293,17 @@ no_kprobe: */ static void __used kretprobe_trampoline_holder(void) { - asm volatile (".globl kretprobe_trampoline\n" - "kretprobe_trampoline:\n\t" + asm volatile (".globl __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" "nop\n"); } /* - * Called when we hit the probe point at kretprobe_trampoline + * Called when we hit the probe point at __kretprobe_trampoline */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + regs->pc = __kretprobe_trampoline_handler(regs, NULL); return 1; } @@ -442,7 +442,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, } static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *)&kretprobe_trampoline, + .addr = (kprobe_opcode_t *)&__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h index bfcaa6326c20..06c2bc767ef7 100644 --- a/arch/sparc/include/asm/kprobes.h +++ b/arch/sparc/include/asm/kprobes.h @@ -24,7 +24,7 @@ do { flushi(&(p)->ainsn.insn[0]); \ flushi(&(p)->ainsn.insn[1]); \ } while (0) -void kretprobe_trampoline(void); +void __kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 684b84ce397f..eaead3da8e03 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -82,11 +82,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) new = ftrace_call_replace(ip, (unsigned long)func); return ftrace_modify_code(ip, old, new); } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index 4c05a4ee6a0e..535c7b35cb59 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -440,7 +440,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* Replace the return addr with trampoline addr */ regs->u_regs[UREG_RETPC] = - ((unsigned long)kretprobe_trampoline) - 8; + ((unsigned long)__kretprobe_trampoline) - 8; } /* @@ -451,7 +451,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, { unsigned long orig_ret_address = 0; - orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; @@ -465,13 +465,13 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, static void __used kretprobe_trampoline_holder(void) { - asm volatile(".global kretprobe_trampoline\n" - "kretprobe_trampoline:\n" + asm volatile(".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n" "\tnop\n" "\tnop\n"); } static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; @@ -482,7 +482,7 @@ int __init arch_init_kprobes(void) int __kprobes arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) return 1; return 0; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5ddfbbe5c51e..b1d4b481fcdd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -61,6 +61,7 @@ config X86 select 
ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_INIT + select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM) select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG @@ -198,7 +199,7 @@ config X86 select HAVE_FAST_GUP select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_GRAPH_TRACER if X86_32 || (X86_64 && DYNAMIC_FTRACE) select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9f3130f40807..024d9797646e 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -57,6 +57,13 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs) #define ftrace_instruction_pointer_set(fregs, _ip) \ do { (fregs)->regs.ip = (_ip); } while (0) + +struct ftrace_ops; +#define ftrace_graph_func ftrace_graph_func +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#else +#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR #endif #ifdef CONFIG_DYNAMIC_FTRACE @@ -65,8 +72,6 @@ struct dyn_arch_ftrace { /* No extra data needed for x86 */ }; -#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR - #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index bd7f5886a789..71ea2eab43d5 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -49,7 +49,6 @@ extern __visible kprobe_opcode_t optprobe_template_end[]; extern const int kretprobe_blacklist_size; void arch_remove_kprobe(struct kprobe *p); -asmlinkage void kretprobe_trampoline(void); extern void arch_kprobe_override_function(struct pt_regs *regs); diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 70fc159ebe69..2a1f8734416d 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> @@ -15,6 +16,9 @@ struct unwind_state { unsigned long stack_mask; struct task_struct *task; int graph_idx; +#ifdef CONFIG_KRETPROBES + struct llist_node *kr_cur; +#endif bool error; #if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; @@ -99,6 +103,31 @@ void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {} #endif +static inline +unsigned long unwind_recover_kretprobe(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ +#ifdef CONFIG_KRETPROBES + return is_kretprobe_trampoline(addr) ? + kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) : + addr; +#else + return addr; +#endif +} + +/* Recover the return address modified by kretprobe and ftrace_graph. 
*/ +static inline +unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ + unsigned long ret; + + ret = ftrace_graph_ret_addr(state->task, &state->graph_idx, + addr, addr_p); + return unwind_recover_kretprobe(state, ret, addr_p); +} + /* * This disables KASAN checking when reading a value from another task's stack, * since the other task could be running on another CPU and could have poisoned diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 8e574c0afef8..8b33674288ea 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -52,6 +52,11 @@ UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC .endm +#else + +#define UNWIND_HINT_FUNC \ + UNWIND_HINT(ORC_REG_SP, 8, UNWIND_HINT_TYPE_FUNC, 0) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_UNWIND_HINTS_H */ diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 1b3ce3b4a2a2..c39f906cdc4e 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -252,11 +252,6 @@ void arch_ftrace_update_code(int command) ftrace_modify_all_code(command); } -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} - /* Currently only x86_64 supports dynamic trampolines */ #ifdef CONFIG_X86_64 @@ -527,7 +522,7 @@ static void *addr_from_call(void *ptr) return ptr + CALL_INSN_SIZE + call.disp; } -void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, +void prepare_ftrace_return(unsigned long ip, unsigned long *parent, unsigned long frame_pointer); /* @@ -541,7 +536,8 @@ static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) void *ptr; if (ops && ops->trampoline) { -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \ + defined(CONFIG_FUNCTION_GRAPH_TRACER) /* * We only know about function graph tracer setting as static * trampoline. @@ -589,8 +585,9 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) #ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_DYNAMIC_FTRACE -extern void ftrace_graph_call(void); +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +extern void ftrace_graph_call(void); static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr); @@ -618,19 +615,28 @@ int ftrace_disable_ftrace_graph_caller(void) return ftrace_mod_jmp(ip, &ftrace_stub); } +#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ +int ftrace_enable_ftrace_graph_caller(void) +{ + return 0; +} +int ftrace_disable_ftrace_graph_caller(void) +{ + return 0; +} +#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ #endif /* !CONFIG_DYNAMIC_FTRACE */ /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ -void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, +void prepare_ftrace_return(unsigned long ip, unsigned long *parent, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long)&return_to_handler; - unsigned long old; - int faulted; + int bit; /* * When resuming from suspend-to-ram, this function can be indirectly @@ -650,37 +656,25 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; - /* - * Protect against fault, even if it shouldn't - * happen. This tool is too much intrusive to - * ignore such a protection. 
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 7c273846c687..7a879901f103 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -174,11 +174,6 @@ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
 SYM_FUNC_END(ftrace_caller);
 
 SYM_FUNC_START(ftrace_epilogue)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
-	jmp ftrace_stub
-#endif
-
 /*
  * This is weak to keep gas from relaxing the jumps.
  * It is also used to copy the retq for trampolines.
@@ -251,7 +246,6 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
 	 * If ORIG_RAX is anything but zero, make this a call to that.
 	 * See arch_ftrace_set_direct_caller().
 	 */
-	movq ORIG_RAX(%rsp), %rax
 	testq	%rax, %rax
 SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
 	jnz	1f
@@ -289,15 +283,6 @@ SYM_FUNC_START(__fentry__)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 
-fgraph_trace:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpq $ftrace_stub, ftrace_graph_return
-	jnz ftrace_graph_caller
-
-	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz ftrace_graph_caller
-#endif
-
 SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
 	retq
 
@@ -315,25 +300,12 @@ trace:
 	CALL_NOSPEC r8
 	restore_mcount_regs
 
-	jmp fgraph_trace
+	jmp ftrace_stub
 
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_FUNC_START(ftrace_graph_caller)
-	/* Saves rbp into %rdx and fills first parameter */
-	save_mcount_regs
-
-	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
-	movq $0, %rdx	/* No framepointers needed */
-	call	prepare_ftrace_return
-
-	restore_mcount_regs
-
-	retq
-SYM_FUNC_END(ftrace_graph_caller)
-
 SYM_FUNC_START(return_to_handler)
 	subq  $24, %rsp
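The surviving ORIG_RAX test above is the hook point for direct trampolines (the batched bpf-to-function attachments mentioned in this merge). My reading of the mechanism from the C side, sketched for illustration (the dispatcher and lookup below are paraphrased, not this patch's code; arch_ftrace_set_direct_caller() is the real x86 helper and, as I understand it, simply stores the address in regs->orig_ax):

        static void direct_dispatch(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *ops, struct ftrace_regs *fregs)
        {
                unsigned long addr = my_lookup_direct_tramp(ip);  /* hypothetical */

                /*
                 * A non-zero ORIG_RAX makes the ftrace_regs_caller asm above
                 * turn its return into a call to 'addr'.
                 */
                if (addr)
                        arch_ftrace_set_direct_caller(&fregs->regs, addr);
        }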
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b6e046e4b289..fce99e249d61 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -809,7 +809,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 	ri->fp = sara;
 
 	/* Replace the return addr with trampoline addr */
-	*sara = (unsigned long) &kretprobe_trampoline;
+	*sara = (unsigned long) &__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
@@ -1019,52 +1019,91 @@ NOKPROBE_SYMBOL(kprobe_int3_handler);
  */
 asm(
 	".text\n"
-	".global kretprobe_trampoline\n"
-	".type kretprobe_trampoline, @function\n"
-	"kretprobe_trampoline:\n"
-	/* We don't bother saving the ss register */
+	".global __kretprobe_trampoline\n"
+	".type __kretprobe_trampoline, @function\n"
+	"__kretprobe_trampoline:\n"
 #ifdef CONFIG_X86_64
+	/* Push a fake return address to tell the unwinder it's a kretprobe. */
+	"	pushq $__kretprobe_trampoline\n"
+	UNWIND_HINT_FUNC
+	/* Save the 'sp - 8'; this will be fixed up later. */
 	"	pushq %rsp\n"
 	"	pushfq\n"
 	SAVE_REGS_STRING
 	"	movq %rsp, %rdi\n"
 	"	call trampoline_handler\n"
-	/* Replace saved sp with true return address. */
-	"	movq %rax, 19*8(%rsp)\n"
 	RESTORE_REGS_STRING
+	/* In trampoline_handler(), 'regs->flags' is copied to 'regs->sp'. */
+	"	addq $8, %rsp\n"
 	"	popfq\n"
 #else
+	/* Push a fake return address to tell the unwinder it's a kretprobe. */
+	"	pushl $__kretprobe_trampoline\n"
+	UNWIND_HINT_FUNC
+	/* Save the 'sp - 4'; this will be fixed up later. */
 	"	pushl %esp\n"
 	"	pushfl\n"
 	SAVE_REGS_STRING
 	"	movl %esp, %eax\n"
 	"	call trampoline_handler\n"
-	/* Replace saved sp with true return address. */
-	"	movl %eax, 15*4(%esp)\n"
 	RESTORE_REGS_STRING
+	/* In trampoline_handler(), 'regs->flags' is copied to 'regs->sp'. */
+	"	addl $4, %esp\n"
 	"	popfl\n"
 #endif
 	"	ret\n"
-	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
+	".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"
 );
-NOKPROBE_SYMBOL(kretprobe_trampoline);
-STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
+NOKPROBE_SYMBOL(__kretprobe_trampoline);
+/*
+ * __kretprobe_trampoline() skips updating the frame pointer. The frame
+ * pointer saved in trampoline_handler() points to the real caller
+ * function's frame pointer, so __kretprobe_trampoline() doesn't have a
+ * standard stack frame with CONFIG_FRAME_POINTER=y.
+ * Mark it as a non-standard function; the FP unwinder can still unwind
+ * it correctly without the hint.
+ */
+STACK_FRAME_NON_STANDARD_FP(__kretprobe_trampoline);
+
+/* This is called from kretprobe_trampoline_handler(). */
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+				 kprobe_opcode_t *correct_ret_addr)
+{
+	unsigned long *frame_pointer = &regs->sp + 1;
+
+	/* Replace the fake return address with the real one. */
+	*frame_pointer = (unsigned long)correct_ret_addr;
+}
 
 /*
- * Called from kretprobe_trampoline
+ * Called from __kretprobe_trampoline
 */
-__used __visible void *trampoline_handler(struct pt_regs *regs)
+__used __visible void trampoline_handler(struct pt_regs *regs)
 {
+	unsigned long *frame_pointer;
+
 	/* fixup registers */
 	regs->cs = __KERNEL_CS;
 #ifdef CONFIG_X86_32
 	regs->gs = 0;
 #endif
-	regs->ip = (unsigned long)&kretprobe_trampoline;
+	regs->ip = (unsigned long)&__kretprobe_trampoline;
 	regs->orig_ax = ~0UL;
+	regs->sp += sizeof(long);
+	frame_pointer = &regs->sp + 1;
 
-	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
+	/*
+	 * The return address at 'frame_pointer' is recovered by
+	 * arch_kretprobe_fixup_return(), which is called from
+	 * kretprobe_trampoline_handler().
+	 */
+	kretprobe_trampoline_handler(regs, frame_pointer);
+
+	/*
+	 * Copy FLAGS to 'pt_regs::sp' so that __kretprobe_trampoline()
+	 * can do RET right after POPF.
+	 */
+	regs->sp = regs->flags;
 }
 NOKPROBE_SYMBOL(trampoline_handler);
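To see why trampoline_handler() adds sizeof(long) to regs->sp and why frame_pointer is '&regs->sp + 1', it helps to draw the stack the trampoline builds. A hedged sketch for x86-64 (the slot layout is my reading of the pushes above, not text from the patch):

        /*
         * Lower addresses at the top; regs == %rsp after SAVE_REGS_STRING:
         *
         *   [ pt_regs: r15 ... ip, cs, flags ]   <- regs
         *   [ regs->sp: entry sp - 8 ]           <- "pushq %rsp"; the handler's
         *                                           'regs->sp += sizeof(long)'
         *                                           turns it into the entry sp
         *   [ regs->ss: $__kretprobe_trampoline ] <- fake return address;
         *                                           frame_pointer == &regs->sp + 1
         *                                           points here, and
         *                                           arch_kretprobe_fixup_return()
         *                                           overwrites it with the real one
         *   [ caller's stack ... ]
         */

On the way out, RESTORE_REGS_STRING pops the general registers, 'addq $8, %rsp' skips the original FLAGS slot and lands on the sp slot that now holds the copied FLAGS, POPF restores them, and RET consumes the fixed-up return address.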
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 596de2f6d3a5..dd2ec14adb77 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,7 +25,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	if (bit < 0)
 		return;
 
-	preempt_disable_notrace();
 	p = get_kprobe((kprobe_opcode_t *)ip);
 	if (unlikely(!p) || kprobe_disabled(p))
 		goto out;
@@ -59,7 +58,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 		__this_cpu_write(current_kprobe, NULL);
 	}
 out:
-	preempt_enable_notrace();
 	ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 71425ebba98a..b4a54a52aa59 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -367,10 +367,10 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 
 /* Check whether the addr is within the optimized instructions. */
 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-				 unsigned long addr)
+				 kprobe_opcode_t *addr)
 {
-	return ((unsigned long)op->kp.addr <= addr &&
-		(unsigned long)op->kp.addr + op->optinsn.size > addr);
+	return (op->kp.addr <= addr &&
+		op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
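The opt.c conversion is safe because kprobe_opcode_t is a single byte on x86 (typedef u8), so pointer arithmetic on op->kp.addr advances in bytes, exactly matching the old unsigned long math. A hedged standalone restatement of the range check (illustrative user-space C, not kernel code):

        #include <stdbool.h>
        #include <stddef.h>

        typedef unsigned char kprobe_opcode_t;  /* x86: one byte per opcode unit */

        /* True iff addr falls in [base, base + size). */
        static bool within_insn_slot(kprobe_opcode_t *base, size_t size,
                                     kprobe_opcode_t *addr)
        {
                return base <= addr && addr < base + size;
        }

The kernel version keeps the equivalent 'base + size > addr' spelling carried over from the unsigned long original.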
diff --git a/arch/x86/kernel/trace.c b/arch/x86/kernel/trace.c
index 6b73b6f92ad3..8322e8352777 100644
--- a/arch/x86/kernel/trace.c
+++ b/arch/x86/kernel/trace.c
@@ -231,4 +231,4 @@ void osnoise_arch_unregister(void)
 	unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
 	unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
 }
-#endif /* CONFIG_OSNOISE_TRAECR && CONFIG_X86_LOCAL_APIC */
+#endif /* CONFIG_OSNOISE_TRACER && CONFIG_X86_LOCAL_APIC */
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index d7c44b257f7f..8e1c50c86e5d 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -240,8 +240,7 @@ static bool update_stack_state(struct unwind_state *state,
 	else {
 		addr_p = unwind_get_return_address_ptr(state);
 		addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
-		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-						  addr, addr_p);
+		state->ip = unwind_recover_ret_addr(state, addr, addr_p);
 	}
 
 	/* Save the original stack pointer for unwind_dump(): */
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index c49f10ffd8cd..884d68a6e714 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -15,8 +15,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
 
 	addr = READ_ONCE_NOCHECK(*state->sp);
 
-	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-				     addr, state->sp);
+	return unwind_recover_ret_addr(state, addr, state->sp);
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index a1202536fc57..e6f7592790af 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -534,9 +534,8 @@ bool unwind_next_frame(struct unwind_state *state)
 		if (!deref_stack_reg(state, ip_p, &state->ip))
 			goto err;
 
-		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-						  state->ip, (void *)ip_p);
-
+		state->ip = unwind_recover_ret_addr(state, state->ip,
+						    (unsigned long *)ip_p);
 		state->sp = sp;
 		state->regs = NULL;
 		state->prev_regs = NULL;
@@ -549,7 +548,18 @@ bool unwind_next_frame(struct unwind_state *state)
 					 (void *)orig_ip);
 			goto err;
 		}
-
+		/*
+		 * There is a small chance of interrupting at the entry of
+		 * __kretprobe_trampoline(), where no ORC info exists. That
+		 * point is right after the RET to __kretprobe_trampoline(),
+		 * i.e. right after the modified return address was consumed.
+		 * There, the @addr_p of unwind_recover_kretprobe() (which must
+		 * point to the stack entry storing the modified return
+		 * address) is "SP - (one stack entry)", because SP was just
+		 * incremented by the RET.
+		 */
+		state->ip = unwind_recover_kretprobe(state, state->ip,
+				(unsigned long *)(state->sp - sizeof(long)));
 		state->regs = (struct pt_regs *)sp;
 		state->prev_regs = NULL;
 		state->full_regs = true;
@@ -562,6 +572,9 @@ bool unwind_next_frame(struct unwind_state *state)
 					 (void *)orig_ip);
 			goto err;
 		}
+		/* See UNWIND_HINT_TYPE_REGS case comment. */
+		state->ip = unwind_recover_kretprobe(state, state->ip,
+				(unsigned long *)(state->sp - sizeof(long)));
 
 		if (state->full_regs)
 			state->prev_regs = state->regs;
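A worked restatement of the "SP - (one stack entry)" arithmetic in both REGS cases: before the probed function returns, the modified return address sits at *SP; the RET pops it and bumps SP by one word. So if the interrupt lands on the first instruction of __kretprobe_trampoline() with, say, SP == 0xffffc900000ffb08, the slot that held the modified address is 0xffffc900000ffb00. In code form (the helper call is real, the framing is illustrative):

        /* SP was already bumped by the RET, so the popped slot is one word below. */
        unsigned long *ret_slot = (unsigned long *)(state->sp - sizeof(long));

        state->ip = unwind_recover_kretprobe(state, state->ip, ret_slot);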