author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-21 15:27:48 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-21 15:27:48 -0800
commit    | 8bf1a529cd664c8e5268381f1e24fe67aa611dd3 (patch)
tree      | c5cec84941923778e4e2ec5a5d65e2fc8ea71b58 /kernel/trace
parent    | b327dfe05258e09c8db6e1e091c2e6d84dd426a6 (diff)
parent    | d54170812ef1c80e0fa3ed3e554a0bbfc2920d9d (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
- Support for arm64 SME 2 and 2.1. SME2 introduces a new 512-bit
architectural register (ZT0, for the look-up table feature) that
Linux needs to save/restore
- Include TPIDR2 in the signal context and add the corresponding
kselftests
- Perf updates: Arm SPEv1.2 support, HiSilicon uncore PMU updates, ACPI
support for the Marvell DDR and TAD PMU drivers, reset DTM_PMU_CONFIG
(ARM CMN) at probe time
- Support for DYNAMIC_FTRACE_WITH_CALL_OPS on arm64 (see the dispatch
sketch after this list)
- Permit EFI boot with MMU and caches on. Instead of cleaning the
entire loaded kernel image to the PoC and disabling the MMU and
caches before branching to the kernel bare metal entry point, leave
the MMU and caches enabled and rely on EFI's cacheable 1:1 mapping of
all of system RAM to populate the initial page tables
- Expose the AArch32 (compat) ELF_HWCAP features to userspace in an
arm64 kernel (the arm32 kernel only defines the values)
- Harden the arm64 shadow call stack pointer handling: stash the shadow
stack pointer in the task struct on interrupt, load it directly from
this structure
- Signal handling cleanups to remove redundant validation of size
information and avoid reading the same data from userspace twice
- Refactor the hwcap macros to make use of the automatically generated
ID registers. This should make writing new hwcaps less error-prone
- Further arm64 sysreg conversion and some fixes
- arm64 kselftest fixes and improvements
- Pointer authentication cleanups: don't sign leaf functions, unify
asm-arch manipulation
- Pseudo-NMI code generation optimisations
- Minor fixes for SME and TPIDR2 handling
- Miscellaneous updates: ARCH_FORCE_MAX_ORDER is now selectable,
replace strtobool() with kstrtobool() in the cpufeature.c code, apply
dynamic shadow call stack in two passes, intercept pfn changes in
set_pte_at() without the required break-before-make sequence, attempt
to dump all instructions on unhandled kernel faults
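The dispatch sketch referenced in the DYNAMIC_FTRACE_WITH_CALL_OPS item
above: with CALL_OPS, each patched call site has an associated
ftrace_ops pointer that the trampoline can load directly, instead of
hashing the instruction pointer to work out which ops to run. Below is
a minimal user-space C model of that idea only, not the kernel's actual
patching code; site_ops, trampoline and the printf bodies are invented
for illustration.

#include <stdio.h>

/* Cut-down stand-ins for the kernel types used by ftrace callbacks. */
struct ftrace_regs { unsigned long ip; };

struct ftrace_ops {
	void (*func)(unsigned long ip, unsigned long parent_ip,
		     struct ftrace_ops *op, struct ftrace_regs *fregs);
};

/* Fallback that would iterate all registered ops (list elided here). */
static void list_func(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	printf("list dispatch for ip 0x%lx\n", ip);
}

static struct ftrace_ops list_ops = { .func = list_func };

static void my_tracer(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	printf("direct dispatch for ip 0x%lx\n", ip);
}

static struct ftrace_ops my_ops = { .func = my_tracer };

/*
 * Per-call-site ops pointer. In the kernel this lives next to the
 * patched function so the trampoline can find it from the call site;
 * here it is just a variable.
 */
static struct ftrace_ops *site_ops = &list_ops;

static void trampoline(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_regs fregs = { .ip = ip };
	struct ftrace_ops *op = site_ops;	/* one load, no hash lookup */

	op->func(ip, parent_ip, op, &fregs);
}

int main(void)
{
	trampoline(0x1000, 0x2000);	/* zero or many ops: list stub */
	site_ops = &my_ops;		/* exactly one ops: call it directly */
	trampoline(0x1000, 0x2000);
	return 0;
}

The kernel/trace diff below adds the generic side of this scheme: the
FTRACE_FL_CALL_OPS record flag, the ftrace_list_ops/ftrace_nop_ops
stubs a call site can point at, and ftrace_find_unique_ops().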
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (130 commits)
arm64: fix .idmap.text assertion for large kernels
kselftest/arm64: Don't require FA64 for streaming SVE+ZA tests
kselftest/arm64: Copy whole EXTRA context
arm64: kprobes: Drop ID map text from kprobes blacklist
perf: arm_spe: Print the version of SPE detected
perf: arm_spe: Add support for SPEv1.2 inverted event filtering
perf: Add perf_event_attr::config3
arm64/sme: Fix __finalise_el2 SMEver check
drivers/perf: fsl_imx8_ddr_perf: Remove set-but-not-used variable
arm64/signal: Only read new data when parsing the ZT context
arm64/signal: Only read new data when parsing the ZA context
arm64/signal: Only read new data when parsing the SVE context
arm64/signal: Avoid rereading context frame sizes
arm64/signal: Make interface for restore_fpsimd_context() consistent
arm64/signal: Remove redundant size validation from parse_user_sigframe()
arm64/signal: Don't redundantly verify FPSIMD magic
arm64/cpufeature: Use helper macros to specify hwcaps
arm64/cpufeature: Always use symbolic name for feature value in hwcaps
arm64/sysreg: Initial unsigned annotations for ID registers
arm64/sysreg: Initial annotation of signed ID registers
...
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/Kconfig  |   7
-rw-r--r-- | kernel/trace/ftrace.c | 109
2 files changed, 111 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d7043043f59c..caf32389faf3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -42,6 +42,9 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
 config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 	bool
 
+config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
+	bool
+
 config HAVE_DYNAMIC_FTRACE_WITH_ARGS
 	bool
 	help
@@ -257,6 +260,10 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 	depends on DYNAMIC_FTRACE_WITH_REGS
 	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 
+config DYNAMIC_FTRACE_WITH_CALL_OPS
+	def_bool y
+	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
+
 config DYNAMIC_FTRACE_WITH_ARGS
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 750aa3f08b25..51896b610414 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -125,6 +125,33 @@ struct ftrace_ops global_ops;
 void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+/*
+ * Stub used to invoke the list ops without requiring a separate trampoline.
+ */
+const struct ftrace_ops ftrace_list_ops = {
+	.func	= ftrace_ops_list_func,
+	.flags	= FTRACE_OPS_FL_STUB,
+};
+
+static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
+				struct ftrace_ops *op,
+				struct ftrace_regs *fregs)
+{
+	/* do nothing */
+}
+
+/*
+ * Stub used when a call site is disabled. May be called transiently by threads
+ * which have made it into ftrace_caller but haven't yet recovered the ops at
+ * the point the call site is disabled.
+ */
+const struct ftrace_ops ftrace_nop_ops = {
+	.func	= ftrace_ops_nop_func,
+	.flags	= FTRACE_OPS_FL_STUB,
+};
+#endif
+
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -1819,6 +1846,18 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			 * if rec count is zero.
 			 */
 		}
+
+		/*
+		 * If the rec has a single associated ops, and ops->func can be
+		 * called directly, allow the call site to call via the ops.
+		 */
+		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
+		    ftrace_rec_count(rec) == 1 &&
+		    ftrace_ops_get_func(ops) == ops->func)
+			rec->flags |= FTRACE_FL_CALL_OPS;
+		else
+			rec->flags &= ~FTRACE_FL_CALL_OPS;
+
 		count++;
 
 		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
@@ -2113,8 +2152,9 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
 		struct ftrace_ops *ops = NULL;
 
 		pr_info("ftrace record flags: %lx\n", rec->flags);
-		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
-			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
+			rec->flags & FTRACE_FL_REGS ? " R" : "  ",
+			rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
 			ops = ftrace_find_tramp_ops_any(rec);
 			if (ops) {
@@ -2182,6 +2222,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
 		 * want the direct enabled (it will be done via the
 		 * direct helper). But if DIRECT_EN is set, and
 		 * the count is not one, we need to clear it.
+		 *
 		 */
 		if (ftrace_rec_count(rec) == 1) {
 			if (!(rec->flags & FTRACE_FL_DIRECT) !=
@@ -2190,6 +2231,19 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
 		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
 			flag |= FTRACE_FL_DIRECT;
 		}
+
+		/*
+		 * Ops calls are special, as count matters.
+		 * As with direct calls, they must only be enabled when count
+		 * is one, otherwise they'll be handled via the list ops.
+		 */
+		if (ftrace_rec_count(rec) == 1) {
+			if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
+			    !(rec->flags & FTRACE_FL_CALL_OPS_EN))
+				flag |= FTRACE_FL_CALL_OPS;
+		} else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+			flag |= FTRACE_FL_CALL_OPS;
+		}
 	}
 
 	/* If the state of this record hasn't changed, then do nothing */
@@ -2234,6 +2288,21 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
 				rec->flags &= ~FTRACE_FL_DIRECT_EN;
 			}
 		}
+
+		if (flag & FTRACE_FL_CALL_OPS) {
+			if (ftrace_rec_count(rec) == 1) {
+				if (rec->flags & FTRACE_FL_CALL_OPS)
+					rec->flags |= FTRACE_FL_CALL_OPS_EN;
+				else
+					rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+			} else {
+				/*
+				 * Can only call directly if there's
+				 * only one set of associated ops.
+				 */
+				rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+			}
+		}
 	}
 
 	/*
@@ -2263,7 +2332,8 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
 		 * and REGS states. The _EN flags must be disabled though.
 		 */
 		rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
-				FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
+				FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
+				FTRACE_FL_CALL_OPS_EN);
 	}
 
 	ftrace_bug_type = FTRACE_BUG_NOP;
@@ -2436,6 +2506,25 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
 	return NULL;
 }
 
+struct ftrace_ops *
+ftrace_find_unique_ops(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op, *found = NULL;
+	unsigned long ip = rec->ip;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+		if (hash_contains_ip(ip, op->func_hash)) {
+			if (found)
+				return NULL;
+			found = op;
+		}
+
+	} while_for_each_ftrace_op(op);
+
+	return found;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
 static struct ftrace_hash *direct_functions = EMPTY_HASH;
@@ -3785,11 +3874,12 @@ static int t_show(struct seq_file *m, void *v)
 	if (iter->flags & FTRACE_ITER_ENABLED) {
 		struct ftrace_ops *ops;
 
-		seq_printf(m, " (%ld)%s%s%s",
+		seq_printf(m, " (%ld)%s%s%s%s",
 			   ftrace_rec_count(rec),
 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
-			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
+			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
+			   rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
 			ops = ftrace_find_tramp_ops_any(rec);
 			if (ops) {
@@ -3805,6 +3895,15 @@ static int t_show(struct seq_file *m, void *v)
 		} else {
 			add_trampoline_func(m, NULL, rec);
 		}
+		if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+			ops = ftrace_find_unique_ops(rec);
+			if (ops) {
+				seq_printf(m, "\tops: %pS (%pS)",
+					   ops, ops->func);
+			} else {
+				seq_puts(m, "\tops: ERROR!");
+			}
+		}
 		if (rec->flags & FTRACE_FL_DIRECT) {
 			unsigned long direct;
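For orientation, here is a hedged sketch of how this machinery gets
exercised from the caller's side: a minimal kernel module that attaches
a single ftrace_ops to one function, using only long-standing ftrace
APIs (ftrace_set_filter() and register_ftrace_function()). When exactly
one ops traces the record and ops->func needs no wrapper, the
__ftrace_hash_rec_update() hunk above sets FTRACE_FL_CALL_OPS, and
architectures that select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS (arm64, as
of this merge) can branch from the call site through the per-site ops
pointer. Module and symbol names below are illustrative, not taken from
this diff.

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/string.h>

/* Illustrative target; any filterable function would do. */
static char func_name[] = "do_sys_openat2";

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	pr_info("hit %pS from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init callops_demo_init(void)
{
	int ret;

	/* Restrict the ops to a single function before registering. */
	ret = ftrace_set_filter(&my_ops, func_name, strlen(func_name), 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}

static void __exit callops_demo_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(callops_demo_init);
module_exit(callops_demo_exit);
MODULE_LICENSE("GPL");

With such an ops registered, the t_show() hunk above makes the tracefs
enabled_functions file print an " O" marker for the record and, when
FTRACE_FL_CALL_OPS_EN is set, an "ops: <ops> (<func>)" annotation
resolved via ftrace_find_unique_ops().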