author     Linus Torvalds <torvalds@linux-foundation.org>  2023-03-14 17:07:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-03-14 17:07:54 -0700
commit     29db00c252c7d0e015f3b436647b6f87d5854833 (patch)
tree       62cbe636e5d2ca14a69abcfd46a86670d27442f7
parent     ed38ff164fba98a21993c95d240013f32309ccab (diff)
parent     c2679254b9c9980d9045f0f722cf093a2b1f7590 (diff)
Merge tag 'trace-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Do not allow histogram values to have modifiers. They can cause a NULL
   pointer dereference if they do.

 - Warn if hist_field_name() is passed a NULL, preventing the NULL pointer
   dereference mentioned above.

 - Fix invalid address lookup race in lookup_rec()

 - Define ftrace_stub_graph conditionally to prevent linker errors

 - Always check if RCU is watching at all tracepoint locations

* tag 'trace-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Make tracepoint lockdep check actually test something
  ftrace,kcfi: Define ftrace_stub_graph conditionally
  ftrace: Fix invalid address access in lookup_rec() when index is 0
  tracing: Check field value in hist_field_name()
  tracing: Do not let histogram values have some modifiers
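Of the fixes below, the hist_field_name() change is a straight defensive guard: warn once and hand back an empty name instead of dereferencing a NULL field. The following is a minimal userspace sketch of that warn-once-and-bail pattern; it is plain C rather than kernel code, and struct hist_field_sketch, warn_on_once() and field_name_sketch() are invented stand-ins for the kernel's struct hist_field, WARN_ON_ONCE() and hist_field_name().

#include <stdbool.h>
#include <stdio.h>

struct hist_field_sketch {
        const char *name;
};

/* Stand-in for WARN_ON_ONCE(): report the condition the first time only. */
static bool warn_on_once(bool cond, const char *msg)
{
        static bool warned;

        if (cond && !warned) {
                warned = true;
                fprintf(stderr, "WARNING (once): %s\n", msg);
        }
        return cond;
}

/* Mirrors the fixed hist_field_name(): never dereference a NULL field. */
static const char *field_name_sketch(struct hist_field_sketch *field)
{
        const char *field_name = "";

        if (warn_on_once(!field, "hist field is NULL"))
                return field_name;      /* empty string instead of a crash */

        return field->name;
}

int main(void)
{
        struct hist_field_sketch f = { .name = "common_timestamp" };

        printf("'%s'\n", field_name_sketch(&f));   /* 'common_timestamp' */
        printf("'%s'\n", field_name_sketch(NULL)); /* '' plus one warning */
        printf("'%s'\n", field_name_sketch(NULL)); /* '' and no second warning */
        return 0;
}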
-rw-r--r--  arch/x86/kernel/ftrace_64.S       |  2
-rw-r--r--  include/linux/tracepoint.h        | 15
-rw-r--r--  kernel/trace/ftrace.c             |  3
-rw-r--r--  kernel/trace/trace_events_hist.c  | 12
4 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 1265ad519249..fb4f1e01b64a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub)
RET
SYM_FUNC_END(ftrace_stub)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(ftrace_stub_graph)
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e299f29375bb..6811e43c1b5c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -242,12 +242,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
extern int __traceiter_##name(data_proto); \
@@ -260,9 +259,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
TP_ARGS(args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
- rcu_read_lock_sched_notrace(); \
- rcu_dereference_sched(__tracepoint_##name.funcs);\
- rcu_read_unlock_sched_notrace(); \
+ WARN_ON_ONCE(!rcu_is_watching()); \
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
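The rewritten comment and check above replace an rcu_read_lock_sched_notrace()/rcu_dereference_sched() round trip, which never actually verified anything, with an explicit rcu_is_watching() test. Below is a minimal userspace analogue of that check, not kernel code; rcu_watching, lockdep_enabled and tracepoint_site() are invented for illustration, and the kernel uses WARN_ON_ONCE() where this sketch settles for fprintf().

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel state; both names are invented for this sketch. */
static bool rcu_watching = true;          /* what rcu_is_watching() would report */
static const bool lockdep_enabled = true; /* stands in for IS_ENABLED(CONFIG_LOCKDEP) */

/*
 * Analogue of the fixed __DECLARE_TRACE() branch: even while the tracepoint
 * itself is disabled, complain if the site was reached from a context where
 * RCU is not watching, because the handlers will need RCU once it is enabled.
 */
static void tracepoint_site(bool cond)
{
        if (lockdep_enabled && cond) {
                if (!rcu_watching)
                        fprintf(stderr, "tracepoint hit with RCU not watching\n");
        }
}

int main(void)
{
        tracepoint_site(true);  /* RCU watching: silent */

        rcu_watching = false;   /* e.g. deep in the idle path */
        tracepoint_site(true);  /* reports the problem */
        return 0;
}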
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 29baa97d0d53..9b2803c7a18f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
- if (end < pg->records[0].ip ||
+ if (pg->index == 0 ||
+ end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
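The added pg->index == 0 test above keeps lookup_rec() from evaluating pg->records[pg->index - 1] on a page that holds no records, which would index one element before the start of the array. Below is a small userspace sketch of the fixed loop; struct page_sketch, struct rec_sketch and lookup_sketch() are invented for illustration, and the comparator is simplified to an exact ip match rather than the kernel's start/end range check.

#include <stdio.h>
#include <stdlib.h>

#define MCOUNT_INSN_SIZE 5      /* size of the patched call site, as on x86-64 */

struct rec_sketch {
        unsigned long ip;
};

struct page_sketch {
        struct rec_sketch *records;
        int index;              /* number of records actually stored */
        struct page_sketch *next;
};

static int rec_cmp(const void *a, const void *b)
{
        const struct rec_sketch *ra = a, *rb = b;

        if (ra->ip < rb->ip)
                return -1;
        return ra->ip > rb->ip;
}

/*
 * Sketch of the fixed lookup: an empty page (index == 0) is skipped before
 * records[0] or records[index - 1] is ever touched; without the guard the
 * second comparison reads outside the array.
 */
static struct rec_sketch *lookup_sketch(struct page_sketch *pages,
                                        unsigned long start, unsigned long end)
{
        struct page_sketch *pg;
        struct rec_sketch key = { .ip = end };

        for (pg = pages; pg; pg = pg->next) {
                if (pg->index == 0 ||           /* the guard added by the patch */
                    end < pg->records[0].ip ||
                    start >= pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)
                        continue;
                return bsearch(&key, pg->records, pg->index,
                               sizeof(key), rec_cmp);
        }
        return NULL;
}

int main(void)
{
        struct rec_sketch recs[] = { { 0x1000 }, { 0x2000 } };
        struct page_sketch full = { .records = recs, .index = 2 };
        struct page_sketch empty = { .records = NULL, .index = 0, .next = &full };
        struct rec_sketch *rec = lookup_sketch(&empty, 0x2000, 0x2000);

        printf("found ip: %#lx\n", rec ? rec->ip : 0UL);
        return 0;
}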
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 89877a18f933..486cca3c2b75 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1331,6 +1331,9 @@ static const char *hist_field_name(struct hist_field *field,
{
const char *field_name = "";
+ if (WARN_ON_ONCE(!field))
+ return field_name;
+
if (level > 1)
return field_name;
@@ -4235,6 +4238,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
goto out;
}
+ /* Some types cannot be a value */
+ if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+ HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+ HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+ HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+ hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+ ret = -EINVAL;
+ }
+
hist_data->fields[val_idx] = hist_field;
++hist_data->n_vals;
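The new check above rejects value fields whose flags carry display-oriented modifiers such as .log2 or .percent before later code treats them as plain numeric values. Below is a minimal userspace sketch of that reject-by-flag-mask idea; the FL_* constants and create_val_field_sketch() are invented for illustration and do not match the kernel's HIST_FIELD_FL_* definitions.

#include <stdio.h>

/* Illustrative flag bits only; the kernel's HIST_FIELD_FL_* values differ. */
#define FL_PERCENT      (1UL << 0)
#define FL_BUCKET       (1UL << 1)
#define FL_LOG2         (1UL << 2)
#define FL_SYM          (1UL << 3)
#define FL_STACKTRACE   (1UL << 4)

/* Modifiers that make no sense on a value field, as in the patch above. */
#define VALUE_DISALLOWED_FLAGS \
        (FL_PERCENT | FL_BUCKET | FL_LOG2 | FL_SYM | FL_STACKTRACE)

static int create_val_field_sketch(unsigned long flags)
{
        if (flags & VALUE_DISALLOWED_FLAGS) {
                fprintf(stderr, "bad field modifier for a value\n");
                return -1;      /* the kernel sets -EINVAL and logs a hist error */
        }
        return 0;
}

int main(void)
{
        printf("plain value: %d\n", create_val_field_sketch(0));       /*  0 */
        printf(".log2 value: %d\n", create_val_field_sketch(FL_LOG2)); /* -1 */
        return 0;
}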