author    Steven Rostedt (Google) <rostedt@goodmis.org>  2024-06-07 09:48:33 -0400
committer Steven Rostedt (Google) <rostedt@goodmis.org>  2024-06-10 18:08:23 -0400
commit    4267fda4afd9fcf1f92b6bfa0259295f140c8ecd (patch)
tree      223f2050786c60711d59f1a6614603d2754a25b1 /kernel/trace
parent    2f6b884dfcc55065b76d2bf1e2424b93991ae92d (diff)
function_graph: Make fgraph_update_pid_func() a stub for !DYNAMIC_FTRACE
When CONFIG_DYNAMIC_FTRACE is not set, the function fgraph_update_pid_func() doesn't do anything. Currently, most of its logic sits inside an "#ifdef CONFIG_DYNAMIC_FTRACE" block, but its variables are declared outside that block, which produces unused-variable warnings when DYNAMIC_FTRACE is not set. Instead, place the whole function (and its helper fgraph_pid_func()) within the #ifdef block and have the header file provide an empty stub function for when DYNAMIC_FTRACE is not defined.

Link: https://lore.kernel.org/linux-trace-kernel/20240607094833.6a787d73@rorschach.local.home

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202406071806.BRjaC5FF-lkp@intel.com/
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
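For context, a minimal sketch of the idiom this change applies (the CONFIG_SOME_FEATURE and feature_update() names below are hypothetical, not the kernel's actual symbols): the real function is only compiled when the config option is enabled, while the header hands every other build an empty static inline stub, so call sites need no #ifdefs and no unused-variable warnings are produced.

/* feature.h -- illustrative only */
#ifdef CONFIG_SOME_FEATURE
extern void feature_update(void);             /* real version, built in feature.c */
#else
static inline void feature_update(void) {}    /* no-op stub when the option is off */
#endif

/* feature.c -- illustrative only */
#ifdef CONFIG_SOME_FEATURE
void feature_update(void)
{
	/* real work happens here; its local variables never exist in !CONFIG builds */
}
#endif

Callers invoke feature_update() unconditionally; in !CONFIG_SOME_FEATURE builds the stub is inlined and optimized away.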
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/fgraph.c           4
-rw-r--r--  kernel/trace/ftrace_internal.h  4
2 files changed, 6 insertions, 2 deletions
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index a13551a023aa..63d0c2f84ce1 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -1138,6 +1138,7 @@ void ftrace_graph_exit_task(struct task_struct *t)
kfree(ret_stack);
}
+#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops)
{
@@ -1164,7 +1165,6 @@ void fgraph_update_pid_func(void)
if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
return;
-#ifdef CONFIG_DYNAMIC_FTRACE
list_for_each_entry(op, &graph_ops.subop_list, list) {
if (op->flags & FTRACE_OPS_FL_PID) {
gops = container_of(op, struct fgraph_ops, ops);
@@ -1174,8 +1174,8 @@ void fgraph_update_pid_func(void)
static_call_update(fgraph_func, gops->entryfunc);
}
}
-#endif
}
+#endif
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
index 4bb1e881154a..3235470e61b3 100644
--- a/kernel/trace/ftrace_internal.h
+++ b/kernel/trace/ftrace_internal.h
@@ -52,7 +52,11 @@ static inline int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_o
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern int ftrace_graph_active;
+# ifdef CONFIG_DYNAMIC_FTRACE
extern void fgraph_update_pid_func(void);
+# else
+static inline void fgraph_update_pid_func(void) {}
+# endif
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
# define ftrace_graph_active 0
static inline void fgraph_update_pid_func(void) {}