From 9288f99aa52d90a5b82573c4b769c97c55af2f56 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 7 Oct 2009 19:17:45 -0400
Subject: this_cpu: Use this_cpu_xx for ftrace

this_cpu_xx can reduce the instruction count here and also
avoid address arithmetic.

Signed-off-by: Christoph Lameter
Acked-by: Ingo Molnar
Signed-off-by: Tejun Heo
---
 kernel/trace/trace.c | 8 ++++----
 kernel/trace/trace.h | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..8439cdcada94 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 405cb850b75d..542f45554883 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -413,7 +413,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
 
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
--
cgit v1.2.3-58-ga151


From dec54bf538326a1503dd780c9f2811f495af95c5 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 13 Oct 2009 23:23:02 +0900
Subject: this_cpu: Use this_cpu_xx in trace_functions_graph.c

ftrace_cpu_disabled usage in trace_functions_graph.c were left out
during this_cpu_xx conversion in commit 9288f99a causing compile
failure. Convert them.

Signed-off-by: Tejun Heo
Reported-by: Stephen Rothwell
Cc: Christoph Lameter
---
 kernel/trace/trace_functions_graph.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4d..90a6daa10962 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -176,7 +176,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +240,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
--
cgit v1.2.3-58-ga151


From 9705f69ed0a5ef593f45e618bcb3cbfdbf391f64 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Thu, 29 Oct 2009 22:34:13 +0900
Subject: percpu: make percpu symbols in tracer unique

This patch updates percpu related symbols in kernel tracer such that
percpu symbols are unique and don't clash with local symbols.  This
serves two purposes of decreasing the possibility of global percpu
symbol collision and allowing dropping per_cpu__ prefix from percpu
symbols.

* kernel/trace/trace.c: s/max_data/max_tr_data/
* kernel/trace/trace_hw_branches: s/tracer/hwb_tracer/, s/buffer/hwb_buffer/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.

Signed-off-by: Tejun Heo
Acked-by: Steven Rostedt
Cc: Rusty Russell
Cc: Frederic Weisbecker
Cc: Ingo Molnar
---
 kernel/trace/trace.c             |  4 ++--
 kernel/trace/trace_hw_branches.c | 51 ++++++++++++++++++++--------------------
 2 files changed, 28 insertions(+), 27 deletions(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8439cdcada94..85a5ed70b5b2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int			tracer_enabled = 1;
@@ -4426,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..adaf7a39d0dc 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-	per_cpu(tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-				   NULL, (size_t)-1, BTS_KERNEL);
+	per_cpu(hwb_tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+				   BTS_KERNEL);
 
-	if (IS_ERR(per_cpu(tracer, cpu)))
-		per_cpu(tracer, cpu) = NULL;
+	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+		per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu) {
 		bts_trace_init_cpu(cpu);
 
-		if (likely(per_cpu(tracer, cpu)))
+		if (likely(per_cpu(hwb_tracer, cpu)))
 			trace_hw_branches_enabled = 1;
 	}
 	trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 	trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 0;
 	put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 1;
 	put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 			bts_trace_init_cpu(cpu);
 
 			if (trace_hw_branches_suspended &&
-			    likely(per_cpu(tracer, cpu)))
-				ds_suspend_bts(per_cpu(tracer, cpu));
+			    likely(per_cpu(hwb_tracer, cpu)))
+				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 		}
 		break;
 
 	case CPU_DOWN_PREPARE:
 		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 
@@ -256,8 +257,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	/*
 	 * We need to collect the trace on the respective cpu since ftrace
 	 * implicitly adds the record for the current cpu.
 	 */
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	put_online_cpus();
 }
--
cgit v1.2.3-58-ga151
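
The patches above replace local_t counters accessed through __get_cpu_var() with plain
per-CPU integers manipulated via the this_cpu accessors. What follows is a minimal
stand-alone kernel module sketch of that pattern, not part of the patch series: the
module and symbol names (percpu_demo, demo_disabled) are invented for illustration, and
it assumes a later kernel where __this_cpu_inc()/__this_cpu_read() take the per-cpu
variable directly (the per_cpu_var() wrapper used in these 2009 patches was an interim
form that was later dropped).

/* percpu_demo.c - hypothetical illustration only; not from the patches above */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

/* A plain int per-CPU counter, like the converted ftrace_cpu_disabled. */
static DEFINE_PER_CPU(int, demo_disabled);

static void demo_disable(void)
{
	preempt_disable();
	/* Single read-modify-write on this CPU's copy, no explicit address math. */
	__this_cpu_inc(demo_disabled);
}

static void demo_enable(void)
{
	__this_cpu_dec(demo_disabled);
	preempt_enable();
}

static int __init demo_init(void)
{
	demo_disable();
	/* The raw __this_cpu_read() is fine here: preemption is still disabled. */
	pr_info("demo_disabled on this CPU: %d\n", __this_cpu_read(demo_disabled));
	demo_enable();
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

On x86 each __this_cpu_* operation compiles down to a single segment-prefixed
instruction on the CPU's own copy of the variable, which is the instruction-count and
address-arithmetic saving the first changelog refers to.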