author    Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-04-06 12:11:36 -0400
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-04-10 14:33:54 -0400
commit    252babcd52aabe37aaad03685e7d6ad454edb9f9 (patch)
tree      8698b44de4171656c783879ddf5b4d3be22c3065
parent    0598e4f08e3da1fea2ee3b4765a44798147a8c62 (diff)
tracing: Replace the per_cpu() with __this_cpu*() in trace_stack.c
The updates to the trace_active per-cpu variable can be done with the
__this_cpu_*() functions, as the variable is only ever modified on the
CPU it lives on.

Thanks to Paul McKenney for suggesting __this_cpu_* instead of
this_cpu_*.

Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
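A note on the guard logic the patch changes: the old code post-incremented
and tested the old value against 0, while the new code increments first and
tests the new value against 1; the two are equivalent. A minimal sketch of
both forms, using a hypothetical per-cpu counter `guard` as a stand-in for
trace_active (not code from the patch):

    static DEFINE_PER_CPU(int, guard);

    /* Old form: post-increment, enter only if the previous value was 0. */
    static void old_style(void)
    {
            int cpu;

            preempt_disable_notrace();
            cpu = raw_smp_processor_id();
            if (per_cpu(guard, cpu)++ != 0)
                    goto out;
            /* ... work that must not recurse ... */
    out:
            per_cpu(guard, cpu)--;
            preempt_enable_notrace();
    }

    /* New form: increment first, enter only if the new value is 1. */
    static void new_style(void)
    {
            preempt_disable_notrace();
            __this_cpu_inc(guard);
            if (__this_cpu_read(guard) != 1)
                    goto out;
            /* ... work that must not recurse ... */
    out:
            __this_cpu_dec(guard);
            preempt_enable_notrace();
    }

Both forms leave the counter balanced on every path and reject nested entry
on the same CPU; the new one simply avoids computing a CPU index.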
-rw-r--r--   kernel/trace/trace_stack.c   |  23
1 file changed, 7 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 5fb1f2c87e6b..338d076a06da 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -207,13 +207,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
unsigned long stack;
- int cpu;
preempt_disable_notrace();
- cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
- if (per_cpu(trace_active, cpu)++ != 0)
+ __this_cpu_inc(trace_active);
+ if (__this_cpu_read(trace_active) != 1)
goto out;
ip += MCOUNT_INSN_SIZE;
@@ -221,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
check_stack(ip, &stack);
out:
- per_cpu(trace_active, cpu)--;
+ __this_cpu_dec(trace_active);
/* prevent recursion in schedule */
preempt_enable_notrace();
}
@@ -253,7 +252,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
long *ptr = filp->private_data;
unsigned long val, flags;
int ret;
- int cpu;
ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret)
@@ -266,14 +264,13 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
* we will cause circular lock, so we also need to increase
* the percpu trace_active here.
*/
- cpu = smp_processor_id();
- per_cpu(trace_active, cpu)++;
+ __this_cpu_inc(trace_active);
arch_spin_lock(&stack_trace_max_lock);
*ptr = val;
arch_spin_unlock(&stack_trace_max_lock);
- per_cpu(trace_active, cpu)--;
+ __this_cpu_dec(trace_active);
local_irq_restore(flags);
return count;
@@ -307,12 +304,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
- int cpu;
-
local_irq_disable();
- cpu = smp_processor_id();
- per_cpu(trace_active, cpu)++;
+ __this_cpu_inc(trace_active);
arch_spin_lock(&stack_trace_max_lock);
@@ -324,12 +318,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void t_stop(struct seq_file *m, void *p)
{
- int cpu;
-
arch_spin_unlock(&stack_trace_max_lock);
- cpu = smp_processor_id();
- per_cpu(trace_active, cpu)--;
+ __this_cpu_dec(trace_active);
local_irq_enable();
}
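Why __this_cpu_*() rather than this_cpu_*() (background, not spelled out in
the commit message): the double-underscore variants provide no preemption
protection of their own, so they are cheaper but only valid where the caller
already cannot migrate off the CPU. Every site touched above satisfies that,
following the pattern:

    preempt_disable_notrace();      /* or local_irq_save()/local_irq_disable() */
    __this_cpu_inc(trace_active);   /* safe: the task cannot migrate here */
    /* ... */
    __this_cpu_dec(trace_active);
    preempt_enable_notrace();

With CONFIG_DEBUG_PREEMPT enabled, the __this_cpu_*() operations warn when
used with preemption enabled, which would flag any site violating this
assumption.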