author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2015-06-29 17:06:39 -0700
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2015-07-17 14:59:00 -0700
commit | 2cd6ffafec066118365f6d7eb7a42ea16c1f032c (patch)
tree | 39656499f5a78c4b61528904e3464c2403a0b83b /kernel/rcu/tree_trace.c
parent | 704dd435ac7eaefa89fcd82fd2876b8330e00ff3 (diff)
rcu: Extend expedited funnel locking to rcu_data structure
The strictly rcu_node-based funnel-locking scheme works well in many
cases, but systems with CONFIG_RCU_FANOUT_LEAF=64 won't necessarily get
all that much concurrency. This commit therefore extends the funnel
locking into the per-CPU rcu_data structure, providing concurrency equal
to the number of CPUs.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
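For readers unfamiliar with the pattern: "funnel locking" here means that each requester climbs a lock hierarchy from its own leaf (with this commit, a per-CPU rcu_data structure) toward the root, dropping out as soon as it observes that the work it needs has already been done, so contention is funneled down to a single winner at the root. The sketch below is a minimal user-space illustration of that idea, not the kernel's implementation; struct funnel_node, work_done(), and funnel_lock() are hypothetical names, and pthread mutexes stand in for the kernel's locks.

/*
 * Illustrative user-space sketch of funnel locking; not the kernel's code.
 * All names are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

struct funnel_node {
	pthread_mutex_t lock;
	struct funnel_node *parent;	/* NULL at the root */
	unsigned long done_seq;		/* highest completed sequence number */
};

/* Has the work identified by snapshot @snap already been completed? */
static int work_done(struct funnel_node *np, unsigned long snap)
{
	return (long)(np->done_seq - snap) >= 0;
}

/*
 * Climb from a per-CPU leaf toward the root, hand over hand.  Return 1
 * with the root lock held if the caller must do the work itself, or 0
 * if some other requester already did it.
 */
static int funnel_lock(struct funnel_node *leaf, unsigned long snap)
{
	struct funnel_node *np = leaf;

	pthread_mutex_lock(&np->lock);
	for (;;) {
		if (work_done(np, snap)) {
			pthread_mutex_unlock(&np->lock);
			return 0;	/* someone else beat us to it */
		}
		if (np->parent == NULL)
			return 1;	/* winner: do the work, then release */
		pthread_mutex_lock(&np->parent->lock);
		pthread_mutex_unlock(&np->lock);
		np = np->parent;
	}
}

In this sketch the number of leaves bounds how many requesters can usefully probe the funnel at once; making the per-CPU rcu_data structure the leaf level is what lets the commit claim concurrency equal to the number of CPUs.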
Diffstat (limited to 'kernel/rcu/tree_trace.c')
-rw-r--r-- | kernel/rcu/tree_trace.c | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index d9982a2ce305..ec62369f1b02 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -185,11 +185,12 @@ static int show_rcuexp(struct seq_file *m, void *v)
 {
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
 
-	seq_printf(m, "t=%lu tf=%lu wd1=%lu wd2=%lu n=%lu enq=%d sc=%lu\n",
+	seq_printf(m, "t=%lu tf=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
 		   rsp->expedited_sequence,
 		   atomic_long_read(&rsp->expedited_tryfail),
 		   atomic_long_read(&rsp->expedited_workdone1),
 		   atomic_long_read(&rsp->expedited_workdone2),
+		   atomic_long_read(&rsp->expedited_workdone3),
 		   atomic_long_read(&rsp->expedited_normal),
 		   atomic_read(&rsp->expedited_need_qs),
 		   rsp->expedited_sequence / 2);
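With this change, the line emitted by show_rcuexp() gains a wd3 field between wd2 and n, exposing the new expedited_workdone3 counter alongside the existing ones. A hypothetical output line (values invented for illustration; per the format string, t is expedited_sequence and sc is that value divided by two) might look like:

t=21 tf=0 wd1=2 wd2=5 wd3=1 n=0 enq=0 sc=10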