author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-01-31 19:40:22 -0800
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-02-20 16:12:28 -0800
commit    | 7f5d42d05155523a4c42c2c5170f2a368217aed5
tree      | 622ba4ed926c9ec62fb2b40a0063db2b0050c7c7 /kernel/rcu
parent    | 9a414201ae7ea089699a0cbd36533345ca17233b
rcu: Trace expedited GP delays due to transitioning CPUs
If a CPU is transitioning to or from the offline state, an expedited
grace period may undergo a timed wait. This timed wait can unduly
delay grace periods, so this commit adds a trace statement to make
such delays visible.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/tree_exp.h | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 70ad12abde36..fecb6b6ab452 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -32,7 +32,7 @@ static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
  * Return then value that expedited-grace-period counter will have
  * at the end of the current grace period.
  */
-static unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
+static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
 {
 	return rcu_seq_endval(&rsp->expedited_sequence);
 }
@@ -428,6 +428,7 @@ retry_ipi:
 		    (rnp->expmask & mask)) {
 			/* Online, so delay for a bit and try again. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+			trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
 			schedule_timeout_uninterruptible(1);
 			goto retry_ipi;
 		}
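
The second hunk follows a simple shape: the target CPU is online but caught in a CPU-hotplug transition, so the code releases the node lock, records a "selectofl" trace event, sleeps for one jiffy, and retries the IPI. The first hunk marks rcu_exp_gp_seq_endval() as __maybe_unused, presumably so the build stays warning-free in configurations where the trace call compiles away and the helper is left without users. Below is a minimal userspace sketch (not kernel code) of that trace-then-back-off-and-retry pattern; every name in it (emit_trace, cpu_is_transitioning, ipi_cpu, exp_seq) is an illustrative stand-in, not a kernel API.

/*
 * Userspace sketch of the pattern in the second hunk: before backing
 * off for a timed wait, emit a trace record so the delay is visible.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long exp_seq;	/* stand-in for rsp->expedited_sequence */

/* Stub: pretend the target CPU is mid CPU-hotplug transition twice. */
static bool cpu_is_transitioning(int cpu)
{
	static int remaining = 2;

	return cpu == 1 && remaining-- > 0;
}

/* Stand-in for trace_rcu_exp_grace_period(). */
static void emit_trace(const char *event, unsigned long seq)
{
	printf("rcu_exp_grace_period: seq=%lu event=%s\n", seq, event);
}

/* Stand-in for the smp_call_function_single() IPI. */
static void ipi_cpu(int cpu)
{
	printf("IPI delivered to CPU %d\n", cpu);
}

int main(void)
{
	int cpu = 1;

retry_ipi:
	if (cpu_is_transitioning(cpu)) {
		/* Online but transitioning: trace the delay, back off, retry. */
		emit_trace("selectofl", exp_seq + 1);
		usleep(1000);	/* stand-in for schedule_timeout_uninterruptible(1) */
		goto retry_ipi;
	}
	ipi_cpu(cpu);
	return 0;
}

In a real kernel with the rcu_exp_grace_period event enabled, the new "selectofl" records appear alongside the existing expedited-grace-period trace events, making repeated retries against transitioning CPUs visible in the trace output.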