| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-10-15 12:49:59 -0700 |
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-10-15 12:49:59 -0700 |
| commit | 252997330908cb8ee3d5714539ed967b977c2eae (patch) | |
| tree | 627395514c622dc6eb51ae3a2a5bbebddc788299 /kernel/rcutree.c | |
| parent | 25e03a74e4a14e0d52a66fb56c728f049a6a26d3 (diff) | |
| parent | 5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420 (diff) | |
Merge branch 'idle.2013.09.25a' into HEAD
idle.2013.09.25a: Topic branch for idle entry-/exit-related changes.
Diffstat (limited to 'kernel/rcutree.c')
| -rw-r--r-- | kernel/rcutree.c | 23 |
1 file changed, 18 insertions, 5 deletions
```diff
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a06d172c75e0..240604aa3f70 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -650,21 +650,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
@@ -2321,7 +2334,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
```
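The diff replaces rcu_is_cpu_idle() (true when the CPU is idle) with rcu_is_watching() (true when RCU read-side critical sections are safe), so callers must invert the sense of their checks, as the __call_rcu_core() hunk shows. The sketch below is a minimal, hypothetical illustration of how a caller might use the renamed API; it is not part of this commit, and try_read_shared_state() is an invented example function.

```c
/*
 * Hypothetical caller (not from this commit): code that might run from
 * the idle loop gates its RCU read-side critical section on
 * rcu_is_watching() after the rename from rcu_is_cpu_idle().
 */
#include <linux/rcupdate.h>

static bool try_read_shared_state(void)
{
	/*
	 * rcu_is_watching() returns true when RCU is paying attention
	 * to this CPU, i.e. rcu_read_lock() is safe here.  It disables
	 * preemption internally, unlike __rcu_is_watching(), which
	 * requires the caller to have preemption already disabled.
	 */
	if (!rcu_is_watching())
		return false;	/* in an extended quiescent state; bail out */

	rcu_read_lock();
	/* ... dereference RCU-protected data here ... */
	rcu_read_unlock();
	return true;
}
```

The lighter-weight __rcu_is_watching() variant added above is intended for callers that already hold preemption disabled and want to avoid the extra preempt_disable()/preempt_enable() pair.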