author     Paul E. McKenney <paulmck@kernel.org>        2024-03-07 20:14:55 -0800
committer  Uladzislau Rezki (Sony) <urezki@gmail.com>   2024-04-15 16:48:26 +0200
commit     09e077cf22c4302ab4ca7932f56c5a8b20c9e32b (patch)
tree       a3f3e21b7f1c16499eca1e002cd829963f4866bd /kernel/rcu/tree.c
parent     a542d116bab28b4bcade2f41488f4617373c620f (diff)
rcu: Mark loads from rcu_state.n_online_cpus
The rcu_state.n_online_cpus value is only ever updated by CPU-hotplug
operations, which are serialized. However, this value is read locklessly.
This commit therefore marks those reads. While in the area, it also
adds ASSERT_EXCLUSIVE_WRITER() calls just in case parallel CPU hotplug
becomes a thing.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
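For context, the pattern this commit applies is a value that only serialized code (here, CPU-hotplug) ever writes, but that other code reads without holding any lock, so both sides of the access are marked. Below is a minimal userspace sketch of that pattern, assuming simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros; the names n_online_cpus, init_invoked(), and cpu_came_online() are illustrative, not the kernel's, and ASSERT_EXCLUSIVE_WRITER() (a KCSAN assertion that flags any unexpected concurrent writer) has no userspace equivalent here and is only mentioned in a comment.

```c
/*
 * Sketch of "serialized writer, lockless reader" with marked accesses.
 * READ_ONCE()/WRITE_ONCE() are simplified stand-ins for the kernel macros:
 * the volatile cast forces exactly one load/store, so the compiler cannot
 * tear, refetch, or elide the access.
 */
#include <stdio.h>

#define READ_ONCE(x)      (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile typeof(x) *)&(x) = (v))

static int n_online_cpus;	/* written only by serialized hotplug-like code */

/* Lockless reader: marked because a writer may run concurrently. */
static int init_invoked(void)
{
	return !!READ_ONCE(n_online_cpus);
}

/*
 * Serialized writer: WRITE_ONCE() pairs with the marked reads.  In the
 * kernel, ASSERT_EXCLUSIVE_WRITER(n_online_cpus) placed here would let
 * KCSAN report any second, concurrent writer if the serialization
 * assumption were ever violated.
 */
static void cpu_came_online(void)
{
	WRITE_ONCE(n_online_cpus, n_online_cpus + 1);
}

int main(void)
{
	cpu_came_online();
	printf("init invoked: %d\n", init_invoked());
	return 0;
}
```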
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 559f2d0d271f..7149b2d5cdd6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4328,7 +4328,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 // whether spinlocks may be acquired safely.
 static bool rcu_init_invoked(void)
 {
-	return !!rcu_state.n_online_cpus;
+	return !!READ_ONCE(rcu_state.n_online_cpus);
 }
 
 /*
@@ -4538,6 +4538,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	rcu_spawn_rnp_kthreads(rnp);
 	rcu_spawn_cpu_nocb_kthread(cpu);
+	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
 
 	return 0;
@@ -4806,6 +4807,7 @@ void rcutree_migrate_callbacks(int cpu)
  */
 int rcutree_dead_cpu(unsigned int cpu)
 {
+	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
 	// Stop-machine done, so allow nohz_full to disable tick.
 	tick_dep_clear(TICK_DEP_BIT_RCU);