author		Paul E. McKenney <paulmck@kernel.org>	2020-03-22 10:10:07 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 11:03:52 -0700
commit		81b4a7bc3b54b0b839dbf3d2b8c9a353ae910688 (patch)
tree		1dbb044c34bb0615d647d80b556542890dee0aeb /kernel
parent		b38f57c1fe64276773b124dffb0a139cc32ab3cb (diff)
rcu-tasks: Disable CPU hotplug across RCU tasks trace scans
This commit disables CPU hotplug across RCU tasks trace scans, which
is a first step towards correctly recognizing idle tasks "running" on
offline CPUs.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
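
The change relies on the standard CPU-hotplug exclusion pattern: holding cpus_read_lock() prevents CPUs from coming online or going offline for the duration, and acquiring it also waits out any hotplug operations already in flight. Below is a minimal sketch of that pattern applied to a task scan; the function name and scan body are illustrative only, not the actual RCU tasks trace code:

	#include <linux/cpu.h>		/* cpus_read_lock(), cpus_read_unlock() */
	#include <linux/rcupdate.h>	/* rcu_read_lock(), rcu_read_unlock() */
	#include <linux/sched/signal.h>	/* for_each_process_thread() */

	/* Illustrative only: exclude CPU hotplug while scanning tasks. */
	static void scan_tasks_hotplug_excluded(void)
	{
		struct task_struct *g, *t;

		cpus_read_lock();	/* no CPU can come or go until released */
		rcu_read_lock();	/* protects the tasklist walk */
		for_each_process_thread(g, t) {
			/* ... examine t; its CPU cannot be unplugged underneath us ... */
		}
		rcu_read_unlock();
		cpus_read_unlock();	/* CPU hotplug may proceed again */
	}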
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tasks.h	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index dd311e93ed0f..361e17d57191 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -910,16 +910,16 @@ static void rcu_tasks_trace_pregp_step(void)
 {
 	int cpu;
 
-	// Wait for CPU-hotplug paths to complete.
-	cpus_read_lock();
-	cpus_read_unlock();
-
 	// Allow for fast-acting IPIs.
 	atomic_set(&trc_n_readers_need_end, 1);
 
 	// There shouldn't be any old IPIs, but...
 	for_each_possible_cpu(cpu)
 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
+
+	// Disable CPU hotplug across the tasklist scan.
+	// This also waits for all readers in CPU-hotplug code paths.
+	cpus_read_lock();
 }
 
 /* Do first-round processing for the specified task. */
@@ -935,6 +935,9 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 /* Do intermediate processing between task and holdout scans. */
 static void rcu_tasks_trace_postscan(void)
 {
+	// Re-enable CPU hotplug now that the tasklist scan has completed.
+	cpus_read_unlock();
+
 	// Wait for late-stage exiting tasks to finish exiting.
 	// These might have passed the call to exit_tasks_rcu_finish().
 	synchronize_rcu();
@@ -979,6 +982,9 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 {
 	struct task_struct *g, *t;
 
+	// Disable CPU hotplug across the holdout list scan.
+	cpus_read_lock();
+
 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
 		// If safe and needed, try to check the current task.
 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
@@ -991,6 +997,10 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 		else if (needreport)
 			show_stalled_task_trace(t, firstreport);
 	}
+
+	// Re-enable CPU hotplug now that the holdout list scan has completed.
+	cpus_read_unlock();
+
 	if (needreport) {
 		if (firstreport)
 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
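
One subtlety worth noting: the patch makes the locking asymmetric. cpus_read_lock() is acquired at the end of rcu_tasks_trace_pregp_step() and released at the top of rcu_tasks_trace_postscan(), so CPU hotplug stays excluded across the tasklist scan that runs between those two hooks, whereas check_all_holdout_tasks_trace() brackets its own scan within a single call. A rough sketch of the resulting ordering; the driver function below is hypothetical (the real sequencing lives in the RCU tasks kthread, with retries and extra state), and the trailing bool needreport / bool *firstreport parameters are inferred from their usage in the diff:

	/* Hypothetical, simplified driver: one pass of the trace grace period. */
	static void rcu_tasks_trace_one_pass(struct list_head *hop)
	{
		bool firstreport = true;

		rcu_tasks_trace_pregp_step();	/* returns with cpus_read_lock() held */

		/* ... the tasklist scan runs here, with CPU hotplug excluded ... */

		rcu_tasks_trace_postscan();	/* calls cpus_read_unlock() first thing */

		/* The holdout scan takes and drops the hotplug lock on its own. */
		check_all_holdout_tasks_trace(hop, false, &firstreport);
	}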