author | Peter Zijlstra <peterz@infradead.org> | 2020-05-26 18:10:59 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2020-05-28 10:54:15 +0200
commit | 52103be07d8b08311955f8c30e535c2dda290cf4 (patch) |
tree | 63cef1a4ad4f066054182d9a1ce44cbeee69218f /kernel/smp.c |
parent | 19a1f5ec699954d21be10f74ff71c2a7079e99ad (diff) |
smp: Optimize flush_smp_call_function_queue()
The call_single_queue can contain (two) different callbacks,
synchronous and asynchronous. The current interrupt handler runs them
in-order, which means that remote CPUs that are waiting for their
synchronous call can be delayed by running asynchronous callbacks.
Rework the interrupt handler to first run the synchronous callbacks.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.836818381@infradead.org
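As context for the diff below: a caller that asks for a synchronous cross-CPU call spins until the remote CPU's IPI handler has run its callback, and that spin is exactly the wait this patch shortens. A minimal sketch of such a caller — smp_call_function_single() and its wait parameter are the real API, while remote_read()/read_remote_id() are hypothetical helpers for illustration only:

```c
#include <linux/smp.h>

/* Hypothetical callback: runs on the remote CPU from its IPI handler. */
static void remote_read(void *info)
{
	*(unsigned int *)info = smp_processor_id();
}

static unsigned int read_remote_id(int cpu)
{
	unsigned int id = 0;

	/*
	 * wait=1 marks the request synchronous (CSD_FLAG_SYNCHRONOUS):
	 * this caller spins until the remote CPU has run remote_read().
	 * Every asynchronous callback the remote IPI handler executes
	 * first adds directly to this caller's wait time.
	 */
	smp_call_function_single(cpu, remote_read, &id, 1);
	return id;
}
```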
Diffstat (limited to 'kernel/smp.c')
-rw-r--r-- | kernel/smp.c | 27
1 file changed, 23 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/smp.c b/kernel/smp.c
index 786092aabdcd..db2f73808db5 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -209,9 +209,9 @@ void generic_smp_call_function_single_interrupt(void)
  */
 static void flush_smp_call_function_queue(bool warn_cpu_offline)
 {
-	struct llist_head *head;
-	struct llist_node *entry;
 	call_single_data_t *csd, *csd_next;
+	struct llist_node *entry, *prev;
+	struct llist_head *head;
 	static bool warned;
 
 	lockdep_assert_irqs_disabled();
@@ -235,21 +235,40 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			  csd->func);
 	}
 
+	/*
+	 * First; run all SYNC callbacks, people are waiting for us.
+	 */
+	prev = NULL;
 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		smp_call_func_t func = csd->func;
 		void *info = csd->info;
 
 		/* Do we wait until *after* callback? */
 		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+			if (prev) {
+				prev->next = &csd_next->llist;
+			} else {
+				entry = &csd_next->llist;
+			}
 			func(info);
 			csd_unlock(csd);
 		} else {
-			csd_unlock(csd);
-			func(info);
+			prev = &csd->llist;
 		}
 	}
 
 	/*
+	 * Second; run all !SYNC callbacks.
+	 */
+	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+		smp_call_func_t func = csd->func;
+		void *info = csd->info;
+
+		csd_unlock(csd);
+		func(info);
+	}
+
+	/*
 	 * Handle irq works queued remotely by irq_work_queue_on().
 	 * Smp functions above are typically synchronous so they
 	 * better run first since some other CPUs may be busy waiting
```
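The interesting part is the first loop: it splices SYNC entries out of the singly linked list while walking it, using `prev` to patch either the previous node's `next` pointer or the list head (`entry`), so the second loop sees only the !SYNC entries. A minimal user-space sketch of that unlink-during-traversal pattern, with a hypothetical `struct node` standing in for `call_single_data_t`:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	bool sync;
	int id;
};

int main(void)
{
	/* Build ids 0..4 in order; even ids play the role of SYNC callbacks. */
	struct node *head = NULL;
	for (int i = 4; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->sync = (i % 2 == 0);
		n->id = i;
		n->next = head;
		head = n;
	}

	/* Pass 1: handle SYNC nodes immediately and splice them out. */
	struct node *prev = NULL, *next;
	for (struct node *n = head; n; n = next) {
		next = n->next;
		if (n->sync) {
			if (prev)
				prev->next = next;	/* unlink mid-list */
			else
				head = next;		/* unlink at head */
			printf("sync  %d\n", n->id);
			free(n);
		} else {
			prev = n;			/* keep for pass 2 */
		}
	}

	/* Pass 2: only the !SYNC nodes are still linked. */
	for (struct node *n = head; n; n = next) {
		next = n->next;
		printf("async %d\n", n->id);
		free(n);
	}
	return 0;
}
```

Run it and the sync ids (0, 2, 4) print before the async ones (1, 3), mirroring the ordering the patch establishes for the call_single_queue.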