| author    | Peter Zijlstra <peterz@infradead.org>                  | 2020-05-26 18:11:01 +0200 |
|-----------|--------------------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@kernel.org>                         | 2020-05-28 10:54:15 +0200 |
| commit    | b2a02fc43a1f40ef4eb2fb2b06357382608d4d84               |                           |
| tree      | beedd3ab17cd9da7d49948aff6e1d82fe535dc81 /kernel/smp.c |                           |
| parent    | afaa653c564da38c0b34c4baba31e88c46a8764c               |                           |
smp: Optimize send_call_function_single_ipi()
Just like the ttwu_queue_remote() IPI, make use of _TIF_POLLING_NRFLAG
to avoid sending IPIs to idle CPUs.
[ mingo: Fix UP build bug. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.953304789@infradead.org
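For context, the trick the changelog refers to: a CPU whose idle loop polls for work sets _TIF_POLLING_NRFLAG, so a remote CPU can hand it work by atomically setting TIF_NEED_RESCHED instead of raising an interrupt; the idle loop notices the flag and breaks out on its own. Below is a condensed sketch (not verbatim kernel source) of the pre-existing helper that ttwu_queue_remote() already relies on, set_nr_if_polling() in kernel/sched/core.c:

```c
/*
 * Sketch of set_nr_if_polling() (kernel/sched/core.c), simplified:
 * atomically set TIF_NEED_RESCHED on a remote task, but only if that
 * task is currently polling on _TIF_POLLING_NRFLAG. Returns true when
 * no IPI is needed -- the polling idle loop will observe the flag.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;	/* not polling: a real IPI is required */
		if (val & _TIF_NEED_RESCHED)
			return true;	/* already set: nothing more to do */
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;		/* cmpxchg won: flag is now set */
		val = old;		/* flags changed under us: retry */
	}
	return true;
}
```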
Diffstat (limited to 'kernel/smp.c')
-rw-r--r-- | kernel/smp.c | 16 |
1 file changed, 15 insertions, 1 deletion
```diff
diff --git a/kernel/smp.c b/kernel/smp.c
index f720e38e880d..9f1181375141 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,6 +135,8 @@ static __always_inline void csd_unlock(call_single_data_t *csd)
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
+extern void send_call_function_single_ipi(int cpu);
+
 /*
  * Insert a previously allocated call_single_data_t element
  * for execution on the given CPU. data must already have
@@ -178,7 +180,7 @@ static int generic_exec_single(int cpu, call_single_data_t *csd,
 	 * equipped to do the right thing...
 	 */
 	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
-		arch_send_call_function_single_ipi(cpu);
+		send_call_function_single_ipi(cpu);
 
 	return 0;
 }
@@ -278,6 +280,18 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	}
 }
 
+void flush_smp_call_function_from_idle(void)
+{
+	unsigned long flags;
+
+	if (llist_empty(this_cpu_ptr(&call_single_queue)))
+		return;
+
+	local_irq_save(flags);
+	flush_smp_call_function_queue(true);
+	local_irq_restore(flags);
+}
+
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
```
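The new send_call_function_single_ipi() lives in kernel/sched/core.c, so its body is not visible in this diff, which is filtered to kernel/smp.c. In the same commit it reads roughly as follows (condensed sketch, not the verbatim hunk): it elides the IPI when the target CPU's idle task is polling, since set_nr_if_polling() setting TIF_NEED_RESCHED is enough to make that CPU's idle loop break out and run the queued csd callbacks.

```c
/*
 * Sketch of send_call_function_single_ipi() (kernel/sched/core.c, same
 * commit; condensed): only fall back to a hardware IPI when the target
 * CPU's idle task is not polling on _TIF_POLLING_NRFLAG.
 */
void send_call_function_single_ipi(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (!set_nr_if_polling(rq->idle))
		arch_send_call_function_single_ipi(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}
```

Correspondingly, the idle exit path (do_idle() in kernel/sched/idle.c, also touched by this commit) calls the new flush_smp_call_function_from_idle() shown above, so callbacks queued without an IPI are still executed, with interrupts disabled, before the woken CPU schedules.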