-rw-r--r--  include/linux/irq_work.h |  2
-rw-r--r--  kernel/irq_work.c        | 31
2 files changed, 11 insertions, 22 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6a9e8f5399e2..ce60c084635b 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,7 +16,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 	work->func = func;
 }
 
-bool irq_work_queue(struct irq_work *work);
+void irq_work_queue(struct irq_work *work);
 
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 64eddd59ed83..c9d7478e4889 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -63,12 +63,20 @@ void __weak arch_irq_work_raise(void)
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
 	bool empty;
 
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return;
+
+	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
 	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
@@ -78,25 +86,6 @@ static void __irq_work_queue(struct irq_work *work)
 
 	preempt_enable();
 }
-
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
-{
-	if (!irq_work_claim(work)) {
-		/*
-		 * Already enqueued, can't do!
-		 */
-		return false;
-	}
-
-	__irq_work_queue(work);
-	return true;
-}
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
 /*
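For illustration only, a minimal caller sketch of the API after this patch (my_work, my_work_func, setup_example and kick_example are hypothetical names, not part of the change): with the void return, callers no longer branch on whether the work was already pending, since re-queueing a pending item is simply a no-op inside irq_work_claim().

/* Hypothetical caller sketch; assumes an irq_work-capable context. */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_work_func(struct irq_work *work)
{
	/* Runs later from the irq_work path raised by arch_irq_work_raise(). */
	pr_info("irq_work callback ran\n");
}

static struct irq_work my_work;

static void setup_example(void)
{
	/* Initialize once before the work item is ever queued. */
	init_irq_work(&my_work, my_work_func);
}

static void kick_example(void)
{
	/*
	 * Previously: if (!irq_work_queue(&my_work)) { already pending }
	 * Now the call returns void; a duplicate queue attempt while the
	 * work is still pending is silently ignored.
	 */
	irq_work_queue(&my_work);
}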