Diffstat (limited to 'kernel/workqueue.c')
kernel/workqueue.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d951daa0ca9a..1dae900df798 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
timer->data != (unsigned long)dwork);
- BUG_ON(timer_pending(timer));
- BUG_ON(!list_empty(&work->entry));
+ WARN_ON_ONCE(timer_pending(timer));
+ WARN_ON_ONCE(!list_empty(&work->entry));
+
+ /*
+ * If @delay is 0, queue @dwork->work immediately. This is for
+ * both optimization and correctness. The earliest @timer can
+ * expire is on the closest next tick, and delayed_work users
+ * depend on there being no such delay when @delay is 0.
+ */
+ if (!delay) {
+ __queue_work(cpu, wq, &dwork->work);
+ return;
+ }
timer_stats_timer_set_start_info(&dwork->timer);
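
For callers, the visible effect of this first hunk is that a zero delay no longer takes a detour through dwork->timer: the work item is queued immediately instead of waiting for the timer to expire on the next tick. A minimal sketch of what callers can now rely on (my_work_fn, my_dwork and kick_example are hypothetical names, not part of this patch):

#include <linux/workqueue.h>

/* Hypothetical work item and handler, for illustration only. */
static void my_work_fn(struct work_struct *work)
{
	/* runs in process context on a workqueue worker thread */
}
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void kick_example(void)
{
	/*
	 * delay == 0: after this hunk the timer is bypassed and the
	 * work is queued immediately rather than on the next tick;
	 * a non-zero delay still goes through dwork->timer.
	 */
	queue_delayed_work(system_wq, &my_dwork, 0);
}
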
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
- if (!delay)
- return queue_work_on(cpu, wq, &dwork->work);
-
/* read the comment in __queue_work() */
local_irq_save(flags);
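
With the zero-delay shortcut moved into __queue_delayed_work(), the special case removed here is no longer needed and every delay value takes the same PENDING-bit path. Reconstructed from the surrounding context lines, the post-hunk function plausibly reads as follows (a sketch, not authoritative post-patch source):

bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}

One consequence: queue_delayed_work_on(cpu, wq, dwork, 0) and queue_work_on(cpu, wq, &dwork->work) now behave the same way for an idle work item.
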
@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
repeat:
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
return 0;
+ }
/*
* See whether any cpu is asking for help. Unbounded
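
The rescuer change is the standard kthread stop pattern: a thread that has marked itself TASK_INTERRUPTIBLE must restore TASK_RUNNING before returning, otherwise it exits in a non-running state and can trip "blocking ops while !TASK_RUNNING" style warnings in the exit path. A generic sketch of the pattern (example_thread is a hypothetical thread function, not workqueue code):

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/*
			 * Put the task back to TASK_RUNNING before
			 * returning; exiting while still marked
			 * TASK_INTERRUPTIBLE is what the hunk above fixes.
			 */
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		/* nothing to do: sleep until woken or asked to stop */
		schedule();
	}
}
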
@@ -2982,7 +2992,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
local_irq_restore(flags);
- return true;
+ return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
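
The last hunk makes cancel_delayed_work() report what try_to_grab_pending() actually found: %true only when a pending item was cancelled, %false when the item was already idle. Reconstructed from the context lines, the whole function plausibly reads as follows (a sketch under that assumption):

bool cancel_delayed_work(struct delayed_work *dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
	local_irq_restore(flags);
	/* ret is 1 if a pending item was grabbed, 0 if it was idle */
	return ret;
}
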