author	Tejun Heo <tj@kernel.org>	2024-02-20 19:36:15 -1000
committer	Tejun Heo <tj@kernel.org>	2024-02-20 19:36:15 -1000
commit	bccdc1faafaf32e00d6e4dddca1ded64e3272189 (patch)
tree	7028201407afaae74aa0197087fb19eae678d965 /kernel/workqueue.c
parent	afe928c1dc611bec155d834020e0631e026aeb8a (diff)
workqueue: Make @flags handling consistent across set_work_data() and friends
- set_work_data() takes a separate @flags argument but simply ORs it into
@data. This is more confusing than helpful. Just take @data.
- Use the name @flags consistently and add the parameter to
set_work_pool_and_{keep|clear}_pending(). This will be used by the planned
disable/enable support.
No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
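
In short, the patch folds the flag bits into @data itself and threads a
@flags parameter through the two set_work_pool_and_*_pending() helpers. A
declarations-only sketch of the before/after interfaces, reconstructed from
the diff below:

/* Before this patch: set_work_data() ORed a separate @flags into @data. */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags);
static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id);
static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id);

/* After: callers OR the flag bits into @data themselves, and the two pool
 * helpers gain a @flags parameter (all current callers pass 0). */
static inline void set_work_data(struct work_struct *work, unsigned long data);
static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id, unsigned long flags);
static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id, unsigned long flags);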
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4f9c85f7c57a..65a27be81452 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -777,29 +777,28 @@ static int work_next_color(int color)
  * but stay off timer and worklist for arbitrarily long and nobody should
  * try to steal the PENDING bit.
  */
-static inline void set_work_data(struct work_struct *work, unsigned long data,
-				 unsigned long flags)
+static inline void set_work_data(struct work_struct *work, unsigned long data)
 {
 	WARN_ON_ONCE(!work_pending(work));
-	atomic_long_set(&work->data, data | flags | work_static(work));
+	atomic_long_set(&work->data, data | work_static(work));
 }
 
 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
-			 unsigned long extra_flags)
+			 unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pwq,
-		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+	set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
+		      WORK_STRUCT_PWQ | flags);
 }
 
 static void set_work_pool_and_keep_pending(struct work_struct *work,
-					   int pool_id)
+					   int pool_id, unsigned long flags)
 {
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
-		      WORK_STRUCT_PENDING);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      WORK_STRUCT_PENDING | flags);
 }
 
 static void set_work_pool_and_clear_pending(struct work_struct *work,
-					    int pool_id)
+					    int pool_id, unsigned long flags)
 {
 	/*
 	 * The following wmb is paired with the implied mb in
@@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 * owner.
 	 */
 	smp_wmb();
-	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+		      flags);
 	/*
 	 * The following mb guarantees that previous clear of a PENDING bit
 	 * will not be reordered with any speculative LOADS or STORES from
@@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work)
 	unsigned long pool_id = get_work_pool_id(work);
 
 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
-	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
+	set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 		 * this destroys work->data needed by the next step, stash it.
 		 */
 		work_data = *work_data_bits(work);
-		set_work_pool_and_keep_pending(work, pool->id);
+		set_work_pool_and_keep_pending(work, pool->id, 0);
 
 		/* must be the last step, see the function comment */
 		pwq_dec_nr_in_flight(pwq, work_data);
@@ -3205,7 +3205,7 @@ __acquires(&pool->lock)
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_pool_and_clear_pending(work, pool->id);
+	set_work_pool_and_clear_pending(work, pool->id, 0);
 
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
@@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	if (unlikely(ret < 0))
 		return false;
 
-	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+	set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
 	local_irq_restore(irq_flags);
 	return ret;
 }
@@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * with prepare_to_wait() above so that either waitqueue_active() is
 	 * visible here or !work_is_canceling() is visible there.
 	 */
-	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE);
+	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
 
 	if (waitqueue_active(&wq_cancel_waitq))
 		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
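
For readers unfamiliar with the off-queue encoding: work->data packs the
pool id above WORK_OFFQ_POOL_SHIFT with flag bits in the low bits, which is
why a flag word can simply be ORed into @data. A minimal user-space sketch
of that packing; the bit positions here are made-up stand-ins for
illustration, the real WORK_STRUCT_*/WORK_OFFQ_* values are defined in
include/linux/workqueue.h and differ by config:

#include <stdio.h>

/* Illustrative stand-in values, NOT the kernel's. */
#define WORK_STRUCT_PENDING	(1UL << 0)
#define WORK_OFFQ_CANCELING	(1UL << 4)
#define WORK_OFFQ_POOL_SHIFT	5

/* Same shape as set_work_pool_and_keep_pending(work, pool_id, flags)
 * after this patch: pool id above the shift, flag bits ORed in below. */
static unsigned long pack_offq_data(int pool_id, unsigned long flags)
{
	return ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
	       WORK_STRUCT_PENDING | flags;
}

int main(void)
{
	unsigned long data = pack_offq_data(3, WORK_OFFQ_CANCELING);

	printf("packed:    %#lx\n", data);
	printf("pool id:   %lu\n", data >> WORK_OFFQ_POOL_SHIFT);
	printf("pending:   %d\n", !!(data & WORK_STRUCT_PENDING));
	printf("canceling: %d\n", !!(data & WORK_OFFQ_CANCELING));
	return 0;
}

After this patch every in-tree caller passes 0 for @flags; the parameter
exists so the planned disable/enable support can OR additional off-queue
bits into the same word.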