author     Tejun Heo <tj@kernel.org>    2018-03-14 12:45:13 -0700
committer  Tejun Heo <tj@kernel.org>    2018-03-19 10:12:03 -0700
commit     05f0fe6b74dbd7690a4cbd61810948b7d575576a
tree       f8aa6fef133b092f7dac48a8fd30db9319aadefd /kernel/workqueue.c
parent     c698ca5278934c0ae32297a8725ced2e27585d7f
RCU, workqueue: Implement rcu_work
There are cases where an RCU callback needs to be bounced to a
sleepable context.  This is currently done by having the RCU callback
queue a work item, which can be cumbersome to write and confusing to
read.
This patch introduces rcu_work, a workqueue work variant which gets
executed after an RCU grace period, and converts the open-coded
bouncing in fs/aio and kernel/cgroup.
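
For illustration, a minimal sketch of the two patterns described above,
using a hypothetical struct foo that must be reclaimed in sleepable
(process) context once readers are done.  The names here are invented for
the example and are not taken from the converted fs/aio or kernel/cgroup
code; struct rcu_work, INIT_RCU_WORK() and to_rcu_work() are declared in
include/linux/workqueue.h by this series (outside this file's diffstat):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Both styles are shown side by side; a real struct would keep only one. */
struct foo {
        struct rcu_head rcu;            /* open-coded style: RCU callback ... */
        struct work_struct work;        /* ... that hand-queues a work item */
        struct rcu_work rwork;          /* rcu_work style: one field for both */
};

/* Open-coded bouncing: the RCU callback queues the work item by hand. */
static void foo_free_workfn(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, work);

        kfree(f);                       /* process context; sleeping APIs allowed */
}

static void foo_free_rcu(struct rcu_head *rcu)
{
        struct foo *f = container_of(rcu, struct foo, rcu);

        INIT_WORK(&f->work, foo_free_workfn);
        schedule_work(&f->work);
}

static void foo_release_open_coded(struct foo *f)
{
        call_rcu(&f->rcu, foo_free_rcu);
}

/* With rcu_work, the grace-period bounce is handled by the workqueue code. */
static void foo_free_rwork_fn(struct work_struct *work)
{
        struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

        kfree(f);
}

static void foo_release_rcu_work(struct foo *f)
{
        INIT_RCU_WORK(&f->rwork, foo_free_rwork_fn);
        queue_rcu_work(system_wq, &f->rwork);
}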
v3: Dropped queue_rcu_work_on().  Documented the RCU grace period
    behavior after queue_rcu_work().
v2: Use rcu_barrier() instead of synchronize_rcu() to wait for the
    completion of previously queued RCU callbacks, as per Paul.
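
To make the reasoning behind the v2 change concrete (paraphrased, not
quoted from the review thread): the work item is queued from within an RCU
callback, so a flush path built on synchronize_rcu() could run flush_work()
before that callback has queued anything and miss it, whereas rcu_barrier()
waits for already-queued callbacks to finish executing.  A hedged sketch of
the flush-side usage, with an invented struct foo_cache as the caller:

#include <linux/workqueue.h>

struct foo_cache {
        struct rcu_work reap_rwork;     /* reaps stale entries after an RCU GP */
        /* ... other long-lived state ... */
};

static void foo_cache_shutdown(struct foo_cache *cache)
{
        /*
         * flush_rcu_work() first calls rcu_barrier(), which waits for all
         * previously queued RCU callbacks (including the internal one that
         * queues reap_rwork.work) to have executed, and only then calls
         * flush_work().  synchronize_rcu() alone would not guarantee that
         * the callback has run, so a bare flush_work() could return before
         * the work was ever queued.
         */
        flush_rcu_work(&cache->reap_rwork);
}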
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   54
1 file changed, 54 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb9a519cbf50..7df85fa9f651 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1604,6 +1604,40 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 
+static void rcu_work_rcufn(struct rcu_head *rcu)
+{
+        struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+
+        /* read the comment in __queue_work() */
+        local_irq_disable();
+        __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+        local_irq_enable();
+}
+
+/**
+ * queue_rcu_work - queue work after a RCU grace period
+ * @wq: workqueue to use
+ * @rwork: work to queue
+ *
+ * Return: %false if @rwork was already pending, %true otherwise.  Note
+ * that a full RCU grace period is guaranteed only after a %true return.
+ * While @rwork is guaranteed to be executed after a %false return, the
+ * execution may happen before a full RCU grace period has passed.
+ */
+bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
+{
+        struct work_struct *work = &rwork->work;
+
+        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+                rwork->wq = wq;
+                call_rcu(&rwork->rcu, rcu_work_rcufn);
+                return true;
+        }
+
+        return false;
+}
+EXPORT_SYMBOL(queue_rcu_work);
+
 /**
  * worker_enter_idle - enter idle state
  * @worker: worker which is entering idle state
@@ -3001,6 +3035,26 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * flush_rcu_work - wait for a rwork to finish executing the last queueing
+ * @rwork: the rcu work to flush
+ *
+ * Return:
+ * %true if flush_rcu_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_rcu_work(struct rcu_work *rwork)
+{
+        if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
+                rcu_barrier();
+                flush_work(&rwork->work);
+                return true;
+        } else {
+                return flush_work(&rwork->work);
+        }
+}
+EXPORT_SYMBOL(flush_rcu_work);
+
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
         unsigned long flags;
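
Finally, a hedged sketch of the return-value semantics spelled out in the
queue_rcu_work() kernel-doc above; the caller and names are hypothetical
and not part of the patch:

#include <linux/printk.h>
#include <linux/workqueue.h>

/*
 * Only a %true return guarantees that a full RCU grace period elapses
 * before the work function runs.  On %false the work was already pending
 * and will still execute, but possibly before a grace period covering this
 * particular call, so the caller cannot rely on it for newly published
 * RCU-protected state.
 */
static bool example_retire(struct workqueue_struct *wq, struct rcu_work *rwork)
{
        bool queued = queue_rcu_work(wq, rwork);

        if (!queued)
                pr_debug("rcu_work already pending; no new grace period implied\n");

        return queued;
}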