From 309bd96af9e26da3038661bf5cdad780eef49dd9 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 25 Jan 2017 11:15:20 +0100
Subject: md: cleanup bio op / flags handling in raid1_write_request

No need for the local variables, the bio is still live and we can just
assign the bits we want directly.  Makes me wonder why we can't assign
all the bio flags to start with.

Signed-off-by: Christoph Hellwig
Reviewed-by: Bart Van Assche
Reviewed-by: Martin K. Petersen
Signed-off-by: Jens Axboe
---
 drivers/md/raid1.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
(limited to 'drivers/md')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b0f647bcccb..67b0365d49b5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	int i, disks;
 	struct bitmap *bitmap = mddev->bitmap;
 	unsigned long flags;
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_opf &
-			(REQ_PREFLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;
@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io = raid1_end_write_request;
-		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+		mbio->bi_opf = bio_op(bio) |
+			(bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
 		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
 		    conf->raid_disks - mddev->degraded > 1)
-- cgit v1.2.3-58-ga151

From 5ea708d15a928f7a479987704203616d3274c03b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 3 Jan 2017 14:52:44 +0300
Subject: block: simplify blk_init_allocated_queue

Return an errno value instead of the passed in queue so that the callers
don't have to keep track of two queues, and move the assignment of the
request_fn and lock to the caller as passing them as arguments doesn't
simplify anything.  While we're at it also remove two pointless NULL
assignments, given that the request structure is zeroed on allocation.

Signed-off-by: Christoph Hellwig
Reviewed-by: Bart Van Assche
Reviewed-by: Martin K.
Petersen Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- block/blk-core.c | 38 +++++++++++++++----------------------- drivers/md/dm-rq.c | 3 ++- include/linux/blkdev.h | 3 +-- 3 files changed, 18 insertions(+), 26 deletions(-) (limited to 'drivers/md') diff --git a/block/blk-core.c b/block/blk-core.c index 4cad535592c3..09819d24d385 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -823,15 +823,19 @@ EXPORT_SYMBOL(blk_init_queue); struct request_queue * blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { - struct request_queue *uninit_q, *q; + struct request_queue *q; - uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); - if (!uninit_q) + q = blk_alloc_queue_node(GFP_KERNEL, node_id); + if (!q) return NULL; - q = blk_init_allocated_queue(uninit_q, rfn, lock); - if (!q) - blk_cleanup_queue(uninit_q); + q->request_fn = rfn; + if (lock) + q->queue_lock = lock; + if (blk_init_allocated_queue(q) < 0) { + blk_cleanup_queue(q); + return NULL; + } return q; } @@ -839,30 +843,19 @@ EXPORT_SYMBOL(blk_init_queue_node); static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio); -struct request_queue * -blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, - spinlock_t *lock) -{ - if (!q) - return NULL; +int blk_init_allocated_queue(struct request_queue *q) +{ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0); if (!q->fq) - return NULL; + return -ENOMEM; if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) goto fail; INIT_WORK(&q->timeout_work, blk_timeout_work); - q->request_fn = rfn; - q->prep_rq_fn = NULL; - q->unprep_rq_fn = NULL; q->queue_flags |= QUEUE_FLAG_DEFAULT; - /* Override internal queue lock with supplied lock pointer */ - if (lock) - q->queue_lock = lock; - /* * This also sets hw/phys segments, boundary and size */ @@ -880,13 +873,12 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, } mutex_unlock(&q->sysfs_lock); - - return q; + return 0; fail: blk_free_flush_queue(q->fq); wbt_exit(q); - return NULL; + return -ENOMEM; } EXPORT_SYMBOL(blk_init_allocated_queue); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 9d7275fb541a..93f6e9f1ebe2 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -823,7 +823,8 @@ static void dm_old_request_fn(struct request_queue *q) int dm_old_init_request_queue(struct mapped_device *md) { /* Fully initialize the queue */ - if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) + md->queue->request_fn = dm_old_request_fn; + if (blk_init_allocated_queue(md->queue) < 0) return -EINVAL; /* disable dm_old_request_fn's merge heuristic by default */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 05675b1dfd20..6b1efc5760ea 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1137,8 +1137,7 @@ extern void blk_unprep_request(struct request *); extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); -extern struct request_queue *blk_init_allocated_queue(struct request_queue *, - request_fn_proc *, spinlock_t *); +extern int blk_init_allocated_queue(struct request_queue *); extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); -- cgit v1.2.3-58-ga151 From 4bf58435fae39ec2ea01a2ff9934d377add234b1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 
10 Jan 2017 10:03:39 +0100 Subject: dm: remove incomplete BLOCK_PC support DM tries to copy a few fields around for BLOCK_PC requests, but given that no dm-target ever wires up scsi_cmd_ioctl BLOCK_PC can't actually be sent to dm. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/dm-rq.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 93f6e9f1ebe2..3f12916f2424 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -270,19 +270,6 @@ static void dm_end_request(struct request *clone, int error) struct mapped_device *md = tio->md; struct request *rq = tio->orig; - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - rq->errors = clone->errors; - rq->resid_len = clone->resid_len; - - if (rq->sense) - /* - * We are using the sense buffer of the original - * request. - * So setting the length of the sense data is enough. - */ - rq->sense_len = clone->sense_len; - } - free_rq_clone(clone); rq_end_stats(md, rq); if (!rq->q->mq_ops) @@ -511,9 +498,6 @@ static int setup_clone(struct request *clone, struct request *rq, if (r) return r; - clone->cmd = rq->cmd; - clone->cmd_len = rq->cmd_len; - clone->sense = rq->sense; clone->end_io = end_clone_request; clone->end_io_data = tio; -- cgit v1.2.3-58-ga151 From eb8db831be80692bf4bda3dfc55001daf64ec299 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 22 Jan 2017 18:32:46 +0100 Subject: dm: always defer request allocation to the owner of the request_queue DM already calls blk_mq_alloc_request on the request_queue of the underlying device if it is a blk-mq device. But now that we allow drivers to allocate additional data and initialize it ahead of time we need to do the same for all drivers. Doing so and using the new cmd_size infrastructure in the block layer greatly simplifies the dm-rq and mpath code, and should also make arbitrary combinations of SQ and MQ devices with SQ or MQ device mapper tables easily possible as a further step. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Reviewed-by: Mike Snitzer Signed-off-by: Jens Axboe --- drivers/md/dm-core.h | 1 - drivers/md/dm-mpath.c | 132 ++++------------------ drivers/md/dm-rq.c | 251 ++++++++++-------------------------------- drivers/md/dm-rq.h | 2 +- drivers/md/dm-target.c | 7 -- drivers/md/dm.c | 30 ++--- drivers/md/dm.h | 3 +- include/linux/device-mapper.h | 3 - 8 files changed, 85 insertions(+), 344 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 40ceba1fe8be..136fda3ff9e5 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -92,7 +92,6 @@ struct mapped_device { * io objects are allocated from here. */ mempool_t *io_pool; - mempool_t *rq_pool; struct bio_set *bs; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6400cffb986d..784f2374c3a4 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -92,12 +92,6 @@ struct multipath { unsigned queue_mode; - /* - * We must use a mempool of dm_mpath_io structs so that we - * can resubmit bios on error. 
- */ - mempool_t *mpio_pool; - struct mutex work_mutex; struct work_struct trigger_event; @@ -115,8 +109,6 @@ struct dm_mpath_io { typedef int (*action_fn) (struct pgpath *pgpath); -static struct kmem_cache *_mpio_cache; - static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); @@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti) init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); - m->mpio_pool = NULL; m->queue_mode = DM_TYPE_NONE; m->ti = ti; @@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; else m->queue_mode = DM_TYPE_REQUEST_BASED; - } - - if (m->queue_mode == DM_TYPE_REQUEST_BASED) { - unsigned min_ios = dm_get_reserved_rq_based_ios(); - - m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); - if (!m->mpio_pool) - return -ENOMEM; - } - else if (m->queue_mode == DM_TYPE_BIO_BASED) { + } else if (m->queue_mode == DM_TYPE_BIO_BASED) { INIT_WORK(&m->process_queued_bios, process_queued_bios); /* * bio-based doesn't support any direct scsi_dh management; @@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m) kfree(m->hw_handler_name); kfree(m->hw_handler_params); - mempool_destroy(m->mpio_pool); kfree(m); } @@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info) return info->ptr; } -static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info) -{ - struct dm_mpath_io *mpio; - - if (!m->mpio_pool) { - /* Use blk-mq pdu memory requested via per_io_data_size */ - mpio = get_mpio(info); - memset(mpio, 0, sizeof(*mpio)); - return mpio; - } - - mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); - if (!mpio) - return NULL; - - memset(mpio, 0, sizeof(*mpio)); - info->ptr = mpio; - - return mpio; -} - -static void clear_request_fn_mpio(struct multipath *m, union map_info *info) -{ - /* Only needed for non blk-mq (.request_fn) multipath */ - if (m->mpio_pool) { - struct dm_mpath_io *mpio = info->ptr; - - info->ptr = NULL; - mempool_free(mpio, m->mpio_pool); - } -} - static size_t multipath_per_bio_data_size(void) { return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details); @@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m) /* * Map cloned requests (request-based multipath) */ -static int __multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context, - struct request *rq, struct request **__clone) +static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, + union map_info *map_context, + struct request **__clone) { struct multipath *m = ti->private; int r = DM_MAPIO_REQUEUE; - size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); + size_t nr_bytes = blk_rq_bytes(rq); struct pgpath *pgpath; struct block_device *bdev; - struct dm_mpath_io *mpio; + struct dm_mpath_io *mpio = get_mpio(map_context); + struct request *clone; /* Do we need to select a new pgpath? */ pgpath = lockless_dereference(m->current_pgpath); @@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, return r; } - mpio = set_mpio(m, map_context); - if (!mpio) - /* ENOMEM, requeue */ - return r; - + memset(mpio, 0, sizeof(*mpio)); mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; bdev = pgpath->path.dev->bdev; - if (clone) { - /* - * Old request-based interface: allocated clone is passed in. 
- * Used by: .request_fn stacked on .request_fn path(s). - */ - clone->q = bdev_get_queue(bdev); - clone->rq_disk = bdev->bd_disk; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - } else { - /* - * blk-mq request-based interface; used by both: - * .request_fn stacked on blk-mq path(s) and - * blk-mq stacked on blk-mq path(s). - */ - clone = blk_mq_alloc_request(bdev_get_queue(bdev), - rq_data_dir(rq), BLK_MQ_REQ_NOWAIT); - if (IS_ERR(clone)) { - /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ - clear_request_fn_mpio(m, map_context); - return r; - } - clone->bio = clone->biotail = NULL; - clone->rq_disk = bdev->bd_disk; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - *__clone = clone; + clone = blk_get_request(bdev_get_queue(bdev), + rq->cmd_flags | REQ_NOMERGE, + GFP_ATOMIC); + if (IS_ERR(clone)) { + /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ + return r; } + clone->bio = clone->biotail = NULL; + clone->rq_disk = bdev->bd_disk; + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; + *__clone = clone; if (pgpath->pg->ps.type->start_io) pgpath->pg->ps.type->start_io(&pgpath->pg->ps, @@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, return DM_MAPIO_REMAPPED; } -static int multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - return __multipath_map(ti, clone, map_context, NULL, NULL); -} - -static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, - union map_info *map_context, - struct request **clone) -{ - return __multipath_map(ti, NULL, map_context, rq, clone); -} - static void multipath_release_clone(struct request *clone) { - blk_mq_free_request(clone); + blk_put_request(clone); } /* @@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->num_write_same_bios = 1; if (m->queue_mode == DM_TYPE_BIO_BASED) ti->per_io_data_size = multipath_per_bio_data_size(); - else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) + else ti->per_io_data_size = sizeof(struct dm_mpath_io); return 0; @@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, if (ps->type->end_io) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); } - clear_request_fn_mpio(m, map_context); return r; } @@ -2060,7 +1977,6 @@ static struct target_type multipath_target = { .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, - .map_rq = multipath_map, .clone_and_map_rq = multipath_clone_and_map, .release_clone_rq = multipath_release_clone, .rq_end_io = multipath_end_io, @@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void) { int r; - /* allocate a slab for the dm_mpath_ios */ - _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); - if (!_mpio_cache) - return -ENOMEM; - r = dm_register_target(&multipath_target); if (r < 0) { DMERR("request-based register failed %d", r); @@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd: bad_alloc_kmultipathd: dm_unregister_target(&multipath_target); bad_register_target: - kmem_cache_destroy(_mpio_cache); - return r; } @@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void) destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); - kmem_cache_destroy(_mpio_cache); } module_init(dm_multipath_init); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3f12916f2424..8d0683474767 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q) dm_mq_stop_queue(q); } -static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, 
- gfp_t gfp_mask) -{ - return mempool_alloc(md->io_pool, gfp_mask); -} - -static void free_old_rq_tio(struct dm_rq_target_io *tio) -{ - mempool_free(tio, tio->md->io_pool); -} - -static struct request *alloc_old_clone_request(struct mapped_device *md, - gfp_t gfp_mask) -{ - return mempool_alloc(md->rq_pool, gfp_mask); -} - -static void free_old_clone_request(struct mapped_device *md, struct request *rq) -{ - mempool_free(rq, md->rq_pool); -} - /* * Partial completion handling for request-based dm */ @@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone) static struct dm_rq_target_io *tio_from_request(struct request *rq) { - return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); + return blk_mq_rq_to_pdu(rq); } static void rq_end_stats(struct mapped_device *md, struct request *orig) @@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone) -{ - struct dm_rq_target_io *tio = clone->end_io_data; - struct mapped_device *md = tio->md; - - blk_rq_unprep_clone(clone); - - /* - * It is possible for a clone_old_rq() allocated clone to - * get passed in -- it may not yet have a request_queue. - * This is known to occur if the error target replaces - * a multipath target that has a request_fn queue stacked - * on blk-mq queue(s). - */ - if (clone->q && clone->q->mq_ops) - /* stacked on blk-mq queue(s) */ - tio->ti->type->release_clone_rq(clone); - else if (!md->queue->mq_ops) - /* request_fn queue stacked on request_fn queue(s) */ - free_old_clone_request(md, clone); - - if (!md->queue->mq_ops) - free_old_rq_tio(tio); -} - /* * Complete the clone and the original request. * Must be called without clone's queue lock held, @@ -270,7 +223,9 @@ static void dm_end_request(struct request *clone, int error) struct mapped_device *md = tio->md; struct request *rq = tio->orig; - free_rq_clone(clone); + blk_rq_unprep_clone(clone); + tio->ti->type->release_clone_rq(clone); + rq_end_stats(md, rq); if (!rq->q->mq_ops) blk_end_request_all(rq, error); @@ -279,22 +234,6 @@ static void dm_end_request(struct request *clone, int error) rq_completed(md, rw, true); } -static void dm_unprep_request(struct request *rq) -{ - struct dm_rq_target_io *tio = tio_from_request(rq); - struct request *clone = tio->clone; - - if (!rq->q->mq_ops) { - rq->special = NULL; - rq->rq_flags &= ~RQF_DONTPREP; - } - - if (clone) - free_rq_clone(clone); - else if (!tio->md->queue->mq_ops) - free_old_rq_tio(tio); -} - /* * Requeue the original request of a clone. 
*/ @@ -333,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ int rw = rq_data_dir(rq); rq_end_stats(md, rq); - dm_unprep_request(rq); + if (tio->clone) { + blk_rq_unprep_clone(tio->clone); + tio->ti->type->release_clone_rq(tio->clone); + } if (!rq->q->mq_ops) dm_old_requeue_request(rq); @@ -388,14 +330,11 @@ static void dm_softirq_done(struct request *rq) if (!clone) { rq_end_stats(tio->md, rq); rw = rq_data_dir(rq); - if (!rq->q->mq_ops) { + if (!rq->q->mq_ops) blk_end_request_all(rq, tio->error); - rq_completed(tio->md, rw, false); - free_old_rq_tio(tio); - } else { + else blk_mq_end_request(rq, tio->error); - rq_completed(tio->md, rw, false); - } + rq_completed(tio->md, rw, false); return; } @@ -439,16 +378,6 @@ static void end_clone_request(struct request *clone, int error) { struct dm_rq_target_io *tio = clone->end_io_data; - if (!clone->q->mq_ops) { - /* - * For just cleaning up the information of the queue in which - * the clone was dispatched. - * The clone is *NOT* freed actually here because it is alloced - * from dm own mempool (RQF_ALLOCED isn't set). - */ - __blk_put_request(clone->q, clone); - } - /* * Actual request completion is done in a softirq context which doesn't * hold the clone's queue lock. Otherwise, deadlock could occur because: @@ -506,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq, return 0; } -static struct request *clone_old_rq(struct request *rq, struct mapped_device *md, - struct dm_rq_target_io *tio, gfp_t gfp_mask) -{ - /* - * Create clone for use with .request_fn request_queue - */ - struct request *clone; - - clone = alloc_old_clone_request(md, gfp_mask); - if (!clone) - return NULL; - - blk_rq_init(NULL, clone); - if (setup_clone(clone, rq, tio, gfp_mask)) { - /* -ENOMEM */ - free_old_clone_request(md, clone); - return NULL; - } - - return clone; -} - static void map_tio_request(struct kthread_work *work); static void init_tio(struct dm_rq_target_io *tio, struct request *rq, @@ -549,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, kthread_init_work(&tio->work, map_tio_request); } -static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, - struct mapped_device *md, - gfp_t gfp_mask) -{ - struct dm_rq_target_io *tio; - int srcu_idx; - struct dm_table *table; - - tio = alloc_old_rq_tio(md, gfp_mask); - if (!tio) - return NULL; - - init_tio(tio, rq, md); - - table = dm_get_live_table(md, &srcu_idx); - /* - * Must clone a request if this .request_fn DM device - * is stacked on .request_fn device(s). - */ - if (!dm_table_all_blk_mq_devices(table)) { - if (!clone_old_rq(rq, md, tio, gfp_mask)) { - dm_put_live_table(md, srcu_idx); - free_old_rq_tio(tio); - return NULL; - } - } - dm_put_live_table(md, srcu_idx); - - return tio; -} - -/* - * Called with the queue lock held. 
- */ -static int dm_old_prep_fn(struct request_queue *q, struct request *rq) -{ - struct mapped_device *md = q->queuedata; - struct dm_rq_target_io *tio; - - if (unlikely(rq->special)) { - DMWARN("Already has something in rq->special."); - return BLKPREP_KILL; - } - - tio = dm_old_prep_tio(rq, md, GFP_ATOMIC); - if (!tio) - return BLKPREP_DEFER; - - rq->special = tio; - rq->rq_flags |= RQF_DONTPREP; - - return BLKPREP_OK; -} - /* * Returns: * DM_MAPIO_* : the request has been processed as indicated @@ -617,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio) struct request *rq = tio->orig; struct request *clone = NULL; - if (tio->clone) { - clone = tio->clone; - r = ti->type->map_rq(ti, clone, &tio->info); - if (r == DM_MAPIO_DELAY_REQUEUE) - return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */ - } else { - r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); - if (r < 0) { - /* The target wants to complete the I/O */ - dm_kill_unmapped_request(rq, r); - return r; - } - if (r == DM_MAPIO_REMAPPED && - setup_clone(clone, rq, tio, GFP_ATOMIC)) { - /* -ENOMEM */ - ti->type->release_clone_rq(clone); - return DM_MAPIO_REQUEUE; - } - } - + r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); switch (r) { case DM_MAPIO_SUBMITTED: /* The target has taken the I/O to submit by itself later */ break; case DM_MAPIO_REMAPPED: + if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { + /* -ENOMEM */ + ti->type->release_clone_rq(clone); + return DM_MAPIO_REQUEUE; + } + /* The target has remapped the I/O so dispatch it */ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), blk_rq_pos(rq)); @@ -700,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig) dm_get(md); } +static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq) +{ + struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); + + /* + * Must initialize md member of tio, otherwise it won't + * be available in dm_mq_queue_rq. + */ + tio->md = md; + + if (md->init_tio_pdu) { + /* target-specific per-io data is immediately after the tio */ + tio->info.ptr = tio + 1; + } + + return 0; +} + +static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) +{ + return __dm_rq_init_rq(q->rq_alloc_data, rq); +} + static void map_tio_request(struct kthread_work *work) { struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); @@ -794,6 +657,7 @@ static void dm_old_request_fn(struct request_queue *q) dm_start_request(md, rq); tio = tio_from_request(rq); + init_tio(tio, rq, md); /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; kthread_queue_work(&md->kworker, &tio->work); @@ -804,10 +668,22 @@ static void dm_old_request_fn(struct request_queue *q) /* * Fully initialize a .request_fn request-based queue. 
*/ -int dm_old_init_request_queue(struct mapped_device *md) +int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t) { + struct dm_target *immutable_tgt; + /* Fully initialize the queue */ + md->queue->cmd_size = sizeof(struct dm_rq_target_io); + md->queue->rq_alloc_data = md; md->queue->request_fn = dm_old_request_fn; + md->queue->init_rq_fn = dm_rq_init_rq; + + immutable_tgt = dm_table_get_immutable_target(t); + if (immutable_tgt && immutable_tgt->per_io_data_size) { + /* any target-specific per-io data is immediately after the tio */ + md->queue->cmd_size += immutable_tgt->per_io_data_size; + md->init_tio_pdu = true; + } if (blk_init_allocated_queue(md->queue) < 0) return -EINVAL; @@ -816,7 +692,6 @@ int dm_old_init_request_queue(struct mapped_device *md) dm_init_normal_md_queue(md); blk_queue_softirq_done(md->queue, dm_softirq_done); - blk_queue_prep_rq(md->queue, dm_old_prep_fn); /* Initialize the request-based DM worker thread */ kthread_init_worker(&md->kworker); @@ -837,21 +712,7 @@ static int dm_mq_init_request(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node) { - struct mapped_device *md = data; - struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); - - /* - * Must initialize md member of tio, otherwise it won't - * be available in dm_mq_queue_rq. - */ - tio->md = md; - - if (md->init_tio_pdu) { - /* target-specific per-io data is immediately after the tio */ - tio->info.ptr = tio + 1; - } - - return 0; + return __dm_rq_init_rq(data, rq); } static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index 4da06cae7bad..f0020d21b95f 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info { bool dm_use_blk_mq_default(void); bool dm_use_blk_mq(struct mapped_device *md); -int dm_old_init_request_queue(struct mapped_device *md); +int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t); int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t); void dm_mq_cleanup_mapped_device(struct mapped_device *md); diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 710ae28fd618..43d3445b121d 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio) return -EIO; } -static int io_err_map_rq(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - return -EIO; -} - static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, union map_info *map_context, struct request **clone) @@ -161,7 +155,6 @@ static struct target_type error_target = { .ctr = io_err_ctr, .dtr = io_err_dtr, .map = io_err_map, - .map_rq = io_err_map_rq, .clone_and_map_rq = io_err_clone_and_map_rq, .release_clone_rq = io_err_release_clone_rq, .direct_access = io_err_direct_access, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3086da5664f3..ff4a29a97ad3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE; */ struct dm_md_mempools { mempool_t *io_pool; - mempool_t *rq_pool; struct bio_set *bs; }; @@ -1419,7 +1418,6 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->kworker_task) kthread_stop(md->kworker_task); mempool_destroy(md->io_pool); - mempool_destroy(md->rq_pool); if (md->bs) bioset_free(md->bs); @@ -1595,12 +1593,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) goto out; } 
- BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); + BUG_ON(!p || md->io_pool || md->bs); md->io_pool = p->io_pool; p->io_pool = NULL; - md->rq_pool = p->rq_pool; - p->rq_pool = NULL; md->bs = p->bs; p->bs = NULL; @@ -1777,7 +1773,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) switch (type) { case DM_TYPE_REQUEST_BASED: - r = dm_old_init_request_queue(md); + r = dm_old_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based mapped device"); return r; @@ -2493,7 +2489,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t unsigned integrity, unsigned per_io_data_size) { struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); - struct kmem_cache *cachep = NULL; unsigned int pool_size = 0; unsigned int front_pad; @@ -2503,20 +2498,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - cachep = _io_cache; pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); + + pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); + if (!pools->io_pool) + goto out; break; case DM_TYPE_REQUEST_BASED: - cachep = _rq_tio_cache; - pool_size = dm_get_reserved_rq_based_ios(); - pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); - if (!pools->rq_pool) - goto out; - /* fall through to setup remaining rq-based pools */ case DM_TYPE_MQ_REQUEST_BASED: - if (!pool_size) - pool_size = dm_get_reserved_rq_based_ios(); + pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_io_data_size is used for blk-mq pdu at queue allocation */ break; @@ -2524,12 +2515,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t BUG(); } - if (cachep) { - pools->io_pool = mempool_create_slab_pool(pool_size, cachep); - if (!pools->io_pool) - goto out; - } - pools->bs = bioset_create_nobvec(pool_size, front_pad); if (!pools->bs) goto out; @@ -2551,7 +2536,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools) return; mempool_destroy(pools->io_pool); - mempool_destroy(pools->rq_pool); if (pools->bs) bioset_free(pools->bs); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index f0aad08b9654..f298b01f7ab3 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); /* * To check whether the target type is request-based or not (bio-based). 
*/ -#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \ - ((t)->type->clone_and_map_rq != NULL)) +#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL) /* * To check whether the target type is a hybrid (capable of being diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ef7962e84444..a7e6903866fd 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); * = 2: The target wants to push back the io */ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); -typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, - union map_info *map_context); typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, struct request *rq, union map_info *map_context, @@ -163,7 +161,6 @@ struct target_type { dm_ctr_fn ctr; dm_dtr_fn dtr; dm_map_fn map; - dm_map_request_fn map_rq; dm_clone_and_map_request_fn clone_and_map_rq; dm_release_clone_request_fn release_clone_rq; dm_endio_fn end_io; -- cgit v1.2.3-58-ga151 From dc3b17cc8bf21307c7e076e7c778d5db756f7871 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 2 Feb 2017 15:56:50 +0100 Subject: block: Use pointer to backing_dev_info from request_queue We will want to have struct backing_dev_info allocated separately from struct request_queue. As the first step add pointer to backing_dev_info to request_queue and convert all users touching it. No functional changes in this patch. Reviewed-by: Christoph Hellwig Signed-off-by: Jan Kara Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 6 +++--- block/blk-core.c | 27 ++++++++++++++------------- block/blk-integrity.c | 4 ++-- block/blk-settings.c | 2 +- block/blk-sysfs.c | 8 ++++---- block/blk-wbt.c | 8 ++++---- block/genhd.c | 2 +- drivers/block/aoe/aoeblk.c | 4 ++-- drivers/block/drbd/drbd_main.c | 6 +++--- drivers/block/drbd/drbd_nl.c | 12 +++++++----- drivers/block/drbd/drbd_proc.c | 2 +- drivers/block/drbd/drbd_req.c | 2 +- drivers/block/pktcdvd.c | 4 ++-- drivers/block/rbd.c | 2 +- drivers/md/bcache/request.c | 10 +++++----- drivers/md/bcache/super.c | 8 ++++---- drivers/md/dm-cache-target.c | 2 +- drivers/md/dm-era-target.c | 2 +- drivers/md/dm-table.c | 2 +- drivers/md/dm-thin.c | 2 +- drivers/md/dm.c | 6 +++--- drivers/md/linear.c | 2 +- drivers/md/md.c | 6 +++--- drivers/md/multipath.c | 2 +- drivers/md/raid0.c | 6 +++--- drivers/md/raid1.c | 4 ++-- drivers/md/raid10.c | 10 +++++----- drivers/md/raid5.c | 12 ++++++------ fs/gfs2/ops_fstype.c | 2 +- fs/nilfs2/super.c | 2 +- fs/super.c | 2 +- include/linux/blkdev.h | 3 ++- mm/page-writeback.c | 4 ++-- 33 files changed, 90 insertions(+), 86 deletions(-) (limited to 'drivers/md') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index fb59a3edc778..37fe595cfd70 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, goto err_free_blkg; } - wb_congested = wb_congested_get_create(&q->backing_dev_info, + wb_congested = wb_congested_get_create(q->backing_dev_info, blkcg->css.id, GFP_NOWAIT | __GFP_NOWARN); if (!wb_congested) { @@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css, const char *blkg_dev_name(struct blkcg_gq *blkg) { /* some drivers (floppy) instantiate a queue w/o disk registered */ - if (blkg->q->backing_dev_info.dev) - return dev_name(blkg->q->backing_dev_info.dev); + if (blkg->q->backing_dev_info->dev) + return 
dev_name(blkg->q->backing_dev_info->dev); return NULL; } EXPORT_SYMBOL_GPL(blkg_dev_name); diff --git a/block/blk-core.c b/block/blk-core.c index 3266daaa343f..dcac0352c14c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -75,7 +75,7 @@ static void blk_clear_congested(struct request_list *rl, int sync) * flip its congestion state for events on other blkcgs. */ if (rl == &rl->q->root_rl) - clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync); + clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync); #endif } @@ -86,7 +86,7 @@ static void blk_set_congested(struct request_list *rl, int sync) #else /* see blk_clear_congested() */ if (rl == &rl->q->root_rl) - set_wb_congested(rl->q->backing_dev_info.wb.congested, sync); + set_wb_congested(rl->q->backing_dev_info->wb.congested, sync); #endif } @@ -117,7 +117,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - return &q->backing_dev_info; + return q->backing_dev_info; } EXPORT_SYMBOL(blk_get_backing_dev_info); @@ -575,7 +575,7 @@ void blk_cleanup_queue(struct request_queue *q) blk_flush_integrity(); /* @q won't process any more request, flush async actions */ - del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); + del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); blk_sync_queue(q); if (q->mq_ops) @@ -587,7 +587,7 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = &q->__queue_lock; spin_unlock_irq(lock); - bdi_unregister(&q->backing_dev_info); + bdi_unregister(q->backing_dev_info); /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); @@ -728,17 +728,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q->bio_split) goto fail_id; - q->backing_dev_info.ra_pages = + q->backing_dev_info = &q->_backing_dev_info; + q->backing_dev_info->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; - q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; - q->backing_dev_info.name = "block"; + q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; + q->backing_dev_info->name = "block"; q->node = node_id; - err = bdi_init(&q->backing_dev_info); + err = bdi_init(q->backing_dev_info); if (err) goto fail_split; - setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, + setup_timer(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); INIT_LIST_HEAD(&q->queue_head); @@ -788,7 +789,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) fail_ref: percpu_ref_exit(&q->q_usage_counter); fail_bdi: - bdi_destroy(&q->backing_dev_info); + bdi_destroy(q->backing_dev_info); fail_split: bioset_free(q->bio_split); fail_id: @@ -1182,7 +1183,7 @@ fail_elvpriv: * disturb iosched and blkcg but weird is bettern than dead. 
*/ printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", - __func__, dev_name(q->backing_dev_info.dev)); + __func__, dev_name(q->backing_dev_info->dev)); rq->rq_flags &= ~RQF_ELVPRIV; rq->elv.icq = NULL; @@ -2659,7 +2660,7 @@ void blk_finish_request(struct request *req, int error) BUG_ON(blk_queued_rq(req)); if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req)) - laptop_io_completion(&req->q->backing_dev_info); + laptop_io_completion(req->q->backing_dev_info); blk_delete_timer(req); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index d69c5c79f98e..9f0ff5ba4f84 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk) return; if (bi->profile) - disk->queue->backing_dev_info.capabilities |= + disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; else - disk->queue->backing_dev_info.capabilities &= + disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; } diff --git a/block/blk-settings.c b/block/blk-settings.c index 529e55f52a03..6eb19bcbf3cb 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -253,7 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS); limits->max_sectors = max_sectors; - q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9); + q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(blk_queue_max_hw_sectors); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 894f77342fd4..05841be1f30f 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_ra_show(struct request_queue *q, char *page) { - unsigned long ra_kb = q->backing_dev_info.ra_pages << + unsigned long ra_kb = q->backing_dev_info->ra_pages << (PAGE_SHIFT - 10); return queue_var_show(ra_kb, (page)); @@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) if (ret < 0) return ret; - q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10); + q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); return ret; } @@ -236,7 +236,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) spin_lock_irq(q->queue_lock); q->limits.max_sectors = max_sectors_kb << 1; - q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); + q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); spin_unlock_irq(q->queue_lock); return ret; @@ -799,7 +799,7 @@ static void blk_release_queue(struct kobject *kobj) container_of(kobj, struct request_queue, kobj); wbt_exit(q); - bdi_exit(&q->backing_dev_info); + bdi_exit(q->backing_dev_info); blkcg_exit_queue(q); if (q->elevator) { diff --git a/block/blk-wbt.c b/block/blk-wbt.c index f0a9c07b4c7a..1aedb1f7ee0c 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -96,7 +96,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var) */ static bool wb_recent_wait(struct rq_wb *rwb) { - struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb; + struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb; return time_before(jiffies, wb->dirty_sleep + HZ); } @@ -279,7 +279,7 @@ enum { static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat) { - struct backing_dev_info 
*bdi = &rwb->queue->backing_dev_info; + struct backing_dev_info *bdi = rwb->queue->backing_dev_info; u64 thislat; /* @@ -339,7 +339,7 @@ static int latency_exceeded(struct rq_wb *rwb) static void rwb_trace_step(struct rq_wb *rwb, const char *msg) { - struct backing_dev_info *bdi = &rwb->queue->backing_dev_info; + struct backing_dev_info *bdi = rwb->queue->backing_dev_info; trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec, rwb->wb_background, rwb->wb_normal, rwb->wb_max); @@ -423,7 +423,7 @@ static void wb_timer_fn(unsigned long data) status = latency_exceeded(rwb); - trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step, + trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step, inflight); /* diff --git a/block/genhd.c b/block/genhd.c index f2f22d0e8e14..d9ccd42f3675 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -613,7 +613,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk) disk_alloc_events(disk); /* Register BDI before referencing it from bdev */ - bdi = &disk->queue->backing_dev_info; + bdi = disk->queue->backing_dev_info; bdi_register_owner(bdi, disk_to_dev(disk)); blk_register_region(disk_devt(disk), disk->minors, NULL, diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index ec9d8610b25f..027b876370bc 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp) WARN_ON(d->gd); WARN_ON(d->flags & DEVFL_UP); blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); - q->backing_dev_info.name = "aoe"; - q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE; + q->backing_dev_info->name = "aoe"; + q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE; d->bufpool = mp; d->blkq = gd->queue = q; q->queuedata = d; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 83482721bc01..d305f05be648 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) if (get_ldev(device)) { q = bdev_get_queue(device->ldev->backing_bdev); - r = bdi_congested(&q->backing_dev_info, bdi_bits); + r = bdi_congested(q->backing_dev_info, bdi_bits); put_ldev(device); if (r) reason = 'b'; @@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig /* we have no partitions. we contain only ourselves. 
*/ device->this_bdev->bd_contains = device->this_bdev; - q->backing_dev_info.congested_fn = drbd_congested; - q->backing_dev_info.congested_data = device; + q->backing_dev_info->congested_fn = drbd_congested; + q->backing_dev_info->congested_data = device; blk_queue_make_request(q, drbd_make_request); blk_queue_write_cache(q, true, true); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index f35db29cac76..908c704e20aa 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi if (b) { blk_queue_stack_limits(q, b); - if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { + if (q->backing_dev_info->ra_pages != + b->backing_dev_info->ra_pages) { drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", - q->backing_dev_info.ra_pages, - b->backing_dev_info.ra_pages); - q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; + q->backing_dev_info->ra_pages, + b->backing_dev_info->ra_pages); + q->backing_dev_info->ra_pages = + b->backing_dev_info->ra_pages; } } fixup_discard_if_not_supported(q); @@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s, s->dev_disk_flags = md->flags; q = bdev_get_queue(device->ldev->backing_bdev); s->dev_lower_blocked = - bdi_congested(&q->backing_dev_info, + bdi_congested(q->backing_dev_info, (1 << WB_async_congested) | (1 << WB_sync_congested)); put_ldev(device); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index be2b93fd2c11..8378142f7a55 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { /* reset device->congestion_reason */ - bdi_rw_congested(&device->rq_queue->backing_dev_info); + bdi_rw_congested(device->rq_queue->backing_dev_info); nc = rcu_dereference(first_peer_device(device)->connection->net_conf); wp = nc ? 
nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index de279fe4e4fd..cb6bdb75d52d 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se switch (rbm) { case RB_CONGESTED_REMOTE: - bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; + bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; return bdi_read_congested(bdi); case RB_LEAST_PENDING: return atomic_read(&device->local_cnt) > diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index f4bd95943dc2..66d846ba85a9 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1243,7 +1243,7 @@ try_next_bio: && pd->bio_queue_size <= pd->write_congestion_off); spin_unlock(&pd->lock); if (wakeup) { - clear_bdi_congested(&pd->disk->queue->backing_dev_info, + clear_bdi_congested(pd->disk->queue->backing_dev_info, BLK_RW_ASYNC); } @@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) spin_lock(&pd->lock); if (pd->write_congestion_on > 0 && pd->bio_queue_size >= pd->write_congestion_on) { - set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC); + set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC); do { spin_unlock(&pd->lock); congestion_wait(BLK_RW_ASYNC, HZ); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4d78cf605b21..588721f30a22 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4526,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->limits.discard_zeroes_data = 1; if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) - q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; disk->queue = q; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 01035e718c1c..709c9cc34369 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits) struct request_queue *q = bdev_get_queue(dc->bdev); int ret = 0; - if (bdi_congested(&q->backing_dev_info, bits)) + if (bdi_congested(q->backing_dev_info, bits)) return 1; if (cached_dev_get(dc)) { @@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } cached_dev_put(dc); @@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc) struct gendisk *g = dc->disk.disk; g->queue->make_request_fn = cached_dev_make_request; - g->queue->backing_dev_info.congested_fn = cached_dev_congested; + g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; } @@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; @@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d) struct gendisk *g = d->disk; g->queue->make_request_fn = flash_dev_make_request; - g->queue->backing_dev_info.congested_fn = flash_dev_congested; + g->queue->backing_dev_info->congested_fn = flash_dev_congested; d->cache_miss = flash_dev_cache_miss; d->ioctl = 
flash_dev_ioctl; } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 3a19cbc8b230..85e3f21c2514 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; - q->backing_dev_info.congested_data = d; + q->backing_dev_info->congested_data = d; q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; @@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) set_capacity(dc->disk.disk, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); - dc->disk.disk->queue->backing_dev_info.ra_pages = - max(dc->disk.disk->queue->backing_dev_info.ra_pages, - q->backing_dev_info.ra_pages); + dc->disk.disk->queue->backing_dev_info->ra_pages = + max(dc->disk.disk->queue->backing_dev_info->ra_pages, + q->backing_dev_info->ra_pages); bch_cached_dev_request_init(dc); bch_cached_dev_writeback_init(dc); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 5b9cf56de8ef..894bc14469c8 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws) static int is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index bf2b2676cb8a..9fab33b113c4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era) static int dev_is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0a427de23ed2..3ad16d9c9d5a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) char b[BDEVNAME_SIZE]; if (likely(q)) - r |= bdi_congested(&q->backing_dev_info, bdi_bits); + r |= bdi_congested(q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: any_congested: nonexistent device %s", dm_device_name(t->md), diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 110982db4b48..2b266a2b5035 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) return 1; q = bdev_get_queue(pt->data_dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static void requeue_bios(struct pool *pool) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ff4a29a97ad3..9e958bc94fed 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1313,7 +1313,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * With request-based DM we only need to check the * top-level queue for congestion. 
*/ - r = md->queue->backing_dev_info.wb.state & bdi_bits; + r = md->queue->backing_dev_info->wb.state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) @@ -1396,7 +1396,7 @@ void dm_init_md_queue(struct mapped_device *md) * - must do so here (in alloc_dev callchain) before queue is used */ md->queue->queuedata = md; - md->queue->backing_dev_info.congested_data = md; + md->queue->backing_dev_info->congested_data = md; } void dm_init_normal_md_queue(struct mapped_device *md) @@ -1407,7 +1407,7 @@ void dm_init_normal_md_queue(struct mapped_device *md) /* * Initialize aspects of queue that aren't relevant for blk-mq */ - md->queue->backing_dev_info.congested_fn = dm_any_congested; + md->queue->backing_dev_info->congested_fn = dm_any_congested; blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 5975c9915684..f1c7bbac31a5 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits) for (i = 0; i < mddev->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; diff --git a/drivers/md/md.c b/drivers/md/md.c index 82821ee0d57f..ede3b2ad45db 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5341,8 +5341,8 @@ int md_run(struct mddev *mddev) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); - mddev->queue->backing_dev_info.congested_data = mddev; - mddev->queue->backing_dev_info.congested_fn = md_congested; + mddev->queue->backing_dev_info->congested_data = mddev; + mddev->queue->backing_dev_info->congested_fn = md_congested; } if (pers->sync_request) { if (mddev->kobj.sd && @@ -5699,7 +5699,7 @@ static int do_md_stop(struct mddev *mddev, int mode, __md_stop_writes(mddev); __md_stop(mddev); - mddev->queue->backing_dev_info.congested_fn = NULL; + mddev->queue->backing_dev_info->congested_fn = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index aa8c4e5c1ee2..d457afa672d5 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); /* Just like multipath_map, we just check the * first available device */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 848365d474f3..d6585239bff2 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits) for (i = 0; i < raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(devlist[i]->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; } @@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev) */ int stripe = mddev->raid_disks * (mddev->chunk_sectors << 9) / PAGE_SIZE; - if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) - mddev->queue->backing_dev_info.ra_pages = 2* stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2* stripe) + mddev->queue->backing_dev_info->ra_pages = 2* stripe; } dump_zones(mddev); diff --git 
a/drivers/md/raid1.c b/drivers/md/raid1.c index 67b0365d49b5..830ff2b20346 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits) * non-congested targets, it can be removed */ if ((bits & (1 << WB_async_congested)) || 1) - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); else - ret &= bdi_congested(&q->backing_dev_info, bits); + ret &= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1920756828df..6bc5c2a85160 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); @@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev) * maybe... */ stripe /= conf->geo.near_copies; - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } if (md_integrity_register(mddev)) @@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf) int stripe = conf->geo.raid_disks * ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); stripe /= conf->geo.near_copies; - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } conf->fullsync = 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36c13e4be9c9..c0312d3f516a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6264,10 +6264,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); conf->skip_copy = new; if (new) - mddev->queue->backing_dev_info.capabilities |= + mddev->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; else - mddev->queue->backing_dev_info.capabilities &= + mddev->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; mddev_resume(mddev); } @@ -7086,8 +7086,8 @@ static int raid5_run(struct mddev *mddev) int data_disks = conf->previous_raid_disks - conf->max_degraded; int stripe = data_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); @@ -7696,8 +7696,8 @@ static void end_reshape(struct r5conf *conf) int data_disks = conf->raid_disks - conf->max_degraded; int stripe = data_disks * ((conf->chunk_sectors << 9) / PAGE_SIZE); - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } } } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index a34308df927f..c0e5b9a8bf5f 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1226,7 +1226,7 @@ 
static int set_gfs2_super(struct super_block *s, void *data) * We set the bdi here to the queue backing, file systems can * overwrite this in ->fill_super() */ - s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; + s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info; return 0; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 12eeae62a2b1..e1872f36147f 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1068,7 +1068,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; sb->s_max_links = NILFS_LINK_MAX; - sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; + sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info; err = load_nilfs(nilfs, sb); if (err) diff --git a/fs/super.c b/fs/super.c index 1709ed029a2c..ea662b0e5e78 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1047,7 +1047,7 @@ static int set_bdev_super(struct super_block *s, void *data) * We set the bdi here to the queue backing, file systems can * overwrite this in ->fill_super() */ - s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; + s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info; return 0; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 11f7a8e86a89..a75e42de34ab 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -432,7 +432,8 @@ struct request_queue { */ struct delayed_work delay_work; - struct backing_dev_info backing_dev_info; + struct backing_dev_info *backing_dev_info; + struct backing_dev_info _backing_dev_info; /* * The queue owner gets to use this for whatever they like. diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 290e8b7d3181..216449825859 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1988,11 +1988,11 @@ void laptop_mode_timer_fn(unsigned long data) * We want to write everything out, not just down to the dirty * threshold */ - if (!bdi_has_dirty_io(&q->backing_dev_info)) + if (!bdi_has_dirty_io(q->backing_dev_info)) return; rcu_read_lock(); - list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node) + list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node) if (wb_has_dirty_io(wb)) wb_start_writeback(wb, nr_pages, true, WB_REASON_LAPTOP_TIMER); -- cgit v1.2.3-58-ga151 From e980f62353c697cbf0c4325e43df6e44399aeb64 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 4 Feb 2017 10:45:03 +0100 Subject: dm: don't allow ioctls to targets that don't map to whole devices .. at least for unprivileged users. Before we called into the SCSI ioctl code to allow excemptions for a few SCSI passthrough ioctls, but this is pretty unsafe and except for this call dm knows nothing about SCSI ioctls. As the SCSI ioctl code is now optional, we really don't want to drag it in for DM, and the exception is not very useful anyway. Signed-off-by: Christoph Hellwig Acked-by: Mike Snitzer Acked-by: Paolo Bonzini Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- drivers/md/dm.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9e958bc94fed..5bd9ab06a562 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -465,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, if (r > 0) { /* - * Target determined this ioctl is being issued against - * a logical partition of the parent bdev; so extra - * validation is needed. + * Target determined this ioctl is being issued against a + * subset of the parent bdev; require extra privileges. 
*/ - r = scsi_verify_blk_ioctl(NULL, cmd); - if (r) + if (!capable(CAP_SYS_RAWIO)) { + DMWARN_LIMIT( + "%s: sending ioctl %x to DM device without required privilege.", + current->comm, cmd); + r = -ENOIOCTLCMD; goto out; + } } r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); -- cgit v1.2.3-58-ga151
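
For reference, the calling convention introduced by the blk_init_allocated_queue
change earlier in this series looks roughly like the sketch below for a driver
that sets up its own .request_fn queue.  This is only an illustration, not code
from the patches: example_dev, example_request_fn and the dev->lock field are
made-up names, and the snippet assumes the usual <linux/blkdev.h> declarations.

	static int example_init_queue(struct example_dev *dev)
	{
		struct request_queue *q;

		/* Allocate the bare queue first. */
		q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
		if (!q)
			return -ENOMEM;

		/*
		 * The caller now assigns request_fn and, if it has its own
		 * lock, queue_lock (blk_init_queue_node() in the patch only
		 * overrides the lock when one is supplied).
		 */
		q->request_fn = example_request_fn;
		q->queue_lock = &dev->lock;

		/* Failure is now reported as an errno, not a NULL queue. */
		if (blk_init_allocated_queue(q) < 0) {
			blk_cleanup_queue(q);
			return -ENOMEM;
		}

		dev->queue = q;
		return 0;
	}

This mirrors what blk_init_queue_node() and dm_old_init_request_queue() do
after the conversion in the patches above.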