author     David S. Miller <davem@davemloft.net>   2016-08-18 01:17:32 -0400
committer  David S. Miller <davem@davemloft.net>   2016-08-18 01:17:32 -0400
commit     60747ef4d173c2747bf7f0377fb22846cb422195 (patch)
tree       ea0faf33b952495c47909be1400c475a3f3821b0 /block
parent     484334198f8ce9552e20930fff9408ebf6bcf94d (diff)
parent     184ca823481c99dadd7d946e5afd4bb921eab30d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor overlapping changes for both merge conflicts.
Resolution work done by Stephen Rothwell was used
as a reference.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'block')
-rw-r--r--   block/Kconfig           13
-rw-r--r--   block/bio-integrity.c    2
-rw-r--r--   block/bio.c             21
-rw-r--r--   block/blk-core.c        26
-rw-r--r--   block/blk-merge.c        8
-rw-r--r--   block/blk-mq-sysfs.c    12
-rw-r--r--   block/blk-mq.c          25
-rw-r--r--   block/blk-throttle.c    13
-rw-r--r--   block/cfq-iosched.c      4
-rw-r--r--   block/genhd.c            3
10 files changed, 71 insertions, 56 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 0363cd731320..161491d0a879 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,19 +88,6 @@ config BLK_DEV_INTEGRITY
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection.  If in doubt, say N.
 
-config BLK_DEV_DAX
-	bool "Block device DAX support"
-	depends on FS_DAX
-	depends on BROKEN
-	help
-	  When DAX support is available (CONFIG_FS_DAX) raw block
-	  devices can also support direct userspace access to the
-	  storage capacity via MMAP(2) similar to a file on a
-	  DAX-enabled filesystem.  However, the DAX I/O-path disables
-	  some standard I/O-statistics, and the MMAP(2) path has some
-	  operational differences due to bypassing the page
-	  cache.  If in doubt, say N.
-
 config BLK_DEV_THROTTLING
 	bool "Block layer bio throttling support"
 	depends on BLK_CGROUP=y
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f70cc3bdfd01..63f72f00c72e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	bip->bip_bio = bio;
 	bio->bi_integrity = bip;
-	bio->bi_rw |= REQ_INTEGRITY;
+	bio->bi_opf |= REQ_INTEGRITY;
 
 	return bip;
 err:
diff --git a/block/bio.c b/block/bio.c
index 54ee3846c3a5..f39477538fef 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,9 +580,11 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	 */
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
+
+	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
 
@@ -661,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	if (!bio)
 		return NULL;
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
@@ -687,6 +689,8 @@ integrity_clone:
 		}
 	}
 
+	bio_clone_blkcg_association(bio, bio_src);
+
 	return bio;
 }
 EXPORT_SYMBOL(bio_clone_bioset);
@@ -869,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
 	init_completion(&ret.event);
 	bio->bi_private = &ret;
 	bio->bi_end_io = submit_bio_wait_endio;
-	bio->bi_rw |= REQ_SYNC;
+	bio->bi_opf |= REQ_SYNC;
 	submit_bio(bio);
 	wait_for_completion_io(&ret.event);
 
@@ -2004,6 +2008,17 @@ void bio_disassociate_task(struct bio *bio)
 	}
 }
 
+/**
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * @dst: destination bio
+ * @src: source bio
+ */
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+{
+	if (src->bi_css)
+		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+}
+
 #endif /* CONFIG_BLK_CGROUP */
 
 static void __init biovec_init_slabs(void)
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9cc16c2..999442ec4601 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
 		return false;
 
 	return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 			    struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_back_merge_fn(q, req, bio))
 		return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 			     struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
 	if (!ll_front_merge_fn(q, req, bio))
 		return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
 
-	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-	if (bio->bi_rw & REQ_RAHEAD)
+	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+	if (bio->bi_opf & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool sync = !!(bio->bi_opf & REQ_SYNC);
 	struct blk_plug *plug;
 	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
 	/*
 	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
 	 */
-	rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
 	printk(KERN_INFO "attempt to access beyond end of device\n");
 	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
 			bdevname(bio->bi_bdev, b),
-			bio->bi_rw,
+			bio->bi_opf,
 			(unsigned long long)bio_end_sector(bio),
 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
 			goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	 * one.
 	 */
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_rw & ff) != ff)
+		if ((bio->bi_opf & ff) != ff)
 			break;
 		bytes += bio->bi_iter.bi_size;
 	}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	/* mixed attributes always follow the first bio */
 	if (req->cmd_flags & REQ_MIXED_MERGE) {
 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
-		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
 	}
 
 	/*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 41cbd4878958..3eec75a9e91d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	if (split) {
 		/* there isn't chance to merge the splitted bio */
-		split->bi_rw |= REQ_NOMERGE;
+		split->bi_opf |= REQ_NOMERGE;
 
 		bio_chain(split, *bio);
 		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
 	 * Distributes the attributs to each bio.
 	 */
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
-			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
-		bio->bi_rw |= ff;
+		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+		bio->bi_opf |= ff;
 	}
 	rq->cmd_flags |= REQ_MIXED_MERGE;
 }
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 4ea4dd8a1eed..fe822aa5b8e4 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -380,15 +380,13 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 	return ret;
 }
 
-void blk_mq_unregister_disk(struct gendisk *disk)
+static void __blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	int i, j;
 
-	blk_mq_disable_hotplug();
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		blk_mq_unregister_hctx(hctx);
 
@@ -405,6 +403,12 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	kobject_put(&disk_to_dev(disk)->kobj);
 
 	q->mq_sysfs_init_done = false;
+}
+
+void blk_mq_unregister_disk(struct gendisk *disk)
+{
+	blk_mq_disable_hotplug();
+	__blk_mq_unregister_disk(disk);
 	blk_mq_enable_hotplug();
 }
 
@@ -450,7 +454,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 	}
 
 	if (ret)
-		blk_mq_unregister_disk(disk);
+		__blk_mq_unregister_disk(disk);
 	else
 		q->mq_sysfs_init_done = true;
 out:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 576e7112f807..e931a0e8e73d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -672,7 +672,20 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	};
 	int i;
 
-	if (blk_queue_enter(q, true))
+	/* A deadlock might occur if a request is stuck requiring a
+	 * timeout at the same time a queue freeze is waiting
+	 * completion, since the timeout code would not be able to
+	 * acquire the queue reference here.
+	 *
+	 * That's why we don't use blk_queue_enter here; instead, we use
+	 * percpu_ref_tryget directly, because we need to be able to
+	 * obtain a reference even in the short window between the queue
+	 * starting to freeze, by dropping the first reference in
+	 * blk_mq_freeze_queue_start, and the moment the last request is
+	 * consumed, marked by the instant q_usage_counter reaches
+	 * zero.
+	 */
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
 
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
@@ -1221,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio_op(bio), bio->bi_rw))
+	if (rw_is_sync(bio_op(bio), bio->bi_opf))
 		op_flags |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, op);
@@ -1289,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
 	unsigned int request_count = 0;
@@ -1383,8 +1396,8 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 47a3e540631a..f1aba26f4719 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -145,11 +145,6 @@ struct throtl_data
 	/* Total Number of queued bios on READ and WRITE lists */
 	unsigned int nr_queued[2];
 
-	/*
-	 * number of total undestroyed groups
-	 */
-	unsigned int nr_undestroyed_grps;
-
 	/* Work for dispatching throttled bios */
 	struct work_struct dispatch_work;
 };
@@ -826,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	 * second time when it eventually gets issued.  Set it when a bio
 	 * is being charged to a tg.
 	 */
-	if (!(bio->bi_rw & REQ_THROTTLED))
-		bio->bi_rw |= REQ_THROTTLED;
+	if (!(bio->bi_opf & REQ_THROTTLED))
+		bio->bi_opf |= REQ_THROTTLED;
 }
 
 /**
@@ -1404,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* see throtl_charge_bio() */
-	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
 	spin_lock_irq(q->queue_lock);
@@ -1483,7 +1478,7 @@ out:
 	 * being issued.
 	 */
 	if (!throttled)
-		bio->bi_rw &= ~REQ_THROTTLED;
+		bio->bi_opf &= ~REQ_THROTTLED;
 
 	return throttled;
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index acabba198de9..cc2f6dbd4303 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+	return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
 }
 
 /*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
 }
 
 static void
diff --git a/block/genhd.c b/block/genhd.c
index 3c9dede4e04f..fcd6d4fae657 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -614,7 +614,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 	/* Register BDI before referencing it from bdev */
 	bdi = &disk->queue->backing_dev_info;
-	bdi_register_dev(bdi, disk_devt(disk));
+	bdi_register_owner(bdi, disk_to_dev(disk));
 
 	blk_register_region(disk_devt(disk), disk->minors, NULL,
 			    exact_match, exact_lock, disk);
@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
 	if (iter) {
 		class_dev_iter_exit(iter);
 		kfree(iter);
+		seqf->private = NULL;
 	}
 }
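Note on the blk-mq.c hunk above: the new comment in blk_mq_timeout_work() explains why the timeout path takes its reference with percpu_ref_tryget() instead of blk_queue_enter(); the latter refuses (or waits) once a freeze has started, while the tryget still succeeds for as long as q_usage_counter has not dropped to zero. The following is a minimal userspace sketch of that "try-get or back off" pattern, not kernel code; struct usage_counter, counter_enter(), counter_tryget(), counter_put() and timeout_work() are invented names used only to illustrate the reasoning in the comment.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for q->q_usage_counter: a refcount plus a "freezing" flag. */
struct usage_counter {
	atomic_int  refs;     /* outstanding references                   */
	atomic_bool freezing; /* set once a "queue freeze" has begun      */
};

/* Analogue of the blocking entry path: refuses as soon as a freeze has
 * started, even though references are still outstanding.  Waiting here
 * from the timeout path could deadlock with the freeze that is itself
 * waiting for refs to reach zero. */
static bool counter_enter(struct usage_counter *c)
{
	if (atomic_load(&c->freezing))
		return false;
	atomic_fetch_add(&c->refs, 1);
	return true;
}

/* Analogue of percpu_ref_tryget(): succeeds as long as the count has not
 * reached zero, even in the window after a freeze has begun. */
static bool counter_tryget(struct usage_counter *c)
{
	int old = atomic_load(&c->refs);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&c->refs, &old, old + 1))
			return true;
	}
	return false;
}

static void counter_put(struct usage_counter *c)
{
	atomic_fetch_sub(&c->refs, 1);
}

/* Analogue of blk_mq_timeout_work(): never wait for a reference; either
 * take one without blocking or do nothing. */
static void timeout_work(struct usage_counter *c)
{
	if (!counter_tryget(c))
		return;		/* nothing left to time out */
	puts("scanning for expired requests");
	counter_put(c);
}

int main(void)
{
	struct usage_counter c = { 1, false };

	timeout_work(&c);		/* no freeze yet: scan runs          */
	atomic_store(&c.freezing, true);/* freeze begins, refs still > 0     */
	printf("enter after freeze started: %d\n", counter_enter(&c)); /* 0 */
	timeout_work(&c);		/* tryget still succeeds: scan runs  */
	return 0;
}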