author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-24 11:53:15 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-24 11:53:15 -0700
commit    | c41c3ec4a2bcd3f24ab753bb337ec342f24bdf94 (patch)
tree      | 4fe1f5576883ab02e118ad0f1502c0df7ffafec1 /block
parent    | 2bf74771ca5610b10c3ac4cd17aacc389e6927ca (diff)
parent    | 2d62e6b038e729c3e4bfbfcfbd44800ef0883680 (diff)
Merge tag 'io_uring-5.9-2020-08-23' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- NVMe pull request from Sagi:
     "- nvme completion rework from Christoph and Chao that mostly came
        from a bit of divergence of how we classify errors related to
        pathing/retry etc.
      - nvmet passthru fixes from Chaitanya
      - minor nvmet fixes from Amit and I
      - mpath round-robin path selection fix from Martin
      - ignore noiob for zoned devices from Keith
      - minor nvme-fc fix from Tianjia"
- BFQ cgroup leak fix (Dmitry)
- block layer MAINTAINERS addition (Geert)
- fix null_blk FUA checking (Hou)
- get_max_io_size() size fix (Keith)
- fix block page_is_mergeable() for compound pages (Matthew)
- discard granularity fixes (Ming)
- IO scheduler ordering fix (Ming)
- misc fixes
* tag 'io_uring-5.9-2020-08-23' of git://git.kernel.dk/linux-block: (31 commits)
null_blk: fix passing of REQ_FUA flag in null_handle_rq
nvmet: Disable keep-alive timer when kato is cleared to 0h
nvme: redirect commands on dying queue
nvme: just check the status code type in nvme_is_path_error
nvme: refactor command completion
nvme: rename and document nvme_end_request
nvme: skip noiob for zoned devices
nvme-pci: fix PRP pool size
nvme-pci: Use u32 for nvme_dev.q_depth and nvme_queue.q_depth
nvme: Use spin_lock_irq() when taking the ctrl->lock
nvmet: call blk_mq_free_request() directly
nvmet: fix oops in pt cmd execution
nvmet: add ns tear down label for pt-cmd handling
nvme: multipath: round-robin: eliminate "fallback" variable
nvme: multipath: round-robin: fix single non-optimized path case
nvme-fc: Fix wrong return value in __nvme_fc_init_request()
nvmet-passthru: Reject commands with non-sgl flags set
nvmet: fix a memory leak
blkcg: fix memleak for iolatency
MAINTAINERS: Add missing header files to BLOCK LAYER section
...
Diffstat (limited to 'block')
-rw-r--r-- | block/bfq-cgroup.c   |  2
-rw-r--r-- | block/bfq-iosched.h  |  1
-rw-r--r-- | block/bfq-wf2q.c     | 12
-rw-r--r-- | block/bio.c          | 10
-rw-r--r-- | block/blk-cgroup.c   |  8
-rw-r--r-- | block/blk-merge.c    | 13
-rw-r--r-- | block/blk-mq-sched.c |  9
-rw-r--r-- | block/blk-mq.c       | 13
-rw-r--r-- | block/bsg-lib.c      |  2
9 files changed, 45 insertions(+), 25 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 68882b9b8f11..b791e2041e49 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
 		kfree(bfqg);
 }
 
-void bfqg_and_blkg_get(struct bfq_group *bfqg)
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 {
 	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
 	bfqg_get(bfqg);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index cd224aaf9f52..703895224562 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -986,7 +986,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_and_blkg_get(struct bfq_group *bfqg);
 void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index eb0e2a6daabe..26776bdbdf36 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -533,9 +533,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
 		bfqq->ref++;
 		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
 			     bfqq, bfqq->ref);
-	} else
-		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
-					       entity));
+	}
 }
 
 /**
@@ -649,14 +647,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
 
 	entity->on_st_or_in_serv = false;
 	st->wsum -= entity->weight;
-	if (is_in_service)
-		return;
-
-	if (bfqq)
+	if (bfqq && !is_in_service)
 		bfq_put_queue(bfqq);
-	else
-		bfqg_and_blkg_put(container_of(entity, struct bfq_group,
-					       entity));
 }
 
 /**
diff --git a/block/bio.c b/block/bio.c
index c63ba04bd629..a9931f23d933 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -740,8 +740,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 		struct page *page, unsigned int len, unsigned int off,
 		bool *same_page)
 {
-	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
-		bv->bv_offset + bv->bv_len - 1;
+	size_t bv_end = bv->bv_offset + bv->bv_len;
+	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
 	phys_addr_t page_addr = page_to_phys(page);
 
 	if (vec_end_addr + 1 != page_addr + off)
@@ -750,9 +750,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 		return false;
 
 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
-	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
-		return false;
-	return true;
+	if (*same_page)
+		return true;
+	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
 }
 
 /*
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 619a79b51068..c195365c9817 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1152,13 +1152,15 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();
 
-	ret = blk_iolatency_init(q);
+	ret = blk_throtl_init(q);
 	if (ret)
 		goto err_destroy_all;
 
-	ret = blk_throtl_init(q);
-	if (ret)
+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
 		goto err_destroy_all;
+	}
 	return 0;
 
 err_destroy_all:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6529e3aab001..f685d633bcc9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	if (max_sectors > start_offset)
 		return max_sectors - start_offset;
 
-	return sectors & (lbs - 1);
+	return sectors & ~(lbs - 1);
 }
 
 static inline unsigned get_max_segment_size(const struct request_queue *q,
@@ -533,10 +533,17 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+	if (req_op(rq) == REQ_OP_DISCARD)
+		return queue_max_discard_segments(rq->q);
+	return queue_max_segments(rq->q);
+}
+
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
+	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 		goto no_merge;
 
 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
@@ -624,7 +631,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (total_phys_segments > queue_max_segments(q))
+	if (total_phys_segments > blk_rq_get_max_segments(req))
 		return 0;
 
 	if (blk_integrity_merge_rq(q, req, next) == false)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index a19cdf159b75..d2790e5b06d1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -78,6 +78,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 		return;
 	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 
+	/*
+	 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
+	 * in blk_mq_run_hw_queue(). Its pair is the barrier in
+	 * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
+	 * meantime new request added to hctx->dispatch is missed to check in
+	 * blk_mq_run_hw_queue().
+	 */
+	smp_mb();
+
 	blk_mq_run_hw_queue(hctx, true);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0015a1892153..b3d2785eefe9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1438,6 +1438,15 @@ out:
 		spin_unlock(&hctx->lock);
 
 		/*
+		 * Order adding requests to hctx->dispatch and checking
+		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
+		 * in blk_mq_sched_restart(). Avoid restart code path to
+		 * miss the new added requests to hctx->dispatch, meantime
+		 * SCHED_RESTART is observed here.
+		 */
+		smp_mb();
+
+		/*
 		 * If SCHED_RESTART was set by the caller of this function and
 		 * it is no longer set that means that it was cleared by another
 		 * thread and hence that a queue rerun is needed.
@@ -1834,6 +1843,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
+ * @at_head: true if the request should be inserted at the head of the list.
  * @run_queue: If we should run the hardware queue after inserting the request.
  *
  * Should only be used carefully, when the caller knows we want to
@@ -2016,7 +2026,8 @@ insert:
 	if (bypass_insert)
 		return BLK_STS_RESOURCE;
 
-	blk_mq_request_bypass_insert(rq, false, run_queue);
+	blk_mq_sched_insert_request(rq, false, run_queue, false);
+
 	return BLK_STS_OK;
 }
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index fb7b347f8010..d185396d88bb 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -378,7 +378,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	bset->timeout_fn = timeout;
 
 	set = &bset->tag_set;
-	set->ops = &bsg_mq_ops,
+	set->ops = &bsg_mq_ops;
 	set->nr_hw_queues = 1;
 	set->queue_depth = 128;
 	set->numa_node = NUMA_NO_NODE;