Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 331
1 file changed, 224 insertions(+), 107 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a7421b772d0e..af393d5a9680 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -129,11 +129,70 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+        int             errno;
+        const char      *name;
+} blk_errors[] = {
+        [BLK_STS_OK]            = { 0,          "" },
+        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
+        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
+        [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
+        [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
+        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
+        [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
+        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
+        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
+        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+        [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
+
+        /* device mapper special case, should not leak out: */
+        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
+
+        /* everything else not covered above: */
+        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+                if (blk_errors[i].errno == errno)
+                        return (__force blk_status_t)i;
+        }
+
+        return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+        int idx = (__force int)status;
+
+        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+                return -EIO;
+        return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+        int idx = (__force int)status;
+
+        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+                return;
+
+        printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+                           __func__, blk_errors[idx].name, req->rq_disk ?
+                           req->rq_disk->disk_name : "?",
+                           (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-                          unsigned int nbytes, int error)
+                          unsigned int nbytes, blk_status_t error)
 {
         if (error)
-                bio->bi_error = error;
+                bio->bi_status = error;
 
         if (unlikely(rq->rq_flags & RQF_QUIET))
                 bio_set_flag(bio, BIO_QUIET);
@@ -177,10 +236,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         if (likely(!blk_queue_dead(q)))
                 queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                    msecs_to_jiffies(msecs));
@@ -198,6 +260,9 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
         blk_run_queue_async(q);
 }
@@ -210,11 +275,13 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
         WARN_ON(!irqs_disabled());
+        WARN_ON_ONCE(q->mq_ops);
 
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
         __blk_run_queue(q);
@@ -233,10 +300,13 @@ EXPORT_SYMBOL(blk_start_queue);
  * or if it simply chooses not to queue more I/O at one point, it can
  * call this function to prevent the request_fn from being called until
  * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations. Queue lock must be held.
+ * blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -289,6 +359,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         if (unlikely(blk_queue_dead(q)))
                 return;
 
@@ -310,11 +383,13 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q: The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
@@ -328,10 +403,18 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
@@ -349,6 +432,8 @@ void blk_run_queue(struct request_queue *q)
 {
         unsigned long flags;
 
+        WARN_ON_ONCE(q->mq_ops);
+
         spin_lock_irqsave(q->queue_lock, flags);
         __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
@@ -377,6 +462,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
         int i;
 
         lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
 
         while (true) {
                 bool drain = false;
@@ -455,6 +541,8 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+        WARN_ON_ONCE(q->mq_ops);
+
         spin_lock_irq(q->queue_lock);
         q->bypass_depth++;
         queue_flag_set(QUEUE_FLAG_BYPASS, q);
@@ -481,6 +569,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  * @q: queue of interest
  *
  * Leave bypass mode and restore the normal queueing behavior.
+ *
+ * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
+ * this function is called for both blk-sq and blk-mq queues.
  */
 void blk_queue_bypass_end(struct request_queue *q)
 {
@@ -732,7 +823,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         if (q->id < 0)
                 goto fail_q;
 
-        q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+        q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
         if (!q->bio_split)
                 goto fail_id;
 
@@ -878,6 +969,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
+        WARN_ON_ONCE(q->mq_ops);
+
         q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
         if (!q->fq)
                 return -ENOMEM;
@@ -1015,6 +1108,8 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
         struct request_list *rl;
         int on_thresh, off_thresh;
 
+        WARN_ON_ONCE(q->mq_ops);
+
         spin_lock_irq(q->queue_lock);
         q->nr_requests = nr;
         blk_queue_congestion_threshold(q);
@@ -1077,6 +1172,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
         int may_queue;
         req_flags_t rq_flags = RQF_ALLOCED;
 
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_dying(q)))
                 return ERR_PTR(-ENODEV);
 
@@ -1250,12 +1347,20 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
         struct request_list *rl;
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
         rq = __get_request(rl, op, bio, gfp_mask);
         if (!IS_ERR(rq))
                 return rq;
 
+        if (op & REQ_NOWAIT) {
+                blk_put_rl(rl);
+                return ERR_PTR(-EAGAIN);
+        }
+
         if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
                 blk_put_rl(rl);
                 return rq;
@@ -1283,16 +1388,18 @@ retry:
         goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-                gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+                                           unsigned int op, gfp_t gfp_mask)
 {
         struct request *rq;
 
+        WARN_ON_ONCE(q->mq_ops);
+
         /* create ioc upfront */
         create_io_context(gfp_mask, q->node);
 
         spin_lock_irq(q->queue_lock);
-        rq = get_request(q, rw, NULL, gfp_mask);
+        rq = get_request(q, op, NULL, gfp_mask);
         if (IS_ERR(rq)) {
                 spin_unlock_irq(q->queue_lock);
                 return rq;
@@ -1305,14 +1412,24 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
         return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+                                gfp_t gfp_mask)
 {
-        if (q->mq_ops)
-                return blk_mq_alloc_request(q, rw,
+        struct request *req;
+
+        if (q->mq_ops) {
+                req = blk_mq_alloc_request(q, op,
                         (gfp_mask & __GFP_DIRECT_RECLAIM) ?
                         0 : BLK_MQ_REQ_NOWAIT);
-        else
-                return blk_old_get_request(q, rw, gfp_mask);
+                if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+                        q->mq_ops->initialize_rq_fn(req);
+        } else {
+                req = blk_old_get_request(q, op, gfp_mask);
+                if (!IS_ERR(req) && q->initialize_rq_fn)
+                        q->initialize_rq_fn(req);
+        }
+
+        return req;
 }
 EXPORT_SYMBOL(blk_get_request);
 
@@ -1328,6 +1445,9 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         blk_delete_timer(rq);
         blk_clear_rq_complete(rq);
         trace_block_rq_requeue(q, rq);
@@ -1402,9 +1522,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
         req_flags_t rq_flags = req->rq_flags;
@@ -1417,6 +1534,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 return;
         }
 
+        lockdep_assert_held(q->queue_lock);
+
         blk_pm_put_request(req);
 
         elv_completed_request(q, req);
@@ -1646,6 +1765,7 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
                 req->ioprio = ioc->ioprio;
         else
                 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+        req->write_hint = bio->bi_write_hint;
         blk_rq_bio_prep(req->q, req, bio);
 }
 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -1665,10 +1785,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          */
         blk_queue_bounce(q, &bio);
 
-        blk_queue_split(q, &bio, q->bio_split);
+        blk_queue_split(q, &bio);
 
         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-                bio->bi_error = -EIO;
+                bio->bi_status = BLK_STS_IOERR;
                 bio_endio(bio);
                 return BLK_QC_T_NONE;
         }
@@ -1726,7 +1846,10 @@ get_rq:
         req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
         if (IS_ERR(req)) {
                 __wbt_done(q->rq_wb, wb_acct);
-                bio->bi_error = PTR_ERR(req);
+                if (PTR_ERR(req) == -ENOMEM)
+                        bio->bi_status = BLK_STS_RESOURCE;
+                else
+                        bio->bi_status = BLK_STS_IOERR;
                 bio_endio(bio);
                 goto out_unlock;
         }
@@ -1881,7 +2004,7 @@ generic_make_request_checks(struct bio *bio)
 {
         struct request_queue *q;
         int nr_sectors = bio_sectors(bio);
-        int err = -EIO;
+        blk_status_t status = BLK_STS_IOERR;
         char b[BDEVNAME_SIZE];
         struct hd_struct *part;
 
@@ -1900,6 +2023,14 @@ generic_make_request_checks(struct bio *bio)
                 goto end_io;
         }
 
+        /*
+         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+         * if queue is not a request based queue.
+         */
+
+        if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+                goto not_supported;
+
         part = bio->bi_bdev->bd_part;
         if (should_fail_request(part, bio->bi_iter.bi_size) ||
             should_fail_request(&part_to_disk(part)->part0,
@@ -1924,7 +2055,7 @@ generic_make_request_checks(struct bio *bio)
             !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                 if (!nr_sectors) {
-                        err = 0;
+                        status = BLK_STS_OK;
                         goto end_io;
                 }
         }
@@ -1976,9 +2107,9 @@ generic_make_request_checks(struct bio *bio)
         return true;
 
 not_supported:
-        err = -EOPNOTSUPP;
+        status = BLK_STS_NOTSUPP;
 end_io:
-        bio->bi_error = err;
+        bio->bi_status = status;
         bio_endio(bio);
         return false;
 }
@@ -2057,7 +2188,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         do {
                 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-                if (likely(blk_queue_enter(q, false) == 0)) {
+                if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                         struct bio_list lower, same;
 
                         /* Create a fresh bio_list for all subordinate requests */
@@ -2082,7 +2213,11 @@ blk_qc_t generic_make_request(struct bio *bio)
                         bio_list_merge(&bio_list_on_stack[0], &same);
                         bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                 } else {
-                        bio_io_error(bio);
+                        if (unlikely(!blk_queue_dying(q) &&
+                                        (bio->bi_opf & REQ_NOWAIT)))
+                                bio_wouldblock_error(bio);
+                        else
+                                bio_io_error(bio);
                 }
                 bio = bio_list_pop(&bio_list_on_stack[0]);
         } while (bio);
@@ -2183,29 +2318,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
         unsigned long flags;
         int where = ELEVATOR_INSERT_BACK;
 
         if (blk_cloned_rq_check_limits(q, rq))
-                return -EIO;
+                return BLK_STS_IOERR;
 
         if (rq->rq_disk &&
             should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-                return -EIO;
+                return BLK_STS_IOERR;
 
         if (q->mq_ops) {
                 if (blk_queue_io_stat(q))
                         blk_account_io_start(rq, true);
                 blk_mq_sched_insert_request(rq, false, true, false, false);
-                return 0;
+                return BLK_STS_OK;
         }
 
         spin_lock_irqsave(q->queue_lock, flags);
         if (unlikely(blk_queue_dying(q))) {
                 spin_unlock_irqrestore(q->queue_lock, flags);
-                return -ENODEV;
+                return BLK_STS_IOERR;
         }
 
         /*
@@ -2222,7 +2357,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                 __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
-        return 0;
+        return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
@@ -2238,9 +2373,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2380,15 +2512,15 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
         struct request *rq;
         int ret;
 
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         while ((rq = __elv_next_request(q)) != NULL) {
 
                 rq = blk_pm_peek_request(q, rq);
@@ -2456,15 +2588,14 @@ struct request *blk_peek_request(struct request_queue *q)
                         rq = NULL;
                         break;
                 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-                        int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
                         rq->rq_flags |= RQF_QUIET;
                         /*
                          * Mark this request as started so we don't trigger
                          * any debug logic in the end I/O path.
                          */
                         blk_start_request(rq);
-                        __blk_end_request_all(rq, err);
+                        __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+                                              BLK_STS_TARGET : BLK_STS_IOERR);
                 } else {
                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                         break;
@@ -2505,12 +2636,12 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+        lockdep_assert_held(req->q->queue_lock);
+        WARN_ON_ONCE(req->q->mq_ops);
+
         blk_dequeue_request(req);
 
         if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2535,14 +2666,14 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         rq = blk_peek_request(q);
         if (rq)
                 blk_start_request(rq);
@@ -2553,7 +2684,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2572,49 +2703,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+                unsigned int nr_bytes)
 {
         int total_bytes;
 
-        trace_block_rq_complete(req, error, nr_bytes);
+        trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
         if (!req->bio)
                 return false;
 
-        if (error && !blk_rq_is_passthrough(req) &&
-            !(req->rq_flags & RQF_QUIET)) {
-                char *error_type;
-
-                switch (error) {
-                case -ENOLINK:
-                        error_type = "recoverable transport";
-                        break;
-                case -EREMOTEIO:
-                        error_type = "critical target";
-                        break;
-                case -EBADE:
-                        error_type = "critical nexus";
-                        break;
-                case -ETIMEDOUT:
-                        error_type = "timeout";
-                        break;
-                case -ENOSPC:
-                        error_type = "critical space allocation";
-                        break;
-                case -ENODATA:
-                        error_type = "critical medium";
-                        break;
-                case -EIO:
-                default:
-                        error_type = "I/O";
-                        break;
-                }
-                printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-                                   __func__, error_type, req->rq_disk ?
-                                   req->rq_disk->disk_name : "?",
-                                   (unsigned long long)blk_rq_pos(req));
-
-        }
+        if (unlikely(error && !blk_rq_is_passthrough(req) &&
+                     !(req->rq_flags & RQF_QUIET)))
+                print_req_error(req, error);
 
         blk_account_io_completion(req, nr_bytes);
 
@@ -2680,7 +2781,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
                                     unsigned int nr_bytes,
                                     unsigned int bidi_bytes)
 {
@@ -2718,13 +2819,13 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
         struct request_queue *q = req->q;
 
+        lockdep_assert_held(req->q->queue_lock);
+        WARN_ON_ONCE(q->mq_ops);
+
         if (req->rq_flags & RQF_STATS)
                 blk_stat_add(req);
 
@@ -2758,7 +2859,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2772,12 +2873,14 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
                                  unsigned int nr_bytes, unsigned int bidi_bytes)
 {
         struct request_queue *q = rq->q;
         unsigned long flags;
 
+        WARN_ON_ONCE(q->mq_ops);
+
         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                 return true;
 
@@ -2791,7 +2894,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2803,9 +2906,12 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+        WARN_ON_ONCE(rq->q->mq_ops);
+
         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                 return true;
 
@@ -2817,7 +2923,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2828,8 +2934,10 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+                unsigned int nr_bytes)
 {
+        WARN_ON_ONCE(rq->q->mq_ops);
         return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(blk_end_request);
 
@@ -2837,12 +2945,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
         bool pending;
         unsigned int bidi_bytes = 0;
@@ -2858,7 +2966,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2868,8 +2976,12 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+                unsigned int nr_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+        WARN_ON_ONCE(rq->q->mq_ops);
+
         return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
 
@@ -2877,16 +2989,19 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
         bool pending;
         unsigned int bidi_bytes = 0;
 
+        lockdep_assert_held(rq->q->queue_lock);
+        WARN_ON_ONCE(rq->q->mq_ops);
+
         if (unlikely(blk_bidi_rq(rq)))
                 bidi_bytes = blk_rq_bytes(rq->next_rq);
 
@@ -2898,7 +3013,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2908,7 +3023,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
         return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3151,6 +3266,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                             bool from_schedule)
         __releases(q->queue_lock)
 {
+        lockdep_assert_held(q->queue_lock);
+
         trace_block_unplug(q, depth, !from_schedule);
 
         if (from_schedule)
@@ -3249,7 +3366,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                  * Short-circuit if @q is dead
                  */
                 if (unlikely(blk_queue_dying(q))) {
-                        __blk_end_request_all(rq, -ENODEV);
+                        __blk_end_request_all(rq, BLK_STS_IOERR);
                         continue;
                 }
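
The blk_errors[] table added above gives every blk_status_t both an errno (for conversion at the edges of the block layer) and a log string (for print_req_error(), which replaces the open-coded switch that blk_update_request() used to carry). The following is a minimal user-space sketch of that round trip; the BLK_STS_* numbering and the trimmed table are illustrative only, and plain C stands in for the kernel's __bitwise/__force annotations and WARN_ON_ONCE():

#include <errno.h>
#include <stdio.h>

typedef unsigned char blk_status_t;     /* kernel: u8 __bitwise */

/* Illustrative numbering; the real constants live in blk_types.h. */
enum { BLK_STS_OK, BLK_STS_NOTSUPP, BLK_STS_NOSPC, BLK_STS_AGAIN, BLK_STS_IOERR };

static const struct {
        int             error;
        const char      *name;
} blk_errors[] = {
        [BLK_STS_OK]      = { 0,           "" },
        [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
        [BLK_STS_NOSPC]   = { -ENOSPC,     "critical space allocation" },
        [BLK_STS_AGAIN]   = { -EAGAIN,     "nonblocking retry" },
        [BLK_STS_IOERR]   = { -EIO,        "I/O" },   /* catch-all */
};

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static blk_status_t errno_to_blk_status(int error)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(blk_errors); i++)
                if (blk_errors[i].error == error)
                        return (blk_status_t)i;
        return BLK_STS_IOERR;   /* unrecognized errnos become plain I/O errors */
}

static int blk_status_to_errno(blk_status_t status)
{
        if (status >= ARRAY_SIZE(blk_errors))
                return -EIO;    /* the kernel version also WARNs once here */
        return blk_errors[status].error;
}

int main(void)
{
        printf("%d\n", blk_status_to_errno(errno_to_blk_status(-ENOSPC)));
        printf("%s\n", blk_errors[errno_to_blk_status(-EOPNOTSUPP)].name);
        return 0;
}

Because the errno and the log string now come from one table, a new status value added in one place can no longer drift out of sync with its log message, which is the failure mode the old switch statement invited.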
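The REQ_NOWAIT hunks cooperate across three places: generic_make_request_checks() fails nowait bios on queues that are not request based (BLK_STS_NOTSUPP), get_request() returns ERR_PTR(-EAGAIN) instead of sleeping on a congested legacy queue, and generic_make_request() completes the bio via bio_wouldblock_error() when blk_queue_enter() refuses entry to a live queue; a dying queue still gets a hard I/O error so callers do not retry forever. The user-visible consumer is nonblocking direct I/O. A sketch of how that surfaces through pwritev2(2) with RWF_NOWAIT, assuming a kernel and libc new enough to expose the flag and an already-open O_DIRECT fd:

#define _GNU_SOURCE
#include <errno.h>
#include <sys/uio.h>

/* Returns 0 on success, -EAGAIN if the write would have blocked,
 * or another negative errno on failure. */
static int write_nowait(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        ssize_t ret = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);

        if (ret < 0 && errno == EAGAIN)
                return -EAGAIN;         /* punt to a worker thread and retry */
        return ret < 0 ? -errno : 0;
}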
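blk_get_request() also grows an optional initialize_rq_fn hook, invoked on every successfully allocated request on both the legacy and the blk-mq path (SCSI uses this to set up its per-request payload). A condensed model of that dispatch, with made-up types standing in for the kernel structures:

#include <stdio.h>

struct request { int tag; };

struct queue {
        /* optional: NULL means there is nothing extra to set up */
        void (*initialize_rq_fn)(struct request *);
};

static struct request *blk_get_request_model(struct queue *q)
{
        static struct request rq;       /* stand-in for real allocation */

        rq.tag = 42;
        if (q->initialize_rq_fn)        /* the hook this patch adds */
                q->initialize_rq_fn(&rq);
        return &rq;
}

static void driver_init_rq(struct request *rq)
{
        printf("initialized rq, tag=%d\n", rq->tag);
}

int main(void)
{
        struct queue q = { .initialize_rq_fn = driver_init_rq };

        blk_get_request_model(&q);
        return 0;
}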
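Finally, the many lockdep_assert_held()/WARN_ON_ONCE(q->mq_ops) pairs turn what used to be "Queue lock must be held" comments into enforced checks: the former verifies under CONFIG_LOCKDEP that the caller actually holds q->queue_lock, and the latter flags any blk-mq queue reaching a legacy-only helper. A user-space analogue of the assert-held idea, using an ERRORCHECK pthread mutex (which fails with EDEADLK when the owner relocks it); the names are hypothetical and only the pattern mirrors the patch:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t queue_lock;

/* Analogue of lockdep_assert_held(): relocking an ERRORCHECK mutex we
 * already own fails with EDEADLK, proving the caller holds the lock. */
static void assert_queue_lock_held(void)
{
        assert(pthread_mutex_lock(&queue_lock) == EDEADLK);
}

static void legacy_only_helper(void)
{
        assert_queue_lock_held();       /* previously just a comment */
        /* ... touch state that queue_lock protects ... */
}

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&queue_lock, &attr);

        pthread_mutex_lock(&queue_lock);
        legacy_only_helper();           /* assertion passes: lock is held */
        pthread_mutex_unlock(&queue_lock);
        return 0;
}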