Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq-tag.c   29
-rw-r--r--   block/blk-mq.c        8
-rw-r--r--   block/blk-tag.c      22
-rw-r--r--   block/bsg.c          22
4 files changed, 17 insertions, 64 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 70356a2a11ab..09b2ee6694fb 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
-		       int (fn)(void *, struct request *))
-{
-	int i, j, ret = 0;
-
-	if (WARN_ON_ONCE(!fn))
-		goto out;
-
-	for (i = 0; i < set->nr_hw_queues; i++) {
-		struct blk_mq_tags *tags = set->tags[i];
-
-		if (!tags)
-			continue;
-
-		for (j = 0; j < tags->nr_tags; j++) {
-			if (!tags->static_rqs[j])
-				continue;
-
-			ret = fn(data, tags->static_rqs[j]);
-			if (ret)
-				goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
-
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e9da5e6a8526..70c65bb6c013 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
 	if (blk_mq_request_started(rq)) {
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+		rq->rq_flags &= ~RQF_TIMED_OUT;
 		if (q->dma_drain_size && blk_rq_bytes(rq))
 			rq->nr_phys_segments--;
 	}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+	req->rq_flags |= RQF_TIMED_OUT;
 	if (req->q->mq_ops->timeout) {
 		enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
 	}
 
+	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_add_timer(req);
 }
 
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
 		return false;
+	if (rq->rq_flags & RQF_TIMED_OUT)
+		return false;
 
 	deadline = blk_rq_deadline(rq);
 	if (time_after_eq(jiffies, deadline))
@@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
 	mutex_lock(&set->tag_list_lock);
 	list_del_rcu(&q->tag_set_list);
-	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
-	synchronize_rcu();
 
+	INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 24b20d86bcbc..fbc153aef166 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);
-	list_add(&rq->queuelist, &q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q: the request queue for the device
- *
- * Description:
- *   Hardware conditions may dictate a need to stop all pending requests.
- *   In this case, we will safely clear the block side of the tag queue and
- *   readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-	struct list_head *tmp, *n;
-
-	lockdep_assert_held(q->queue_lock);
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list)
-		blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
diff --git a/block/bsg.c b/block/bsg.c
index 132e657e2d91..66602c489956 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	struct bsg_device *bd;
 	unsigned char buf[32];
 
+	lockdep_assert_held(&bsg_mutex);
+
 	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 
@@ -707,14 +709,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	bsg_set_block(bd, file);
 
 	atomic_set(&bd->ref_count, 1);
-	mutex_lock(&bsg_mutex);
 	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
 	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
 	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
 		format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
-	mutex_unlock(&bsg_mutex);
 	return bd;
 }
 
@@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
 
-	mutex_lock(&bsg_mutex);
+	lockdep_assert_held(&bsg_mutex);
 
 	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 	}
 	bd = NULL;
 found:
-	mutex_unlock(&bsg_mutex);
 	return bd;
 }
 
@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
 	 */
 	mutex_lock(&bsg_mutex);
 	bcd = idr_find(&bsg_minor_idr, iminor(inode));
-	mutex_unlock(&bsg_mutex);
 
-	if (!bcd)
-		return ERR_PTR(-ENODEV);
+	if (!bcd) {
+		bd = ERR_PTR(-ENODEV);
+		goto out_unlock;
+	}
 
 	bd = __bsg_get_device(iminor(inode), bcd->queue);
-	if (bd)
-		return bd;
-
-	bd = bsg_add_device(inode, bcd->queue, file);
+	if (!bd)
+		bd = bsg_add_device(inode, bcd->queue, file);
 
+out_unlock:
+	mutex_unlock(&bsg_mutex);
 	return bd;
 }
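
The blk-mq.c hunks above introduce an RQF_TIMED_OUT flag: it is set before the driver's ->timeout() callback runs, cleared again once a new timer is armed (or when the request is requeued), and blk_mq_req_expired() skips any request that still has it set, so a request cannot be expired a second time while its timeout handler is in progress. The snippet below is a minimal user-space model of that guard, written only to make the control flow easy to follow; the struct, flag value, deadline arithmetic, and helper names are invented for the example and are not the kernel's definitions.

/* Minimal user-space model of the RQF_TIMED_OUT guard added in blk-mq.c.
 * All names and values here are illustrative, not the kernel's own.
 */
#include <stdbool.h>
#include <stdio.h>

#define RQF_TIMED_OUT	(1u << 0)	/* stand-in for the real flag */

enum rq_state { RQ_IDLE, RQ_IN_FLIGHT };

struct model_rq {
	enum rq_state state;
	unsigned int rq_flags;
	unsigned long deadline;		/* absolute "jiffies" deadline */
};

/* Mirrors blk_mq_req_expired(): a request already handed to the timeout
 * handler (RQF_TIMED_OUT set) must not be expired again. */
static bool req_expired(const struct model_rq *rq, unsigned long now)
{
	if (rq->state != RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;
	return now >= rq->deadline;
}

/* Mirrors blk_mq_rq_timed_out() for the BLK_EH_RESET_TIMER case: the flag
 * is held across the driver callback and only cleared once a fresh timer
 * (a new deadline) is armed, as blk_add_timer() would do. */
static void rq_timed_out(struct model_rq *rq, unsigned long now)
{
	rq->rq_flags |= RQF_TIMED_OUT;
	/* ... the driver ->timeout() would run here ... */
	rq->rq_flags &= ~RQF_TIMED_OUT;
	rq->deadline = now + 30;
}

int main(void)
{
	struct model_rq rq = { RQ_IN_FLIGHT, 0, 10 };
	unsigned long now = 20;

	if (req_expired(&rq, now))
		rq_timed_out(&rq, now);

	/* A second scan at the same instant no longer fires: the deadline
	 * was pushed out while the flag covered the window. */
	printf("expired again: %s\n", req_expired(&rq, now) ? "yes" : "no");
	return 0;
}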
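
The bsg.c hunks restructure the open path so that bsg_mutex is taken once in bsg_get_device() and held across the whole lookup-or-create sequence (idr_find(), __bsg_get_device(), bsg_add_device()); the helpers no longer take the mutex themselves and instead document the requirement with lockdep_assert_held(&bsg_mutex). That closes the window in which two openers could race between lookup and insertion. Below is a rough user-space sketch of the same find-or-create-under-one-lock pattern; the device type, list, and mutex names are made up for illustration and are not the bsg code itself.

/* User-space sketch of the "find or create under one mutex" pattern that
 * bsg_get_device() adopts in the change above. All types and names are
 * illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_dev {
	int minor;
	int ref_count;
	struct demo_dev *next;
};

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct demo_dev *dev_list;	/* protected by dev_mutex */

/* Caller must hold dev_mutex, the same contract the kernel helpers now
 * express with lockdep_assert_held(&bsg_mutex). */
static struct demo_dev *__find_dev(int minor)
{
	struct demo_dev *d;

	for (d = dev_list; d; d = d->next) {
		if (d->minor == minor) {
			d->ref_count++;
			return d;
		}
	}
	return NULL;
}

/* Also called with dev_mutex held, so lookup and insertion cannot race
 * with another opener creating the same device. */
static struct demo_dev *__add_dev(int minor)
{
	struct demo_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->minor = minor;
	d->ref_count = 1;
	d->next = dev_list;
	dev_list = d;
	return d;
}

static struct demo_dev *get_dev(int minor)
{
	struct demo_dev *d;

	pthread_mutex_lock(&dev_mutex);
	d = __find_dev(minor);
	if (!d)
		d = __add_dev(minor);
	pthread_mutex_unlock(&dev_mutex);
	return d;
}

int main(void)
{
	struct demo_dev *a = get_dev(3);
	struct demo_dev *b = get_dev(3);

	printf("same device: %s, refs: %d\n",
	       a == b ? "yes" : "no", b ? b->ref_count : 0);
	return 0;
}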