author     Yu Kuai <yukuai3@huawei.com>   2021-09-16 22:18:10 +0800
committer  Jens Axboe <axboe@kernel.dk>   2021-10-18 14:50:37 -0600
commit     8663b210f8c169a49aaeed3af92471a147545477 (patch)
tree       a7590af711bddea365b331c8fabd16c7c095465a
parent     3fe1db626a56cdf259c348404f2c5429e2f065a1 (diff)
nbd: fix uaf in nbd_handle_reply()
There is a problem that nbd_handle_reply() might access a freed request:

1) At first, a normal io is submitted and completed with scheduler:

   internal_tag = blk_mq_get_tag -> get tag from sched_tags
    blk_mq_rq_ctx_init
     sched_tags->rq[internal_tag] = sched_tags->static_rq[internal_tag]
   ...
   blk_mq_get_driver_tag
    __blk_mq_get_driver_tag -> get tag from tags
    tags->rq[tag] = sched_tags->static_rq[internal_tag]

   So both tags->rq[tag] and sched_tags->rq[internal_tag] point to the
   same request, sched_tags->static_rq[internal_tag], even after the io
   is finished.

2) nbd server sends a reply with a random tag directly:

   recv_work
    nbd_handle_reply
     blk_mq_tag_to_rq(tags, tag)
      rq = tags->rq[tag]

3) if the sched_tags->static_rq is freed:

   blk_mq_sched_free_requests
    blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i)
     -> step 2) accesses rq before the rq mapping is cleared
     blk_mq_clear_rq_mapping(set, tags, hctx_idx);
     __free_pages() -> rq is freed here

4) Then, nbd continues to use the freed request in nbd_handle_reply().

Fix the problem by getting 'q_usage_counter' before blk_mq_tag_to_rq():
as long as 'q_usage_counter' is not zero, the request pool cannot be
freed, so no use-after-free is possible. (An illustrative sketch of this
guard pattern follows the diff.)

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210916141810.2325276-1-yukuai3@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  drivers/block/nbd.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 06e292f0b0ae..0d064fab6186 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -830,6 +830,7 @@ static void recv_work(struct work_struct *work)
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
+ struct request_queue *q = nbd->disk->queue;
struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
@@ -840,13 +841,28 @@ static void recv_work(struct work_struct *work)
if (nbd_read_reply(nbd, args->index, &reply))
break;

+ /*
+ * Grab .q_usage_counter so request pool won't go away, then no
+ * request use-after-free is possible during nbd_handle_reply().
+ * If queue is frozen, there won't be any inflight requests, we
+ * needn't to handle the incoming garbage message.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter)) {
+ dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
+ __func__);
+ break;
+ }
+
cmd = nbd_handle_reply(nbd, args->index, &reply);
- if (IS_ERR(cmd))
+ if (IS_ERR(cmd)) {
+ percpu_ref_put(&q->q_usage_counter);
break;
+ }

rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
+ percpu_ref_put(&q->q_usage_counter);
}
nsock = config->socks[args->index];
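
For illustration, a minimal, self-contained sketch of the guard pattern
the patch applies: take a reference on q_usage_counter before mapping a
tag to a request, and drop it only after the request has been handled.
recv_loop_sketch() and handle_one_reply() are hypothetical stand-ins for
recv_work() and the nbd_handle_reply()/completion path; the rest is the
real kernel API (percpu_ref_tryget()/percpu_ref_put()).

/*
 * Sketch only -- not part of the commit. handle_one_reply() is a
 * hypothetical helper standing in for nbd_handle_reply() plus
 * blk_mq_complete_request(); assume it returns a negative errno on a
 * fatal protocol error.
 */
#include <linux/blkdev.h>
#include <linux/percpu-refcount.h>

static int handle_one_reply(struct request_queue *q); /* hypothetical */

static void recv_loop_sketch(struct request_queue *q)
{
	for (;;) {
		/*
		 * While this reference is held, blk_mq_freeze_queue()
		 * cannot drain q_usage_counter to zero, so the request
		 * pool (sched_tags->static_rq) cannot be freed under us.
		 */
		if (!percpu_ref_tryget(&q->q_usage_counter))
			break;	/* queue frozen: nothing can be inflight */

		if (handle_one_reply(q) < 0) {
			percpu_ref_put(&q->q_usage_counter);
			break;
		}

		percpu_ref_put(&q->q_usage_counter);
	}
}

The design point mirrors the comment in the patch: a failed tryget means
the queue is frozen, so no request can be inflight, and a reply arriving
at that moment is garbage that can safely be dropped.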