author     Christoph Hellwig <hch@lst.de>    2016-03-02 18:07:11 +0100
committer  Jens Axboe <axboe@fb.com>         2016-03-03 14:42:50 -0700
commit     da35825d9a091a7a1d5824c8468168e2658333ff
tree       a79f4c7eaa2d392a6f09f93f691eeea7ba1e479e /drivers/nvme
parent     a1a0e23e49037c23ea84bc8cc146a03584d13577
nvme: set queue limits for the admin queue
Factor out a helper to set all the device specific queue limits and apply
them to the admin queue in addition to the I/O queues. Without this the
command size on the admin queue is arbitrarily low, and the missing
other limitations are just minefields waiting for victims.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/nvme')
 -rw-r--r--  drivers/nvme/host/core.c | 29
 1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 470d4f373841..cfee6ac399a3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -840,6 +840,21 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 	return ret;
 }
 
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+		struct request_queue *q)
+{
+	if (ctrl->max_hw_sectors) {
+		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+		blk_queue_max_segments(q,
+			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+	}
+	if (ctrl->stripe_size)
+		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
 /*
  * Initialize the cached copies of the Identify data and various controller
  * register in our nvme_ctrl structure. This should be called as soon as
@@ -897,6 +912,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		}
 	}
 
+	nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
 	kfree(id);
 	return 0;
 }
@@ -1147,17 +1164,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->disk = disk;
 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 
-	if (ctrl->max_hw_sectors) {
-		blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
-		blk_queue_max_segments(ns->queue,
-			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
-	}
-	if (ctrl->stripe_size)
-		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
-	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
-	blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+	nvme_set_queue_limits(ctrl, ns->queue);
 
 	disk->major = nvme_major;
 	disk->first_minor = 0;
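
A note on the max_segments arithmetic used by the new helper: the following is a minimal
user-space sketch (not kernel code; the page size and transfer size values are assumed
purely for illustration) of how the segment limit is derived from max_hw_sectors and the
controller page size.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values for illustration only. */
		unsigned int page_size = 4096;      /* assumed ctrl->page_size: 4 KiB */
		unsigned int max_hw_sectors = 256;  /* assumed: 256 * 512 B = 128 KiB */

		/* page_size >> 9 converts bytes to 512-byte sectors: 8 per 4 KiB page. */
		unsigned int sectors_per_page = page_size >> 9;

		/*
		 * Same formula as the blk_queue_max_segments() argument in the patch:
		 * one segment per page of the largest transfer, plus one in case the
		 * buffer does not start on a page boundary.
		 */
		unsigned int max_segments = (max_hw_sectors / sectors_per_page) + 1;

		printf("max_segments = %u\n", max_segments);  /* prints 33 */
		return 0;
	}

With these assumed values, 33 segments are enough to describe a worst-case 128 KiB
transfer whose buffer is not page aligned, which is why applying the same limits to the
admin queue matters as much as it does for the I/O queues.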