Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c    | 56
-rw-r--r--  drivers/nvme/host/fc.c      |  2
-rw-r--r--  drivers/nvme/host/pci.c     |  4
-rw-r--r--  drivers/nvme/host/rdma.c    |  6
-rw-r--r--  drivers/nvme/host/scsi.c    |  7
-rw-r--r--  drivers/nvme/target/loop.c  |  2
6 files changed, 52 insertions(+), 25 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 138c6fa00cd5..44a1a257e0b5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -208,18 +208,18 @@ EXPORT_SYMBOL_GPL(nvme_requeue_req);
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags, int qid)
 {
+	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
 	struct request *req;
 
 	if (qid == NVME_QID_ANY) {
-		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+		req = blk_mq_alloc_request(q, op, flags);
 	} else {
-		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+		req = blk_mq_alloc_request_hctx(q, op, flags,
 				qid ? qid - 1 : 0);
 	}
 	if (IS_ERR(req))
 		return req;
 
-	req->cmd_type = REQ_TYPE_DRV_PRIV;
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	nvme_req(req)->cmd = cmd;
 
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
 static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd)
 {
+	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
 	struct nvme_dsm_range *range;
-	unsigned int nr_bytes = blk_rq_bytes(req);
+	struct bio *bio;
 
-	range = kmalloc(sizeof(*range), GFP_ATOMIC);
+	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
 	if (!range)
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
-	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	__rq_for_each_bio(bio, req) {
+		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+		range[n].cattr = cpu_to_le32(0);
+		range[n].nlb = cpu_to_le32(nlb);
+		range[n].slba = cpu_to_le64(slba);
+		n++;
+	}
+
+	if (WARN_ON_ONCE(n != segments)) {
+		kfree(range);
+		return BLK_MQ_RQ_QUEUE_ERROR;
+	}
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
 	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.nr = 0;
+	cmnd->dsm.nr = segments - 1;
 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
 	req->special_vec.bv_page = virt_to_page(range);
 	req->special_vec.bv_offset = offset_in_page(range);
-	req->special_vec.bv_len = sizeof(*range);
+	req->special_vec.bv_len = sizeof(*range) * segments;
 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
 	return BLK_MQ_RQ_QUEUE_OK;
@@ -309,17 +321,27 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 {
 	int ret = BLK_MQ_RQ_QUEUE_OK;
 
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+	switch (req_op(req)) {
+	case REQ_OP_DRV_IN:
+	case REQ_OP_DRV_OUT:
 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
-	else if (req_op(req) == REQ_OP_FLUSH)
+		break;
+	case REQ_OP_FLUSH:
 		nvme_setup_flush(ns, cmd);
-	else if (req_op(req) == REQ_OP_DISCARD)
+		break;
+	case REQ_OP_DISCARD:
 		ret = nvme_setup_discard(ns, req, cmd);
-	else
+		break;
+	case REQ_OP_READ:
+	case REQ_OP_WRITE:
 		nvme_setup_rw(ns, req, cmd);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return BLK_MQ_RQ_QUEUE_ERROR;
+	}
 
 	cmd->common.command_id = req->tag;
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -868,6 +890,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	u32 logical_block_size = queue_logical_block_size(ns->queue);
 
+	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+			NVME_DSM_MAX_RANGES);
+
 	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
 		ns->queue->limits.discard_zeroes_data = 1;
 	else
@@ -876,6 +901,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	ns->queue->limits.discard_alignment = logical_block_size;
 	ns->queue->limits.discard_granularity = logical_block_size;
 	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
+	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
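With the ranged-discard support above, a merged discard request is translated into one DSM command: one struct nvme_dsm_range per bio, plus the zero-based range count in dsm.nr. A minimal illustrative sketch of that encoding, assuming kernel context with <linux/nvme.h>; the helper name and LBA values are made up for illustration and are not part of the patch:

/*
 * Illustration only: encode a two-bio discard (LBAs 0..7 and 512..527)
 * the way nvme_setup_discard() now does, one range per bio.
 */
static void example_fill_dsm(struct nvme_dsm_range *range,
		struct nvme_dsm_cmd *dsm)
{
	range[0].cattr = cpu_to_le32(0);	/* no context attributes */
	range[0].nlb = cpu_to_le32(8);		/* 8 blocks from LBA 0 */
	range[0].slba = cpu_to_le64(0);

	range[1].cattr = cpu_to_le32(0);
	range[1].nlb = cpu_to_le32(16);		/* 16 blocks from LBA 512 */
	range[1].slba = cpu_to_le64(512);

	dsm->nr = cpu_to_le32(2 - 1);		/* DSM range count is 0's based */
}

The array then rides in req->special_vec with bv_len = segments * sizeof(struct nvme_dsm_range), which is why nvme_config_discard() asserts at build time that NVME_DSM_MAX_RANGES ranges fit in a single page.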
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e65041c640cb..fb51a8de9b29 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1937,7 +1937,7 @@ nvme_fc_complete_rq(struct request *rq)
 		return;
 	}
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+	if (blk_rq_is_passthrough(rq))
 		error = rq->errors;
 	else
 		error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d67d0d0a3bc0..ddc51adb594d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -589,7 +589,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8) &&
-		    req->cmd_type != REQ_TYPE_DRV_PRIV) {
+		    !blk_rq_is_passthrough(req)) {
 			blk_mq_end_request(req, -EFAULT);
 			return BLK_MQ_RQ_QUEUE_OK;
 		}
@@ -646,7 +646,7 @@ static void nvme_complete_rq(struct request *req)
 		return;
 	}
 
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+	if (blk_rq_is_passthrough(req))
 		error = req->errors;
 	else
 		error = nvme_error_status(req->errors);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 557f29b1f1bb..a75e95d42b3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1423,7 +1423,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
 	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
 		struct nvme_command *cmd = nvme_req(rq)->cmd;
 
-		if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+		if (!blk_rq_is_passthrough(rq) ||
 		    cmd->common.opcode != nvme_fabrics_command ||
 		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
 			return false;
@@ -1471,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	ib_dma_sync_single_for_device(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+	if (req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
@@ -1522,7 +1522,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 		return;
 	}
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+	if (blk_rq_is_passthrough(rq))
 		error = rq->errors;
 	else
 		error = nvme_error_status(rq->errors);
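The completion paths in fc.c, pci.c and rdma.c (and in loop.c below) stop looking at rq->cmd_type and ask blk_rq_is_passthrough() instead. The real helper lives in <linux/blkdev.h>; the sketch below is only a hedged approximation of what the test amounts to after the op-space conversion, assuming passthrough requests are exactly those carrying driver-private or SCSI ops:

/*
 * Sketch, not the kernel's definition: a passthrough request carries a
 * driver-private (or SCSI) command rather than filesystem data, so the
 * completion paths take rq->errors verbatim instead of translating it
 * through nvme_error_status().
 */
static inline bool rq_is_passthrough_sketch(struct request *rq)
{
	return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT ||
	       req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
}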
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index a5c09e703bd8..f49ae2758bb7 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -43,6 +43,7 @@
 #include <asm/unaligned.h>
 #include <scsi/sg.h>
 #include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
 
 #include "nvme.h"
 
@@ -2347,12 +2348,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
 {
-	u8 cmd[BLK_MAX_CDB];
+	u8 cmd[16];
 	int retcode;
 	unsigned int opcode;
 
 	if (hdr->cmdp == NULL)
 		return -EMSGSIZE;
+	if (hdr->cmd_len > sizeof(cmd))
+		return -EINVAL;
 	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
 
@@ -2451,8 +2454,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
 		return -EFAULT;
 	if (hdr.interface_id != 'S')
 		return -EINVAL;
-	if (hdr.cmd_len > BLK_MAX_CDB)
-		return -EINVAL;
 
 	/*
 	 * A positive return code means a NVMe status, which has been
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa70071ae5..f3862e38f574 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req)
 		return;
 	}
 
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+	if (blk_rq_is_passthrough(req))
 		error = req->errors;
 	else
 		error = nvme_error_status(req->errors);
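The scsi.c hunks above move the CDB bound into nvme_scsi_translate() itself: the on-stack buffer shrinks from BLK_MAX_CDB to 16 bytes (enough for every CDB the translation layer implements), and cmd_len is validated before the copy_from_user(). A hedged userspace sketch of an SG_IO call that stays within that limit; the device path and transfer sizes are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';		/* anything else gets -EINVAL */
	hdr.cmd_len = sizeof(cdb);	/* above 16 now fails in translate */
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	return 0;
}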