author    | Christoph Hellwig <hch@lst.de> | 2021-07-29 08:48:45 +0200
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2021-07-30 22:22:36 -0400
commit    | 75ca56409e5b35aa6ceef94462f39ef4f533fc41 (patch)
tree      | bb531f9b0f990e9bb763ae66577519f04ab733bf /block/bsg.c
parent    | 1e61c1a804d2a2a3c46add01cac3a6e9eca01080 (diff)
scsi: bsg: Move the whole request execution into the SCSI/transport handlers
Reduce the number of indirect calls by making the handler responsible for
the entire execution of the request.
Link: https://lore.kernel.org/r/20210729064845.1044147-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
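
For context, this patch collapses the old bsg_ops callbacks (check_proto, fill_hdr, complete_rq, free_rq) into a single sg_io_fn handler. The sketch below shows the rough shape of such a handler; the parameter list is inferred from the call site in the diff (`bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr))`), and the function name and body are hypothetical, not part of this patch.

```c
/*
 * Hypothetical transport handler under the new model. The real
 * bsg_sg_io_fn typedef lives in <linux/bsg.h> and is not shown in
 * this diff; the parameters below are inferred from the call site.
 */
static int example_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
				fmode_t mode, unsigned int timeout)
{
	/*
	 * The handler now owns the whole request: validate the protocol in
	 * *hdr, allocate and fill the request, map the user buffers, execute
	 * it with the timeout precomputed by bsg_timeout(), write the result
	 * back into *hdr, and free everything before returning.
	 */
	return -EOPNOTSUPP;	/* placeholder only */
}
```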
Diffstat (limited to 'block/bsg.c')
-rw-r--r-- | block/bsg.c | 66
1 file changed, 15 insertions, 51 deletions
```diff
diff --git a/block/bsg.c b/block/bsg.c
index 3ba74eec4ba2..351095193788 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -22,12 +22,12 @@
 
 struct bsg_device {
 	struct request_queue *queue;
-	const struct bsg_ops *ops;
 	struct device device;
 	struct cdev cdev;
 	int max_queue;
 	unsigned int timeout;
 	unsigned int reserved_size;
+	bsg_sg_io_fn *sg_io_fn;
 };
 
 static inline struct bsg_device *to_bsg_device(struct inode *inode)
@@ -42,63 +42,28 @@ static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-#define uptr64(val) ((void __user *)(uintptr_t)(val))
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
+{
+	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	if (hdr->timeout)
+		timeout = msecs_to_jiffies(hdr->timeout);
+	else if (bd->timeout)
+		timeout = bd->timeout;
+
+	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
+}
 
 static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
-	struct request *rq;
-	struct bio *bio;
 	struct sg_io_v4 hdr;
 	int ret;
 
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
-
 	if (hdr.guard != 'Q')
 		return -EINVAL;
-	ret = bd->ops->check_proto(&hdr);
-	if (ret)
-		return ret;
-
-	rq = blk_get_request(bd->queue, hdr.dout_xfer_len ?
-			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	ret = bd->ops->fill_hdr(rq, &hdr, mode);
-	if (ret) {
-		blk_put_request(rq);
-		return ret;
-	}
-
-	rq->timeout = msecs_to_jiffies(hdr.timeout);
-	if (!rq->timeout)
-		rq->timeout = bd->timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-		rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-	if (hdr.dout_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.dout_xferp),
-				hdr.dout_xfer_len, GFP_KERNEL);
-	} else if (hdr.din_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.din_xferp),
-				hdr.din_xfer_len, GFP_KERNEL);
-	}
-
-	if (ret)
-		goto out_free_rq;
-
-	bio = rq->bio;
-
-	blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-	ret = bd->ops->complete_rq(rq, &hdr);
-	blk_rq_unmap_user(bio);
-
-out_free_rq:
-	bd->ops->free_rq(rq);
-	blk_put_request(rq);
+	ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
 		return -EFAULT;
 	return ret;
@@ -211,8 +176,7 @@ void bsg_unregister_queue(struct bsg_device *bd)
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
 struct bsg_device *bsg_register_queue(struct request_queue *q,
-		struct device *parent, const char *name,
-		const struct bsg_ops *ops)
+		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
 {
 	struct bsg_device *bd;
 	int ret;
@@ -223,7 +187,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 	bd->max_queue = BSG_DEFAULT_CMDS;
 	bd->reserved_size = INT_MAX;
 	bd->queue = q;
-	bd->ops = ops;
+	bd->sg_io_fn = sg_io_fn;
 
 	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
 	if (ret < 0) {
```
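
With bsg_ops gone, a caller registers its handler directly. A minimal usage sketch, assuming a caller that already has a request_queue, a parent device, and a name, and assuming the usual ERR_PTR convention for the return value; the identifiers are illustrative, not taken from this patch.

```c
/* Illustrative registration against the new bsg_register_queue() signature. */
struct bsg_device *bd;

bd = bsg_register_queue(q, parent, name, example_bsg_sg_io_fn);
if (IS_ERR(bd))
	return PTR_ERR(bd);
```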