| author | Christoph Hellwig <hch@lst.de> | 2022-01-24 10:11:04 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-02-02 07:49:59 -0700 |
| commit | b77c88c2100ce6a5ec8126c13599b5a7f6663e32 (patch) | |
| tree | d640ab19bd3e2b3cd8ab50c6a86ef5b2e6bf474f /block | |
| parent | 609be1066731fea86436f5f91022f82e592ab456 (diff) | |
block: pass a block_device and opf to bio_alloc_kiocb
Pass the block_device and operation that we plan to use this bio for to
bio_alloc_kiocb to optimize the assignment.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
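For context, here is a minimal sketch (not part of the patch) of how a direct-I/O caller in the style of __blkdev_direct_IO() uses the changed interface: the target block_device and the operation/flags are decided once and handed to bio_alloc_kiocb() at allocation time, rather than being assigned to the bio afterwards. The helper name example_alloc_dio_bio and the inlined write flags (which only approximate the static dio_bio_write_op() helper in block/fops.c) are illustrative assumptions, not code from this commit.

```c
/*
 * Illustrative sketch only, not from this commit: allocate a direct-I/O bio
 * with the post-patch bio_alloc_kiocb() signature.  Because bdev and opf are
 * passed up front, the caller no longer needs bio_set_dev() or a separate
 * bio->bi_opf assignment on the allocation path.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/uio.h>

static struct bio *example_alloc_dio_bio(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned short nr_pages, struct bio_set *pool)
{
	/*
	 * Pick the operation once; the write flags here merely approximate
	 * what dio_bio_write_op() derives from the kiocb.
	 */
	unsigned int opf = iov_iter_rw(iter) == READ ?
			REQ_OP_READ : (REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);

	return bio_alloc_kiocb(iocb, bdev, nr_pages, opf, pool);
}
```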
Diffstat (limited to 'block')
| -rw-r--r-- | block/bio.c | 12 |
| -rw-r--r-- | block/fops.c | 17 |
2 files changed, 16 insertions, 13 deletions
diff --git a/block/bio.c b/block/bio.c
index 9afc0c2aca6e..6c3efb0fd12b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1731,7 +1731,9 @@ EXPORT_SYMBOL(bioset_init_from_src);
 /**
  * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
  * @kiocb: kiocb describing the IO
+ * @bdev: block device to allocate the bio for (can be %NULL)
  * @nr_vecs: number of iovecs to pre-allocate
+ * @opf: operation and flags for bio
  * @bs: bio_set to allocate from
  *
  * Description:
@@ -1742,14 +1744,14 @@ EXPORT_SYMBOL(bioset_init_from_src);
  *    MUST be done from process context, not hard/soft IRQ.
  *
  */
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
-		struct bio_set *bs)
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
+		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
 {
 	struct bio_alloc_cache *cache;
 	struct bio *bio;
 
 	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
-		return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
+		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 
 	cache = per_cpu_ptr(bs->cache, get_cpu());
 	if (cache->free_list) {
@@ -1758,12 +1760,14 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
 		cache->nr--;
 		put_cpu();
 		bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
+		bio_set_dev(bio, bdev);
+		bio->bi_opf = opf;
 		bio->bi_pool = bs;
 		bio_set_flag(bio, BIO_PERCPU_CACHE);
 		return bio;
 	}
 	put_cpu();
-	bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
+	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 	bio_set_flag(bio, BIO_PERCPU_CACHE);
 	return bio;
 }
diff --git a/block/fops.c b/block/fops.c
index 26bf15c770d2..3a62b8b91275 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -190,6 +190,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	struct blkdev_dio *dio;
 	struct bio *bio;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
 	loff_t pos = iocb->ki_pos;
 	int ret = 0;
 
@@ -197,7 +198,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 			(bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	atomic_set(&dio->ref, 1);
@@ -223,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	blk_start_plug(&plug);
 
 	for (;;) {
-		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 		bio->bi_write_hint = iocb->ki_hint;
 		bio->bi_private = dio;
@@ -238,11 +238,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		}
 
 		if (is_read) {
-			bio->bi_opf = REQ_OP_READ;
 			if (dio->flags & DIO_SHOULD_DIRTY)
 				bio_set_pages_dirty(bio);
 		} else {
-			bio->bi_opf = dio_bio_write_op(iocb);
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 		if (iocb->ki_flags & IOCB_NOWAIT)
@@ -259,6 +257,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		atomic_inc(&dio->ref);
 		submit_bio(bio);
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
+		bio_set_dev(bio, bdev);
+		bio->bi_opf = opf;
 	}
 
 	blk_finish_plug(&plug);
@@ -311,6 +311,8 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 					unsigned int nr_pages)
 {
 	struct block_device *bdev = iocb->ki_filp->private_data;
+	bool is_read = iov_iter_rw(iter) == READ;
+	unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
 	struct blkdev_dio *dio;
 	struct bio *bio;
 	loff_t pos = iocb->ki_pos;
@@ -320,11 +322,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 			(bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->flags = 0;
 	dio->iocb = iocb;
-	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_end_io = blkdev_bio_end_io_async;
@@ -347,14 +348,12 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	}
 	dio->size = bio->bi_iter.bi_size;
 
-	if (iov_iter_rw(iter) == READ) {
-		bio->bi_opf = REQ_OP_READ;
+	if (is_read) {
 		if (iter_is_iovec(iter)) {
 			dio->flags |= DIO_SHOULD_DIRTY;
 			bio_set_pages_dirty(bio);
 		}
 	} else {
-		bio->bi_opf = dio_bio_write_op(iocb);
 		task_io_account_write(bio->bi_iter.bi_size);
 	}
 