From 1652b0bafeaa8281ca9a805d81e13d7647bd2f44 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 31 May 2024 09:48:08 +0200 Subject: block: remove unused queue limits API Remove all APIs that are unused now that sd and sr have been converted to the atomic queue limits API. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Reviewed-by: John Garry Reviewed-by: Nitesh Shetty Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240531074837.1648501-14-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 24c36929920b..ad995e7a7698 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -332,8 +332,6 @@ struct queue_limits { typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); -void disk_set_zoned(struct gendisk *disk); - #define BLK_ALL_ZONES ((unsigned int)-1) int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); @@ -638,18 +636,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) return sector >> ilog2(disk->queue->limits.chunk_sectors); } -static inline void disk_set_max_open_zones(struct gendisk *disk, - unsigned int max_open_zones) -{ - disk->queue->limits.max_open_zones = max_open_zones; -} - -static inline void disk_set_max_active_zones(struct gendisk *disk, - unsigned int max_active_zones) -{ - disk->queue->limits.max_active_zones = max_active_zones; -} - static inline unsigned int bdev_max_open_zones(struct block_device *bdev) { return bdev->bd_disk->queue->limits.max_open_zones; @@ -929,24 +915,14 @@ static inline void queue_limits_cancel_update(struct request_queue *q) /* * Access functions for manipulating queue properties */ -extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); -extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); -extern void blk_queue_max_zone_append_sectors(struct request_queue *q, - unsigned int max_zone_append_sectors); -extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); -void blk_queue_zone_write_granularity(struct request_queue *q, - unsigned int size); -extern void blk_queue_alignment_offset(struct request_queue *q, - unsigned int alignment); void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); -extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_stacking_limits(struct queue_limits *lim); -- cgit v1.2.3-58-ga151 From 73e3715ed14844067c5c598e72777641004a7f60 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 31 May 2024 09:48:09 +0200 Subject: block: add special APIs for run-time disabling of discard and friends A few drivers optimistically try to support discard, write zeroes and secure erase and disable the features from the I/O completion handler if the hardware can't 
support them. This disable can't be done using the atomic queue limits API because the I/O completion handlers can't take sleeping locks or freeze the queue. Keep the existing clearing of the relevant field to zero, but replace the old blk_queue_max_* APIs with new disable APIs that force the value to 0. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Reviewed-by: Damien Le Moal Reviewed-by: John Garry Reviewed-by: Nitesh Shetty Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240531074837.1648501-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/um/drivers/ubd_kern.c | 4 ++-- block/blk-settings.c | 41 ----------------------------------------- drivers/block/xen-blkfront.c | 4 ++-- drivers/scsi/sd.c | 4 ++-- include/linux/blkdev.h | 28 ++++++++++++++++++++++------ 5 files changed, 28 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 093c87879d08..cdcb75a68989 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -451,9 +451,9 @@ static void ubd_end_request(struct io_thread_req *io_req) { if (io_req->error == BLK_STS_NOTSUPP) { if (req_op(io_req->req) == REQ_OP_DISCARD) - blk_queue_max_discard_sectors(io_req->req->q, 0); + blk_queue_disable_discard(io_req->req->q); else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES) - blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); + blk_queue_disable_write_zeroes(io_req->req->q); } blk_mq_end_request(io_req->req, io_req->error); kfree(io_req); diff --git a/block/blk-settings.c b/block/blk-settings.c index 0b038729608f..996f247fc98e 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -293,47 +293,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim) } EXPORT_SYMBOL_GPL(queue_limits_set); -/** - * blk_queue_max_discard_sectors - set max sectors for a single discard - * @q: the request queue for the device - * @max_discard_sectors: maximum number of sectors to discard - **/ -void blk_queue_max_discard_sectors(struct request_queue *q, - unsigned int max_discard_sectors) -{ - struct queue_limits *lim = &q->limits; - - lim->max_hw_discard_sectors = max_discard_sectors; - lim->max_discard_sectors = - min(max_discard_sectors, lim->max_user_discard_sectors); -} -EXPORT_SYMBOL(blk_queue_max_discard_sectors); - -/** - * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase - * @q: the request queue for the device - * @max_sectors: maximum number of sectors to secure_erase - **/ -void blk_queue_max_secure_erase_sectors(struct request_queue *q, - unsigned int max_sectors) -{ - q->limits.max_secure_erase_sectors = max_sectors; -} -EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors); - -/** - * blk_queue_max_write_zeroes_sectors - set max sectors for a single - * write zeroes - * @q: the request queue for the device - * @max_write_zeroes_sectors: maximum number of sectors to write per command - **/ -void blk_queue_max_write_zeroes_sectors(struct request_queue *q, - unsigned int max_write_zeroes_sectors) -{ - q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; -} -EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors); - void disk_update_readahead(struct gendisk *disk) { blk_apply_bdi_limits(disk->bdi, &disk->queue->limits); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fd7c0ff2139c..9b4ec3e4908c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1605,8 +1605,8 @@ static irqreturn_t blkif_interrupt(int irq, void 
*dev_id)
 			blkif_req(req)->error = BLK_STS_NOTSUPP;
 			info->feature_discard = 0;
 			info->feature_secdiscard = 0;
-			blk_queue_max_discard_sectors(rq, 0);
-			blk_queue_max_secure_erase_sectors(rq, 0);
+			blk_queue_disable_discard(rq);
+			blk_queue_disable_secure_erase(rq);
 		}
 		break;
 	case BLKIF_OP_FLUSH_DISKCACHE:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 049071b56819..d957e29b17a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -837,7 +837,7 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 static void sd_disable_discard(struct scsi_disk *sdkp)
 {
 	sdkp->provisioning_mode = SD_LBP_DISABLE;
-	blk_queue_max_discard_sectors(sdkp->disk->queue, 0);
+	blk_queue_disable_discard(sdkp->disk->queue);
 }
 
 static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
@@ -1019,7 +1019,7 @@ static void sd_disable_write_same(struct scsi_disk *sdkp)
 {
 	sdkp->device->no_write_same = 1;
 	sdkp->max_ws_blocks = 0;
-	blk_queue_max_write_zeroes_sectors(sdkp->disk->queue, 0);
+	blk_queue_disable_write_zeroes(sdkp->disk->queue);
 }
 
 static void sd_config_write_same(struct scsi_disk *sdkp,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ad995e7a7698..ac8e0cb2353a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -912,15 +912,31 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
 	mutex_unlock(&q->limits_lock);
 }
 
+/*
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
+ * completion handler when the device returned an indicator that the respective
+ * feature is not actually supported.  They are racy and the driver needs to
+ * cope with that.  Try to avoid this scheme if you can.
+ */
+static inline void blk_queue_disable_discard(struct request_queue *q)
+{
+	q->limits.max_discard_sectors = 0;
+}
+
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
+{
+	q->limits.max_secure_erase_sectors = 0;
+}
+
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
+{
+	q->limits.max_write_zeroes_sectors = 0;
+}
+
 /*
  * Access functions for manipulating queue properties
  */
-void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors);
-extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors);
-extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors);
 void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
--
cgit v1.2.3-58-ga151

From e9f5f44ad3725335d9c559c3c22cd3726152a7b1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 13 Jun 2024 10:48:15 +0200
Subject: block: remove the blk_integrity_profile structure

Block layer integrity configuration is a bit complex right now, as it
indirects through operation vectors for a simple two-dimensional
configuration:

 a) the checksum type of none, ip checksum, crc, crc64

 b) the presence or absence of a reference tag

Remove the integrity profile, and instead add a separate csum_type flag
which replaces the existing ip-checksum field and a new flag that
indicates the presence of the reference tag.

This removes up to two layers of indirect calls, removes the need to
offload the no-op verification of non-PI metadata to a workqueue and
generally simplifies the code.
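(Illustration only, not part of the patch: with this change, a driver that
used to register the t10_pi_type1_crc profile describes the same
configuration along those two dimensions directly, much as the sd_dif.c
and nvme hunks further below do; "disk" here stands for the driver's
gendisk.)

	struct blk_integrity bi = { };

	/* dimension 1: the guard tag checksum type */
	bi.csum_type = BLK_INTEGRITY_CSUM_CRC;
	/* dimension 2: a reference tag is present, i.e. what used to be Type 1 */
	bi.flags |= BLK_INTEGRITY_REF_TAG;
	bi.tuple_size = sizeof(struct t10_pi_tuple);

	blk_integrity_register(disk, &bi);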
The downside is that block/t10-pi.c now has to be built into the kernel when CONFIG_BLK_DEV_INTEGRITY is supported. Given that both nvme and SCSI require t10-pi.ko, it is loaded for all usual configurations that enabled CONFIG_BLK_DEV_INTEGRITY already, though. Signed-off-by: Christoph Hellwig Reviewed-by: Kanchan Joshi Reviewed-by: Hannes Reinecke Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240613084839.1044015-6-hch@lst.de Signed-off-by: Jens Axboe --- block/Kconfig | 8 +- block/Makefile | 3 +- block/bio-integrity.c | 32 ++--- block/blk-integrity.c | 66 +++++----- block/blk-mq.c | 13 +- block/blk.h | 8 ++ block/t10-pi.c | 241 +++++++++++++----------------------- drivers/md/dm-crypt.c | 2 +- drivers/nvme/host/Kconfig | 1 - drivers/nvme/host/core.c | 17 ++- drivers/nvme/target/Kconfig | 1 - drivers/nvme/target/io-cmd-bdev.c | 16 +-- drivers/scsi/Kconfig | 1 - drivers/scsi/sd_dif.c | 19 ++- drivers/target/target_core_iblock.c | 49 ++++---- include/linux/blk-integrity.h | 35 ++---- include/linux/blkdev.h | 9 +- include/linux/t10-pi.h | 8 -- 18 files changed, 215 insertions(+), 314 deletions(-) (limited to 'include') diff --git a/block/Kconfig b/block/Kconfig index dc12af58dbae..5b623b876d3b 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -62,6 +62,8 @@ config BLK_DEV_BSGLIB config BLK_DEV_INTEGRITY bool "Block layer data integrity support" + select CRC_T10DIF + select CRC64_ROCKSOFT help Some storage devices allow extra information to be stored/retrieved to help protect the data. The block layer @@ -72,12 +74,6 @@ config BLK_DEV_INTEGRITY T10/SCSI Data Integrity Field or the T13/ATA External Path Protection. If in doubt, say N. -config BLK_DEV_INTEGRITY_T10 - tristate - depends on BLK_DEV_INTEGRITY - select CRC_T10DIF - select CRC64_ROCKSOFT - config BLK_DEV_WRITE_MOUNTED bool "Allow writing to mounted block devices" default y diff --git a/block/Makefile b/block/Makefile index 168150b9c510..ddfd21c1a9ff 100644 --- a/block/Makefile +++ b/block/Makefile @@ -26,8 +26,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o -obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o -obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o +obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o diff --git a/block/bio-integrity.c b/block/bio-integrity.c index af7f71d16114..31dbc2853f92 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -378,10 +378,9 @@ EXPORT_SYMBOL_GPL(bio_integrity_map_user); * bio_integrity_process - Process integrity metadata for a bio * @bio: bio to generate/verify integrity metadata for * @proc_iter: iterator to process - * @proc_fn: Pointer to the relevant processing function */ static blk_status_t bio_integrity_process(struct bio *bio, - struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn) + struct bvec_iter *proc_iter) { struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); struct blk_integrity_iter iter; @@ -392,17 +391,18 @@ static blk_status_t bio_integrity_process(struct bio *bio, iter.disk_name = bio->bi_bdev->bd_disk->disk_name; iter.interval = 1 << bi->interval_exp; - iter.tuple_size = bi->tuple_size; iter.seed = proc_iter->bi_sector; iter.prot_buf = bvec_virt(bip->bip_vec); - iter.pi_offset = bi->pi_offset; __bio_for_each_segment(bv, bio, bviter, *proc_iter) { void *kaddr 
= bvec_kmap_local(&bv); iter.data_buf = kaddr; iter.data_size = bv.bv_len; - ret = proc_fn(&iter); + if (bio_data_dir(bio) == WRITE) + blk_integrity_generate(&iter, bi); + else + ret = blk_integrity_verify(&iter, bi); kunmap_local(kaddr); if (ret) @@ -448,12 +448,10 @@ bool bio_integrity_prep(struct bio *bio) return true; if (bio_data_dir(bio) == READ) { - if (!bi->profile->verify_fn || - !(bi->flags & BLK_INTEGRITY_VERIFY)) + if (!(bi->flags & BLK_INTEGRITY_VERIFY)) return true; } else { - if (!bi->profile->generate_fn || - !(bi->flags & BLK_INTEGRITY_GENERATE)) + if (!(bi->flags & BLK_INTEGRITY_GENERATE)) return true; /* @@ -488,7 +486,7 @@ bool bio_integrity_prep(struct bio *bio) bip->bip_flags |= BIP_BLOCK_INTEGRITY; bip_set_seed(bip, bio->bi_iter.bi_sector); - if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM) + if (bi->csum_type == BLK_INTEGRITY_CSUM_IP) bip->bip_flags |= BIP_IP_CHECKSUM; /* Map it */ @@ -511,12 +509,10 @@ bool bio_integrity_prep(struct bio *bio) } /* Auto-generate integrity metadata if this is a write */ - if (bio_data_dir(bio) == WRITE) { - bio_integrity_process(bio, &bio->bi_iter, - bi->profile->generate_fn); - } else { + if (bio_data_dir(bio) == WRITE) + bio_integrity_process(bio, &bio->bi_iter); + else bip->bio_iter = bio->bi_iter; - } return true; err_end_io: @@ -539,15 +535,13 @@ static void bio_integrity_verify_fn(struct work_struct *work) struct bio_integrity_payload *bip = container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; - struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); /* * At the moment verify is called bio's iterator was advanced * during split and completion, we need to rewind iterator to * it's original position. */ - bio->bi_status = bio_integrity_process(bio, &bip->bio_iter, - bi->profile->verify_fn); + bio->bi_status = bio_integrity_process(bio, &bip->bio_iter); bio_integrity_free(bio); bio_endio(bio); } @@ -569,7 +563,7 @@ bool __bio_integrity_endio(struct bio *bio) struct bio_integrity_payload *bip = bio_integrity(bio); if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && - (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) { + (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->csum_type) { INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bip->bip_work); return false; diff --git a/block/blk-integrity.c b/block/blk-integrity.c index ccbeb6dfa87a..17d37badfbb8 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -123,10 +123,10 @@ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) struct blk_integrity *b1 = &gd1->queue->integrity; struct blk_integrity *b2 = &gd2->queue->integrity; - if (!b1->profile && !b2->profile) + if (!b1->tuple_size && !b2->tuple_size) return 0; - if (!b1->profile || !b2->profile) + if (!b1->tuple_size || !b2->tuple_size) return -1; if (b1->interval_exp != b2->interval_exp) { @@ -150,10 +150,13 @@ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) return -1; } - if (b1->profile != b2->profile) { + if (b1->csum_type != b2->csum_type || + (b1->flags & BLK_INTEGRITY_REF_TAG) != + (b2->flags & BLK_INTEGRITY_REF_TAG)) { pr_err("%s: %s/%s type %s != %s\n", __func__, gd1->disk_name, gd2->disk_name, - b1->profile->name, b2->profile->name); + blk_integrity_profile_name(b1), + blk_integrity_profile_name(b2)); return -1; } @@ -217,14 +220,37 @@ static inline struct blk_integrity *dev_to_bi(struct device *dev) return &dev_to_disk(dev)->queue->integrity; } +const char 
*blk_integrity_profile_name(struct blk_integrity *bi) +{ + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_IP: + if (bi->flags & BLK_INTEGRITY_REF_TAG) + return "T10-DIF-TYPE1-IP"; + return "T10-DIF-TYPE3-IP"; + case BLK_INTEGRITY_CSUM_CRC: + if (bi->flags & BLK_INTEGRITY_REF_TAG) + return "T10-DIF-TYPE1-CRC"; + return "T10-DIF-TYPE3-CRC"; + case BLK_INTEGRITY_CSUM_CRC64: + if (bi->flags & BLK_INTEGRITY_REF_TAG) + return "EXT-DIF-TYPE1-CRC64"; + return "EXT-DIF-TYPE3-CRC64"; + case BLK_INTEGRITY_CSUM_NONE: + break; + } + + return "nop"; +} +EXPORT_SYMBOL_GPL(blk_integrity_profile_name); + static ssize_t format_show(struct device *dev, struct device_attribute *attr, char *page) { struct blk_integrity *bi = dev_to_bi(dev); - if (bi->profile && bi->profile->name) - return sysfs_emit(page, "%s\n", bi->profile->name); - return sysfs_emit(page, "none\n"); + if (!bi->tuple_size) + return sysfs_emit(page, "none\n"); + return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi)); } static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr, @@ -326,28 +352,6 @@ const struct attribute_group blk_integrity_attr_group = { .attrs = integrity_attrs, }; -static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter) -{ - return BLK_STS_OK; -} - -static void blk_integrity_nop_prepare(struct request *rq) -{ -} - -static void blk_integrity_nop_complete(struct request *rq, - unsigned int nr_bytes) -{ -} - -static const struct blk_integrity_profile nop_profile = { - .name = "nop", - .generate_fn = blk_integrity_nop_fn, - .verify_fn = blk_integrity_nop_fn, - .prepare_fn = blk_integrity_nop_prepare, - .complete_fn = blk_integrity_nop_complete, -}; - /** * blk_integrity_register - Register a gendisk as being integrity-capable * @disk: struct gendisk pointer to make integrity-aware @@ -363,11 +367,11 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template { struct blk_integrity *bi = &disk->queue->integrity; + bi->csum_type = template->csum_type; bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE | template->flags; bi->interval_exp = template->interval_exp ? : ilog2(queue_logical_block_size(disk->queue)); - bi->profile = template->profile ? 
template->profile : &nop_profile; bi->tuple_size = template->tuple_size; bi->tag_size = template->tag_size; bi->pi_offset = template->pi_offset; @@ -394,7 +398,7 @@ void blk_integrity_unregister(struct gendisk *disk) { struct blk_integrity *bi = &disk->queue->integrity; - if (!bi->profile) + if (!bi->tuple_size) return; /* ensure all bios are off the integrity workqueue */ diff --git a/block/blk-mq.c b/block/blk-mq.c index 3b4df8e5ac9e..0d4cd39c3d25 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -804,10 +804,8 @@ static void blk_complete_request(struct request *req) if (!bio) return; -#ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ) - req->q->integrity.profile->complete_fn(req, total_bytes); -#endif + blk_integrity_complete(req, total_bytes); /* * Upper layers may call blk_crypto_evict_key() anytime after the last @@ -875,11 +873,9 @@ bool blk_update_request(struct request *req, blk_status_t error, if (!req->bio) return false; -#ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && error == BLK_STS_OK) - req->q->integrity.profile->complete_fn(req, nr_bytes); -#endif + blk_integrity_complete(req, nr_bytes); /* * Upper layers may call blk_crypto_evict_key() anytime after the last @@ -1264,10 +1260,9 @@ void blk_mq_start_request(struct request *rq) WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); rq->mq_hctx->tags->rqs[rq->tag] = rq; -#ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) - q->integrity.profile->prepare_fn(rq); -#endif + blk_integrity_prepare(rq); + if (rq->bio && rq->bio->bi_opf & REQ_POLLED) WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num); } diff --git a/block/blk.h b/block/blk.h index 189bc25beb50..79e8d5d4fe0c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -9,6 +9,7 @@ #include #include "blk-crypto-internal.h" +struct blk_integrity_iter; struct elevator_type; /* Max future timer expiry for timeouts */ @@ -673,4 +674,11 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops, struct file *bdev_file); int bdev_permission(dev_t dev, blk_mode_t mode, void *holder); +void blk_integrity_generate(struct blk_integrity_iter *iter, + struct blk_integrity *bi); +blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter, + struct blk_integrity *bi); +void blk_integrity_prepare(struct request *rq); +void blk_integrity_complete(struct request *rq, unsigned int nr_bytes); + #endif /* BLK_INTERNAL_H */ diff --git a/block/t10-pi.c b/block/t10-pi.c index f4cc91156da1..dadecf621497 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -11,17 +11,14 @@ #include #include #include +#include "blk.h" -typedef __be16 (csum_fn) (__be16, void *, unsigned int); - -static __be16 t10_pi_crc_fn(__be16 crc, void *data, unsigned int len) -{ - return cpu_to_be16(crc_t10dif_update(be16_to_cpu(crc), data, len)); -} - -static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len) +static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len, + unsigned char csum_type) { - return (__force __be16)ip_compute_csum(data, len); + if (csum_type == BLK_INTEGRITY_CSUM_IP) + return (__force __be16)ip_compute_csum(data, len); + return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len)); } /* @@ -29,48 +26,44 @@ static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len) * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref * tag. 
*/ -static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, - csum_fn *fn, enum t10_dif_type type) +static void t10_pi_generate(struct blk_integrity_iter *iter, + struct blk_integrity *bi) { - u8 offset = iter->pi_offset; + u8 offset = bi->pi_offset; unsigned int i; for (i = 0 ; i < iter->data_size ; i += iter->interval) { struct t10_pi_tuple *pi = iter->prot_buf + offset; - pi->guard_tag = fn(0, iter->data_buf, iter->interval); + pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval, + bi->csum_type); if (offset) - pi->guard_tag = fn(pi->guard_tag, iter->prot_buf, - offset); + pi->guard_tag = t10_pi_csum(pi->guard_tag, + iter->prot_buf, offset, bi->csum_type); pi->app_tag = 0; - if (type == T10_PI_TYPE1_PROTECTION) + if (bi->flags & BLK_INTEGRITY_REF_TAG) pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); else pi->ref_tag = 0; iter->data_buf += iter->interval; - iter->prot_buf += iter->tuple_size; + iter->prot_buf += bi->tuple_size; iter->seed++; } - - return BLK_STS_OK; } static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, - csum_fn *fn, enum t10_dif_type type) + struct blk_integrity *bi) { - u8 offset = iter->pi_offset; + u8 offset = bi->pi_offset; unsigned int i; - BUG_ON(type == T10_PI_TYPE0_PROTECTION); - for (i = 0 ; i < iter->data_size ; i += iter->interval) { struct t10_pi_tuple *pi = iter->prot_buf + offset; __be16 csum; - if (type == T10_PI_TYPE1_PROTECTION || - type == T10_PI_TYPE2_PROTECTION) { + if (bi->flags & BLK_INTEGRITY_REF_TAG) { if (pi->app_tag == T10_PI_APP_ESCAPE) goto next; @@ -82,15 +75,17 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, iter->seed, be32_to_cpu(pi->ref_tag)); return BLK_STS_PROTECTION; } - } else if (type == T10_PI_TYPE3_PROTECTION) { + } else { if (pi->app_tag == T10_PI_APP_ESCAPE && pi->ref_tag == T10_PI_REF_ESCAPE) goto next; } - csum = fn(0, iter->data_buf, iter->interval); + csum = t10_pi_csum(0, iter->data_buf, iter->interval, + bi->csum_type); if (offset) - csum = fn(csum, iter->prot_buf, offset); + csum = t10_pi_csum(csum, iter->prot_buf, offset, + bi->csum_type); if (pi->guard_tag != csum) { pr_err("%s: guard tag error at sector %llu " \ @@ -102,33 +97,13 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, next: iter->data_buf += iter->interval; - iter->prot_buf += iter->tuple_size; + iter->prot_buf += bi->tuple_size; iter->seed++; } return BLK_STS_OK; } -static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) -{ - return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION); -} - -static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) -{ - return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION); -} - -static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) -{ - return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION); -} - -static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) -{ - return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION); -} - /** * t10_pi_type1_prepare - prepare PI prior submitting request to device * @rq: request with PI that should be prepared @@ -225,81 +200,15 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) } } -static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) -{ - return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION); -} - -static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) -{ - return t10_pi_generate(iter, 
t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION); -} - -static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) -{ - return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION); -} - -static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) -{ - return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION); -} - -/* Type 3 does not have a reference tag so no remapping is required. */ -static void t10_pi_type3_prepare(struct request *rq) -{ -} - -/* Type 3 does not have a reference tag so no remapping is required. */ -static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes) -{ -} - -const struct blk_integrity_profile t10_pi_type1_crc = { - .name = "T10-DIF-TYPE1-CRC", - .generate_fn = t10_pi_type1_generate_crc, - .verify_fn = t10_pi_type1_verify_crc, - .prepare_fn = t10_pi_type1_prepare, - .complete_fn = t10_pi_type1_complete, -}; -EXPORT_SYMBOL(t10_pi_type1_crc); - -const struct blk_integrity_profile t10_pi_type1_ip = { - .name = "T10-DIF-TYPE1-IP", - .generate_fn = t10_pi_type1_generate_ip, - .verify_fn = t10_pi_type1_verify_ip, - .prepare_fn = t10_pi_type1_prepare, - .complete_fn = t10_pi_type1_complete, -}; -EXPORT_SYMBOL(t10_pi_type1_ip); - -const struct blk_integrity_profile t10_pi_type3_crc = { - .name = "T10-DIF-TYPE3-CRC", - .generate_fn = t10_pi_type3_generate_crc, - .verify_fn = t10_pi_type3_verify_crc, - .prepare_fn = t10_pi_type3_prepare, - .complete_fn = t10_pi_type3_complete, -}; -EXPORT_SYMBOL(t10_pi_type3_crc); - -const struct blk_integrity_profile t10_pi_type3_ip = { - .name = "T10-DIF-TYPE3-IP", - .generate_fn = t10_pi_type3_generate_ip, - .verify_fn = t10_pi_type3_verify_ip, - .prepare_fn = t10_pi_type3_prepare, - .complete_fn = t10_pi_type3_complete, -}; -EXPORT_SYMBOL(t10_pi_type3_ip); - static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len) { return cpu_to_be64(crc64_rocksoft_update(crc, data, len)); } -static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter, - enum t10_dif_type type) +static void ext_pi_crc64_generate(struct blk_integrity_iter *iter, + struct blk_integrity *bi) { - u8 offset = iter->pi_offset; + u8 offset = bi->pi_offset; unsigned int i; for (i = 0 ; i < iter->data_size ; i += iter->interval) { @@ -311,17 +220,15 @@ static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter, iter->prot_buf, offset); pi->app_tag = 0; - if (type == T10_PI_TYPE1_PROTECTION) + if (bi->flags & BLK_INTEGRITY_REF_TAG) put_unaligned_be48(iter->seed, pi->ref_tag); else put_unaligned_be48(0ULL, pi->ref_tag); iter->data_buf += iter->interval; - iter->prot_buf += iter->tuple_size; + iter->prot_buf += bi->tuple_size; iter->seed++; } - - return BLK_STS_OK; } static bool ext_pi_ref_escape(u8 *ref_tag) @@ -332,9 +239,9 @@ static bool ext_pi_ref_escape(u8 *ref_tag) } static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, - enum t10_dif_type type) + struct blk_integrity *bi) { - u8 offset = iter->pi_offset; + u8 offset = bi->pi_offset; unsigned int i; for (i = 0; i < iter->data_size; i += iter->interval) { @@ -342,7 +249,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, u64 ref, seed; __be64 csum; - if (type == T10_PI_TYPE1_PROTECTION) { + if (bi->flags & BLK_INTEGRITY_REF_TAG) { if (pi->app_tag == T10_PI_APP_ESCAPE) goto next; @@ -353,7 +260,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, iter->disk_name, seed, ref); return BLK_STS_PROTECTION; } - } else if (type == T10_PI_TYPE3_PROTECTION) { + } else { if 
(pi->app_tag == T10_PI_APP_ESCAPE && ext_pi_ref_escape(pi->ref_tag)) goto next; @@ -374,23 +281,13 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, next: iter->data_buf += iter->interval; - iter->prot_buf += iter->tuple_size; + iter->prot_buf += bi->tuple_size; iter->seed++; } return BLK_STS_OK; } -static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter) -{ - return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION); -} - -static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter) -{ - return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION); -} - static void ext_pi_type1_prepare(struct request *rq) { struct blk_integrity *bi = &rq->q->integrity; @@ -467,33 +364,61 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) } } -static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter) +void blk_integrity_generate(struct blk_integrity_iter *iter, + struct blk_integrity *bi) { - return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION); + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_CRC64: + ext_pi_crc64_generate(iter, bi); + break; + case BLK_INTEGRITY_CSUM_CRC: + case BLK_INTEGRITY_CSUM_IP: + t10_pi_generate(iter, bi); + break; + default: + break; + } } -static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter) +blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter, + struct blk_integrity *bi) { - return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION); + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_CRC64: + return ext_pi_crc64_verify(iter, bi); + case BLK_INTEGRITY_CSUM_CRC: + case BLK_INTEGRITY_CSUM_IP: + return t10_pi_verify(iter, bi); + default: + return BLK_STS_OK; + } } -const struct blk_integrity_profile ext_pi_type1_crc64 = { - .name = "EXT-DIF-TYPE1-CRC64", - .generate_fn = ext_pi_type1_generate_crc64, - .verify_fn = ext_pi_type1_verify_crc64, - .prepare_fn = ext_pi_type1_prepare, - .complete_fn = ext_pi_type1_complete, -}; -EXPORT_SYMBOL_GPL(ext_pi_type1_crc64); - -const struct blk_integrity_profile ext_pi_type3_crc64 = { - .name = "EXT-DIF-TYPE3-CRC64", - .generate_fn = ext_pi_type3_generate_crc64, - .verify_fn = ext_pi_type3_verify_crc64, - .prepare_fn = t10_pi_type3_prepare, - .complete_fn = t10_pi_type3_complete, -}; -EXPORT_SYMBOL_GPL(ext_pi_type3_crc64); +void blk_integrity_prepare(struct request *rq) +{ + struct blk_integrity *bi = &rq->q->integrity; + + if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) + return; + + if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64) + ext_pi_type1_prepare(rq); + else + t10_pi_type1_prepare(rq); +} + +void blk_integrity_complete(struct request *rq, unsigned int nr_bytes) +{ + struct blk_integrity *bi = &rq->q->integrity; + + if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) + return; + + if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64) + ext_pi_type1_complete(rq, nr_bytes); + else + t10_pi_type1_complete(rq, nr_bytes); +} MODULE_DESCRIPTION("T10 Protection Information module"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1dfc462f29cd..6c013ceb0e5f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1177,7 +1177,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) struct mapped_device *md = dm_table_get_md(ti->table); /* We require an underlying device with non-PI metadata */ - if (!bi || strcmp(bi->profile->name, "nop")) { + if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) { ti->error = "Integrity profile 
not supported."; return -EINVAL; } diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b309c8be720f..a3caef75aa0a 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f5d150c62955..14bac248cde4 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1744,17 +1744,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE3: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type3_crc; + integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; integrity.tag_size = sizeof(u16) + sizeof(u32); integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type3_crc64; + integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; integrity.tag_size = sizeof(u16) + 6; integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: - integrity.profile = NULL; break; } break; @@ -1762,22 +1761,22 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE2: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type1_crc; + integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type1_crc64; + integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; default: - integrity.profile = NULL; break; } break; default: - integrity.profile = NULL; break; } diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 872dd1a0acd8..c42aec41cc7b 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -6,7 +6,6 @@ config NVME_TARGET depends on CONFIGFS_FS select NVME_KEYRING if NVME_TARGET_TCP_TLS select KEYS if NVME_TARGET_TCP_TLS - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY select SGL_ALLOC help This enabled target side support for the NVMe protocol, that is diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 6426aac2634a..b628bc5ee998 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -61,15 +61,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) { struct blk_integrity *bi = bdev_get_integrity(ns->bdev); - if (bi) { + if (!bi) + return; + + if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) { ns->metadata_size = bi->tuple_size; - if (bi->profile == &t10_pi_type1_crc) + if (bi->flags & BLK_INTEGRITY_REF_TAG) ns->pi_type = NVME_NS_DPS_PI_TYPE1; - else if (bi->profile == &t10_pi_type3_crc) - ns->pi_type = NVME_NS_DPS_PI_TYPE3; else - /* Unsupported metadata type */ - ns->metadata_size = 0; + ns->pi_type = NVME_NS_DPS_PI_TYPE3; + } else { + ns->metadata_size = 0; } } @@ -102,7 +104,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) ns->pi_type = 0; ns->metadata_size = 0; - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) nvmet_bdev_ns_enable_integrity(ns); if 
(bdev_is_zoned(ns->bdev)) { diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 065db86d6021..37c24ffea65c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -82,7 +82,6 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY help If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 1df847b5f747..6f0921c7db78 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -47,18 +47,13 @@ void sd_dif_config_host(struct scsi_disk *sdkp) memset(&bi, 0, sizeof(bi)); /* Enable DMA of protection information */ - if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) { - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_ip; - else - bi.profile = &t10_pi_type1_ip; + if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) + bi.csum_type = BLK_INTEGRITY_CSUM_IP; + else + bi.csum_type = BLK_INTEGRITY_CSUM_CRC; - bi.flags |= BLK_INTEGRITY_IP_CHECKSUM; - } else - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_crc; - else - bi.profile = &t10_pi_type1_crc; + if (type != T10_PI_TYPE3_PROTECTION) + bi.flags |= BLK_INTEGRITY_REF_TAG; bi.tuple_size = sizeof(struct t10_pi_tuple); @@ -76,7 +71,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp) sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIX %s, application tag size %u bytes\n", - bi.profile->name, bi.tag_size); + blk_integrity_profile_name(&bi), bi.tag_size); out: blk_integrity_register(disk, &bi); } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7f6ca8177845..a3e09adc4e76 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -148,35 +148,38 @@ static int iblock_configure_device(struct se_device *dev) dev->dev_attrib.is_nonrot = 1; bi = bdev_get_integrity(bd); - if (bi) { - struct bio_set *bs = &ib_dev->ibd_bio_set; - - if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") || - !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) { - pr_err("IBLOCK export of blk_integrity: %s not" - " supported\n", bi->profile->name); - ret = -ENOSYS; - goto out_blkdev_put; - } + if (!bi) + return 0; - if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) { - dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; - } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) { + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_IP: + pr_err("IBLOCK export of blk_integrity: %s not supported\n", + blk_integrity_profile_name(bi)); + ret = -ENOSYS; + goto out_blkdev_put; + case BLK_INTEGRITY_CSUM_CRC: + if (bi->flags & BLK_INTEGRITY_REF_TAG) dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; - } + else + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + break; + default: + break; + } - if (dev->dev_attrib.pi_prot_type) { - if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { - pr_err("Unable to allocate bioset for PI\n"); - ret = -ENOMEM; - goto out_blkdev_put; - } - pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", - &bs->bio_integrity_pool); + if (dev->dev_attrib.pi_prot_type) { + struct bio_set *bs = &ib_dev->ibd_bio_set; + + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; } - dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + pr_debug("IBLOCK setup 
BIP bs->bio_integrity_pool: %p\n", + &bs->bio_integrity_pool); } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; return 0; out_blkdev_put: diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 7428cb43952d..56ce1ae35580 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -10,7 +10,7 @@ enum blk_integrity_flags { BLK_INTEGRITY_VERIFY = 1 << 0, BLK_INTEGRITY_GENERATE = 1 << 1, BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, - BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, + BLK_INTEGRITY_REF_TAG = 1 << 3, }; struct blk_integrity_iter { @@ -19,22 +19,10 @@ struct blk_integrity_iter { sector_t seed; unsigned int data_size; unsigned short interval; - unsigned char tuple_size; - unsigned char pi_offset; const char *disk_name; }; -typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); -typedef void (integrity_prepare_fn) (struct request *); -typedef void (integrity_complete_fn) (struct request *, unsigned int); - -struct blk_integrity_profile { - integrity_processing_fn *generate_fn; - integrity_processing_fn *verify_fn; - integrity_prepare_fn *prepare_fn; - integrity_complete_fn *complete_fn; - const char *name; -}; +const char *blk_integrity_profile_name(struct blk_integrity *bi); #ifdef CONFIG_BLK_DEV_INTEGRITY void blk_integrity_register(struct gendisk *, struct blk_integrity *); @@ -44,14 +32,17 @@ int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) { - struct blk_integrity *bi = &disk->queue->integrity; + return q->integrity.tuple_size; +} - if (!bi->profile) +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + if (!blk_integrity_queue_supports_integrity(disk->queue)) return NULL; - - return bi; + return &disk->queue->integrity; } static inline struct blk_integrity * @@ -60,12 +51,6 @@ bdev_get_integrity(struct block_device *bdev) return blk_get_integrity(bdev->bd_disk); } -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return q->integrity.profile; -} - static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ac8e0cb2353a..bdd33388e1ce 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -105,9 +105,16 @@ enum { struct disk_events; struct badblocks; +enum blk_integrity_checksum { + BLK_INTEGRITY_CSUM_NONE = 0, + BLK_INTEGRITY_CSUM_IP = 1, + BLK_INTEGRITY_CSUM_CRC = 2, + BLK_INTEGRITY_CSUM_CRC64 = 3, +} __packed ; + struct blk_integrity { - const struct blk_integrity_profile *profile; unsigned char flags; + enum blk_integrity_checksum csum_type; unsigned char tuple_size; unsigned char pi_offset; unsigned char interval_exp; diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 248f4ac95642..d2bafb76badf 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -48,11 +48,6 @@ static inline u32 t10_pi_ref_tag(struct request *rq) return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } -extern const struct blk_integrity_profile t10_pi_type1_crc; -extern const struct blk_integrity_profile t10_pi_type1_ip; -extern const struct blk_integrity_profile t10_pi_type3_crc; -extern const struct blk_integrity_profile t10_pi_type3_ip; - struct 
crc64_pi_tuple {
 	__be64 guard_tag;
 	__be16 app_tag;
@@ -79,7 +74,4 @@ static inline u64 ext_pi_ref_tag(struct request *rq)
 	return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
 }
 
-extern const struct blk_integrity_profile ext_pi_type1_crc64;
-extern const struct blk_integrity_profile ext_pi_type3_crc64;
-
 #endif
--
cgit v1.2.3-58-ga151

From 3c3e85ddffae93eba1a257eb6939bf5dc1e93b9e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 13 Jun 2024 10:48:20 +0200
Subject: block: bypass the STABLE_WRITES flag for protection information

Currently registering a checksum-enabled (aka PI) integrity profile sets
the QUEUE_FLAG_STABLE_WRITES flag, and unregistering it clears the flag.
This can incorrectly clear the flag when the driver requires stable
writes even without PI, e.g. in case of iSCSI or NVMe/TCP with data
digest enabled.

Fix this by looking at the csum_type directly in bdev_stable_writes and
not setting the queue flag.  Also remove the blk_queue_stable_writes
helper as the only user in nvme wants to only look at the actual
QUEUE_FLAG_STABLE_WRITES flag as it inherits the integrity configuration
by other means.

Signed-off-by: Christoph Hellwig
Reviewed-by: Martin K. Petersen
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/20240613084839.1044015-11-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-integrity.c         |  6 ------
 drivers/nvme/host/multipath.c |  3 ++-
 include/linux/blkdev.h        | 12 ++++++++----
 3 files changed, 10 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 1d2d371cd632..bec0d1df387c 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -379,9 +379,6 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 	bi->tag_size = template->tag_size;
 	bi->pi_offset = template->pi_offset;
 
-	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
-		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	if (disk->queue->crypto_profile) {
 		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
@@ -404,9 +401,6 @@ void blk_integrity_unregister(struct gendisk *disk)
 
 	if (!bi->tuple_size)
 		return;
-
-	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
-		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
 	memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d8b6b4648eaf..12c59db02539 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -875,7 +875,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
 		nvme_mpath_set_live(ns);
 	}
 
-	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+	if (test_bit(QUEUE_FLAG_STABLE_WRITES, &ns->queue->queue_flags) &&
+	    ns->head->disk)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
 				   ns->head->disk->queue);
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bdd33388e1ce..f9089750919c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -571,8 +571,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_stable_writes(q) \
-	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)	\
@@ -1300,8 +1298,14 @@ static inline bool bdev_synchronous(struct block_device *bdev)
 
 static inline bool bdev_stable_writes(struct block_device *bdev)
 {
-	return test_bit(QUEUE_FLAG_STABLE_WRITES,
-			&bdev_get_queue(bdev)->queue_flags);
+	struct request_queue *q = bdev_get_queue(bdev);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	/* BLK_INTEGRITY_CSUM_NONE is not available in blkdev.h */
+	if (q->integrity.csum_type != 0)
+		return true;
+#endif
+	return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags);
 }
 
 static inline bool bdev_write_cache(struct block_device *bdev)
--
cgit v1.2.3-58-ga151

From 9f4aa46f2a7401025d8561495cf8740f773310fc Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 13 Jun 2024 10:48:21 +0200
Subject: block: invert the BLK_INTEGRITY_{GENERATE,VERIFY} flags

Invert the flags so that user-set values will be able to persist across
revalidation of the integrity information once we switch the integrity
information to queue_limits.

Signed-off-by: Christoph Hellwig
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Hannes Reinecke
Reviewed-by: Martin K.
Petersen Link: https://lore.kernel.org/r/20240613084839.1044015-12-hch@lst.de Signed-off-by: Jens Axboe --- block/bio-integrity.c | 4 ++-- block/blk-integrity.c | 18 +++++++++--------- include/linux/blk-integrity.h | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 31dbc2853f92..173ffd4d6237 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -448,10 +448,10 @@ bool bio_integrity_prep(struct bio *bio) return true; if (bio_data_dir(bio) == READ) { - if (!(bi->flags & BLK_INTEGRITY_VERIFY)) + if (bi->flags & BLK_INTEGRITY_NOVERIFY) return true; } else { - if (!(bi->flags & BLK_INTEGRITY_GENERATE)) + if (bi->flags & BLK_INTEGRITY_NOGENERATE) return true; /* diff --git a/block/blk-integrity.c b/block/blk-integrity.c index bec0d1df387c..b37b8855eed1 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -254,10 +254,11 @@ static ssize_t flag_store(struct device *dev, struct device_attribute *attr, if (err) return err; + /* the flags are inverted vs the values in the sysfs files */ if (val) - bi->flags |= flag; - else bi->flags &= ~flag; + else + bi->flags |= flag; return count; } @@ -266,7 +267,7 @@ static ssize_t flag_show(struct device *dev, struct device_attribute *attr, { struct blk_integrity *bi = dev_to_bi(dev); - return sysfs_emit(page, "%d\n", !!(bi->flags & flag)); + return sysfs_emit(page, "%d\n", !(bi->flags & flag)); } static ssize_t format_show(struct device *dev, struct device_attribute *attr, @@ -301,26 +302,26 @@ static ssize_t read_verify_store(struct device *dev, struct device_attribute *attr, const char *page, size_t count) { - return flag_store(dev, attr, page, count, BLK_INTEGRITY_VERIFY); + return flag_store(dev, attr, page, count, BLK_INTEGRITY_NOVERIFY); } static ssize_t read_verify_show(struct device *dev, struct device_attribute *attr, char *page) { - return flag_show(dev, attr, page, BLK_INTEGRITY_VERIFY); + return flag_show(dev, attr, page, BLK_INTEGRITY_NOVERIFY); } static ssize_t write_generate_store(struct device *dev, struct device_attribute *attr, const char *page, size_t count) { - return flag_store(dev, attr, page, count, BLK_INTEGRITY_GENERATE); + return flag_store(dev, attr, page, count, BLK_INTEGRITY_NOGENERATE); } static ssize_t write_generate_show(struct device *dev, struct device_attribute *attr, char *page) { - return flag_show(dev, attr, page, BLK_INTEGRITY_GENERATE); + return flag_show(dev, attr, page, BLK_INTEGRITY_NOGENERATE); } static ssize_t device_is_integrity_capable_show(struct device *dev, @@ -371,8 +372,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template struct blk_integrity *bi = &disk->queue->integrity; bi->csum_type = template->csum_type; - bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE | - template->flags; + bi->flags = template->flags; bi->interval_exp = template->interval_exp ? 
: ilog2(queue_logical_block_size(disk->queue));
 	bi->tuple_size = template->tuple_size;
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index 56ce1ae35580..bafa01d4e7f9 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -7,8 +7,8 @@ struct request;
 
 enum blk_integrity_flags {
-	BLK_INTEGRITY_VERIFY		= 1 << 0,
-	BLK_INTEGRITY_GENERATE		= 1 << 1,
+	BLK_INTEGRITY_NOVERIFY		= 1 << 0,
+	BLK_INTEGRITY_NOGENERATE	= 1 << 1,
 	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
 	BLK_INTEGRITY_REF_TAG		= 1 << 3,
 };
--
cgit v1.2.3-58-ga151

From c6e56cf6b2e79a463af21286ba951714ed20828c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 13 Jun 2024 10:48:22 +0200
Subject: block: move integrity information into queue_limits

Move the integrity information into the queue limits so that it can be
set atomically with other queue limits, and so that the sysfs changes to
the read_verify and write_generate flags are properly synchronized.
This also allows providing a more useful helper to stack the integrity
fields, although it still is separate from the main stacking function
as not all stackable devices want to inherit the integrity settings.
Even with that it greatly simplifies the code in md and dm.

Note that the integrity field is moved as-is into the queue limits.
While there are good arguments for removing the separate blk_integrity
structure, this would cause a lot of churn and might better be done at a
later time if desired.  However the integrity field in the queue_limits
structure is now unconditional so that various ifdefs can be avoided or
replaced with IS_ENABLED().  Given the tiny size of it, that seems like
a worthwhile trade off.

Signed-off-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
Reviewed-by: Martin K. Petersen
Link: https://lore.kernel.org/r/20240613084839.1044015-13-hch@lst.de
Signed-off-by: Jens Axboe
---
 Documentation/block/data-integrity.rst | 49 +---------
 block/blk-integrity.c | 124 +++----------------------
 block/blk-settings.c | 118 ++++++++++++++++++++++--
 block/t10-pi.c | 12 +--
 drivers/md/dm-core.h | 1 -
 drivers/md/dm-integrity.c | 27 +++---
 drivers/md/dm-table.c | 161 ++++++---------------------
 drivers/md/md.c | 72 ++++-----------
 drivers/md/md.h | 5 +-
 drivers/md/raid0.c | 7 +-
 drivers/md/raid1.c | 10 +-
 drivers/md/raid10.c | 10 +-
 drivers/md/raid5.c | 2 +-
 drivers/nvdimm/btt.c | 13 +--
 drivers/nvme/host/core.c | 70 +++++++-------
 drivers/scsi/sd.c | 8 +-
 drivers/scsi/sd.h | 12 +--
 drivers/scsi/sd_dif.c | 34 +++----
 include/linux/blk-integrity.h | 27 +++---
 include/linux/blkdev.h | 12 +--
 include/linux/t10-pi.h | 12 +--
 21 files changed, 289 insertions(+), 497 deletions(-)

(limited to 'include')

diff --git a/Documentation/block/data-integrity.rst b/Documentation/block/data-integrity.rst
index 6a760c0eb192..99905e880a0e 100644
--- a/Documentation/block/data-integrity.rst
+++ b/Documentation/block/data-integrity.rst
@@ -153,18 +153,11 @@ bio_free() will automatically free the bip.
 4.2 Block Device
 ----------------
 
-Because the format of the protection data is tied to the physical
-disk, each block device has been extended with a block integrity
-profile (struct blk_integrity).  This optional profile is registered
-with the block layer using blk_integrity_register().
-
-The profile contains callback functions for generating and verifying
-the protection data, as well as getting and setting application tags.
-The profile also contains a few constants to aid in completing,
-merging and splitting the integrity metadata.
+Block devices can set up the integrity information in the integrity
+sub-structure of the queue_limits structure.
 
 Layered block devices will need to pick a profile that's appropriate
-for all subdevices.  blk_integrity_compare() can help with that.  DM
+for all subdevices.  queue_limits_stack_integrity() can help with that.  DM
 and MD linear, RAID0 and RAID1 are currently supported.  RAID4/5/6
 will require extra work due to the application tag.
@@ -250,42 +243,6 @@ will require extra work due to the application tag.
     integrity upon completion.
 
-5.4 Registering A Block Device As Capable Of Exchanging Integrity Metadata
---------------------------------------------------------------------------
-
-    To enable integrity exchange on a block device the gendisk must be
-    registered as capable:
-
-    `int blk_integrity_register(gendisk, blk_integrity);`
-
-    The blk_integrity struct is a template and should contain the
-    following::
-
-      static struct blk_integrity my_profile = {
-          .name		= "STANDARDSBODY-TYPE-VARIANT-CSUM",
-          .generate_fn	= my_generate_fn,
-          .verify_fn	= my_verify_fn,
-          .tuple_size	= sizeof(struct my_tuple_size),
-          .tag_size	= ,
-      };
-
-    'name' is a text string which will be visible in sysfs.  This is
-    part of the userland API so chose it carefully and never change
-    it.  The format is standards body-type-variant.
-    E.g. T10-DIF-TYPE1-IP or T13-EPP-0-CRC.
-
-    'generate_fn' generates appropriate integrity metadata (for WRITE).
-
-    'verify_fn' verifies that the data buffer matches the integrity
-    metadata.
-
-    'tuple_size' must be set to match the size of the integrity
-    metadata per sector.  I.e. 8 for DIF and EPP.
-
-    'tag_size' must be set to identify how many bytes of tag space
-    are available per hardware sector.  For DIF this is either 2 or
-    0 depending on the value of the Control Mode Page ATO bit.
-
 ----------------------------------------------------------------------
 2007-12-24 Martin K. Petersen
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index b37b8855eed1..05a48689a424 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -107,63 +107,6 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
-/**
- * blk_integrity_compare - Compare integrity profile of two disks
- * @gd1: Disk to compare
- * @gd2: Disk to compare
- *
- * Description: Meta-devices like DM and MD need to verify that all
- * sub-devices use the same integrity format before advertising to
- * upper layers that they can send/receive integrity metadata.  This
- * function can be used to check whether two gendisk devices have
- * compatible integrity formats.
- */ -int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) -{ - struct blk_integrity *b1 = &gd1->queue->integrity; - struct blk_integrity *b2 = &gd2->queue->integrity; - - if (!b1->tuple_size && !b2->tuple_size) - return 0; - - if (!b1->tuple_size || !b2->tuple_size) - return -1; - - if (b1->interval_exp != b2->interval_exp) { - pr_err("%s: %s/%s protection interval %u != %u\n", - __func__, gd1->disk_name, gd2->disk_name, - 1 << b1->interval_exp, 1 << b2->interval_exp); - return -1; - } - - if (b1->tuple_size != b2->tuple_size) { - pr_err("%s: %s/%s tuple sz %u != %u\n", __func__, - gd1->disk_name, gd2->disk_name, - b1->tuple_size, b2->tuple_size); - return -1; - } - - if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { - pr_err("%s: %s/%s tag sz %u != %u\n", __func__, - gd1->disk_name, gd2->disk_name, - b1->tag_size, b2->tag_size); - return -1; - } - - if (b1->csum_type != b2->csum_type || - (b1->flags & BLK_INTEGRITY_REF_TAG) != - (b2->flags & BLK_INTEGRITY_REF_TAG)) { - pr_err("%s: %s/%s type %s != %s\n", __func__, - gd1->disk_name, gd2->disk_name, - blk_integrity_profile_name(b1), - blk_integrity_profile_name(b2)); - return -1; - } - - return 0; -} -EXPORT_SYMBOL(blk_integrity_compare); - bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, struct request *next) { @@ -217,7 +160,7 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req, static inline struct blk_integrity *dev_to_bi(struct device *dev) { - return &dev_to_disk(dev)->queue->integrity; + return &dev_to_disk(dev)->queue->limits.integrity; } const char *blk_integrity_profile_name(struct blk_integrity *bi) @@ -246,7 +189,8 @@ EXPORT_SYMBOL_GPL(blk_integrity_profile_name); static ssize_t flag_store(struct device *dev, struct device_attribute *attr, const char *page, size_t count, unsigned char flag) { - struct blk_integrity *bi = dev_to_bi(dev); + struct request_queue *q = dev_to_disk(dev)->queue; + struct queue_limits lim; unsigned long val; int err; @@ -254,11 +198,18 @@ static ssize_t flag_store(struct device *dev, struct device_attribute *attr, if (err) return err; - /* the flags are inverted vs the values in the sysfs files */ + /* note that the flags are inverted vs the values in the sysfs files */ + lim = queue_limits_start_update(q); if (val) - bi->flags &= ~flag; + lim.integrity.flags &= ~flag; else - bi->flags |= flag; + lim.integrity.flags |= flag; + + blk_mq_freeze_queue(q); + err = queue_limits_commit_update(q, &lim); + blk_mq_unfreeze_queue(q); + if (err) + return err; return count; } @@ -355,52 +306,3 @@ const struct attribute_group blk_integrity_attr_group = { .name = "integrity", .attrs = integrity_attrs, }; - -/** - * blk_integrity_register - Register a gendisk as being integrity-capable - * @disk: struct gendisk pointer to make integrity-aware - * @template: block integrity profile to register - * - * Description: When a device needs to advertise itself as being able to - * send/receive integrity metadata it must use this function to register - * the capability with the block layer. The template is a blk_integrity - * struct with values appropriate for the underlying hardware. See - * Documentation/block/data-integrity.rst. - */ -void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) -{ - struct blk_integrity *bi = &disk->queue->integrity; - - bi->csum_type = template->csum_type; - bi->flags = template->flags; - bi->interval_exp = template->interval_exp ? 
: - ilog2(queue_logical_block_size(disk->queue)); - bi->tuple_size = template->tuple_size; - bi->tag_size = template->tag_size; - bi->pi_offset = template->pi_offset; - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - if (disk->queue->crypto_profile) { - pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n"); - disk->queue->crypto_profile = NULL; - } -#endif -} -EXPORT_SYMBOL(blk_integrity_register); - -/** - * blk_integrity_unregister - Unregister block integrity profile - * @disk: disk whose integrity profile to unregister - * - * Description: This function unregisters the integrity capability from - * a block device. - */ -void blk_integrity_unregister(struct gendisk *disk) -{ - struct blk_integrity *bi = &disk->queue->integrity; - - if (!bi->tuple_size) - return; - memset(bi, 0, sizeof(*bi)); -} -EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-settings.c b/block/blk-settings.c index 996f247fc98e..f11c8676eb4c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -6,7 +6,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> -#include <linux/blkdev.h> +#include <linux/blk-integrity.h> #include <linux/pagemap.h> #include <linux/backing-dev-defs.h> #include <linux/gcd.h> @@ -97,6 +97,36 @@ static int blk_validate_zoned_limits(struct queue_limits *lim) return 0; } +static int blk_validate_integrity_limits(struct queue_limits *lim) +{ + struct blk_integrity *bi = &lim->integrity; + + if (!bi->tuple_size) { + if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE || + bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) { + pr_warn("invalid PI settings.\n"); + return -EINVAL; + } + return 0; + } + + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { + pr_warn("integrity support disabled.\n"); + return -EINVAL; + } + + if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE && + (bi->flags & BLK_INTEGRITY_REF_TAG)) { + pr_warn("ref tag not supported without checksum.\n"); + return -EINVAL; + } + + if (!bi->interval_exp) + bi->interval_exp = ilog2(lim->logical_block_size); + + return 0; +} + /* * Check that the limits in lim are valid, initialize defaults for unset * values, and cap values based on others where needed.
@@ -105,6 +135,7 @@ static int blk_validate_limits(struct queue_limits *lim) { unsigned int max_hw_sectors; unsigned int logical_block_sectors; + int err; /* * Unless otherwise specified, default to 512 byte logical blocks and a @@ -230,6 +261,9 @@ static int blk_validate_limits(struct queue_limits *lim) lim->misaligned = 0; } + err = blk_validate_integrity_limits(lim); + if (err) + return err; return blk_validate_zoned_limits(lim); } @@ -263,13 +297,24 @@ int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim) __releases(q->limits_lock) { - int error = blk_validate_limits(lim); + int error; - if (!error) { - q->limits = *lim; - if (q->disk) - blk_apply_bdi_limits(q->disk->bdi, lim); + error = blk_validate_limits(lim); + if (error) + goto out_unlock; + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + if (q->crypto_profile && lim->integrity.tag_size) { + pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n"); + error = -EINVAL; + goto out_unlock; } +#endif + + q->limits = *lim; + if (q->disk) + blk_apply_bdi_limits(q->disk->bdi, lim); +out_unlock: mutex_unlock(&q->limits_lock); return error; } @@ -575,6 +620,67 @@ void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, } EXPORT_SYMBOL_GPL(queue_limits_stack_bdev); +/** + * queue_limits_stack_integrity - stack integrity profile + * @t: target queue limits + * @b: base queue limits + * + * Check if the integrity profile in @b can be stacked into the + * target @t. Stacking is possible if either: + * + * a) @t does not have any integrity information stacked into it yet + * b) the integrity profile in @b is identical to the one in @t + * + * If @b can be stacked into @t, return %true. Else return %false and clear the + * integrity information in @t.
+ */ +bool queue_limits_stack_integrity(struct queue_limits *t, + struct queue_limits *b) +{ + struct blk_integrity *ti = &t->integrity; + struct blk_integrity *bi = &b->integrity; + + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) + return true; + + if (!ti->tuple_size) { + /* inherit the settings from the first underlying device */ + if (!(ti->flags & BLK_INTEGRITY_STACKED)) { + ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE | + (bi->flags & BLK_INTEGRITY_REF_TAG); + ti->csum_type = bi->csum_type; + ti->tuple_size = bi->tuple_size; + ti->pi_offset = bi->pi_offset; + ti->interval_exp = bi->interval_exp; + ti->tag_size = bi->tag_size; + goto done; + } + if (!bi->tuple_size) + goto done; + } + + if (ti->tuple_size != bi->tuple_size) + goto incompatible; + if (ti->interval_exp != bi->interval_exp) + goto incompatible; + if (ti->tag_size != bi->tag_size) + goto incompatible; + if (ti->csum_type != bi->csum_type) + goto incompatible; + if ((ti->flags & BLK_INTEGRITY_REF_TAG) != + (bi->flags & BLK_INTEGRITY_REF_TAG)) + goto incompatible; + +done: + ti->flags |= BLK_INTEGRITY_STACKED; + return true; + +incompatible: + memset(ti, 0, sizeof(*ti)); + return false; +} +EXPORT_SYMBOL_GPL(queue_limits_stack_integrity); + /** * blk_queue_update_dma_pad - update pad mask * @q: the request queue for the device diff --git a/block/t10-pi.c b/block/t10-pi.c index dadecf621497..cd7fa60d63ff 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -116,7 +116,7 @@ next: */ static void t10_pi_type1_prepare(struct request *rq) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; const int tuple_sz = bi->tuple_size; u32 ref_tag = t10_pi_ref_tag(rq); u8 offset = bi->pi_offset; @@ -167,7 +167,7 @@ static void t10_pi_type1_prepare(struct request *rq) */ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; unsigned intervals = nr_bytes >> bi->interval_exp; const int tuple_sz = bi->tuple_size; u32 ref_tag = t10_pi_ref_tag(rq); @@ -290,7 +290,7 @@ next: static void ext_pi_type1_prepare(struct request *rq) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; const int tuple_sz = bi->tuple_size; u64 ref_tag = ext_pi_ref_tag(rq); u8 offset = bi->pi_offset; @@ -330,7 +330,7 @@ static void ext_pi_type1_prepare(struct request *rq) static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; unsigned intervals = nr_bytes >> bi->interval_exp; const int tuple_sz = bi->tuple_size; u64 ref_tag = ext_pi_ref_tag(rq); @@ -396,7 +396,7 @@ blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter, void blk_integrity_prepare(struct request *rq) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) return; @@ -409,7 +409,7 @@ void blk_integrity_prepare(struct request *rq) void blk_integrity_complete(struct request *rq, unsigned int nr_bytes) { - struct blk_integrity *bi = &rq->q->integrity; + struct blk_integrity *bi = &rq->q->limits.integrity; if (!(bi->flags & BLK_INTEGRITY_REF_TAG)) return; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 08700bfc3e23..14a44c0f8286 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -206,7 +206,6 @@ struct dm_table { bool 
integrity_supported:1; bool singleton:1; - unsigned integrity_added:1; /* * Indicates the rw permissions for the new logical device. This diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index c1cc27541673..2a89f8eb4713 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3475,6 +3475,17 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim limits->dma_alignment = limits->logical_block_size - 1; limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; } + + if (!ic->internal_hash) { + struct blk_integrity *bi = &limits->integrity; + + memset(bi, 0, sizeof(*bi)); + bi->tuple_size = ic->tag_size; + bi->tag_size = bi->tuple_size; + bi->interval_exp = + ic->sb->log2_sectors_per_block + SECTOR_SHIFT; + } + limits->max_integrity_segments = USHRT_MAX; } @@ -3631,19 +3642,6 @@ try_smaller_buffer: return 0; } -static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) -{ - struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); - struct blk_integrity bi; - - memset(&bi, 0, sizeof(bi)); - bi.tuple_size = ic->tag_size; - bi.tag_size = bi.tuple_size; - bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; - - blk_integrity_register(disk, &bi); -} - static void dm_integrity_free_page_list(struct page_list *pl) { unsigned int i; @@ -4629,9 +4627,6 @@ try_smaller_buffer: } } - if (!ic->internal_hash) - dm_integrity_set(ti, ic); - ti->num_flush_bios = 1; ti->flush_supported = true; if (ic->discard) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index b2d5246cff21..fd789eeb62d9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -425,6 +425,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); + + /* + * Only stack the integrity profile if the target doesn't have native + * integrity support. + */ + if (!dm_target_has_integrity(ti->type)) + queue_limits_stack_integrity_bdev(limits, bdev); return 0; } @@ -702,9 +709,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->immutable_target_type = ti->type; } - if (dm_target_has_integrity(ti->type)) - t->integrity_added = 1; - ti->table = t; ti->begin = start; ti->len = len; @@ -1119,99 +1123,6 @@ static int dm_table_build_index(struct dm_table *t) return r; } -static bool integrity_profile_exists(struct gendisk *disk) -{ - return !!blk_get_integrity(disk); -} - -/* - * Get a disk whose integrity profile reflects the table's profile. - * Returns NULL if integrity support was inconsistent or unavailable. 
- */ -static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) -{ - struct list_head *devices = dm_table_get_devices(t); - struct dm_dev_internal *dd = NULL; - struct gendisk *prev_disk = NULL, *template_disk = NULL; - - for (unsigned int i = 0; i < t->num_targets; i++) { - struct dm_target *ti = dm_table_get_target(t, i); - - if (!dm_target_passes_integrity(ti->type)) - goto no_integrity; - } - - list_for_each_entry(dd, devices, list) { - template_disk = dd->dm_dev->bdev->bd_disk; - if (!integrity_profile_exists(template_disk)) - goto no_integrity; - else if (prev_disk && - blk_integrity_compare(prev_disk, template_disk) < 0) - goto no_integrity; - prev_disk = template_disk; - } - - return template_disk; - -no_integrity: - if (prev_disk) - DMWARN("%s: integrity not set: %s and %s profile mismatch", - dm_device_name(t->md), - prev_disk->disk_name, - template_disk->disk_name); - return NULL; -} - -/* - * Register the mapped device for blk_integrity support if the - * underlying devices have an integrity profile. But all devices may - * not have matching profiles (checking all devices isn't reliable - * during table load because this table may use other DM device(s) which - * must be resumed before they will have an initialized integity - * profile). Consequently, stacked DM devices force a 2 stage integrity - * profile validation: First pass during table load, final pass during - * resume. - */ -static int dm_table_register_integrity(struct dm_table *t) -{ - struct mapped_device *md = t->md; - struct gendisk *template_disk = NULL; - - /* If target handles integrity itself do not register it here. */ - if (t->integrity_added) - return 0; - - template_disk = dm_table_get_integrity_disk(t); - if (!template_disk) - return 0; - - if (!integrity_profile_exists(dm_disk(md))) { - t->integrity_supported = true; - /* - * Register integrity profile during table load; we can do - * this because the final profile must match during resume. - */ - blk_integrity_register(dm_disk(md), - blk_get_integrity(template_disk)); - return 0; - } - - /* - * If DM device already has an initialized integrity - * profile the new profile should not conflict. 
- */ - if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { - DMERR("%s: conflict with existing integrity profile: %s profile mismatch", - dm_device_name(t->md), - template_disk->disk_name); - return 1; - } - - /* Preserve existing integrity profile */ - t->integrity_supported = true; - return 0; -} - #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct dm_crypto_profile { @@ -1423,12 +1334,6 @@ int dm_table_complete(struct dm_table *t) return r; } - r = dm_table_register_integrity(t); - if (r) { - DMERR("could not register integrity profile."); - return r; - } - r = dm_table_construct_crypto_profile(t); if (r) { DMERR("could not construct crypto profile."); @@ -1688,6 +1593,14 @@ int dm_calculate_queue_limits(struct dm_table *t, blk_set_stacking_limits(limits); + t->integrity_supported = true; + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (!dm_target_passes_integrity(ti->type)) + t->integrity_supported = false; + } + for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); @@ -1738,6 +1651,18 @@ combine_limits: dm_device_name(t->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); + + if (t->integrity_supported || + dm_target_has_integrity(ti->type)) { + if (!queue_limits_stack_integrity(limits, &ti_limits)) { + DMWARN("%s: adding target device (start sect %llu len %llu) " + "disabled integrity support due to incompatibility", + dm_device_name(t->md), + (unsigned long long) ti->begin, + (unsigned long long) ti->len); + t->integrity_supported = false; + } + } } /* @@ -1761,36 +1686,6 @@ combine_limits: return validate_hardware_logical_block_alignment(t, limits); } -/* - * Verify that all devices have an integrity profile that matches the - * DM device's registered integrity profile. If the profiles don't - * match then unregister the DM device's integrity profile. - */ -static void dm_table_verify_integrity(struct dm_table *t) -{ - struct gendisk *template_disk = NULL; - - if (t->integrity_added) - return; - - if (t->integrity_supported) { - /* - * Verify that the original integrity profile - * matches all the devices in this table. - */ - template_disk = dm_table_get_integrity_disk(t); - if (template_disk && - blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) - return; - } - - if (integrity_profile_exists(dm_disk(t->md))) { - DMWARN("%s: unable to establish an integrity profile", - dm_device_name(t->md)); - blk_integrity_unregister(dm_disk(t->md)); - } -} - static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -2004,8 +1899,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - dm_table_verify_integrity(t); - /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. 
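The dm conversion above and the md conversion that follows share one calling pattern for the new helper: open an atomic limits update, fold the integrity profile of every component device into it, and commit only if all of them matched. A minimal sketch of that pattern follows; example_stack_integrity and its disk/bdevs parameters are hypothetical names for illustration, not code from this series:

/*
 * Hypothetical stacking driver: inherit the integrity profile from all
 * component devices.  The first queue_limits_stack_integrity_bdev() call
 * seeds lim.integrity; any later call that sees a mismatching profile
 * clears it again and returns false.
 */
static int example_stack_integrity(struct gendisk *disk,
		struct block_device **bdevs, unsigned int nr_bdevs)
{
	struct queue_limits lim;
	unsigned int i;

	lim = queue_limits_start_update(disk->queue);
	for (i = 0; i < nr_bdevs; i++) {
		if (!queue_limits_stack_integrity_bdev(&lim, bdevs[i])) {
			queue_limits_cancel_update(disk->queue);
			return -EINVAL;
		}
	}
	return queue_limits_commit_update(disk->queue, &lim);
}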
diff --git a/drivers/md/md.c b/drivers/md/md.c index aff9118ff697..67ece2cd725f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2410,36 +2410,10 @@ static LIST_HEAD(pending_raid_disks); */ int md_integrity_register(struct mddev *mddev) { - struct md_rdev *rdev, *reference = NULL; - if (list_empty(&mddev->disks)) return 0; /* nothing to do */ - if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk)) - return 0; /* shouldn't register, or already is */ - rdev_for_each(rdev, mddev) { - /* skip spares and non-functional disks */ - if (test_bit(Faulty, &rdev->flags)) - continue; - if (rdev->raid_disk < 0) - continue; - if (!reference) { - /* Use the first rdev as the reference */ - reference = rdev; - continue; - } - /* does this rdev's profile match the reference profile? */ - if (blk_integrity_compare(reference->bdev->bd_disk, - rdev->bdev->bd_disk) < 0) - return -EINVAL; - } - if (!reference || !bdev_get_integrity(reference->bdev)) - return 0; - /* - * All component devices are integrity capable and have matching - * profiles, register the common profile for the md device. - */ - blk_integrity_register(mddev->gendisk, - bdev_get_integrity(reference->bdev)); + if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk)) + return 0; /* shouldn't register */ pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || @@ -2459,32 +2433,6 @@ int md_integrity_register(struct mddev *mddev) } EXPORT_SYMBOL(md_integrity_register); -/* - * Attempt to add an rdev, but only if it is consistent with the current - * integrity profile - */ -int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) -{ - struct blk_integrity *bi_mddev; - - if (mddev_is_dm(mddev)) - return 0; - - bi_mddev = blk_get_integrity(mddev->gendisk); - - if (!bi_mddev) /* nothing to do */ - return 0; - - if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { - pr_err("%s: incompatible integrity profile for %pg\n", - mdname(mddev), rdev->bdev); - return -ENXIO; - } - - return 0; -} -EXPORT_SYMBOL(md_integrity_add_rdev); - static bool rdev_read_only(struct md_rdev *rdev) { return bdev_read_only(rdev->bdev) || @@ -5755,14 +5703,20 @@ static const struct kobj_type md_ktype = { int mdp_major = 0; /* stack the limit for all rdevs into lim */ -void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim) +int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, + unsigned int flags) { struct md_rdev *rdev; rdev_for_each(rdev, mddev) { queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); + if ((flags & MDDEV_STACK_INTEGRITY) && + !queue_limits_stack_integrity_bdev(lim, rdev->bdev)) + return -EINVAL; } + + return 0; } EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); @@ -5777,6 +5731,14 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev) lim = queue_limits_start_update(mddev->gendisk->queue); queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); + + if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) { + pr_err("%s: incompatible integrity profile for %pg\n", + mdname(mddev), rdev->bdev); + queue_limits_cancel_update(mddev->gendisk->queue); + return -ENXIO; + } + return queue_limits_commit_update(mddev->gendisk->queue, &lim); } EXPORT_SYMBOL_GPL(mddev_stack_new_rdev); diff --git a/drivers/md/md.h b/drivers/md/md.h index ca085ecad504..6733b0b0abf9 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -809,7 +809,6 @@ extern 
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); extern int md_check_no_bitmap(struct mddev *mddev); extern int md_integrity_register(struct mddev *mddev); -extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); extern int mddev_init(struct mddev *mddev); @@ -908,7 +907,9 @@ void md_autostart_arrays(int part); int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); int do_md_run(struct mddev *mddev); -void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim); +#define MDDEV_STACK_INTEGRITY (1u << 0) +int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, + unsigned int flags); int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev); void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 81c01347cd24..62634e2a33bd 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -377,13 +377,18 @@ static void raid0_free(struct mddev *mddev, void *priv) static int raid0_set_limits(struct mddev *mddev) { struct queue_limits lim; + int err; blk_set_stacking_limits(&lim); lim.max_hw_sectors = mddev->chunk_sectors; lim.max_write_zeroes_sectors = mddev->chunk_sectors; lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * mddev->raid_disks; - mddev_stack_rdev_limits(mddev, &lim); + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1f321826ef02..779cad62f6f8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1907,9 +1907,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; - if (md_integrity_add_rdev(rdev, mddev)) - return -ENXIO; - if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -3197,10 +3194,15 @@ static struct r1conf *setup_conf(struct mddev *mddev) static int raid1_set_limits(struct mddev *mddev) { struct queue_limits lim; + int err; blk_set_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; - mddev_stack_rdev_limits(mddev, &lim); + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a4556d2e46bf..5f6885b53b69 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2083,9 +2083,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) return -EINVAL; - if (md_integrity_add_rdev(rdev, mddev)) - return -ENXIO; - if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -3980,12 +3977,17 @@ static int raid10_set_queue_limits(struct mddev *mddev) { struct r10conf *conf = mddev->private; struct queue_limits lim; + int err; blk_set_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * raid10_nr_stripes(conf); - mddev_stack_rdev_limits(mddev, &lim); + err = 
mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2bd1ce9b3922..675c68fa6c64 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7708,7 +7708,7 @@ static int raid5_set_limits(struct mddev *mddev) lim.raid_partial_stripes_expensive = 1; lim.discard_granularity = stripe; lim.max_write_zeroes_sectors = 0; - mddev_stack_rdev_limits(mddev, &lim); + mddev_stack_rdev_limits(mddev, &lim, 0); rdev_for_each(rdev, mddev) queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset, mddev->gendisk->disk_name); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 1e5aedaf8c7b..c5f8451b494d 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1504,6 +1504,11 @@ static int btt_blk_init(struct btt *btt) }; int rc; + if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { + lim.integrity.tuple_size = btt_meta_size(btt); + lim.integrity.tag_size = btt_meta_size(btt); + } + btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); if (IS_ERR(btt->btt_disk)) return PTR_ERR(btt->btt_disk); @@ -1516,14 +1521,6 @@ static int btt_blk_init(struct btt *btt) blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); - if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { - struct blk_integrity bi = { - .tuple_size = btt_meta_size(btt), - .tag_size = btt_meta_size(btt), - }; - blk_integrity_register(btt->btt_disk, &bi); - } - set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL); if (rc) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 14bac248cde4..5a673fa5cb26 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1723,11 +1723,12 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) +static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head, + struct queue_limits *lim) { - struct blk_integrity integrity = { }; + struct blk_integrity *bi = &lim->integrity; - blk_integrity_unregister(disk); + memset(bi, 0, sizeof(*bi)); if (!head->ms) return true; @@ -1744,14 +1745,14 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE3: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; - integrity.tag_size = sizeof(u16) + sizeof(u32); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16) + sizeof(u32); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; - integrity.tag_size = sizeof(u16) + 6; - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16) + 6; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: break; @@ -1761,16 +1762,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE2: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE | - 
BLK_INTEGRITY_REF_TAG; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; case NVME_NVM_NS_64B_GUARD: - integrity.csum_type = BLK_INTEGRITY_CSUM_CRC64; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE | - BLK_INTEGRITY_REF_TAG; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; default: break; @@ -1780,9 +1781,8 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) break; } - integrity.tuple_size = head->ms; - integrity.pi_offset = head->pi_offset; - blk_integrity_register(disk, &integrity); + bi->tuple_size = head->ms; + bi->pi_offset = head->pi_offset; return true; } @@ -2105,11 +2105,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && ns->head->ids.csi == NVME_CSI_ZNS) nvme_update_zone_info(ns, &lim, &zi); - ret = queue_limits_commit_update(ns->disk->queue, &lim); - if (ret) { - blk_mq_unfreeze_queue(ns->disk->queue); - goto out; - } /* * Register a metadata profile for PI, or the plain non-integrity NVMe @@ -2117,9 +2112,15 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * I/O to namespaces with metadata except when the namespace supports * PI, as it can strip/insert in that case. */ - if (!nvme_init_integrity(ns->disk, ns->head)) + if (!nvme_init_integrity(ns->disk, ns->head, &lim)) capacity = 0; + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + set_capacity_and_notify(ns->disk, capacity); /* @@ -2191,14 +2192,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) struct queue_limits lim; blk_mq_freeze_queue(ns->head->disk->queue); - if (unsupported) - ns->head->disk->flags |= GENHD_FL_HIDDEN; - else - nvme_init_integrity(ns->head->disk, ns->head); - set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - /* * queue_limits mixes values that are the hardware limitations * for bio splitting with what is the device configuration. 
@@ -2221,7 +2214,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) lim.io_opt = ns_lim->io_opt; queue_limits_stack_bdev(&lim, ns->disk->part0, 0, ns->head->disk->disk_name); + if (unsupported) + ns->head->disk->flags |= GENHD_FL_HIDDEN; + else + nvme_init_integrity(ns->head->disk, ns->head, &lim); ret = queue_limits_commit_update(ns->head->disk->queue, &lim); + + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + blk_mq_unfreeze_queue(ns->head->disk->queue); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d957e29b17a9..e01393ed4207 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2482,11 +2482,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer return 0; } -static void sd_config_protection(struct scsi_disk *sdkp) +static void sd_config_protection(struct scsi_disk *sdkp, + struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - sd_dif_config_host(sdkp); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) + sd_dif_config_host(sdkp, lim); if (!sdkp->protection_type) return; @@ -3677,7 +3679,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_config_protection(sdkp); + sd_config_protection(sdkp, &lim); } /* diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index b4170b17bad4..726f1613f6cb 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -220,17 +220,7 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec return sector >> (ilog2(sdev->sector_size) - 9); } -#ifdef CONFIG_BLK_DEV_INTEGRITY - -extern void sd_dif_config_host(struct scsi_disk *); - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline void sd_dif_config_host(struct scsi_disk *disk) -{ -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim); static inline int sd_is_zoned(struct scsi_disk *sdkp) { diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 6f0921c7db78..ae6ce6f5d622 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -24,14 +24,15 @@ /* * Configure exchange of protection information between OS and HBA. 
*/ -void sd_dif_config_host(struct scsi_disk *sdkp) +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - struct gendisk *disk = sdkp->disk; u8 type = sdkp->protection_type; - struct blk_integrity bi; + struct blk_integrity *bi = &lim->integrity; int dif, dix; + memset(bi, 0, sizeof(*bi)); + dif = scsi_host_dif_capable(sdp->host, type); dix = scsi_host_dix_capable(sdp->host, type); @@ -39,40 +40,33 @@ void sd_dif_config_host(struct scsi_disk *sdkp) dif = 0; dix = 1; } - if (!dix) { - blk_integrity_unregister(disk); + if (!dix) return; - } - - memset(&bi, 0, sizeof(bi)); /* Enable DMA of protection information */ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) - bi.csum_type = BLK_INTEGRITY_CSUM_IP; + bi->csum_type = BLK_INTEGRITY_CSUM_IP; else - bi.csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; if (type != T10_PI_TYPE3_PROTECTION) - bi.flags |= BLK_INTEGRITY_REF_TAG; + bi->flags |= BLK_INTEGRITY_REF_TAG; - bi.tuple_size = sizeof(struct t10_pi_tuple); + bi->tuple_size = sizeof(struct t10_pi_tuple); if (dif && type) { - bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; if (!sdkp->ATO) - goto out; + return; if (type == T10_PI_TYPE3_PROTECTION) - bi.tag_size = sizeof(u16) + sizeof(u32); + bi->tag_size = sizeof(u16) + sizeof(u32); else - bi.tag_size = sizeof(u16); + bi->tag_size = sizeof(u16); } sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIX %s, application tag size %u bytes\n", - blk_integrity_profile_name(&bi), bi.tag_size); -out: - blk_integrity_register(disk, &bi); + blk_integrity_profile_name(bi), bi->tag_size); } - diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index bafa01d4e7f9..d201140d77a3 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -11,6 +11,7 @@ enum blk_integrity_flags { BLK_INTEGRITY_NOGENERATE = 1 << 1, BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, BLK_INTEGRITY_REF_TAG = 1 << 3, + BLK_INTEGRITY_STACKED = 1 << 4, }; struct blk_integrity_iter { @@ -23,11 +24,15 @@ struct blk_integrity_iter { }; const char *blk_integrity_profile_name(struct blk_integrity *bi); +bool queue_limits_stack_integrity(struct queue_limits *t, + struct queue_limits *b); +static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, + struct block_device *bdev) +{ + return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits); +} #ifdef CONFIG_BLK_DEV_INTEGRITY -void blk_integrity_register(struct gendisk *, struct blk_integrity *); -void blk_integrity_unregister(struct gendisk *); -int blk_integrity_compare(struct gendisk *, struct gendisk *); int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); @@ -35,14 +40,14 @@ int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); static inline bool blk_integrity_queue_supports_integrity(struct request_queue *q) { - return q->integrity.tuple_size; + return q->limits.integrity.tuple_size; } static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { if (!blk_integrity_queue_supports_integrity(disk->queue)) return NULL; - return &disk->queue->integrity; + return &disk->queue->limits.integrity; } static inline struct blk_integrity * @@ -119,17 +124,6 @@ blk_integrity_queue_supports_integrity(struct request_queue *q) { return false; } -static inline int blk_integrity_compare(struct gendisk *a, struct gendisk 
*b) -{ - return 0; -} -static inline void blk_integrity_register(struct gendisk *d, - struct blk_integrity *b) -{ -} -static inline void blk_integrity_unregister(struct gendisk *d) -{ -} static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) { @@ -157,4 +151,5 @@ static inline struct bio_vec *rq_integrity_vec(struct request *rq) return NULL; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ + #endif /* _LINUX_BLK_INTEGRITY_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f9089750919c..0c247a716885 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -334,6 +334,8 @@ struct queue_limits { * due to possible offsets. */ unsigned int dma_alignment; + + struct blk_integrity integrity; }; typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, @@ -419,10 +421,6 @@ struct request_queue { struct queue_limits limits; -#ifdef CONFIG_BLK_DEV_INTEGRITY - struct blk_integrity integrity; -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - #ifdef CONFIG_PM struct device *dev; enum rpm_status rpm_status; @@ -1300,11 +1298,9 @@ static inline bool bdev_stable_writes(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); -#ifdef CONFIG_BLK_DEV_INTEGRITY - /* BLK_INTEGRITY_CSUM_NONE is not available in blkdev.h */ - if (q->integrity.csum_type != 0) + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) return true; -#endif return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags); } diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index d2bafb76badf..1773610010eb 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -39,12 +39,8 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + unsigned int shift = rq->q->limits.integrity.interval_exp; -#ifdef CONFIG_BLK_DEV_INTEGRITY - if (rq->q->integrity.interval_exp) - shift = rq->q->integrity.interval_exp; -#endif return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } @@ -65,12 +61,8 @@ static inline u64 lower_48_bits(u64 n) static inline u64 ext_pi_ref_tag(struct request *rq) { - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + unsigned int shift = rq->q->limits.integrity.interval_exp; -#ifdef CONFIG_BLK_DEV_INTEGRITY - if (rq->q->integrity.interval_exp) - shift = rq->q->integrity.interval_exp; -#endif return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); } -- cgit v1.2.3-58-ga151 From c3042a5403ef2be622023fcc3b11fc1aa08ba7fa Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 14 Jun 2024 09:03:44 +0000 Subject: block: Drop locking annotation for limits_lock Currently compiling block/blk-settings.c with C=1 gives the following warning: block/blk-settings.c:262:9: warning: context imbalance in 'queue_limits_commit_update' - wrong count at exit request_queue.limits_lock is a mutex. Sparse locking annotations for mutexes are currently not supported - see [0] - so drop that locking annotation.
[0] https://lore.kernel.org/lkml/cover.1579893447.git.jbi.octave@gmail.com/T/#mbb8bda6c0a7ca7ce19f46df976a8e3b489745488 Fixes: d690cb8ae14bd ("block: add an API to atomically update queue limits") Signed-off-by: John Garry Reviewed-by: Bart Van Assche Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240614090345.655716-3-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-settings.c | 1 - include/linux/blkdev.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index f574181105f8..377a86fe0fcc 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -303,7 +303,6 @@ int blk_set_default_limits(struct queue_limits *lim) */ int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim) - __releases(q->limits_lock) { int error; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0c247a716885..83af6ac5aa48 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -893,7 +893,6 @@ static inline unsigned int blk_chunk_sectors_left(sector_t offset, */ static inline struct queue_limits queue_limits_start_update(struct request_queue *q) - __acquires(q->limits_lock) { mutex_lock(&q->limits_lock); return q->limits; -- cgit v1.2.3-58-ga151 From 1122c0c1cc71f740fa4d5f14f239194e06a1d5e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:40 +0200 Subject: block: move cache control settings out of queue->flags Move the cache control settings into the queue_limits so that the flags can be set atomically with the device queue frozen. Add new features and flags fields for the driver-set flags, and internal (usually sysfs-controlled) flags in the block layer. Note that we'll eventually remove enough fields from queue_limits to bring it back to the previous size. The disable flag is inverted compared to the previous meaning, which means it now survives a rescan, similar to the max_sectors and max_discard_sectors user limits. The FLUSH and FUA flags are now inherited by blk_stack_limits, which simplifies the code in dm a lot, but also causes a slight behavior change in that dm-switch and dm-unstripe now advertise a write cache despite setting num_flush_bios to 0. The I/O path will handle this gracefully, but as far as I can tell the lack of num_flush_bios and thus flush support is a pre-existing data integrity bug in those targets that really needs fixing, after which a non-zero num_flush_bios should be required in dm for targets that map to underlying devices.
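As a rough illustration of the inheritance described above: blk_stack_limits() now copies the inheritable feature bits from the bottom device b into the stacked top device t, so a write cache on an underlying device becomes visible on the stacked one. The line below is lifted from the stacking change in this patch; consistent with the note above, BLK_FEAT_INHERIT_MASK covers the cache (FLUSH) and FUA feature bits:

	/* bottom device features in BLK_FEAT_INHERIT_MASK propagate up */
	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);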
Signed-off-by: Christoph Hellwig Acked-by: Ulf Hansson Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-14-hch@lst.de Signed-off-by: Jens Axboe --- Documentation/block/writeback_cache_control.rst | 67 ++++++++++++++----------- arch/um/drivers/ubd_kern.c | 2 +- block/blk-core.c | 2 +- block/blk-flush.c | 9 ++-- block/blk-mq-debugfs.c | 2 - block/blk-settings.c | 29 ++--------- block/blk-sysfs.c | 29 +++++++---- block/blk-wbt.c | 4 +- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/loop.c | 9 ++-- drivers/block/nbd.c | 14 +++--- drivers/block/null_blk/main.c | 12 +++-- drivers/block/ps3disk.c | 7 +-- drivers/block/rnbd/rnbd-clt.c | 10 ++-- drivers/block/ublk_drv.c | 8 ++- drivers/block/virtio_blk.c | 20 ++++++-- drivers/block/xen-blkfront.c | 8 ++- drivers/md/bcache/super.c | 7 +-- drivers/md/dm-table.c | 39 +++----------- drivers/md/md.c | 8 +-- drivers/mmc/core/block.c | 42 +++++++--------- drivers/mmc/core/queue.c | 12 +++-- drivers/mmc/core/queue.h | 3 +- drivers/mtd/mtd_blkdevs.c | 5 +- drivers/nvdimm/pmem.c | 4 +- drivers/nvme/host/core.c | 7 ++- drivers/nvme/host/multipath.c | 6 --- drivers/scsi/sd.c | 28 +++++++---- include/linux/blkdev.h | 38 +++++++++++--- 29 files changed, 227 insertions(+), 206 deletions(-) (limited to 'include') diff --git a/Documentation/block/writeback_cache_control.rst b/Documentation/block/writeback_cache_control.rst index b208488d0aae..c575e08beda8 100644 --- a/Documentation/block/writeback_cache_control.rst +++ b/Documentation/block/writeback_cache_control.rst @@ -46,41 +46,50 @@ worry if the underlying devices need any explicit cache flushing and how the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags may both be set on a single bio. +Feature settings for block drivers +---------------------------------- -Implementation details for bio based block drivers --------------------------------------------------------------- +For devices that do not support volatile write caches there is no driver +support required, the block layer completes empty REQ_PREFLUSH requests before +entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from +requests that have a payload. -These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit -directly below the submit_bio interface. For remapping drivers the REQ_FUA -bits need to be propagated to underlying devices, and a global flush needs -to be implemented for bios with the REQ_PREFLUSH bit set. For real device -drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits -on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without -data can be completed successfully without doing any work. Drivers for -devices with volatile caches need to implement the support for these -flags themselves without any help from the block layer. +For devices with volatile write caches the driver needs to tell the block layer +that it supports flushing caches by setting the + BLK_FEAT_WRITE_CACHE -Implementation details for request_fn based block drivers ---------------------------------------------------------- +flag in the queue_limits feature field. 
For devices that also support the FUA +bit the block layer needs to be told to pass on the REQ_FUA bit by also setting the -For devices that do not support volatile write caches there is no driver -support required, the block layer completes empty REQ_PREFLUSH requests before -entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from -requests that have a payload. For devices with volatile write caches the -driver needs to tell the block layer that it supports flushing caches by -doing:: + BLK_FEAT_FUA + +flag in the features field of the queue_limits structure. + +Implementation details for bio based block drivers +-------------------------------------------------- + +For bio based drivers the REQ_PREFLUSH and REQ_FUA bits are simply passed on +to the driver if the driver sets the BLK_FEAT_WRITE_CACHE flag and the driver +needs to handle them. + +*NOTE*: The REQ_FUA bit also gets passed on when the BLK_FEAT_FUA flag is +_not_ set. Any bio based driver that sets BLK_FEAT_WRITE_CACHE also needs to +handle REQ_FUA. - blk_queue_write_cache(sdkp->disk->queue, true, false); +For remapping drivers the REQ_FUA bits need to be propagated to underlying +devices, and a global flush needs to be implemented for bios with the +REQ_PREFLUSH bit set. -and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn. Note that -REQ_PREFLUSH requests with a payload are automatically turned into a sequence -of an empty REQ_OP_FLUSH request followed by the actual write by the block -layer. For devices that also support the FUA bit the block layer needs -to be told to pass through the REQ_FUA bit using:: +Implementation details for blk-mq drivers +----------------------------------------- - blk_queue_write_cache(sdkp->disk->queue, true, true); +When the BLK_FEAT_WRITE_CACHE flag is set, REQ_OP_WRITE | REQ_PREFLUSH requests +with a payload are automatically turned into a sequence of a REQ_OP_FLUSH +request followed by the actual write by the block layer. -and the driver must handle write requests that have the REQ_FUA bit set -in prep_fn/request_fn. If the FUA bit is not natively supported the block -layer turns it into an empty REQ_OP_FLUSH request after the actual write. +When the BLK_FEAT_FUA flag is set, the REQ_FUA bit is simply passed on for the +REQ_OP_WRITE request, otherwise a REQ_OP_FLUSH request is sent by the block layer +after the completion of the write request for bio submissions with the REQ_FUA +bit set.
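The driver conversions that follow all take the same shape: instead of calling the removed blk_queue_write_cache() after the disk exists, a driver now declares its cache capabilities up front in the queue_limits it allocates the disk with. A rough sketch for a blk-mq driver, where example_tag_set and example_dev are placeholder names, not code from this series:

/*
 * Hypothetical blk-mq driver probe: advertise a volatile write cache
 * with FUA support through the new queue_limits features field.
 */
struct queue_limits lim = {
	.features	= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
};
struct gendisk *disk;

disk = blk_mq_alloc_disk(&example_tag_set, &lim, example_dev);
if (IS_ERR(disk))
	return PTR_ERR(disk);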
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index cdcb75a68989..19e01691ea0e 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -835,6 +835,7 @@ static int ubd_add(int n, char **error_out) struct queue_limits lim = { .max_segments = MAX_SG, .seg_boundary_mask = PAGE_SIZE - 1, + .features = BLK_FEAT_WRITE_CACHE, }; struct gendisk *disk; int err = 0; @@ -882,7 +883,6 @@ static int ubd_add(int n, char **error_out) } blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_write_cache(disk->queue, true, false); disk->major = UBD_MAJOR; disk->first_minor = n << UBD_SHIFT; disk->minors = 1 << UBD_SHIFT; diff --git a/block/blk-core.c b/block/blk-core.c index 82c3ae22d76d..2b45a4df9a1a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -782,7 +782,7 @@ void submit_bio_noacct(struct bio *bio) if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE && bio_op(bio) != REQ_OP_ZONE_APPEND)) goto end_io; - if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { + if (!bdev_write_cache(bdev)) { bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); if (!bio_sectors(bio)) { status = BLK_STS_OK; diff --git a/block/blk-flush.c b/block/blk-flush.c index 2234f8b3fc05..30b9d5033a2b 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -381,8 +381,8 @@ static void blk_rq_init_flush(struct request *rq) bool blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; - unsigned long fflags = q->queue_flags; /* may change, cache */ struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); + bool supports_fua = q->limits.features & BLK_FEAT_FUA; unsigned int policy = 0; /* FLUSH/FUA request must never be merged */ @@ -394,11 +394,10 @@ bool blk_insert_flush(struct request *rq) /* * Check which flushes we need to sequence for this operation. */ - if (fflags & (1UL << QUEUE_FLAG_WC)) { + if (blk_queue_write_cache(q)) { if (rq->cmd_flags & REQ_PREFLUSH) policy |= REQ_FSEQ_PREFLUSH; - if (!(fflags & (1UL << QUEUE_FLAG_FUA)) && - (rq->cmd_flags & REQ_FUA)) + if ((rq->cmd_flags & REQ_FUA) && !supports_fua) policy |= REQ_FSEQ_POSTFLUSH; } @@ -407,7 +406,7 @@ bool blk_insert_flush(struct request *rq) * REQ_PREFLUSH and FUA for the driver. 
*/ rq->cmd_flags &= ~REQ_PREFLUSH; - if (!(fflags & (1UL << QUEUE_FLAG_FUA))) + if (!supports_fua) rq->cmd_flags &= ~REQ_FUA; /* diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 770c0c2b72fa..e8b9db7c30c4 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -93,8 +93,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(INIT_DONE), QUEUE_FLAG_NAME(STABLE_WRITES), QUEUE_FLAG_NAME(POLL), - QUEUE_FLAG_NAME(WC), - QUEUE_FLAG_NAME(FUA), QUEUE_FLAG_NAME(DAX), QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(REGISTERED), diff --git a/block/blk-settings.c b/block/blk-settings.c index f11c8676eb4c..536ee202fcdc 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -261,6 +261,9 @@ static int blk_validate_limits(struct queue_limits *lim) lim->misaligned = 0; } + if (!(lim->features & BLK_FEAT_WRITE_CACHE)) + lim->features &= ~BLK_FEAT_FUA; + err = blk_validate_integrity_limits(lim); if (err) return err; @@ -454,6 +457,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, { unsigned int top, bottom, alignment, ret = 0; + t->features |= (b->features & BLK_FEAT_INHERIT_MASK); + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_user_sectors = min_not_zero(t->max_user_sectors, b->max_user_sectors); @@ -711,30 +716,6 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth) } EXPORT_SYMBOL(blk_set_queue_depth); -/** - * blk_queue_write_cache - configure queue's write cache - * @q: the request queue for the device - * @wc: write back cache on or off - * @fua: device supports FUA writes, if true - * - * Tell the block layer about the write cache of @q. - */ -void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua) -{ - if (wc) { - blk_queue_flag_set(QUEUE_FLAG_HW_WC, q); - blk_queue_flag_set(QUEUE_FLAG_WC, q); - } else { - blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q); - blk_queue_flag_clear(QUEUE_FLAG_WC, q); - } - if (fua) - blk_queue_flag_set(QUEUE_FLAG_FUA, q); - else - blk_queue_flag_clear(QUEUE_FLAG_FUA, q); -} -EXPORT_SYMBOL_GPL(blk_queue_write_cache); - int bdev_alignment_offset(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 5c787965b7d0..4f524c1d5e08 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -423,32 +423,41 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, static ssize_t queue_wc_show(struct request_queue *q, char *page) { - if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) - return sprintf(page, "write back\n"); - - return sprintf(page, "write through\n"); + if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED) + return sprintf(page, "write through\n"); + return sprintf(page, "write back\n"); } static ssize_t queue_wc_store(struct request_queue *q, const char *page, size_t count) { + struct queue_limits lim; + bool disable; + int err; + if (!strncmp(page, "write back", 10)) { - if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags)) - return -EINVAL; - blk_queue_flag_set(QUEUE_FLAG_WC, q); + disable = false; } else if (!strncmp(page, "write through", 13) || - !strncmp(page, "none", 4)) { - blk_queue_flag_clear(QUEUE_FLAG_WC, q); + !strncmp(page, "none", 4)) { + disable = true; } else { return -EINVAL; } + lim = queue_limits_start_update(q); + if (disable) + lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED; + else + lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED; + err = queue_limits_commit_update(q, &lim); + if (err) + return err; return count; } static ssize_t 
queue_fua_show(struct request_queue *q, char *page) { - return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); + return sprintf(page, "%u\n", !!(q->limits.features & BLK_FEAT_FUA)); } static ssize_t queue_dax_show(struct request_queue *q, char *page) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 64472134dd26..1a5e4b049ecd 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -206,8 +206,8 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, */ if (wb_acct & WBT_DISCARD) limit = rwb->wb_background; - else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) && - !wb_recent_wait(rwb)) + else if (blk_queue_write_cache(rwb->rqos.disk->queue) && + !wb_recent_wait(rwb)) limit = 0; else limit = rwb->wb_normal; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 113b441d4d36..bf42a46781fa 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2697,6 +2697,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig * connect. */ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, }; device = minor_to_device(minor); @@ -2736,7 +2737,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig disk->private_data = device; blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); - blk_queue_write_cache(disk->queue, true, true); device->md_io.page = alloc_page(GFP_KERNEL); if (!device->md_io.page) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 8991de8fb1bb..08d0fc7f17b7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -985,6 +985,9 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize) lim.logical_block_size = bsize; lim.physical_block_size = bsize; lim.io_min = bsize; + lim.features &= ~BLK_FEAT_WRITE_CACHE; + if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY)) + lim.features |= BLK_FEAT_WRITE_CACHE; if (!backing_bdev || bdev_nonrot(backing_bdev)) blk_queue_flag_set(QUEUE_FLAG_NONROT, lo->lo_queue); else @@ -1078,9 +1081,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); - if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) - blk_queue_write_cache(lo->lo_queue, true, false); - error = loop_reconfigure_limits(lo, config->block_size); if (WARN_ON_ONCE(error)) goto out_unlock; @@ -1131,9 +1131,6 @@ static void __loop_clr_fd(struct loop_device *lo, bool release) struct file *filp; gfp_t gfp = lo->old_gfp_mask; - if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) - blk_queue_write_cache(lo->lo_queue, false, false); - /* * Freeze the request queue when unbinding on a live file descriptor and * thus an open device. 
When called from ->release we are guaranteed diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 44b8c671921e..cb1c86a6a3fb 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -342,12 +342,14 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize, lim.max_hw_discard_sectors = UINT_MAX; else lim.max_hw_discard_sectors = 0; - if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) - blk_queue_write_cache(nbd->disk->queue, false, false); - else if (nbd->config->flags & NBD_FLAG_SEND_FUA) - blk_queue_write_cache(nbd->disk->queue, true, true); - else - blk_queue_write_cache(nbd->disk->queue, true, false); + if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) { + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) { + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + } else { + lim.features |= BLK_FEAT_WRITE_CACHE; + lim.features &= ~BLK_FEAT_FUA; + } lim.logical_block_size = blksize; lim.physical_block_size = blksize; error = queue_limits_commit_update(nbd->disk->queue, &lim); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 75f189e42f88..21f9d256e884 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1928,6 +1928,13 @@ static int null_add_dev(struct nullb_device *dev) goto out_cleanup_tags; } + if (dev->cache_size > 0) { + set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); + lim.features |= BLK_FEAT_WRITE_CACHE; + if (dev->fua) + lim.features |= BLK_FEAT_FUA; + } + nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb); if (IS_ERR(nullb->disk)) { rv = PTR_ERR(nullb->disk); @@ -1940,11 +1947,6 @@ static int null_add_dev(struct nullb_device *dev) nullb_setup_bwtimer(nullb); } - if (dev->cache_size > 0) { - set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); - blk_queue_write_cache(nullb->q, true, dev->fua); - } - nullb->q->queuedata = nullb; blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index b810ac0a5c4b..8b73cf459b59 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -388,9 +388,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) .max_segments = -1, .max_segment_size = dev->bounce_size, .dma_alignment = dev->blk_size - 1, + .features = BLK_FEAT_WRITE_CACHE, }; - - struct request_queue *queue; struct gendisk *gendisk; if (dev->blk_size < 512) { @@ -447,10 +446,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) goto fail_free_tag_set; } - queue = gendisk->queue; - - blk_queue_write_cache(queue, true, false); - priv->gendisk = gendisk; gendisk->major = ps3disk_major; gendisk->first_minor = devidx * PS3DISK_MINORS; diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index b7ffe03c6160..02c4b1731827 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1389,6 +1389,12 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev, le32_to_cpu(rsp->max_discard_sectors); } + if (rsp->cache_policy & RNBD_WRITEBACK) { + lim.features |= BLK_FEAT_WRITE_CACHE; + if (rsp->cache_policy & RNBD_FUA) + lim.features |= BLK_FEAT_FUA; + } + dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev); if (IS_ERR(dev->gd)) return PTR_ERR(dev->gd); @@ -1397,10 +1403,6 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev, blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); - blk_queue_write_cache(dev->queue, - !!(rsp->cache_policy & 
RNBD_WRITEBACK), - !!(rsp->cache_policy & RNBD_FUA)); - return rnbd_clt_setup_gen_disk(dev, rsp, idx); } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4e159948c912..e45c65c1848d 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -487,8 +487,6 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub) struct request_queue *q = ub->ub_disk->queue; const struct ublk_param_basic *p = &ub->params.basic; - blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE, - p->attrs & UBLK_ATTR_FUA); if (p->attrs & UBLK_ATTR_ROTATIONAL) blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); else @@ -2210,6 +2208,12 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) lim.max_zone_append_sectors = p->max_zone_append_sectors; } + if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) { + lim.features |= BLK_FEAT_WRITE_CACHE; + if (ub->params.basic.attrs & UBLK_ATTR_FUA) + lim.features |= BLK_FEAT_FUA; + } + if (wait_for_completion_interruptible(&ub->completion) != 0) return -EINTR; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 378b241911ca..b1a3c2935285 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1100,6 +1100,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; struct virtio_device *vdev = vblk->vdev; + struct queue_limits lim; int i; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); @@ -1108,7 +1109,17 @@ cache_type_store(struct device *dev, struct device_attribute *attr, return i; virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); - blk_queue_write_cache(disk->queue, virtblk_get_cache_mode(vdev), false); + + lim = queue_limits_start_update(disk->queue); + if (virtblk_get_cache_mode(vdev)) + lim.features |= BLK_FEAT_WRITE_CACHE; + else + lim.features &= ~BLK_FEAT_WRITE_CACHE; + blk_mq_freeze_queue(disk->queue); + i = queue_limits_commit_update(disk->queue, &lim); + blk_mq_unfreeze_queue(disk->queue); + if (i) + return i; return count; } @@ -1504,6 +1515,9 @@ static int virtblk_probe(struct virtio_device *vdev) if (err) goto out_free_tags; + if (virtblk_get_cache_mode(vdev)) + lim.features |= BLK_FEAT_WRITE_CACHE; + vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk); if (IS_ERR(vblk->disk)) { err = PTR_ERR(vblk->disk); @@ -1519,10 +1533,6 @@ static int virtblk_probe(struct virtio_device *vdev) vblk->disk->fops = &virtblk_fops; vblk->index = index; - /* configure queue flush support */ - blk_queue_write_cache(vblk->disk->queue, virtblk_get_cache_mode(vdev), - false); - /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 851b03844edd..9aafce3e5987 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -959,6 +959,12 @@ static void blkif_set_queue_limits(const struct blkfront_info *info, lim->max_secure_erase_sectors = UINT_MAX; } + if (info->feature_flush) { + lim->features |= BLK_FEAT_WRITE_CACHE; + if (info->feature_fua) + lim->features |= BLK_FEAT_FUA; + } + /* Hard sector size and max sectors impersonate the equiv. hardware. 
*/ lim->logical_block_size = info->sector_size; lim->physical_block_size = info->physical_sector_size; @@ -987,8 +993,6 @@ static const char *flush_info(struct blkfront_info *info) static void xlvbd_flush(struct blkfront_info *info) { - blk_queue_write_cache(info->rq, info->feature_flush ? true : false, - info->feature_fua ? true : false); pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4d11fc664cb0..cb6595c8b551 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -897,7 +897,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, sector_t sectors, struct block_device *cached_bdev, const struct block_device_operations *ops) { - struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, SIZE_MAX / sizeof(atomic_t)); struct queue_limits lim = { @@ -909,6 +908,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, .io_min = block_size, .logical_block_size = block_size, .physical_block_size = block_size, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, }; uint64_t n; int idx; @@ -975,12 +975,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->fops = ops; d->disk->private_data = d; - q = d->disk->queue; - blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); - - blk_queue_write_cache(q, true, true); - return 0; out_bioset_exit: diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index fd789eeb62d9..03abdae64682 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1686,34 +1686,16 @@ combine_limits: return validate_hardware_logical_block_alignment(t, limits); } -static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - unsigned long flush = (unsigned long) data; - struct request_queue *q = bdev_get_queue(dev->bdev); - - return (q->queue_flags & flush); -} - -static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) +/* + * Check if a target requires flush support even if none of the underlying + * devices need it (e.g. to persist target-specific metadata). + */ +static bool dm_table_supports_flush(struct dm_table *t) { - /* - * Require at least one underlying device to support flushes. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting flushes must provide. 
- */ for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); - if (!ti->num_flush_bios) - continue; - - if (ti->flush_supported) - return true; - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) + if (ti->num_flush_bios && ti->flush_supported) return true; } @@ -1855,7 +1837,6 @@ static int device_requires_stable_pages(struct dm_target *ti, int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { - bool wc = false, fua = false; int r; if (dm_table_supports_nowait(t)) @@ -1876,12 +1857,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_supports_secure_erase(t)) limits->max_secure_erase_sectors = 0; - if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { - wc = true; - if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) - fua = true; - } - blk_queue_write_cache(q, wc, fua); + if (dm_table_supports_flush(t)) + limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; if (dm_table_supports_dax(t, device_not_dax_capable)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); diff --git a/drivers/md/md.c b/drivers/md/md.c index 67ece2cd725f..2f4c5d1755d8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5785,7 +5785,10 @@ struct mddev *md_alloc(dev_t dev, char *name) int partitioned; int shift; int unit; - int error ; + int error; + struct queue_limits lim = { + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, + }; /* * Wait for any previous instance of this device to be completely @@ -5825,7 +5828,7 @@ struct mddev *md_alloc(dev_t dev, char *name) */ mddev->hold_active = UNTIL_STOP; - disk = blk_alloc_disk(NULL, NUMA_NO_NODE); + disk = blk_alloc_disk(&lim, NUMA_NO_NODE); if (IS_ERR(disk)) { error = PTR_ERR(disk); goto out_free_mddev; @@ -5843,7 +5846,6 @@ struct mddev *md_alloc(dev_t dev, char *name) disk->fops = &md_fops; disk->private_data = mddev; - blk_queue_write_cache(disk->queue, true, true); disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; error = add_disk(disk); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 367509b5b646..2c9963248fcb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2466,8 +2466,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct mmc_blk_data *md; int devidx, ret; char cap_str[10]; - bool cache_enabled = false; - bool fua_enabled = false; + unsigned int features = 0; devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL); if (devidx < 0) { @@ -2499,7 +2498,24 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, */ md->read_only = mmc_blk_readonly(card); - md->disk = mmc_init_queue(&md->queue, card); + if (mmc_host_cmd23(card->host)) { + if ((mmc_card_mmc(card) && + card->csd.mmca_vsn >= CSD_SPEC_VER_3) || + (mmc_card_sd(card) && + card->scr.cmds & SD_SCR_CMD23_SUPPORT)) + md->flags |= MMC_BLK_CMD23; + } + + if (md->flags & MMC_BLK_CMD23 && + ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || + card->ext_csd.rel_sectors)) { + md->flags |= MMC_BLK_REL_WR; + features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + } else if (mmc_cache_enabled(card->host)) { + features |= BLK_FEAT_WRITE_CACHE; + } + + md->disk = mmc_init_queue(&md->queue, card, features); if (IS_ERR(md->disk)) { ret = PTR_ERR(md->disk); goto err_kfree; @@ -2539,26 +2555,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, set_capacity(md->disk, size); - if 
(mmc_host_cmd23(card->host)) { - if ((mmc_card_mmc(card) && - card->csd.mmca_vsn >= CSD_SPEC_VER_3) || - (mmc_card_sd(card) && - card->scr.cmds & SD_SCR_CMD23_SUPPORT)) - md->flags |= MMC_BLK_CMD23; - } - - if (md->flags & MMC_BLK_CMD23 && - ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || - card->ext_csd.rel_sectors)) { - md->flags |= MMC_BLK_REL_WR; - fua_enabled = true; - cache_enabled = true; - } - if (mmc_cache_enabled(card->host)) - cache_enabled = true; - - blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled); - string_get_size((u64)size, 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s%s\n", diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 241cdc2b2a2a..97ff993d3157 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -344,10 +344,12 @@ static const struct blk_mq_ops mmc_mq_ops = { }; static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, - struct mmc_card *card) + struct mmc_card *card, unsigned int features) { struct mmc_host *host = card->host; - struct queue_limits lim = { }; + struct queue_limits lim = { + .features = features, + }; struct gendisk *disk; if (mmc_can_erase(card)) @@ -413,10 +415,12 @@ static inline bool mmc_merge_capable(struct mmc_host *host) * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue + * @features: block layer features (BLK_FEAT_*) * * Initialise a MMC card request queue. */ -struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) +struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, + unsigned int features) { struct mmc_host *host = card->host; struct gendisk *disk; @@ -460,7 +464,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) return ERR_PTR(ret); - disk = mmc_alloc_disk(mq, card); + disk = mmc_alloc_disk(mq, card, features); if (IS_ERR(disk)) blk_mq_free_tag_set(&mq->tag_set); return disk; diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 9ade3bcbb714..1498840a4ea0 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -94,7 +94,8 @@ struct mmc_queue { struct work_struct complete_work; }; -struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card); +struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, + unsigned int features); extern void mmc_cleanup_queue(struct mmc_queue *); extern void mmc_queue_suspend(struct mmc_queue *); extern void mmc_queue_resume(struct mmc_queue *); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 3caa0717d46c..1b9f57f231e8 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -336,6 +336,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) lim.logical_block_size = tr->blksize; if (tr->discard) lim.max_hw_discard_sectors = UINT_MAX; + if (tr->flush) + lim.features |= BLK_FEAT_WRITE_CACHE; /* Create gendisk */ gd = blk_mq_alloc_disk(new->tag_set, &lim, new); @@ -373,9 +375,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) spin_lock_init(&new->queue_lock); INIT_LIST_HEAD(&new->rq_list); - if (tr->flush) - blk_queue_write_cache(new->rq, true, false); - blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 598fe2e89bda..aff818469c11 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -455,6 +455,7 @@ static int pmem_attach_disk(struct device *dev, 
.logical_block_size = pmem_sector_size(ndns), .physical_block_size = PAGE_SIZE, .max_hw_sectors = UINT_MAX, + .features = BLK_FEAT_WRITE_CACHE, }; int nid = dev_to_node(dev), fua; struct resource *res = &nsio->res; @@ -495,6 +496,8 @@ static int pmem_attach_disk(struct device *dev, dev_warn(dev, "unable to guarantee persistence of writes\n"); fua = 0; } + if (fua) + lim.features |= BLK_FEAT_FUA; if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(&ndns->dev))) { @@ -543,7 +546,6 @@ static int pmem_attach_disk(struct device *dev, } pmem->virt_addr = addr; - blk_queue_write_cache(q, true, fua); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); if (pmem->pfn_flags & PFN_MAP) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5a673fa5cb26..9fc5e36fe2e5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2056,7 +2056,6 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { - bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; struct queue_limits lim; struct nvme_id_ns_nvm *nvm = NULL; struct nvme_zone_info zi = {}; @@ -2106,6 +2105,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, ns->head->ids.csi == NVME_CSI_ZNS) nvme_update_zone_info(ns, &lim, &zi); + if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + else + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + /* * Register a metadata profile for PI, or the plain non-integrity NVMe * metadata masquerading as Type 0 if supported, otherwise reject block @@ -2132,7 +2136,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) ns->head->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); - blk_queue_write_cache(ns->disk->queue, vwc, vwc); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 12c59db02539..3d0e23a0a4dd 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -521,7 +521,6 @@ static void nvme_requeue_work(struct work_struct *work) int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { struct queue_limits lim; - bool vwc = false; mutex_init(&head->lock); bio_list_init(&head->requeue_list); @@ -562,11 +561,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); - - /* we need to propagate up the VMC settings */ - if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) - vwc = true; - blk_queue_write_cache(head->disk->queue, vwc, vwc); return 0; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 66f7d1e3429c..d8ee4a4d4a62 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -120,17 +120,18 @@ static const char *sd_cache_types[] = { "write back, no read (daft)" }; -static void sd_set_flush_flag(struct scsi_disk *sdkp) +static void sd_set_flush_flag(struct scsi_disk *sdkp, + struct queue_limits *lim) { - bool wc = false, fua = false; - if (sdkp->WCE) { - wc = true; + lim->features |= BLK_FEAT_WRITE_CACHE; if (sdkp->DPOFUA) - fua = true; + lim->features |= BLK_FEAT_FUA; + else + lim->features &= ~BLK_FEAT_FUA; + } else { + lim->features &= ~(BLK_FEAT_WRITE_CACHE | 
BLK_FEAT_FUA); } - - blk_queue_write_cache(sdkp->disk->queue, wc, fua); } static ssize_t @@ -168,9 +169,18 @@ cache_type_store(struct device *dev, struct device_attribute *attr, wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; if (sdkp->cache_override) { + struct queue_limits lim; + sdkp->WCE = wce; sdkp->RCD = rcd; - sd_set_flush_flag(sdkp); + + lim = queue_limits_start_update(sdkp->disk->queue); + sd_set_flush_flag(sdkp, &lim); + blk_mq_freeze_queue(sdkp->disk->queue); + ret = queue_limits_commit_update(sdkp->disk->queue, &lim); + blk_mq_unfreeze_queue(sdkp->disk->queue); + if (ret) + return ret; return count; } @@ -3663,7 +3673,7 @@ static int sd_revalidate_disk(struct gendisk *disk) * We now have all cache related info, determine how we deal * with flush requests. */ - sd_set_flush_flag(sdkp); + sd_set_flush_flag(sdkp, &lim); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0c247a716885..acdfe5122faa 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -282,6 +282,28 @@ static inline bool blk_op_is_passthrough(blk_opf_t op) return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } +/* flags set by the driver in queue_limits.features */ +enum { + /* supports a volatile write cache */ + BLK_FEAT_WRITE_CACHE = (1u << 0), + + /* supports passing on the FUA bit */ + BLK_FEAT_FUA = (1u << 1), +}; + +/* + * Flags automatically inherited when stacking limits. + */ +#define BLK_FEAT_INHERIT_MASK \ + (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA) + + +/* internal flags in queue_limits.flags */ +enum { + /* do not send FLUSH or FUA command despite advertised write cache */ + BLK_FLAGS_WRITE_CACHE_DISABLED = (1u << 31), +}; + /* * BLK_BOUNCE_NONE: never bounce (default) * BLK_BOUNCE_HIGH: bounce all highmem pages @@ -292,6 +314,8 @@ enum blk_bounce { }; struct queue_limits { + unsigned int features; + unsigned int flags; enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; @@ -536,12 +560,9 @@ struct request_queue { #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ -#define QUEUE_FLAG_WC 17 /* Write back caching */ -#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */ #define QUEUE_FLAG_DAX 19 /* device supports DAX */ #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ @@ -951,7 +972,6 @@ void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, sector_t offset, const char *pfx); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); -extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); struct blk_independent_access_ranges * disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges); @@ -1304,14 +1324,20 @@ static inline bool bdev_stable_writes(struct block_device *bdev) return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags); } 
+static inline bool blk_queue_write_cache(struct request_queue *q) +{ + return (q->limits.features & BLK_FEAT_WRITE_CACHE) && + !(q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED); +} + static inline bool bdev_write_cache(struct block_device *bdev) { - return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags); + return blk_queue_write_cache(bdev_get_queue(bdev)); } static inline bool bdev_fua(struct block_device *bdev) { - return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags); + return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA; } static inline bool bdev_nowait(struct block_device *bdev) -- cgit v1.2.3-58-ga151 From bd4a633b6f7c3c6b6ebc1a07317643270e751a94 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:41 +0200 Subject: block: move the nonrot flag to queue_limits Move the nonrot flag into the queue_limits feature field so that it can be set atomically with the queue frozen. Use the chance to switch to defaulting to non-rotational and require the driver to opt into rotational, which matches the polarity of the sysfs interface. For the z2ram, ps3vram, 2x memstick, ubiblock and dcssblk the new rotational flag is not set as they clearly are not rotational despite this being a behavior change. There are some other drivers that unconditionally set the rotational flag to keep the existing behavior as they arguably can be used on rotational devices even if that is probably not their main use today (e.g. virtio_blk and drbd). The flag is automatically inherited in blk_stack_limits matching the existing behavior in dm and md. Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/m68k/emu/nfblock.c | 1 + arch/um/drivers/ubd_kern.c | 1 - arch/xtensa/platforms/iss/simdisk.c | 5 ++++- block/blk-mq-debugfs.c | 1 - block/blk-sysfs.c | 39 ++++++++++++++++++++++++++++++++++--- drivers/block/amiflop.c | 5 ++++- drivers/block/aoe/aoeblk.c | 1 + drivers/block/ataflop.c | 5 ++++- drivers/block/brd.c | 2 -- drivers/block/drbd/drbd_main.c | 3 ++- drivers/block/floppy.c | 3 ++- drivers/block/loop.c | 8 +++----- drivers/block/mtip32xx/mtip32xx.c | 1 - drivers/block/n64cart.c | 2 -- drivers/block/nbd.c | 5 ----- drivers/block/null_blk/main.c | 1 - drivers/block/pktcdvd.c | 1 + drivers/block/ps3disk.c | 3 ++- drivers/block/rbd.c | 3 --- drivers/block/rnbd/rnbd-clt.c | 4 ---- drivers/block/sunvdc.c | 1 + drivers/block/swim.c | 5 ++++- drivers/block/swim3.c | 5 ++++- drivers/block/ublk_drv.c | 9 +++------ drivers/block/virtio_blk.c | 4 +++- drivers/block/xen-blkfront.c | 1 - drivers/block/zram/zram_drv.c | 2 -- drivers/cdrom/gdrom.c | 1 + drivers/md/bcache/super.c | 2 -- drivers/md/dm-table.c | 12 ------------ drivers/md/md.c | 13 ------------- drivers/mmc/core/queue.c | 1 - drivers/mtd/mtd_blkdevs.c | 1 - drivers/nvdimm/btt.c | 1 - drivers/nvdimm/pmem.c | 1 - drivers/nvme/host/core.c | 1 - drivers/nvme/host/multipath.c | 1 - drivers/s390/block/dasd_genhd.c | 1 - drivers/s390/block/scm_blk.c | 1 - drivers/scsi/sd.c | 4 ++-- include/linux/blkdev.h | 10 +++++----- 41 files changed, 83 insertions(+), 88 deletions(-) (limited to 'include') diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 642fb80c5c4e..8eea7ef91151 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -98,6 +98,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize) { struct queue_limits lim = { 
.logical_block_size = bsize, + .features = BLK_FEAT_ROTATIONAL, }; struct nfhd_device *dev; int dev_id = id - NFHD_DEV_OFFSET; diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 19e01691ea0e..9f1e76ddda5a 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -882,7 +882,6 @@ static int ubd_add(int n, char **error_out) goto out_cleanup_tags; } - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); disk->major = UBD_MAJOR; disk->first_minor = n << UBD_SHIFT; disk->minors = 1 << UBD_SHIFT; diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index defc67909a9c..d6d2b533a574 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -263,6 +263,9 @@ static const struct proc_ops simdisk_proc_ops = { static int __init simdisk_setup(struct simdisk *dev, int which, struct proc_dir_entry *procdir) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; char tmp[2] = { '0' + which, 0 }; int err; @@ -271,7 +274,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which, spin_lock_init(&dev->lock); dev->users = 0; - dev->gd = blk_alloc_disk(NULL, NUMA_NO_NODE); + dev->gd = blk_alloc_disk(&lim, NUMA_NO_NODE); if (IS_ERR(dev->gd)) { err = PTR_ERR(dev->gd); goto out; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index e8b9db7c30c4..4d0e62ec88f0 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -84,7 +84,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(NOMERGES), QUEUE_FLAG_NAME(SAME_COMP), QUEUE_FLAG_NAME(FAIL_IO), - QUEUE_FLAG_NAME(NONROT), QUEUE_FLAG_NAME(IO_STAT), QUEUE_FLAG_NAME(NOXMERGES), QUEUE_FLAG_NAME(ADD_RANDOM), diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 4f524c1d5e08..637ed3bbbfb4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -263,6 +263,39 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page) return queue_var_show(queue_dma_alignment(q), page); } +static ssize_t queue_feature_store(struct request_queue *q, const char *page, + size_t count, unsigned int feature) +{ + struct queue_limits lim; + unsigned long val; + ssize_t ret; + + ret = queue_var_store(&val, page, count); + if (ret < 0) + return ret; + + lim = queue_limits_start_update(q); + if (val) + lim.features |= feature; + else + lim.features &= ~feature; + ret = queue_limits_commit_update(q, &lim); + if (ret) + return ret; + return count; +} + +#define QUEUE_SYSFS_FEATURE(_name, _feature) \ +static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \ +{ \ + return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \ +} \ +static ssize_t queue_##_name##_store(struct request_queue *q, \ + const char *page, size_t count) \ +{ \ + return queue_feature_store(q, page, count, _feature); \ +} + #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ static ssize_t \ queue_##name##_show(struct request_queue *q, char *page) \ @@ -289,7 +322,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ return ret; \ } -QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); +QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0); @@ -526,7 +559,7 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { .show = queue_logical_block_size_show, }; -QUEUE_RW_ENTRY(queue_nonrot, "rotational"); +QUEUE_RW_ENTRY(queue_rotational, "rotational"); 
QUEUE_RW_ENTRY(queue_iostats, "iostats"); QUEUE_RW_ENTRY(queue_random, "add_random"); QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes"); @@ -624,7 +657,7 @@ static struct attribute *queue_attrs[] = { &queue_write_zeroes_max_entry.attr, &queue_zone_append_max_entry.attr, &queue_zone_write_granularity_entry.attr, - &queue_nonrot_entry.attr, + &queue_rotational_entry.attr, &queue_zoned_entry.attr, &queue_nr_zones_entry.attr, &queue_max_open_zones_entry.attr, diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a25414228e47..ff45701f7a5e 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1776,10 +1776,13 @@ static const struct blk_mq_ops amiflop_mq_ops = { static int fd_alloc_disk(int drive, int system) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct gendisk *disk; int err; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index b6dac8cee70f..2028795ec61c 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp) struct queue_limits lim = { .max_hw_sectors = aoe_maxsectors, .io_opt = SZ_2M, + .features = BLK_FEAT_ROTATIONAL, }; ulong flags; int late = 0; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index cacc4ba942a8..4ee10a742bdb 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1992,9 +1992,12 @@ static const struct blk_mq_ops ataflop_mq_ops = { static int ataflop_alloc_disk(unsigned int drive, unsigned int type) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct gendisk *disk; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 558d8e670566..b25dc463b5e3 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -366,8 +366,6 @@ static int brd_alloc(int i) strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue); err = add_disk(disk); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index bf42a46781fa..2ef29a478075 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2697,7 +2697,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig * connect. 
*/ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8, - .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | + BLK_FEAT_ROTATIONAL, }; device = minor_to_device(minor); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 25c9d85667f1..6d7f7df97c3a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4516,7 +4516,8 @@ static bool floppy_available(int drive) static int floppy_alloc_disk(unsigned int drive, unsigned int type) { struct queue_limits lim = { - .max_hw_sectors = 64, + .max_hw_sectors = 64, + .features = BLK_FEAT_ROTATIONAL, }; struct gendisk *disk; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 08d0fc7f17b7..86b5d956dc4e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -985,13 +985,11 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize) lim.logical_block_size = bsize; lim.physical_block_size = bsize; lim.io_min = bsize; - lim.features &= ~BLK_FEAT_WRITE_CACHE; + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL); if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY)) lim.features |= BLK_FEAT_WRITE_CACHE; - if (!backing_bdev || bdev_nonrot(backing_bdev)) - blk_queue_flag_set(QUEUE_FLAG_NONROT, lo->lo_queue); - else - blk_queue_flag_clear(QUEUE_FLAG_NONROT, lo->lo_queue); + if (backing_bdev && !bdev_nonrot(backing_bdev)) + lim.features |= BLK_FEAT_ROTATIONAL; loop_config_discard(lo, &lim); return queue_limits_commit_update(lo->lo_queue, &lim); } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 43a187609ef7..1dbbf72659d5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3485,7 +3485,6 @@ skip_create_disk: goto start_service_thread; /* Set device limits. 
*/ - blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue); dma_set_max_seg_size(&dd->pdev->dev, 0x400000); diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 27b2187e7a6d..b9fdeff31caf 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev) set_capacity(disk, size >> SECTOR_SHIFT); set_disk_ro(disk, 1); - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - err = add_disk(disk); if (err) goto out_cleanup_disk; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index cb1c86a6a3fb..6cddf5baffe0 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1867,11 +1867,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) goto out_err_disk; } - /* - * Tell the block layer that we are not a rotational device - */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - mutex_init(&nbd->config_lock); refcount_set(&nbd->config_refs, 0); /* diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 21f9d256e884..83a4ebe4763a 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1948,7 +1948,6 @@ static int null_add_dev(struct nullb_device *dev) } nullb->q->queuedata = nullb; - blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); rv = ida_alloc(&nullb_indexes, GFP_KERNEL); if (rv < 0) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 8a2ce8070010..7cece5884b9c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2622,6 +2622,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) struct queue_limits lim = { .max_hw_sectors = PACKET_MAX_SECTORS, .logical_block_size = CD_FRAMESIZE, + .features = BLK_FEAT_ROTATIONAL, }; int idx; int ret = -ENOMEM; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 8b73cf459b59..ff45ed766469 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -388,7 +388,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) .max_segments = -1, .max_segment_size = dev->bounce_size, .dma_alignment = dev->blk_size - 1, - .features = BLK_FEAT_WRITE_CACHE, + .features = BLK_FEAT_WRITE_CACHE | + BLK_FEAT_ROTATIONAL, }; struct gendisk *gendisk; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 22ad704f81d8..ec1f1c7d4275 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4997,9 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->fops = &rbd_bd_ops; disk->private_data = rbd_dev; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 02c4b1731827..4918b0f68b46 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1352,10 +1352,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, if (dev->access_mode == RNBD_ACCESS_RO) set_disk_ro(dev->gd, true); - /* - * Network device does not need rotational - */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); err = add_disk(dev->gd); if (err) put_disk(dev->gd); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 5286cb8e0824..2d38331ee667 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -791,6 +791,7 @@ static int probe_disk(struct vdc_port *port) .seg_boundary_mask = PAGE_SIZE - 1, 
.max_segment_size = PAGE_SIZE, .max_segments = port->ring_cookies, + .features = BLK_FEAT_ROTATIONAL, }; struct request_queue *q; struct gendisk *g; diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 6731678f3a41..126f151c4f2c 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs) static int swim_floppy_init(struct swim_priv *swd) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; int err; int drive; struct swim __iomem *base = swd->base; @@ -820,7 +823,7 @@ static int swim_floppy_init(struct swim_priv *swd) goto exit_put_disks; swd->unit[drive].disk = - blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL, + blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim, &swd->unit[drive]); if (IS_ERR(swd->unit[drive].disk)) { blk_mq_free_tag_set(&swd->unit[drive].tag_set); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index a04756ac778e..90be1017f7bf 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1189,6 +1189,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index) static int swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct floppy_state *fs; struct gendisk *disk; int rc; @@ -1210,7 +1213,7 @@ static int swim3_attach(struct macio_dev *mdev, if (rc) goto out_unregister; - disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs); + disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs); if (IS_ERR(disk)) { rc = PTR_ERR(disk); goto out_free_tag_set; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index e45c65c1848d..4fcde0999358 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -484,14 +484,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos) static void ublk_dev_param_basic_apply(struct ublk_device *ub) { - struct request_queue *q = ub->ub_disk->queue; const struct ublk_param_basic *p = &ub->params.basic; - if (p->attrs & UBLK_ATTR_ROTATIONAL) - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - if (p->attrs & UBLK_ATTR_READ_ONLY) set_disk_ro(ub->ub_disk, true); @@ -2214,6 +2208,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) lim.features |= BLK_FEAT_FUA; } + if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL) + lim.features |= BLK_FEAT_ROTATIONAL; + if (wait_for_completion_interruptible(&ub->completion) != 0) return -EINTR; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index b1a3c2935285..13a2f24f1766 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1451,7 +1451,9 @@ static int virtblk_read_limits(struct virtio_blk *vblk, static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; - struct queue_limits lim = { }; + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; int err, index; unsigned int queue_depth; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 9aafce3e5987..fa3a2ba52545 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1146,7 +1146,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, err = PTR_ERR(gd); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue); strcpy(gd->disk_name, DEV_NAME); ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 
3acd7006ad2c..aad840fc7e18 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2245,8 +2245,6 @@ static int zram_add(void) /* Actual capacity set using sysfs (/sys/block/zram/disksize */ set_capacity(zram->disk, 0); - /* zram devices sort of resembles non-rotational disks */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); ret = device_add_disk(NULL, zram->disk, zram_disk_groups); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index eefdd422ad8e..71cfe7a85913 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr) .max_segments = 1, /* set a large max size to get most from DMA */ .max_segment_size = 0x40000, + .features = BLK_FEAT_ROTATIONAL, }; int err; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index cb6595c8b551..baa364eedd00 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -974,8 +974,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->minors = BCACHE_MINORS; d->disk->fops = ops; d->disk->private_data = d; - - blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); return 0; out_bioset_exit: diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 03abdae64682..c062af329709 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1716,12 +1716,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false; } -static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - return !bdev_nonrot(dev->bdev); -} - static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1870,12 +1864,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); - /* Ensure that all underlying devices are non-rotational. */ - if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. 
diff --git a/drivers/md/md.c b/drivers/md/md.c index 2f4c5d1755d8..c23423c51fb7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6151,20 +6151,7 @@ int md_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { struct request_queue *q = mddev->gendisk->queue; - bool nonrot = true; - rdev_for_each(rdev, mddev) { - if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) { - nonrot = false; - break; - } - } - if (mddev->degraded) - nonrot = false; - if (nonrot) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q); /* Set the NOWAIT flags if all underlying devices support it */ diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 97ff993d3157..b4f62fa84586 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -387,7 +387,6 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); blk_queue_rq_timeout(mq->queue, 60 * HZ); - blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 1b9f57f231e8..bf8369ce7ddf 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -375,7 +375,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) spin_lock_init(&new->queue_lock); INIT_LIST_HEAD(&new->rq_list); - blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); gd->queue = new->rq; diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index c5f8451b494d..e474afa8e9f6 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1518,7 +1518,6 @@ static int btt_blk_init(struct btt *btt) btt->btt_disk->fops = &btt_fops; btt->btt_disk->private_data = btt; - blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index aff818469c11..501cf226df01 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -546,7 +546,6 @@ static int pmem_attach_disk(struct device *dev, } pmem->virt_addr = addr; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); if (pmem->pfn_flags & PFN_MAP) blk_queue_flag_set(QUEUE_FLAG_DAX, q); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9fc5e36fe2e5..0d753fe71f35 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3744,7 +3744,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (ctrl->opts && ctrl->opts->data_digest) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); - blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); if (ctrl->ops->supports_pci_p2pdma && ctrl->ops->supports_pci_p2pdma(ctrl)) blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3d0e23a0a4dd..58c13304e558 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -549,7 +549,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); - blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); 
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); /* diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 4533dd055ca8..1aa426b1dedd 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -68,7 +68,6 @@ int dasd_gendisk_alloc(struct dasd_block *block) blk_mq_free_tag_set(&block->tag_set); return PTR_ERR(gdp); } - blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue); /* Initialize gendisk structure. */ gdp->major = DASD_MAJOR; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 1d456a5a3bfb..2e2309fa9a0b 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -475,7 +475,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) goto out_tag; } rq = bdev->rq = bdev->gendisk->queue; - blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); bdev->gendisk->private_data = scmdev; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d8ee4a4d4a62..a42c3c45e868 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3318,7 +3318,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp, rcu_read_unlock(); if (rot == 1) { - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + lim->features &= ~BLK_FEAT_ROTATIONAL; blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } @@ -3646,7 +3646,7 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + lim.features |= BLK_FEAT_ROTATIONAL; blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); if (scsi_device_supports_vpd(sdp)) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index acdfe5122faa..988e3248cffe 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -289,14 +289,16 @@ enum { /* supports passing on the FUA bit */ BLK_FEAT_FUA = (1u << 1), + + /* rotational device (hard drive or floppy) */ + BLK_FEAT_ROTATIONAL = (1u << 2), }; /* * Flags automatically inherited when stacking limits. 
  */
 #define BLK_FEAT_INHERIT_MASK \
-	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA)
-
+	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL)
 
 /* internal flags in queue_limits.flags */
 enum {
@@ -553,8 +555,6 @@ struct request_queue {
 #define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
-#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
@@ -589,7 +589,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	((q)->limits.features & BLK_FEAT_ROTATIONAL)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)	\
 	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
--
cgit v1.2.3-58-ga151


From 39a9f1c334f9f27b3b3e6d0005c10ed667268346 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Jun 2024 08:04:42 +0200
Subject: block: move the add_random flag to queue_limits

Move the add_random flag into the queue_limits feature field so that it
can be set atomically with the queue frozen.

Note that this also removes code from dm to clear the flag based on the
underlying devices, which can't be reached as dm devices will always
start out without the flag set.
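For orientation, the run-time half of this conversion follows the same
pattern everywhere in the series: instead of flipping a queue flag, the
driver edits a queue_limits snapshot and commits it atomically. A
minimal sketch, not part of the patch (example_set_add_random() is a
made-up name; queue_limits_start_update() and
queue_limits_commit_update() are the atomic-update helpers this series
builds on):

	/*
	 * Sketch: toggle a feature bit through the atomic queue limits
	 * API, replacing blk_queue_flag_set()/blk_queue_flag_clear().
	 * Callers that can race with I/O freeze the queue around the
	 * commit, as the sd and virtio_blk hunks earlier in this log do.
	 */
	static int example_set_add_random(struct request_queue *q, bool enable)
	{
		struct queue_limits lim;

		lim = queue_limits_start_update(q);
		if (enable)
			lim.features |= BLK_FEAT_ADD_RANDOM;
		else
			lim.features &= ~BLK_FEAT_ADD_RANDOM;
		return queue_limits_commit_update(q, &lim);
	}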
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-16-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - block/blk-sysfs.c | 6 +++--- drivers/block/mtip32xx/mtip32xx.c | 1 - drivers/md/dm-table.c | 18 ------------------ drivers/mmc/core/queue.c | 2 -- drivers/mtd/mtd_blkdevs.c | 3 --- drivers/s390/block/scm_blk.c | 4 ---- drivers/scsi/scsi_lib.c | 3 +-- drivers/scsi/sd.c | 11 +++-------- include/linux/blkdev.h | 5 +++-- 10 files changed, 10 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 4d0e62ec88f0..6b7edb50bfd3 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -86,7 +86,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(FAIL_IO), QUEUE_FLAG_NAME(IO_STAT), QUEUE_FLAG_NAME(NOXMERGES), - QUEUE_FLAG_NAME(ADD_RANDOM), QUEUE_FLAG_NAME(SYNCHRONOUS), QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 637ed3bbbfb4..9174aca3b855 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -323,7 +323,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ } QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) -QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); +QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM) QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0); #undef QUEUE_SYSFS_BIT_FNS @@ -561,7 +561,7 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { QUEUE_RW_ENTRY(queue_rotational, "rotational"); QUEUE_RW_ENTRY(queue_iostats, "iostats"); -QUEUE_RW_ENTRY(queue_random, "add_random"); +QUEUE_RW_ENTRY(queue_add_random, "add_random"); QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes"); #ifdef CONFIG_BLK_WBT @@ -665,7 +665,7 @@ static struct attribute *queue_attrs[] = { &queue_nomerges_entry.attr, &queue_iostats_entry.attr, &queue_stable_writes_entry.attr, - &queue_random_entry.attr, + &queue_add_random_entry.attr, &queue_poll_entry.attr, &queue_wc_entry.attr, &queue_fua_entry.attr, diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 1dbbf72659d5..c6ef0546ffc9 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3485,7 +3485,6 @@ skip_create_disk: goto start_service_thread; /* Set device limits. */ - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue); dma_set_max_seg_size(&dd->pdev->dev, 0x400000); /* Set the capacity of the device in 512 byte sectors. */ diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index c062af329709..0a3838e45aff 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1716,14 +1716,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false; } -static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !blk_queue_add_random(q); -} - static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1876,16 +1868,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); - /* - * Determine whether or not this queue's I/O timings contribute - * to the entropy pool, Only request-based targets use this. 
- * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not - * have it set. - */ - if (blk_queue_add_random(q) && - dm_table_any_dev_attr(t, device_is_not_random, NULL)) - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - /* * For a zoned target, setup the zones related queue attributes * and resources necessary for zone append emulation if necessary. diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index b4f62fa84586..da00904d4a3c 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -387,8 +387,6 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); blk_queue_rq_timeout(mq->queue, 60 * HZ); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); - dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index bf8369ce7ddf..47ead84407cd 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -374,9 +374,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) /* Create the request queue */ spin_lock_init(&new->queue_lock); INIT_LIST_HEAD(&new->rq_list); - - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); - gd->queue = new->rq; if (new->readonly) diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2e2309fa9a0b..3fcfe029db1b 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -439,7 +439,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) .logical_block_size = 1 << 12, }; unsigned int devindex; - struct request_queue *rq; int len, ret; lim.max_segments = min(scmdev->nr_max_block, @@ -474,9 +473,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) ret = PTR_ERR(bdev->gendisk); goto out_tag; } - rq = bdev->rq = bdev->gendisk->queue; - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); - bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; bdev->gendisk->major = scm_major; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ec39acc986d6..54f771ec8cfb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -631,8 +631,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - // XXX: - if (blk_queue_add_random(q)) + if (q->limits.features & BLK_FEAT_ADD_RANDOM) add_disk_randomness(req->q->disk); WARN_ON_ONCE(!blk_rq_is_passthrough(req) && diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a42c3c45e868..a27f1c7f1b61 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3301,7 +3301,6 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) static void sd_read_block_characteristics(struct scsi_disk *sdkp, struct queue_limits *lim) { - struct request_queue *q = sdkp->disk->queue; struct scsi_vpd *vpd; u16 rot; @@ -3317,10 +3316,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp, sdkp->zoned = (vpd->data[8] >> 4) & 3; rcu_read_unlock(); - if (rot == 1) { - lim->features &= ~BLK_FEAT_ROTATIONAL; - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } + if (rot == 1) + lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (!sdkp->first_scan) return; @@ -3599,7 +3596,6 @@ static int sd_revalidate_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; - struct request_queue *q = sdkp->disk->queue; 
sector_t old_capacity = sdkp->capacity; struct queue_limits lim; unsigned char *buffer; @@ -3646,8 +3642,7 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - lim.features |= BLK_FEAT_ROTATIONAL; - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 988e3248cffe..cf1bbf566b2b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -292,6 +292,9 @@ enum { /* rotational device (hard drive or floppy) */ BLK_FEAT_ROTATIONAL = (1u << 2), + + /* contributes to the random number pool */ + BLK_FEAT_ADD_RANDOM = (1u << 3), }; /* @@ -557,7 +560,6 @@ struct request_queue { #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ #define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ -#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ @@ -591,7 +593,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL) #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) -#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) #define blk_queue_zone_resetall(q) \ test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) -- cgit v1.2.3-58-ga151 From cdb2497918cc2929691408bac87b58433b45b6d3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:43 +0200 Subject: block: move the io_stat flag setting to queue_limits Move the io_stat flag into the queue_limits feature field so that it can be set atomically with the queue frozen. Simplify md and dm to set the flag unconditionally instead of avoiding setting a simple flag for cases where it already is set by other means, which is a bit pointless. 
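As an aside (not part of the patch), the driver-side pattern this series converges on looks like the sketch below for a bio-based driver that wants I/O accounting; the function name and NUMA node are placeholders:

static struct gendisk *example_alloc_disk(void)
{
        struct queue_limits lim = {
                .features       = BLK_FEAT_IO_STAT,
        };

        /* the feature is validated and applied before the disk goes live */
        return blk_alloc_disk(&lim, NUMA_NO_NODE);
}

blk-mq drivers need no explicit setting: as the blk-mq.c hunk below shows, blk_mq_alloc_queue() now ORs BLK_FEAT_IO_STAT into the limits by default.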
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-17-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - block/blk-mq.c | 6 +++++- block/blk-sysfs.c | 2 +- drivers/md/dm-table.c | 12 +++++++++--- drivers/md/dm.c | 13 +++---------- drivers/md/md.c | 5 ++--- drivers/nvme/host/multipath.c | 2 +- include/linux/blkdev.h | 9 +++++---- 8 files changed, 26 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 6b7edb50bfd3..cbe99444ed1a 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -84,7 +84,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(NOMERGES), QUEUE_FLAG_NAME(SAME_COMP), QUEUE_FLAG_NAME(FAIL_IO), - QUEUE_FLAG_NAME(IO_STAT), QUEUE_FLAG_NAME(NOXMERGES), QUEUE_FLAG_NAME(SYNCHRONOUS), QUEUE_FLAG_NAME(SAME_FORCE), diff --git a/block/blk-mq.c b/block/blk-mq.c index 58b0d6c7cc34..cf67dc13f7dd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4116,7 +4116,11 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, struct request_queue *q; int ret; - q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node); + if (!lim) + lim = &default_lim; + lim->features |= BLK_FEAT_IO_STAT; + + q = blk_alloc_queue(lim, set->numa_node); if (IS_ERR(q)) return q; q->queuedata = queuedata; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 9174aca3b855..6f58530fb3c0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -324,7 +324,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM) -QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); +QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT) QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0); #undef QUEUE_SYSFS_BIT_FNS diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0a3838e45aff..5d5431e531ae 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -579,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input) return 0; } +static void dm_set_stacking_limits(struct queue_limits *limits) +{ + blk_set_stacking_limits(limits); + limits->features |= BLK_FEAT_IO_STAT; +} + /* * Impose necessary and sufficient conditions on a devices's table such * that any incoming bio which respects its logical_block_size can be @@ -617,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, for (i = 0; i < t->num_targets; i++) { ti = dm_table_get_target(t, i); - blk_set_stacking_limits(&ti_limits); + dm_set_stacking_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) @@ -1591,7 +1597,7 @@ int dm_calculate_queue_limits(struct dm_table *t, unsigned int zone_sectors = 0; bool zoned = false; - blk_set_stacking_limits(limits); + dm_set_stacking_limits(limits); t->integrity_supported = true; for (unsigned int i = 0; i < t->num_targets; i++) { @@ -1604,7 +1610,7 @@ int dm_calculate_queue_limits(struct dm_table *t, for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); - blk_set_stacking_limits(&ti_limits); + dm_set_stacking_limits(&ti_limits); if (!ti->type->iterate_devices) { /* Set I/O hints portion of queue limits */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 13037d6a6f62..8a976cee448b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2386,22 +2386,15 @@ 
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) struct table_device *td; int r; - switch (type) { - case DM_TYPE_REQUEST_BASED: + WARN_ON_ONCE(type == DM_TYPE_NONE); + + if (type == DM_TYPE_REQUEST_BASED) { md->disk->fops = &dm_rq_blk_dops; r = dm_mq_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based dm mapped device"); return r; } - break; - case DM_TYPE_BIO_BASED: - case DM_TYPE_DAX_BIO_BASED: - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue); - break; - case DM_TYPE_NONE: - WARN_ON_ONCE(true); - break; } r = dm_calculate_queue_limits(t, &limits); diff --git a/drivers/md/md.c b/drivers/md/md.c index c23423c51fb7..8db0db8d5a27 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5787,7 +5787,8 @@ struct mddev *md_alloc(dev_t dev, char *name) int unit; int error; struct queue_limits lim = { - .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | + BLK_FEAT_IO_STAT, }; /* @@ -6152,8 +6153,6 @@ int md_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { struct request_queue *q = mddev->gendisk->queue; - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q); - /* Set the NOWAIT flags if all underlying devices support it */ if (nowait) blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 58c13304e558..eea727cfa9e6 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -538,6 +538,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_set_stacking_limits(&lim); lim.dma_alignment = 3; + lim.features |= BLK_FEAT_IO_STAT; if (head->ids.csi != NVME_CSI_ZNS) lim.max_zone_append_sectors = 0; @@ -550,7 +551,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) ctrl->subsys->instance, head->instance); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); /* * This assumes all controllers that refer to a namespace either * support poll queues or not. 
That is not a strict guarantee, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cf1bbf566b2b..5fafb2f95fd1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -295,6 +295,9 @@ enum { /* contributes to the random number pool */ BLK_FEAT_ADD_RANDOM = (1u << 3), + + /* do disk/partitions IO accounting */ + BLK_FEAT_IO_STAT = (1u << 4), }; /* @@ -558,7 +561,6 @@ struct request_queue { #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ -#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ @@ -577,8 +579,7 @@ struct request_queue { #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ #define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/ -#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \ - (1UL << QUEUE_FLAG_SAME_COMP) | \ +#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_SAME_COMP) | \ (1UL << QUEUE_FLAG_NOWAIT)) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); @@ -592,7 +593,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL) -#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) +#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_zone_resetall(q) \ test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) -- cgit v1.2.3-58-ga151 From 1a02f3a73f8c670eddeb44bf52a75ae7f67cfc11 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:44 +0200 Subject: block: move the stable_writes flag to queue_limits Move the stable_writes flag into the queue_limits feature field so that it can be set atomically with the queue frozen. The flag is now inherited by blk_stack_limits, which greatly simplifies the code in dm, and fixes md, which previously did not pass on the flag set on lower devices.
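For illustration, the run-time toggle this enables follows the same pattern as the raid5 skip_copy hunk below; q, enable and the function name are placeholders:

static int example_set_stable_writes(struct request_queue *q, bool enable)
{
        struct queue_limits lim = queue_limits_start_update(q);

        if (enable)
                lim.features |= BLK_FEAT_STABLE_WRITES;
        else
                lim.features &= ~BLK_FEAT_STABLE_WRITES;

        /* commits the whole limits snapshot in one go */
        return queue_limits_commit_update(q, &lim);
}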
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-18-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - block/blk-sysfs.c | 29 +---------------------------- drivers/block/drbd/drbd_main.c | 5 ++--- drivers/block/rbd.c | 9 +++------ drivers/block/zram/zram_drv.c | 2 +- drivers/md/dm-table.c | 19 ------------------- drivers/md/raid5.c | 6 ++++-- drivers/mmc/core/queue.c | 5 +++-- drivers/nvme/host/core.c | 9 +++++---- drivers/nvme/host/multipath.c | 4 ---- drivers/scsi/iscsi_tcp.c | 8 ++++---- include/linux/blkdev.h | 9 ++++++--- 12 files changed, 29 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index cbe99444ed1a..eb73f1d348e5 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -88,7 +88,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(SYNCHRONOUS), QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), - QUEUE_FLAG_NAME(STABLE_WRITES), QUEUE_FLAG_NAME(POLL), QUEUE_FLAG_NAME(DAX), QUEUE_FLAG_NAME(STATS), diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6f58530fb3c0..cde525724831 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -296,37 +296,10 @@ static ssize_t queue_##_name##_store(struct request_queue *q, \ return queue_feature_store(q, page, count, _feature); \ } -#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ -static ssize_t \ -queue_##name##_show(struct request_queue *q, char *page) \ -{ \ - int bit; \ - bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ - return queue_var_show(neg ? !bit : bit, page); \ -} \ -static ssize_t \ -queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ -{ \ - unsigned long val; \ - ssize_t ret; \ - ret = queue_var_store(&val, page, count); \ - if (ret < 0) \ - return ret; \ - if (neg) \ - val = !val; \ - \ - if (val) \ - blk_queue_flag_set(QUEUE_FLAG_##flag, q); \ - else \ - blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \ - return ret; \ -} - QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM) QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT) -QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0); -#undef QUEUE_SYSFS_BIT_FNS +QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES); static ssize_t queue_zoned_show(struct request_queue *q, char *page) { diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2ef29a478075..f92673f05c7a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2698,7 +2698,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig */ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8, .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | - BLK_FEAT_ROTATIONAL, + BLK_FEAT_ROTATIONAL | + BLK_FEAT_STABLE_WRITES, }; device = minor_to_device(minor); @@ -2737,8 +2738,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig sprintf(disk->disk_name, "drbd%d", minor); disk->private_data = device; - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); - device->md_io.page = alloc_page(GFP_KERNEL); if (!device->md_io.page) goto out_no_io_page; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ec1f1c7d4275..008e850555f4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4949,7 +4949,6 @@ static const struct blk_mq_ops rbd_mq_ops = { static int rbd_init_disk(struct rbd_device *rbd_dev) { struct 
gendisk *disk; - struct request_queue *q; unsigned int objset_bytes = rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; struct queue_limits lim = { @@ -4979,12 +4978,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT; } + if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) + lim.features |= BLK_FEAT_STABLE_WRITES; + disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_tag_set; } - q = disk->queue; snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->dev_id); @@ -4996,10 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->minors = RBD_MINORS_PER_MAJOR; disk->fops = &rbd_bd_ops; disk->private_data = rbd_dev; - - if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); - rbd_dev->disk = disk; return 0; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index aad840fc7e18..f8f1b5b54795 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2208,6 +2208,7 @@ static int zram_add(void) #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE .max_write_zeroes_sectors = UINT_MAX, #endif + .features = BLK_FEAT_STABLE_WRITES, }; struct zram *zram; int ret, device_id; @@ -2246,7 +2247,6 @@ static int zram_add(void) /* Actual capacity set using sysfs (/sys/block/zram/disksize */ set_capacity(zram->disk, 0); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); ret = device_add_disk(NULL, zram->disk, zram_disk_groups); if (ret) goto out_cleanup_disk; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5d5431e531ae..aaf379cb15d9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1819,13 +1819,6 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) return true; } -static int device_requires_stable_pages(struct dm_target *ti, - struct dm_dev *dev, sector_t start, - sector_t len, void *data) -{ - return bdev_stable_writes(dev->bdev); -} - int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1862,18 +1855,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); - /* - * Some devices don't use blk_integrity but still want stable pages - * because they do their own checksumming. - * If any underlying device requires stable pages, a table must require - * them as well. Only targets that support iterate_devices are considered: - * don't want error, zero, etc to require stable pages. - */ - if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); - else - blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); - /* * For a zoned target, setup the zones related queue attributes * and resources necessary for zone append emulation if necessary. 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 675c68fa6c64..e875763d6991 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7082,12 +7082,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) err = -ENODEV; else if (new != conf->skip_copy) { struct request_queue *q = mddev->gendisk->queue; + struct queue_limits lim = queue_limits_start_update(q); conf->skip_copy = new; if (new) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); + lim.features |= BLK_FEAT_STABLE_WRITES; else - blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); + lim.features &= ~BLK_FEAT_STABLE_WRITES; + err = queue_limits_commit_update(q, &lim); } mddev_unlock_and_resume(mddev); return err ?: len; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index da00904d4a3c..d0b3ca8a11f0 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -378,13 +378,14 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, lim.max_segments = host->max_segs; } + if (mmc_host_is_spi(host) && host->use_spi_crc) + lim.features |= BLK_FEAT_STABLE_WRITES; + disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq); if (IS_ERR(disk)) return disk; mq->queue = disk->queue; - if (mmc_host_is_spi(host) && host->use_spi_crc) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); blk_queue_rq_timeout(mq->queue, 60 * HZ); dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0d753fe71f35..5ecf762d7c88 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3724,6 +3724,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { + struct queue_limits lim = { }; struct nvme_ns *ns; struct gendisk *disk; int node = ctrl->numa_node; @@ -3732,7 +3733,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (!ns) return; - disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); + if (ctrl->opts && ctrl->opts->data_digest) + lim.features |= BLK_FEAT_STABLE_WRITES; + + disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; @@ -3741,9 +3745,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - if (ctrl->opts && ctrl->opts->data_digest) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); - if (ctrl->ops->supports_pci_p2pdma && ctrl->ops->supports_pci_p2pdma(ctrl)) blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index eea727cfa9e6..173796f2ddea 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -868,10 +868,6 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) nvme_mpath_set_live(ns); } - if (test_bit(QUEUE_FLAG_STABLE_WRITES, &ns->queue->queue_flags) && - ns->head->disk) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - ns->head->disk->queue); #ifdef CONFIG_BLK_DEV_ZONED if (blk_queue_is_zoned(ns->queue) && ns->head->disk) ns->head->disk->nr_zones = ns->disk->nr_zones; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 60688f18fac6..c708e1059638 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -1057,15 +1057,15 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) return 0; } -static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) +static int 
iscsi_sw_tcp_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); struct iscsi_session *session = tcp_sw_host->session; struct iscsi_conn *conn = session->leadconn; if (conn->datadgst_en) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - sdev->request_queue); + lim->features |= BLK_FEAT_STABLE_WRITES; return 0; } @@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = { .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .dma_boundary = PAGE_SIZE - 1, - .slave_configure = iscsi_sw_tcp_slave_configure, + .device_configure = iscsi_sw_tcp_device_configure, .proc_name = "iscsi_tcp", .this_id = -1, .track_queue_depth = 1, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5fafb2f95fd1..8936eb6ba609 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -298,13 +298,17 @@ enum { /* do disk/partitions IO accounting */ BLK_FEAT_IO_STAT = (1u << 4), + + /* don't modify data until writeback is done */ + BLK_FEAT_STABLE_WRITES = (1u << 5), }; /* * Flags automatically inherited when stacking limits. */ #define BLK_FEAT_INHERIT_MASK \ - (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL) + (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ + BLK_FEAT_STABLE_WRITES) /* internal flags in queue_limits.flags */ enum { @@ -565,7 +569,6 @@ struct request_queue { #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ -#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ #define QUEUE_FLAG_DAX 19 /* device supports DAX */ #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ @@ -1323,7 +1326,7 @@ static inline bool bdev_stable_writes(struct block_device *bdev) if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) return true; - return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags); + return q->limits.features & BLK_FEAT_STABLE_WRITES; } static inline bool blk_queue_write_cache(struct request_queue *q) -- cgit v1.2.3-58-ga151 From aadd5c59c910427c0464c217d5ed588ff14e2502 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:45 +0200 Subject: block: move the synchronous flag to queue_limits Move the synchronous flag into the queue_limits feature field so that it can be set atomically with the queue frozen. 
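For illustration (not part of the patch), a brd-style driver whose bios always complete in submit context would now declare that at allocation time; the block size and node are placeholders:

        struct queue_limits lim = {
                .logical_block_size     = PAGE_SIZE,
                .features               = BLK_FEAT_SYNCHRONOUS,
        };
        struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);

        if (IS_ERR(disk))
                return PTR_ERR(disk);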
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-19-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - drivers/block/brd.c | 2 +- drivers/block/zram/zram_drv.c | 4 ++-- drivers/nvdimm/btt.c | 3 +-- drivers/nvdimm/pmem.c | 4 ++-- include/linux/blkdev.h | 7 ++++--- 6 files changed, 10 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index eb73f1d348e5..957774e40b1d 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -85,7 +85,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(SAME_COMP), QUEUE_FLAG_NAME(FAIL_IO), QUEUE_FLAG_NAME(NOXMERGES), - QUEUE_FLAG_NAME(SYNCHRONOUS), QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), QUEUE_FLAG_NAME(POLL), diff --git a/drivers/block/brd.c b/drivers/block/brd.c index b25dc463b5e3..d77deb571dbd 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -335,6 +335,7 @@ static int brd_alloc(int i) .max_hw_discard_sectors = UINT_MAX, .max_discard_segments = 1, .discard_granularity = PAGE_SIZE, + .features = BLK_FEAT_SYNCHRONOUS, }; list_for_each_entry(brd, &brd_devices, brd_list) @@ -366,7 +367,6 @@ static int brd_alloc(int i) strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue); err = add_disk(disk); if (err) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f8f1b5b54795..efcb8d9d274c 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2208,7 +2208,8 @@ static int zram_add(void) #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE .max_write_zeroes_sectors = UINT_MAX, #endif - .features = BLK_FEAT_STABLE_WRITES, + .features = BLK_FEAT_STABLE_WRITES | + BLK_FEAT_SYNCHRONOUS, }; struct zram *zram; int ret, device_id; @@ -2246,7 +2247,6 @@ static int zram_add(void) /* Actual capacity set using sysfs (/sys/block/zram/disksize */ set_capacity(zram->disk, 0); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); ret = device_add_disk(NULL, zram->disk, zram_disk_groups); if (ret) goto out_cleanup_disk; diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index e474afa8e9f6..e79c06d65bb7 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1501,6 +1501,7 @@ static int btt_blk_init(struct btt *btt) .logical_block_size = btt->sector_size, .max_hw_sectors = UINT_MAX, .max_integrity_segments = 1, + .features = BLK_FEAT_SYNCHRONOUS, }; int rc; @@ -1518,8 +1519,6 @@ static int btt_blk_init(struct btt *btt) btt->btt_disk->fops = &btt_fops; btt->btt_disk->private_data = btt; - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); - set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL); if (rc) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 501cf226df01..b821dcf018f6 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -455,7 +455,8 @@ static int pmem_attach_disk(struct device *dev, .logical_block_size = pmem_sector_size(ndns), .physical_block_size = PAGE_SIZE, .max_hw_sectors = UINT_MAX, - .features = BLK_FEAT_WRITE_CACHE, + .features = BLK_FEAT_WRITE_CACHE | + BLK_FEAT_SYNCHRONOUS, }; int nid = dev_to_node(dev), fua; struct resource *res = &nsio->res; @@ -546,7 +547,6 @@ static int pmem_attach_disk(struct device *dev, } 
pmem->virt_addr = addr; - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); if (pmem->pfn_flags & PFN_MAP) blk_queue_flag_set(QUEUE_FLAG_DAX, q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8936eb6ba609..cee7b44a1425 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -301,6 +301,9 @@ enum { /* don't modify data until writeback is done */ BLK_FEAT_STABLE_WRITES = (1u << 5), + + /* always completes in submit context */ + BLK_FEAT_SYNCHRONOUS = (1u << 6), }; /* @@ -566,7 +569,6 @@ struct request_queue { #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ -#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ @@ -1315,8 +1317,7 @@ static inline bool bdev_nonrot(struct block_device *bdev) static inline bool bdev_synchronous(struct block_device *bdev) { - return test_bit(QUEUE_FLAG_SYNCHRONOUS, - &bdev_get_queue(bdev)->queue_flags); + return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS; } static inline bool bdev_stable_writes(struct block_device *bdev) -- cgit v1.2.3-58-ga151 From f76af42f8bf13d2620084f305f01691de9238fc7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:46 +0200 Subject: block: move the nowait flag to queue_limits Move the nowait flag into the queue_limits feature field so that it can be set atomically with the queue frozen. Stacking drivers are simplified in that they can now simply set the flag, and blk_stack_limits will clear it when the feature is not supported by any of the underlying devices.
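A sketch of the stacking behaviour described above; example_stack_one() and its arguments are hypothetical stand-ins for a stacking driver's limits and one of its component devices:

static void example_stack_one(struct queue_limits *lim,
                              struct block_device *bottom)
{
        /* the stacking driver optimistically claims NOWAIT support... */
        lim->features |= BLK_FEAT_NOWAIT;

        /* ...and blk_stack_limits() clears it if the bottom device lacks it */
        if (blk_stack_limits(lim, &bdev_get_queue(bottom)->limits,
                             get_start_sect(bottom)))
                pr_warn("device is misaligned\n");
}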
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-20-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - block/blk-mq.c | 2 +- block/blk-settings.c | 9 +++++++++ drivers/block/brd.c | 4 ++-- drivers/md/dm-table.c | 18 +++--------------- drivers/md/md.c | 18 +----------------- drivers/nvme/host/multipath.c | 3 +-- include/linux/blkdev.h | 9 +++++---- 8 files changed, 22 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 957774e40b1d..62b132e9a9ce 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -96,7 +96,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(ZONE_RESETALL), QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), - QUEUE_FLAG_NAME(NOWAIT), QUEUE_FLAG_NAME(SQ_SCHED), QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE), }; diff --git a/block/blk-mq.c b/block/blk-mq.c index cf67dc13f7dd..43235acc8750 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4118,7 +4118,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, if (!lim) lim = &default_lim; - lim->features |= BLK_FEAT_IO_STAT; + lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; q = blk_alloc_queue(lim, set->numa_node); if (IS_ERR(q)) diff --git a/block/blk-settings.c b/block/blk-settings.c index 536ee202fcdc..bf4622c19b5c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -459,6 +459,15 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->features |= (b->features & BLK_FEAT_INHERIT_MASK); + /* + * BLK_FEAT_NOWAIT needs to be supported both by the stacking driver + * and all underlying devices. The stacking driver sets the flag + * before stacking the limits, and this will clear the flag if any + * of the underlying devices does not support it. 
+ */ + if (!(b->features & BLK_FEAT_NOWAIT)) + t->features &= ~BLK_FEAT_NOWAIT; + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_user_sectors = min_not_zero(t->max_user_sectors, b->max_user_sectors); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index d77deb571dbd..a300645cd9d4 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -335,7 +335,8 @@ static int brd_alloc(int i) .max_hw_discard_sectors = UINT_MAX, .max_discard_segments = 1, .discard_granularity = PAGE_SIZE, - .features = BLK_FEAT_SYNCHRONOUS, + .features = BLK_FEAT_SYNCHRONOUS | + BLK_FEAT_NOWAIT, }; list_for_each_entry(brd, &brd_devices, brd_list) @@ -367,7 +368,6 @@ static int brd_alloc(int i) strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue); err = add_disk(disk); if (err) goto out_cleanup_disk; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index aaf379cb15d9..84d636712c72 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -582,7 +582,7 @@ int dm_split_args(int *argc, char ***argvp, char *input) static void dm_set_stacking_limits(struct queue_limits *limits) { blk_set_stacking_limits(limits); - limits->features |= BLK_FEAT_IO_STAT; + limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; } /* @@ -1746,12 +1746,6 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t) return true; } -static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - return !bdev_nowait(dev->bdev); -} - static bool dm_table_supports_nowait(struct dm_table *t) { for (unsigned int i = 0; i < t->num_targets; i++) { @@ -1759,10 +1753,6 @@ static bool dm_table_supports_nowait(struct dm_table *t) if (!dm_target_supports_nowait(ti->type)) return false; - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_nowait_capable, NULL)) - return false; } return true; @@ -1824,10 +1814,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, { int r; - if (dm_table_supports_nowait(t)) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); - else - blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q); + if (!dm_table_supports_nowait(t)) + limits->features &= ~BLK_FEAT_NOWAIT; if (!dm_table_supports_discards(t)) { limits->max_hw_discard_sectors = 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8db0db8d5a27..f1c7d4f281c5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5788,7 +5788,7 @@ struct mddev *md_alloc(dev_t dev, char *name) int error; struct queue_limits lim = { .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | - BLK_FEAT_IO_STAT, + BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT, }; /* @@ -6150,13 +6150,6 @@ int md_run(struct mddev *mddev) } } - if (!mddev_is_dm(mddev)) { - struct request_queue *q = mddev->gendisk->queue; - - /* Set the NOWAIT flags if all underlying devices support it */ - if (nowait) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); - } if (pers->sync_request) { if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_redundancy_group)) @@ -7115,15 +7108,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); - /* - * If the new disk does not support REQ_NOWAIT, - * disable on the whole MD. 
- */ - if (!bdev_nowait(rdev->bdev)) { - pr_info("%s: Disabling nowait because %pg does not support nowait\n", - mdname(mddev), rdev->bdev); - blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue); - } /* * Kick recovery, maybe this spare has to be added to the * array immediately. diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 173796f2ddea..61a162c9cf4e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -538,7 +538,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_set_stacking_limits(&lim); lim.dma_alignment = 3; - lim.features |= BLK_FEAT_IO_STAT; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; if (head->ids.csi != NVME_CSI_ZNS) lim.max_zone_append_sectors = 0; @@ -550,7 +550,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); /* * This assumes all controllers that refer to a namespace either * support poll queues or not. That is not a strict guarantee, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cee7b44a1425..f3d4519d609d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -304,6 +304,9 @@ enum { /* always completes in submit context */ BLK_FEAT_SYNCHRONOUS = (1u << 6), + + /* supports REQ_NOWAIT */ + BLK_FEAT_NOWAIT = (1u << 7), }; /* @@ -580,12 +583,10 @@ struct request_queue { #define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ -#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ #define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/ -#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_SAME_COMP) | \ - (1UL << QUEUE_FLAG_NOWAIT)) +#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); @@ -1348,7 +1349,7 @@ static inline bool bdev_fua(struct block_device *bdev) static inline bool bdev_nowait(struct block_device *bdev) { - return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags); + return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT; } static inline bool bdev_is_zoned(struct block_device *bdev) -- cgit v1.2.3-58-ga151 From f467fee48da4500786e145489787b37adae317c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:47 +0200 Subject: block: move the dax flag to queue_limits Move the dax flag into the queue_limits feature field so that it can be set atomically with the queue frozen. 
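For illustration (not part of the patch), a pmem-style driver now advertises DAX through its limits, and consumers keep using the unchanged macro; the geometry below is a placeholder:

        struct queue_limits lim = {
                .logical_block_size     = 512,
                .features               = BLK_FEAT_DAX,
        };
        struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);

        /* blk_queue_dax() now tests the feature bit instead of a queue flag */
        if (!IS_ERR(disk) && blk_queue_dax(disk->queue))
                pr_debug("fs-dax I/O path available\n");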
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-21-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - drivers/md/dm-table.c | 4 ++-- drivers/nvdimm/pmem.c | 7 ++----- drivers/s390/block/dcssblk.c | 2 +- include/linux/blkdev.h | 6 ++++-- 5 files changed, 9 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 62b132e9a9ce..f4fa820251ce 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -88,7 +88,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), QUEUE_FLAG_NAME(POLL), - QUEUE_FLAG_NAME(DAX), QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(QUIESCED), diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 84d636712c72..e44697037e86 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1834,11 +1834,11 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; if (dm_table_supports_dax(t, device_not_dax_capable)) { - blk_queue_flag_set(QUEUE_FLAG_DAX, q); + limits->features |= BLK_FEAT_DAX; if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) set_dax_synchronous(t->md->dax_dev); } else - blk_queue_flag_clear(QUEUE_FLAG_DAX, q); + limits->features &= ~BLK_FEAT_DAX; if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b821dcf018f6..1dd74c969d5a 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -465,7 +465,6 @@ static int pmem_attach_disk(struct device *dev, struct dax_device *dax_dev; struct nd_pfn_sb *pfn_sb; struct pmem_device *pmem; - struct request_queue *q; struct gendisk *disk; void *addr; int rc; @@ -499,6 +498,8 @@ static int pmem_attach_disk(struct device *dev, } if (fua) lim.features |= BLK_FEAT_FUA; + if (is_nd_pfn(dev)) + lim.features |= BLK_FEAT_DAX; if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(&ndns->dev))) { @@ -509,7 +510,6 @@ static int pmem_attach_disk(struct device *dev, disk = blk_alloc_disk(&lim, nid); if (IS_ERR(disk)) return PTR_ERR(disk); - q = disk->queue; pmem->disk = disk; pmem->pgmap.owner = pmem; @@ -547,9 +547,6 @@ static int pmem_attach_disk(struct device *dev, } pmem->virt_addr = addr; - if (pmem->pfn_flags & PFN_MAP) - blk_queue_flag_set(QUEUE_FLAG_DAX, q); - disk->fops = &pmem_fops; disk->private_data = pmem; nvdimm_namespace_disk_name(ndns, disk->disk_name); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 6d1689a2717e..d5a5d11ae0dc 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -548,6 +548,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char { struct queue_limits lim = { .logical_block_size = 4096, + .features = BLK_FEAT_DAX, }; int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; @@ -643,7 +644,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->fops = &dcssblk_devops; dev_info->gd->private_data = dev_info; dev_info->gd->flags |= GENHD_FL_NO_PART; - blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git 
a/include/linux/blkdev.h b/include/linux/blkdev.h index f3d4519d609d..7022e06a3dd9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -307,6 +307,9 @@ enum { /* supports REQ_NOWAIT */ BLK_FEAT_NOWAIT = (1u << 7), + + /* supports DAX */ + BLK_FEAT_DAX = (1u << 8), }; /* @@ -575,7 +578,6 @@ struct request_queue { #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ -#define QUEUE_FLAG_DAX 19 /* device supports DAX */ #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ @@ -602,7 +604,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_zone_resetall(q) \ test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) -#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) +#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) \ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) #ifdef CONFIG_BLK_RQ_ALLOC_TIME -- cgit v1.2.3-58-ga151 From 8023e144f9d6e35f8786937e2f0c2fea0aba6dbc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:48 +0200 Subject: block: move the poll flag to queue_limits Move the poll flag into the queue_limits feature field so that it can be set atomically with the queue frozen. Stacking drivers are simplified in that they can now simply set the flag, and blk_stack_limits will clear it when the feature is not supported by any of the underlying devices.
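A sketch of the submitter-side effect, mirroring the submit_bio_noacct() hunk below: a polled bio is quietly downgraded when the queue does not advertise the feature.

static void example_maybe_clear_polled(struct request_queue *q,
                                       struct bio *bio)
{
        if (!(q->limits.features & BLK_FEAT_POLL))
                bio_clear_polled(bio);
}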
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-22-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 5 ++-- block/blk-mq-debugfs.c | 1 - block/blk-mq.c | 31 ++++++++++++++----------- block/blk-settings.c | 10 ++++---- block/blk-sysfs.c | 4 ++-- drivers/md/dm-table.c | 54 +++++++++++-------------------------------- drivers/nvme/host/multipath.c | 12 +--------- include/linux/blkdev.h | 4 +++- 8 files changed, 45 insertions(+), 76 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 2b45a4df9a1a..8d9fbd353fc7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -791,7 +791,7 @@ void submit_bio_noacct(struct bio *bio) } } - if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + if (!(q->limits.features & BLK_FEAT_POLL)) bio_clear_polled(bio); switch (bio_op(bio)) { @@ -915,8 +915,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) return 0; q = bdev_get_queue(bdev); - if (cookie == BLK_QC_T_NONE || - !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL)) return 0; blk_flush_plug(current->plug, false); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f4fa820251ce..3a2152791384 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -87,7 +87,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(NOXMERGES), QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), - QUEUE_FLAG_NAME(POLL), QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(QUIESCED), diff --git a/block/blk-mq.c b/block/blk-mq.c index 43235acc8750..e2b9710ddc5a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4109,6 +4109,12 @@ void blk_mq_release(struct request_queue *q) blk_mq_sysfs_deinit(q); } +static bool blk_mq_can_poll(struct blk_mq_tag_set *set) +{ + return set->nr_maps > HCTX_TYPE_POLL && + set->map[HCTX_TYPE_POLL].nr_queues; +} + struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, struct queue_limits *lim, void *queuedata) { @@ -4119,6 +4125,8 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, if (!lim) lim = &default_lim; lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; + if (blk_mq_can_poll(set)) + lim->features |= BLK_FEAT_POLL; q = blk_alloc_queue(lim, set->numa_node); if (IS_ERR(q)) @@ -4273,17 +4281,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, mutex_unlock(&q->sysfs_lock); } -static void blk_mq_update_poll_flag(struct request_queue *q) -{ - struct blk_mq_tag_set *set = q->tag_set; - - if (set->nr_maps > HCTX_TYPE_POLL && - set->map[HCTX_TYPE_POLL].nr_queues) - blk_queue_flag_set(QUEUE_FLAG_POLL, q); - else - blk_queue_flag_clear(QUEUE_FLAG_POLL, q); -} - int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { @@ -4311,7 +4308,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, q->tag_set = set; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; - blk_mq_update_poll_flag(q); INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->flush_list); @@ -4798,8 +4794,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, fallback: blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { + struct queue_limits lim; + blk_mq_realloc_hw_ctxs(set, q); - blk_mq_update_poll_flag(q); + if (q->nr_hw_queues != set->nr_hw_queues) { int i = prev_nr_hw_queues; @@ -4811,6 
+4809,13 @@ fallback: set->nr_hw_queues = prev_nr_hw_queues; goto fallback; } + lim = queue_limits_start_update(q); + if (blk_mq_can_poll(set)) + lim.features |= BLK_FEAT_POLL; + else + lim.features &= ~BLK_FEAT_POLL; + if (queue_limits_commit_update(q, &lim) < 0) + pr_warn("updating the poll flag failed\n"); blk_mq_map_swqueue(q); } diff --git a/block/blk-settings.c b/block/blk-settings.c index bf4622c19b5c..026ba68d8298 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -460,13 +460,15 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->features |= (b->features & BLK_FEAT_INHERIT_MASK); /* - * BLK_FEAT_NOWAIT needs to be supported both by the stacking driver - * and all underlying devices. The stacking driver sets the flag - * before stacking the limits, and this will clear the flag if any - * of the underlying devices does not support it. + * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the + * stacking driver and all underlying devices. The stacking driver sets + * the flags before stacking the limits, and this will clear the flags + * if any of the underlying devices does not support it. */ if (!(b->features & BLK_FEAT_NOWAIT)) t->features &= ~BLK_FEAT_NOWAIT; + if (!(b->features & BLK_FEAT_POLL)) + t->features &= ~BLK_FEAT_POLL; t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_user_sectors = min_not_zero(t->max_user_sectors, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index cde525724831..da4e96d686f9 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -394,13 +394,13 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, static ssize_t queue_poll_show(struct request_queue *q, char *page) { - return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); + return queue_var_show(q->limits.features & BLK_FEAT_POLL, page); } static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count) { - if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + if (!(q->limits.features & BLK_FEAT_POLL)) return -EINVAL; pr_info_ratelimited("writes to the poll attribute are ignored.\n"); pr_info_ratelimited("please use driver specific parameters instead.\n"); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e44697037e86..ca1f136575cf 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -582,7 +582,7 @@ int dm_split_args(int *argc, char ***argvp, char *input) static void dm_set_stacking_limits(struct queue_limits *limits) { blk_set_stacking_limits(limits); - limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; + limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; } /* @@ -1024,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t) return __table_type_request_based(dm_table_get_type(t)); } -static bool dm_table_supports_poll(struct dm_table *t); - static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) { enum dm_queue_mode type = dm_table_get_type(t); unsigned int per_io_data_size = 0, front_pad, io_front_pad; unsigned int min_pool_size = 0, pool_size; struct dm_md_mempools *pools; + unsigned int bioset_flags = 0; if (unlikely(type == DM_TYPE_NONE)) { DMERR("no table type is set, can't allocate mempools"); @@ -1048,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * goto init_bs; } + if (md->queue->limits.features & BLK_FEAT_POLL) + bioset_flags |= BIOSET_PERCPU_CACHE; + for (unsigned int i = 0; i < t->num_targets; i++) { struct 
dm_target *ti = dm_table_get_target(t, i); @@ -1060,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; - if (bioset_init(&pools->io_bs, pool_size, io_front_pad, - dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0)) + if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags)) goto out_free_pools; if (t->integrity_supported && bioset_integrity_create(&pools->io_bs, pool_size)) @@ -1404,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } -static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags); -} - /* * type->iterate_devices() should be called when the sanity check needs to * iterate and check all underlying data devices. iterate_devices() will @@ -1459,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev, return 0; } -static bool dm_table_supports_poll(struct dm_table *t) -{ - for (unsigned int i = 0; i < t->num_targets; i++) { - struct dm_target *ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_poll_capable, NULL)) - return false; - } - - return true; -} - /* * Check whether a table has no data devices attached using each * target's iterate_devices method. @@ -1817,6 +1797,13 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_supports_nowait(t)) limits->features &= ~BLK_FEAT_NOWAIT; + /* + * The current polling implementation does not support request-based + * stacking. + */ + if (!__table_type_bio_based(t->type)) + limits->features &= ~BLK_FEAT_POLL; + if (!dm_table_supports_discards(t)) { limits->max_hw_discard_sectors = 0; limits->discard_granularity = 0; @@ -1858,21 +1845,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, return r; dm_update_crypto_profile(q, t); - - /* - * Check for request-based device is left to - * dm_mq_init_request_queue()->blk_mq_init_allocated_queue(). - * - * For bio-based device, only set QUEUE_FLAG_POLL when all - * underlying devices supporting polling. - */ - if (__table_type_bio_based(t->type)) { - if (dm_table_supports_poll(t)) - blk_queue_flag_set(QUEUE_FLAG_POLL, q); - else - blk_queue_flag_clear(QUEUE_FLAG_POLL, q); - } - return 0; } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 61a162c9cf4e..4933194d00e5 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -538,7 +538,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_set_stacking_limits(&lim); lim.dma_alignment = 3; - lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; if (head->ids.csi != NVME_CSI_ZNS) lim.max_zone_append_sectors = 0; @@ -549,16 +549,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) head->disk->private_data = head; sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); - - /* - * This assumes all controllers that refer to a namespace either - * support poll queues or not.
That is not a strict guarantee, - * but if the assumption is wrong the effect is only suboptimal - * performance but not correctness problem. - */ - if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && - ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) - blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); return 0; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7022e06a3dd9..cd27b66cbacc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -310,6 +310,9 @@ enum { /* supports DAX */ BLK_FEAT_DAX = (1u << 8), + + /* supports I/O polling */ + BLK_FEAT_POLL = (1u << 9), }; /* @@ -577,7 +580,6 @@ struct request_queue { #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ -#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ -- cgit v1.2.3-58-ga151 From b1fc937a55f5735b98d9dceae5bb6ba262501f56 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:49 +0200 Subject: block: move the zoned flag into the features field Move the zoned flags into the features field to reclaim a little bit of space. Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-23-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 5 ++--- drivers/block/null_blk/zoned.c | 2 +- drivers/block/ublk_drv.c | 2 +- drivers/block/virtio_blk.c | 5 +++-- drivers/md/dm-table.c | 11 ++++++----- drivers/md/dm-zone.c | 2 +- drivers/md/dm-zoned-target.c | 2 +- drivers/nvme/host/zns.c | 2 +- drivers/scsi/sd_zbc.c | 2 +- include/linux/blkdev.h | 9 ++++++--- 10 files changed, 23 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 026ba68d8298..96e07f24bd9a 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -68,7 +68,7 @@ static void blk_apply_bdi_limits(struct backing_dev_info *bdi, static int blk_validate_zoned_limits(struct queue_limits *lim) { - if (!lim->zoned) { + if (!(lim->features & BLK_FEAT_ZONED)) { if (WARN_ON_ONCE(lim->max_open_zones) || WARN_ON_ONCE(lim->max_active_zones) || WARN_ON_ONCE(lim->zone_write_granularity) || @@ -602,8 +602,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, b->max_secure_erase_sectors); t->zone_write_granularity = max(t->zone_write_granularity, b->zone_write_granularity); - t->zoned = max(t->zoned, b->zoned); - if (!t->zoned) { + if (!(t->features & BLK_FEAT_ZONED)) { t->zone_write_granularity = 0; t->max_zone_append_sectors = 0; } diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index f118d304f310..ca8e739e76b9 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev, sector += dev->zone_size_sects; } - lim->zoned = true; + lim->features |= BLK_FEAT_ZONED; lim->chunk_sectors = dev->zone_size_sects; lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4fcde0999358..69c16018cbb1 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2196,7 +2196,7 @@ static int 
ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return -EOPNOTSUPP; - lim.zoned = true; + lim.features |= BLK_FEAT_ZONED; lim.max_active_zones = p->max_active_zones; lim.max_open_zones = p->max_open_zones; lim.max_zone_append_sectors = p->max_zone_append_sectors; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 13a2f24f1766..cea45b296f8b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); - lim->zoned = true; + lim->features |= BLK_FEAT_ZONED; virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v); @@ -1546,7 +1546,8 @@ static int virtblk_probe(struct virtio_device *vdev) * All steps that follow use the VQs therefore they need to be * placed after the virtio_device_ready() call above. */ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) { + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (lim.features & BLK_FEAT_ZONED)) { blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); err = blk_revalidate_disk_zones(vblk->disk); if (err) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ca1f136575cf..df6313c3fe6b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1605,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t, ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); - if (!zoned && ti_limits.zoned) { + if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) { /* * After stacking all limits, validate all devices * in table support this zoned model and zone sectors. */ - zoned = ti_limits.zoned; + zoned = (ti_limits.features & BLK_FEAT_ZONED); zone_sectors = ti_limits.chunk_sectors; } @@ -1658,12 +1658,12 @@ combine_limits: * zoned model on host-managed zoned block devices. * BUT... */ - if (limits->zoned) { + if (limits->features & BLK_FEAT_ZONED) { /* * ...IF the above limits stacking determined a zoned model * validate that all of the table's devices conform to it. */ - zoned = limits->zoned; + zoned = limits->features & BLK_FEAT_ZONED; zone_sectors = limits->chunk_sectors; } if (validate_hardware_zoned(t, zoned, zone_sectors)) @@ -1834,7 +1834,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * For a zoned target, setup the zones related queue attributes * and resources necessary for zone append emulation if necessary. 
*/ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) { + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (limits->features & BLK_FEAT_ZONED)) { r = dm_set_zones_restrictions(t, q, limits); if (r) return r; diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 5d66d916730e..88d313229b43 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -263,7 +263,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, if (nr_conv_zones >= ret) { lim->max_open_zones = 0; lim->max_active_zones = 0; - lim->zoned = false; + lim->features &= ~BLK_FEAT_ZONED; clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); disk->nr_zones = 0; return 0; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 12236e6f46f3..cd0ee144973f 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1009,7 +1009,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_sectors = chunk_sectors; /* We are exposing a drive-managed zoned block device */ - limits->zoned = false; + limits->features &= ~BLK_FEAT_ZONED; } /* diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 77aa0f440a6d..06f2417aa50d 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -108,7 +108,7 @@ free_data: void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, struct nvme_zone_info *zi) { - lim->zoned = 1; + lim->features |= BLK_FEAT_ZONED; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; lim->max_zone_append_sectors = ns->ctrl->max_zone_append; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 360ec9804995..d3f84665946e 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -601,7 +601,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, if (sdkp->device->type != TYPE_ZBC) return 0; - lim->zoned = true; + lim->features |= BLK_FEAT_ZONED; /* * Per ZBC and ZAC specifications, writes in sequential write required diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cd27b66cbacc..bdc30c1fb1b5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -313,6 +313,9 @@ enum { /* supports I/O polling */ BLK_FEAT_POLL = (1u << 9), + + /* is a zoned device */ + BLK_FEAT_ZONED = (1u << 10), }; /* @@ -320,7 +323,7 @@ enum { */ #define BLK_FEAT_INHERIT_MASK \ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ - BLK_FEAT_STABLE_WRITES) + BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED) /* internal flags in queue_limits.flags */ enum { @@ -372,7 +375,6 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; - bool zoned; unsigned int max_open_zones; unsigned int max_active_zones; @@ -654,7 +656,8 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q) static inline bool blk_queue_is_zoned(struct request_queue *q) { - return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned; + return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (q->limits.features & BLK_FEAT_ZONED); } #ifdef CONFIG_BLK_DEV_ZONED -- cgit v1.2.3-58-ga151 From a52758a39768f441e468a41da6c15a59d6d6011a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:50 +0200 Subject: block: move the zone_resetall flag to queue_limits Move the zone_resetall flag into the queue_limits feature field so that it can be set atomically with the queue frozen.
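As a rough sketch of the pattern these conversions follow (hypothetical driver code, not taken from any hunk below; only the BLK_FEAT_* flags, struct queue_limits and blk_mq_alloc_disk() come from the patches), the feature bits are filled in before the queue exists and applied atomically at disk allocation time:

	struct queue_limits lim = { };

	/* feature bits are set up front instead of flipped on a live queue */
	lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
	lim.chunk_sectors = zone_size_sectors;	/* hypothetical zone size */
	lim.max_open_zones = max_open;		/* hypothetical device limits */
	lim.max_active_zones = max_active;

	disk = blk_mq_alloc_disk(&tag_set, &lim, drv_data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);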
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-24-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - drivers/block/null_blk/zoned.c | 3 +-- drivers/block/ublk_drv.c | 4 +--- drivers/block/virtio_blk.c | 3 +-- drivers/nvme/host/zns.c | 3 +-- drivers/scsi/sd_zbc.c | 5 +---- include/linux/blkdev.h | 6 ++++-- 7 files changed, 9 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3a2152791384..f2fd72f4414a 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -91,7 +91,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(QUIESCED), QUEUE_FLAG_NAME(PCI_P2PDMA), - QUEUE_FLAG_NAME(ZONE_RESETALL), QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(SQ_SCHED), diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index ca8e739e76b9..b42c00f13132 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev, sector += dev->zone_size_sects; } - lim->features |= BLK_FEAT_ZONED; + lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; lim->chunk_sectors = dev->zone_size_sects; lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; @@ -171,7 +171,6 @@ int null_register_zoned_dev(struct nullb *nullb) struct request_queue *q = nullb->q; struct gendisk *disk = nullb->disk; - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); disk->nr_zones = bdev_nr_zones(disk->part0); pr_info("%s: using %s zone append\n", diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 69c16018cbb1..4fdff13fc23b 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) static void ublk_dev_param_zoned_apply(struct ublk_device *ub) { - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); - ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); } @@ -2196,7 +2194,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return -EOPNOTSUPP; - lim.features |= BLK_FEAT_ZONED; + lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; lim.max_active_zones = p->max_active_zones; lim.max_open_zones = p->max_open_zones; lim.max_zone_append_sectors = p->max_zone_append_sectors; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index cea45b296f8b..6c64a67ab9c9 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); - lim->features |= BLK_FEAT_ZONED; + lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v); @@ -1548,7 +1548,6 @@ static int virtblk_probe(struct virtio_device *vdev) */ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && (lim.features & BLK_FEAT_ZONED)) { - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); err = blk_revalidate_disk_zones(vblk->disk); if (err) goto out_cleanup_disk; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 06f2417aa50d..99bb89c2495a 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -108,13 +108,12 @@ 
free_data: void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, struct nvme_zone_info *zi) { - lim->features |= BLK_FEAT_ZONED; + lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; lim->max_zone_append_sectors = ns->ctrl->max_zone_append; lim->chunk_sectors = ns->head->zsze = nvme_lba_to_sect(ns->head, zi->zone_size); - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue); } static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index d3f84665946e..f7067afac79c 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -592,8 +592,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, u8 buf[SD_BUF_SIZE]) { - struct gendisk *disk = sdkp->disk; - struct request_queue *q = disk->queue; unsigned int nr_zones; u32 zone_blocks = 0; int ret; @@ -601,7 +599,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, if (sdkp->device->type != TYPE_ZBC) return 0; - lim->features |= BLK_FEAT_ZONED; + lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; /* * Per ZBC and ZAC specifications, writes in sequential write required @@ -630,7 +628,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, sdkp->early_zone_info.zone_blocks = zone_blocks; /* The drive satisfies the kernel restrictions: set it up */ - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); if (sdkp->zones_max_open == U32_MAX) lim->max_open_zones = 0; else diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index bdc30c1fb1b5..1077cb8d8fd8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -316,6 +316,9 @@ enum { /* is a zoned device */ BLK_FEAT_ZONED = (1u << 10), + + /* supports Zone Reset All */ + BLK_FEAT_ZONE_RESETALL = (1u << 11), }; /* @@ -586,7 +589,6 @@ struct request_queue { #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ -#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ @@ -607,7 +609,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_zone_resetall(q) \ - test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) + ((q)->limits.features & BLK_FEAT_ZONE_RESETALL) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) \ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) -- cgit v1.2.3-58-ga151 From 9c1e42e3c876c66796eda23e79836a4d92613a61 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:51 +0200 Subject: block: move the pci_p2pdma flag to queue_limits Move the pci_p2pdma flag into the queue_limits feature field so that it can be set atomically with the queue frozen. 
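A minimal sketch of the converted pattern, assuming a hypothetical dev_supports_p2pdma() capability check; only BLK_FEAT_PCI_P2PDMA, blk_mq_alloc_disk() and the blk_queue_pci_p2pdma() macro come from the patch:

	struct queue_limits lim = { };

	if (dev_supports_p2pdma(dev))	/* hypothetical capability check */
		lim.features |= BLK_FEAT_PCI_P2PDMA;

	disk = blk_mq_alloc_disk(set, &lim, dev);

	/* readers are unchanged apart from where the macro looks */
	if (blk_queue_pci_p2pdma(disk->queue))
		pr_debug("queue supports PCI P2PDMA\n");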
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-25-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - drivers/nvme/host/core.c | 8 +++----- include/linux/blkdev.h | 7 ++++--- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f2fd72f4414a..8b5a68861c11 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -90,7 +90,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(QUIESCED), - QUEUE_FLAG_NAME(PCI_P2PDMA), QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(SQ_SCHED), diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5ecf762d7c88..31e752e8d632 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3735,6 +3735,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (ctrl->opts && ctrl->opts->data_digest) lim.features |= BLK_FEAT_STABLE_WRITES; + if (ctrl->ops->supports_pci_p2pdma && + ctrl->ops->supports_pci_p2pdma(ctrl)) + lim.features |= BLK_FEAT_PCI_P2PDMA; disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); if (IS_ERR(disk)) @@ -3744,11 +3747,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - - if (ctrl->ops->supports_pci_p2pdma && - ctrl->ops->supports_pci_p2pdma(ctrl)) - blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); - ns->ctrl = ctrl; kref_init(&ns->kref); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1077cb8d8fd8..ab0f7dfba556 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -319,6 +319,9 @@ enum { /* supports Zone Reset All */ BLK_FEAT_ZONE_RESETALL = (1u << 11), + + /* supports PCI(e) p2p requests */ + BLK_FEAT_PCI_P2PDMA = (1u << 12), }; /* @@ -588,7 +591,6 @@ struct request_queue { #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ -#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ @@ -611,8 +613,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_zone_resetall(q) \ ((q)->limits.features & BLK_FEAT_ZONE_RESETALL) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) -#define blk_queue_pci_p2pdma(q) \ - test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) +#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME #define blk_queue_rq_alloc_time(q) \ test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) -- cgit v1.2.3-58-ga151 From 8c8f5c85b20d0a7dc0ab9b2a17318130d69ceb5a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:52 +0200 Subject: block: move the skip_tagset_quiesce flag to queue_limits Move the skip_tagset_quiesce flag into the queue_limits feature field so that it can be set atomically with the queue frozen. 
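The nvme-fabrics hunk below is the only in-tree user; the shape of the converted allocation is roughly the following (error handling trimmed, identifiers other than the feature flag and blk_mq_alloc_queue() assumed):

	struct queue_limits lim = {
		.features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
	};
	struct request_queue *q;

	/* the feature is in place before the queue ever becomes visible */
	q = blk_mq_alloc_queue(set, &lim, NULL);
	if (IS_ERR(q))
		return PTR_ERR(q);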
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-26-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - drivers/nvme/host/core.c | 8 +++++--- include/linux/blkdev.h | 6 ++++-- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 8b5a68861c11..344f9e503bdb 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -93,7 +93,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(SQ_SCHED), - QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE), }; #undef QUEUE_FLAG_NAME diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 31e752e8d632..bf410d10b120 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4489,13 +4489,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); + struct queue_limits lim = { + .features = BLK_FEAT_SKIP_TAGSET_QUIESCE, + }; + + ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, - ctrl->connect_q); } ctrl->tagset = set; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ab0f7dfba556..2c433ebf6f20 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -322,6 +322,9 @@ enum { /* supports PCI(e) p2p requests */ BLK_FEAT_PCI_P2PDMA = (1u << 12), + + /* skip this queue in blk_mq_(un)quiesce_tagset */ + BLK_FEAT_SKIP_TAGSET_QUIESCE = (1u << 13), }; /* @@ -594,7 +597,6 @@ struct request_queue { #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ -#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/ #define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP) @@ -629,7 +631,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) #define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags) #define blk_queue_skip_tagset_quiesce(q) \ - test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags) + ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); -- cgit v1.2.3-58-ga151 From 339d3948c07b4aa2940aeb874294a7d6782cec16 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Jun 2024 08:04:53 +0200 Subject: block: move the bounce flag into the features field Move the bounce flag into the features field to reclaim a little bit of space. 
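Condensed from the hunks below, the producer and the consumer of the new bit look roughly like this:

	/* scsi_init_limits(): hosts that cannot reach highmem pages */
	if (shost->no_highmem)
		lim->features |= BLK_FEAT_BOUNCE_HIGH;

	/*
	 * blk_queue_may_bounce() in block/blk.h: the check moves from an
	 * enum comparison to a feature-bit test
	 */
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn >= max_pfn;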
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/20240617060532.127975-27-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 1 - block/blk.h | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 6 ++++-- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 96e07f24bd9a..d0e9096f93ca 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -479,7 +479,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, b->max_write_zeroes_sectors); t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t), queue_limits_max_zone_append_sectors(b)); - t->bounce = max(t->bounce, b->bounce); t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); diff --git a/block/blk.h b/block/blk.h index 79e8d5d4fe0c..fa32f7fad5d7 100644 --- a/block/blk.h +++ b/block/blk.h @@ -394,7 +394,7 @@ struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q); static inline bool blk_queue_may_bounce(struct request_queue *q) { return IS_ENABLED(CONFIG_BOUNCE) && - q->limits.bounce == BLK_BOUNCE_HIGH && + (q->limits.features & BLK_FEAT_BOUNCE_HIGH) && max_low_pfn >= max_pfn; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 54f771ec8cfb..e2f7bfb2b9e4 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1986,7 +1986,7 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) shost->dma_alignment, dma_get_cache_alignment() - 1); if (shost->no_highmem) - lim->bounce = BLK_BOUNCE_HIGH; + lim->features |= BLK_FEAT_BOUNCE_HIGH; dma_set_seg_boundary(dev, shost->dma_boundary); dma_set_max_seg_size(dev, shost->max_segment_size); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2c433ebf6f20..e96ba7b97288 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -325,6 +325,9 @@ enum { /* skip this queue in blk_mq_(un)quiesce_tagset */ BLK_FEAT_SKIP_TAGSET_QUIESCE = (1u << 13), + + /* bounce all highmem pages */ + BLK_FEAT_BOUNCE_HIGH = (1u << 14), }; /* @@ -332,7 +335,7 @@ enum { */ #define BLK_FEAT_INHERIT_MASK \ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ - BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED) + BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH) /* internal flags in queue_limits.flags */ enum { @@ -352,7 +355,6 @@ enum blk_bounce { struct queue_limits { unsigned int features; unsigned int flags; - enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; -- cgit v1.2.3-58-ga151 From f6860b6069b92559f5cdb65f48e2d82051eaebca Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 19 Jun 2024 17:45:33 +0200 Subject: block: remove the unused blk_bounce enum The enum has been replaced with the BLK_FEAT_BOUNCE_HIGH flag. Reported-by: Keith Busch Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. 
Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240619154623.450048-2-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e96ba7b97288..f7d275e3fb2c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -343,15 +343,6 @@ enum { BLK_FLAGS_WRITE_CACHE_DISABLED = (1u << 31), }; -/* - * BLK_BOUNCE_NONE: never bounce (default) - * BLK_BOUNCE_HIGH: bounce all highmem pages - */ -enum blk_bounce { - BLK_BOUNCE_NONE, - BLK_BOUNCE_HIGH, -}; - struct queue_limits { unsigned int features; unsigned int flags; -- cgit v1.2.3-58-ga151 From bae1c74316b86c67c95658c3a0cd312cec9aad77 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 19 Jun 2024 17:45:35 +0200 Subject: block: renumber and rename the cache disabled flag Start with the first bit, and drop the plural-S from the name. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240619154623.450048-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 6 +++--- include/linux/blkdev.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index da4e96d686f9..59e6d111ed05 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -429,7 +429,7 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, static ssize_t queue_wc_show(struct request_queue *q, char *page) { - if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED) + if (q->limits.features & BLK_FLAG_WRITE_CACHE_DISABLED) return sprintf(page, "write through\n"); return sprintf(page, "write back\n"); } @@ -452,9 +452,9 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page, lim = queue_limits_start_update(q); if (disable) - lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED; + lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED; else - lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED; + lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED; err = queue_limits_commit_update(q, &lim); if (err) return err; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f7d275e3fb2c..713a98b6dbba 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -339,8 +339,8 @@ enum { - /* do not send FLUSH or FUA command despite advertised write cache */ - BLK_FLAGS_WRITE_CACHE_DISABLED = (1u << 31), + /* do not send FLUSH/FUA commands despite advertising a write cache */ + BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0), }; struct queue_limits { @@ -1339,7 +1339,7 @@ static inline bool bdev_stable_writes(struct block_device *bdev) static inline bool blk_queue_write_cache(struct request_queue *q) { return (q->limits.features & BLK_FEAT_WRITE_CACHE) && - !(q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED); + !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED); } static inline bool bdev_write_cache(struct block_device *bdev) -- cgit v1.2.3-58-ga151 From 5543217be468268dfedf504f4969771b9a377353 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 19 Jun 2024 17:45:36 +0200 Subject: block: move the misaligned flag into the features field Move the misaligned flags into the features field to reclaim a little bit of space. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K.
Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240619154623.450048-5-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 20 ++++++++++---------- include/linux/blkdev.h | 4 +++- 2 files changed, 13 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index d0e9096f93ca..a1b10404e500 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -258,7 +258,7 @@ static int blk_validate_limits(struct queue_limits *lim) if (lim->alignment_offset) { lim->alignment_offset &= (lim->physical_block_size - 1); - lim->misaligned = 0; + lim->features &= ~BLK_FEAT_MISALIGNED; } if (!(lim->features & BLK_FEAT_WRITE_CACHE)) @@ -470,6 +470,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, if (!(b->features & BLK_FEAT_POLL)) t->features &= ~BLK_FEAT_POLL; + t->flags |= (b->flags & BLK_FEAT_MISALIGNED); + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_user_sectors = min_not_zero(t->max_user_sectors, b->max_user_sectors); @@ -494,8 +496,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); - t->misaligned |= b->misaligned; - alignment = queue_limit_alignment_offset(b, start); /* Bottom device has different alignment. Check that it is @@ -509,7 +509,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Verify that top and bottom intervals line up */ if (max(top, bottom) % min(top, bottom)) { - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } } @@ -531,28 +531,28 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Physical block size a multiple of the logical block size? */ if (t->physical_block_size & (t->logical_block_size - 1)) { t->physical_block_size = t->logical_block_size; - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } /* Minimum I/O a multiple of the physical block size? */ if (t->io_min & (t->physical_block_size - 1)) { t->io_min = t->physical_block_size; - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } /* Optimal I/O a multiple of the physical block size? */ if (t->io_opt & (t->physical_block_size - 1)) { t->io_opt = 0; - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } /* chunk_sectors a multiple of the physical block size? 
*/ if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { t->chunk_sectors = 0; - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } @@ -566,7 +566,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Verify that new alignment_offset is on a logical block boundary */ if (t->alignment_offset & (t->logical_block_size - 1)) { - t->misaligned = 1; + t->flags |= BLK_FEAT_MISALIGNED; ret = -1; } @@ -729,7 +729,7 @@ int bdev_alignment_offset(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - if (q->limits.misaligned) + if (q->limits.flags & BLK_FEAT_MISALIGNED) return -1; if (bdev_is_partition(bdev)) return queue_limit_alignment_offset(&q->limits, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 713a98b6dbba..7ad2b1240fc0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -341,6 +341,9 @@ enum { enum { /* do not send FLUSH/FUA commands despite advertising a write cache */ BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0), + + /* I/O topology is misaligned */ + BLK_FEAT_MISALIGNED = (1u << 1), }; struct queue_limits { @@ -374,7 +377,6 @@ struct queue_limits { unsigned short max_integrity_segments; unsigned short max_discard_segments; - unsigned char misaligned; unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; unsigned int max_open_zones; -- cgit v1.2.3-58-ga151 From 4cac3d3a712b5c76d462b29b73b9e58c0b6d9946 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 19 Jun 2024 17:45:37 +0200 Subject: block: remove the discard_misaligned flag queue_limits.discard_misaligned is never read except in the places where it is stacked into another limit. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240619154623.450048-6-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 10 ---------- drivers/md/dm-cache-target.c | 1 - drivers/md/dm-clone-target.c | 1 - drivers/md/dm-table.c | 1 - include/linux/blkdev.h | 1 - 5 files changed, 14 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index a1b10404e500..62588d9a38e3 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -578,16 +578,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); - if (t->discard_granularity != 0 && - t->discard_alignment != alignment) { - top = t->discard_granularity + t->discard_alignment; - bottom = b->discard_granularity + alignment; - - /* Verify that top and bottom intervals line up */ - if ((max(top, bottom) % min(top, bottom)) != 0) - t->discard_misaligned = 1; - } - t->max_discard_sectors = min_not_zero(t->max_discard_sectors, b->max_discard_sectors); t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors, diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 16884b585053..2d8dd9283ff4 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3403,7 +3403,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; limits->discard_granularity = origin_limits->discard_granularity; limits->discard_alignment = origin_limits->discard_alignment; - limits->discard_misaligned = origin_limits->discard_misaligned; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) diff --git
a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index ad79b52ffc14..b4384a8b13e3 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2059,7 +2059,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits) limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors; limits->discard_granularity = dest_limits->discard_granularity; limits->discard_alignment = dest_limits->discard_alignment; - limits->discard_misaligned = dest_limits->discard_misaligned; limits->max_discard_segments = dest_limits->max_discard_segments; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index df6313c3fe6b..502ebc78d490 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1808,7 +1808,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, limits->max_hw_discard_sectors = 0; limits->discard_granularity = 0; limits->discard_alignment = 0; - limits->discard_misaligned = 0; } if (!dm_table_supports_write_zeroes(t)) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7ad2b1240fc0..86410ce41bf6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -377,7 +377,6 @@ struct queue_limits { unsigned short max_integrity_segments; unsigned short max_discard_segments; - unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; unsigned int max_open_zones; unsigned int max_active_zones; -- cgit v1.2.3-58-ga151 From 7d4dec525f5fd555037486af4d02dd3682655ba1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 19 Jun 2024 17:45:38 +0200 Subject: block: move the raid_partial_stripes_expensive flag into the features field Move the raid_partial_stripes_expensive flags into the features field to reclaim a little bit of space. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. 
Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240619154623.450048-7-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 4 ---- drivers/md/bcache/super.c | 4 ++-- drivers/md/raid5.c | 2 +- include/linux/blkdev.h | 7 +++++-- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 62588d9a38e3..008fed84edb4 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -556,10 +556,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } - t->raid_partial_stripes_expensive = - max(t->raid_partial_stripes_expensive, - b->raid_partial_stripes_expensive); - /* Find lowest common alignment_offset */ t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) % max(t->physical_block_size, t->io_min); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index baa364eedd00..283b2511c6d2 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1416,8 +1416,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) } if (bdev_io_opt(dc->bdev)) - dc->partial_stripes_expensive = - q->limits.raid_partial_stripes_expensive; + dc->partial_stripes_expensive = q->limits.features & + BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE; ret = bcache_device_init(&dc->disk, block_size, bdev_nr_sectors(dc->bdev) - dc->sb.data_offset, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e875763d6991..72f91eaa3201 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7707,7 +7707,7 @@ static int raid5_set_limits(struct mddev *mddev) blk_set_stacking_limits(&lim); lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded); - lim.raid_partial_stripes_expensive = 1; + lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE; lim.discard_granularity = stripe; lim.max_write_zeroes_sectors = 0; mddev_stack_rdev_limits(mddev, &lim, 0); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 86410ce41bf6..1fa2b148c206 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -328,6 +328,9 @@ enum { /* bounce all highmem pages */ BLK_FEAT_BOUNCE_HIGH = (1u << 14), + + /* undocumented magic for bcache */ + BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE = (1u << 15), }; /* @@ -335,7 +338,8 @@ enum { */ #define BLK_FEAT_INHERIT_MASK \ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ - BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH) + BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \ + BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE) /* internal flags in queue_limits.flags */ enum { @@ -377,7 +381,6 @@ struct queue_limits { unsigned short max_integrity_segments; unsigned short max_discard_segments; - unsigned char raid_partial_stripes_expensive; unsigned int max_open_zones; unsigned int max_active_zones; -- cgit v1.2.3-58-ga151 From f70167a7a6e7e8a6911f3a216dc044cbfe7c1983 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 20 Jun 2024 12:53:51 +0000 Subject: block: Generalize chunk_sectors support as boundary support The purpose of the chunk_sectors limit is to ensure that a mergeable request fits within the boundary of the chunk_sectors value. Such a feature will be useful for other request_queue boundary limits, so generalize the chunk_sectors merge code. This idea was proposed by Hannes Reinecke. Reviewed-by: Martin K. Petersen Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Acked-by: Darrick J.
Wong Reviewed-by: Darrick J. Wong Link: https://lore.kernel.org/r/20240620125359.2684798-3-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-merge.c | 20 ++++++++++++++------ drivers/md/dm.c | 2 +- include/linux/blkdev.h | 13 +++++++------ 3 files changed, 22 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/block/blk-merge.c b/block/blk-merge.c index 8957e08e020c..68969e27c831 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -154,6 +154,11 @@ static struct bio *bio_split_write_zeroes(struct bio *bio, return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs); } +static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim) +{ + return lim->chunk_sectors; +} + /* * Return the maximum number of sectors from the start of a bio that may be * submitted as a single request to a block device. If enough sectors remain, @@ -167,12 +172,13 @@ static inline unsigned get_max_io_size(struct bio *bio, { unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT; unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT; + unsigned boundary_sectors = blk_boundary_sectors(lim); unsigned max_sectors = lim->max_sectors, start, end; - if (lim->chunk_sectors) { + if (boundary_sectors) { max_sectors = min(max_sectors, - blk_chunk_sectors_left(bio->bi_iter.bi_sector, - lim->chunk_sectors)); + blk_boundary_sectors_left(bio->bi_iter.bi_sector, + boundary_sectors)); } start = bio->bi_iter.bi_sector & (pbs - 1); @@ -588,19 +594,21 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, sector_t offset) { struct request_queue *q = rq->q; - unsigned int max_sectors; + struct queue_limits *lim = &q->limits; + unsigned int max_sectors, boundary_sectors; if (blk_rq_is_passthrough(rq)) return q->limits.max_hw_sectors; + boundary_sectors = blk_boundary_sectors(lim); max_sectors = blk_queue_get_max_sectors(rq); - if (!q->limits.chunk_sectors || + if (!boundary_sectors || req_op(rq) == REQ_OP_DISCARD || req_op(rq) == REQ_OP_SECURE_ERASE) return max_sectors; return min(max_sectors, - blk_chunk_sectors_left(offset, q->limits.chunk_sectors)); + blk_boundary_sectors_left(offset, boundary_sectors)); } static inline int ll_new_hw_segment(struct request *req, struct bio *bio, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8a976cee448b..7d107ae06e1a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector, return len; return min_t(sector_t, len, min(max_sectors ? : queue_max_sectors(ti->table->md->queue), - blk_chunk_sectors_left(target_offset, max_granularity))); + blk_boundary_sectors_left(target_offset, max_granularity))); } static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0e8253c1507a..fb7d4c21bba8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -907,14 +907,15 @@ static inline bool bio_straddles_zones(struct bio *bio) } /* - * Return how much of the chunk is left to be used for I/O at a given offset. + * Return how much within the boundary is left to be used for I/O at a given + * offset. 
*/ -static inline unsigned int blk_chunk_sectors_left(sector_t offset, - unsigned int chunk_sectors) +static inline unsigned int blk_boundary_sectors_left(sector_t offset, + unsigned int boundary_sectors) { - if (unlikely(!is_power_of_2(chunk_sectors))) - return chunk_sectors - sector_div(offset, chunk_sectors); - return chunk_sectors - (offset & (chunk_sectors - 1)); + if (unlikely(!is_power_of_2(boundary_sectors))) + return boundary_sectors - sector_div(offset, boundary_sectors); + return boundary_sectors - (offset & (boundary_sectors - 1)); } /** -- cgit v1.2.3-58-ga151 From c34fc6f26ab86d03a2d47446f42b6cd492dfdc56 Mon Sep 17 00:00:00 2001 From: Prasad Singamsetty Date: Thu, 20 Jun 2024 12:53:52 +0000 Subject: fs: Initial atomic write support An atomic write is a write issued with torn-write protection, meaning that for a power failure or any other hardware failure, all or none of the data from the write will be stored, but never a mix of old and new data. Userspace may add flag RWF_ATOMIC to pwritev2() to indicate that the write is to be issued with torn-write prevention, according to special alignment and length rules. For any syscall interface utilizing struct iocb, add IOCB_ATOMIC for iocb->ki_flags field to indicate the same. A call to statx will give the relevant atomic write info for a file: - atomic_write_unit_min - atomic_write_unit_max - atomic_write_segments_max Both min and max values must be a power-of-2. Applications can avail of the atomic write feature by ensuring that the total length of a write is a power-of-2 in size and also sized between atomic_write_unit_min and atomic_write_unit_max, inclusive. Applications must ensure that the write is at a naturally-aligned offset in the file wrt the total write length. The value in atomic_write_segments_max indicates the upper limit for IOV_ITER iovcnt. Add file mode flag FMODE_CAN_ATOMIC_WRITE, so files which do not have the flag set will have RWF_ATOMIC rejected and not just ignored. Add a type argument to kiocb_set_rw_flags() to allow reads which have RWF_ATOMIC set to be rejected. Helper function generic_atomic_write_valid() can be used by FSes to verify compliant writes. There we check that the iov_iter type is ubuf, which implies iovcnt==1 for pwritev2(), which is an initial restriction for atomic_write_segments_max. Initially the only user will be bdev file operations write handler. We will rely on the block BIO submission path to ensure write sizes are compliant for the bdev, so we don't need to check atomic write sizes yet. Signed-off-by: Prasad Singamsetty jpg: merge into single patch and much rewrite Acked-by: Darrick J. Wong Reviewed-by: Martin K. Petersen Signed-off-by: John Garry Reviewed-by: Darrick J.
Wong Link: https://lore.kernel.org/r/20240620125359.2684798-4-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- fs/aio.c | 8 ++++---- fs/btrfs/ioctl.c | 2 +- fs/read_write.c | 18 +++++++++++++++++- include/linux/fs.h | 17 +++++++++++++++-- include/uapi/linux/fs.h | 5 ++++- io_uring/rw.c | 9 ++++----- 6 files changed, 45 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/aio.c b/fs/aio.c index 57c9f7c077e6..93ef59d358b3 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1516,7 +1516,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res) iocb_put(iocb); } -static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) +static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type) { int ret; @@ -1542,7 +1542,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) } else req->ki_ioprio = get_current_ioprio(); - ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); + ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type); if (unlikely(ret)) return ret; @@ -1594,7 +1594,7 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb, struct file *file; int ret; - ret = aio_prep_rw(req, iocb); + ret = aio_prep_rw(req, iocb, READ); if (ret) return ret; file = req->ki_filp; @@ -1621,7 +1621,7 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, struct file *file; int ret; - ret = aio_prep_rw(req, iocb); + ret = aio_prep_rw(req, iocb, WRITE); if (ret) return ret; file = req->ki_filp; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index efd5d6e9589e..6ad524b894fc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4627,7 +4627,7 @@ static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool goto out_iov; init_sync_kiocb(&kiocb, file); - ret = kiocb_set_rw_flags(&kiocb, 0); + ret = kiocb_set_rw_flags(&kiocb, 0, WRITE); if (ret) goto out_iov; kiocb.ki_pos = pos; diff --git a/fs/read_write.c b/fs/read_write.c index ef6339391351..90e283b31ca1 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -730,7 +730,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, ssize_t ret; init_sync_kiocb(&kiocb, filp); - ret = kiocb_set_rw_flags(&kiocb, flags); + ret = kiocb_set_rw_flags(&kiocb, flags, type); if (ret) return ret; kiocb.ki_pos = (ppos ? 
*ppos : 0); @@ -1736,3 +1736,19 @@ int generic_file_rw_checks(struct file *file_in, struct file *file_out) return 0; } + +bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos) +{ + size_t len = iov_iter_count(iter); + + if (!iter_is_ubuf(iter)) + return false; + + if (!is_power_of_2(len)) + return false; + + if (!IS_ALIGNED(pos, len)) + return false; + + return true; +} diff --git a/include/linux/fs.h b/include/linux/fs.h index 0283cf366c2a..e049414bef7d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -125,8 +125,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_EXEC ((__force fmode_t)(1 << 5)) /* File writes are restricted (block device specific) */ #define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6)) +/* File supports atomic writes */ +#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7)) -/* FMODE_* bits 7 to 8 */ +/* FMODE_* bit 8 */ /* 32bit hashes as llseek() offset (for directories) */ #define FMODE_32BITHASH ((__force fmode_t)(1 << 9)) @@ -317,6 +319,7 @@ struct readahead_control; #define IOCB_SYNC (__force int) RWF_SYNC #define IOCB_NOWAIT (__force int) RWF_NOWAIT #define IOCB_APPEND (__force int) RWF_APPEND +#define IOCB_ATOMIC (__force int) RWF_ATOMIC /* non-RWF related bits - start at 16 */ #define IOCB_EVENTFD (1 << 16) @@ -351,6 +354,7 @@ struct readahead_control; { IOCB_SYNC, "SYNC" }, \ { IOCB_NOWAIT, "NOWAIT" }, \ { IOCB_APPEND, "APPEND" }, \ + { IOCB_ATOMIC, "ATOMIC"}, \ { IOCB_EVENTFD, "EVENTFD"}, \ { IOCB_DIRECT, "DIRECT" }, \ { IOCB_WRITE, "WRITE" }, \ @@ -3403,7 +3407,8 @@ static inline int iocb_flags(struct file *file) return res; } -static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) +static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, + int rw_type) { int kiocb_flags = 0; @@ -3422,6 +3427,12 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) return -EOPNOTSUPP; kiocb_flags |= IOCB_NOIO; } + if (flags & RWF_ATOMIC) { + if (rw_type != WRITE) + return -EOPNOTSUPP; + if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE)) + return -EOPNOTSUPP; + } kiocb_flags |= (__force int) (flags & RWF_SUPPORTED); if (flags & RWF_SYNC) kiocb_flags |= IOCB_DSYNC; @@ -3613,4 +3624,6 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); +bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos); + #endif /* _LINUX_FS_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 45e4e64fd664..191a7e88a8ab 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -329,9 +329,12 @@ typedef int __bitwise __kernel_rwf_t; /* per-IO negation of O_APPEND */ #define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020) +/* Atomic Write */ +#define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040) + /* mask of flags supported by the kernel */ #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ - RWF_APPEND | RWF_NOAPPEND) + RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC) /* Pagemap ioctl */ #define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg) diff --git a/io_uring/rw.c b/io_uring/rw.c index 1a2128459cb4..c004d21e2f12 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -772,7 +772,7 @@ static bool need_complete_io(struct io_kiocb *req) S_ISBLK(file_inode(req->file)->i_mode); } -static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) +static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) { struct io_rw *rw = 
io_kiocb_to_cmd(req, struct io_rw); struct kiocb *kiocb = &rw->kiocb; @@ -787,7 +787,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) req->flags |= io_file_get_flags(file); kiocb->ki_flags = file->f_iocb_flags; - ret = kiocb_set_rw_flags(kiocb, rw->flags); + ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type); if (unlikely(ret)) return ret; kiocb->ki_flags |= IOCB_ALLOC_CACHE; @@ -832,8 +832,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(ret < 0)) return ret; } - - ret = io_rw_init_file(req, FMODE_READ); + ret = io_rw_init_file(req, FMODE_READ, READ); if (unlikely(ret)) return ret; req->cqe.res = iov_iter_count(&io->iter); @@ -1013,7 +1012,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) ssize_t ret, ret2; loff_t *ppos; - ret = io_rw_init_file(req, FMODE_WRITE); + ret = io_rw_init_file(req, FMODE_WRITE, WRITE); if (unlikely(ret)) return ret; req->cqe.res = iov_iter_count(&io->iter); -- cgit v1.2.3-58-ga151 From 0f9ca80fa4f9670ba09721e4e36b8baf086a500c Mon Sep 17 00:00:00 2001 From: Prasad Singamsetty Date: Thu, 20 Jun 2024 12:53:53 +0000 Subject: fs: Add initial atomic write support info to statx Extend statx system call to return additional info for atomic write support for a file. Helper function generic_fill_statx_atomic_writes() can be used by FSes to fill in the relevant statx fields. For now atomic_write_segments_max will always be 1, otherwise some rules would need to be imposed on iovec length and alignment, which we don't want now. Signed-off-by: Prasad Singamsetty jpg: relocate bdev support to another patch Reviewed-by: Darrick J. Wong Reviewed-by: Martin K. Petersen Signed-off-by: John Garry Acked-by: Darrick J. Wong Link: https://lore.kernel.org/r/20240620125359.2684798-5-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- fs/stat.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/fs.h | 3 +++ include/linux/stat.h | 3 +++ include/uapi/linux/stat.h | 12 ++++++++++-- 4 files changed, 50 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/stat.c b/fs/stat.c index 70bd3e888cfa..72d0e6357b91 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -89,6 +89,37 @@ void generic_fill_statx_attr(struct inode *inode, struct kstat *stat) } EXPORT_SYMBOL(generic_fill_statx_attr); +/** + * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes + * @stat: Where to fill in the attribute flags + * @unit_min: Minimum supported atomic write length in bytes + * @unit_max: Maximum supported atomic write length in bytes + * + * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from + * atomic write unit_min and unit_max values.
+ */ +void generic_fill_statx_atomic_writes(struct kstat *stat, + unsigned int unit_min, + unsigned int unit_max) +{ + /* Confirm that the request type is known */ + stat->result_mask |= STATX_WRITE_ATOMIC; + + /* Confirm that the file attribute type is known */ + stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC; + + if (unit_min) { + stat->atomic_write_unit_min = unit_min; + stat->atomic_write_unit_max = unit_max; + /* Initially only allow 1x segment */ + stat->atomic_write_segments_max = 1; + + /* Confirm atomic writes are actually supported */ + stat->attributes |= STATX_ATTR_WRITE_ATOMIC; + } +} +EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes); + /** * vfs_getattr_nosec - getattr without security checks * @path: file to get attributes from @@ -659,6 +690,9 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_dio_mem_align = stat->dio_mem_align; tmp.stx_dio_offset_align = stat->dio_offset_align; tmp.stx_subvol = stat->subvol; + tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min; + tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max; + tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } diff --git a/include/linux/fs.h b/include/linux/fs.h index e049414bef7d..db26b4a70c62 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3235,6 +3235,9 @@ extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); +void generic_fill_statx_atomic_writes(struct kstat *stat, + unsigned int unit_min, + unsigned int unit_max); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); diff --git a/include/linux/stat.h b/include/linux/stat.h index bf92441dbad2..3d900c86981c 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -54,6 +54,9 @@ struct kstat { u32 dio_offset_align; u64 change_cookie; u64 subvol; + u32 atomic_write_unit_min; + u32 atomic_write_unit_max; + u32 atomic_write_segments_max; }; /* These definitions are internal to the kernel for now. Mainly used by nfsd. 
*/ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 67626d535316..887a25286441 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -126,9 +126,15 @@ struct statx { __u64 stx_mnt_id; __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ - __u64 stx_subvol; /* Subvolume identifier */ /* 0xa0 */ - __u64 __spare3[11]; /* Spare space for future expansion */ + __u64 stx_subvol; /* Subvolume identifier */ + __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */ + __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */ + /* 0xb0 */ + __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */ + __u32 __spare1[1]; + /* 0xb8 */ + __u64 __spare3[9]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -157,6 +163,7 @@ struct statx { #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ #define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ +#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ @@ -192,6 +199,7 @@ struct statx { #define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */ #define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */ #define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */ +#define STATX_ATTR_WRITE_ATOMIC 0x00400000 /* File supports atomic write operations */ #endif /* _UAPI_LINUX_STAT_H */ -- cgit v1.2.3-58-ga151 From 9da3d1e912f3953196e66991d75208cde3e845e1 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 20 Jun 2024 12:53:54 +0000 Subject: block: Add core atomic write support Add atomic write support, as follows: - add helper functions to get request_queue atomic write limits - report request_queue atomic write support limits to sysfs and update Doc - support to safely merge atomic writes - deal with splitting atomic writes - misc helper functions - add a per-request atomic write flag New request_queue limits are added, as follows: - atomic_write_hw_max is set by the block driver and is the maximum length of an atomic write which the device may support. It is not necessarily a power-of-2. - atomic_write_max_sectors is derived from atomic_write_hw_max_sectors and max_hw_sectors. It is always a power-of-2. Atomic writes may be merged, and atomic_write_max_sectors would be the limit on a merged atomic write request size. This value is not capped at max_sectors, as the value in max_sectors can be controlled from userspace, and it would only cause trouble if userspace could limit atomic_write_unit_max_bytes and the other atomic write limits. - atomic_write_hw_unit_{min,max} are set by the block driver and are the min/max length of an atomic write unit which the device may support. They both must be a power-of-2. Typically atomic_write_hw_unit_max will hold the same value as atomic_write_hw_max. - atomic_write_unit_{min,max} are derived from atomic_write_hw_unit_{min,max}, max_hw_sectors, and block core limits. Both min and max values must be a power-of-2. - atomic_write_hw_boundary is set by the block driver. If non-zero, it indicates an LBA space boundary; an atomic write which straddles this boundary is no longer executed atomically by the disk. The value must be a power-of-2.
Note that it would be acceptable to enforce a rule that atomic_write_hw_boundary_sectors is a multiple of atomic_write_hw_unit_max, but the resultant code would be more complicated. All atomic write limits are set to 0 by default to indicate no atomic write support. Even though it is assumed by Linux that a logical block can always be atomically written, we ignore this as it is not of particular interest. Stacked devices are just not supported either for now. An atomic write must always be submitted to the block driver as part of a single request. As such, only a single BIO must be submitted to the block layer for an atomic write. When a single atomic write BIO is submitted, it cannot be split. As such, atomic_write_unit_{max, min}_bytes are limited by the maximum guaranteed BIO size which will not be required to be split. This max size is calculated by request_queue max segments and the number of bvecs a BIO can fit, BIO_MAX_VECS. Currently we rely on userspace issuing a write with iovcnt=1 for pwritev2() - as such, we can rely on each segment containing PAGE_SIZE of data, apart from the first+last, which each can fit logical block size of data. The first+last will be LBS length/aligned as we rely on direct IO alignment rules also. New sysfs files are added to report the following atomic write limits: - atomic_write_unit_max_bytes - same as atomic_write_unit_max_sectors in bytes - atomic_write_unit_min_bytes - same as atomic_write_unit_min_sectors in bytes - atomic_write_boundary_bytes - same as atomic_write_hw_boundary_sectors in bytes - atomic_write_max_bytes - same as atomic_write_max_sectors in bytes Atomic writes may only be merged with other atomic writes and only under the following conditions: - total resultant request length <= atomic_write_max_bytes - the merged write does not straddle a boundary Helper function bdev_can_atomic_write() is added to indicate whether atomic writes may be issued to a bdev. If a bdev is a partition, the partition start must be aligned with both atomic_write_unit_min_sectors and atomic_write_hw_boundary_sectors. FSes will rely on the block layer to validate that an atomic write BIO submitted will be of valid size, so add blk_validate_atomic_write_op_size() for this purpose. Userspace expects an atomic write which is of invalid size to be rejected with -EINVAL, so add BLK_STS_INVAL for this. Also use BLK_STS_INVAL for when a BIO needs to be split, as this should mean an invalid size BIO. Flag REQ_ATOMIC is used for indicating an atomic write. Co-developed-by: Himanshu Madhani Signed-off-by: Himanshu Madhani Reviewed-by: Martin K. Petersen Signed-off-by: John Garry Reviewed-by: Keith Busch Link: https://lore.kernel.org/r/20240620125359.2684798-6-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- Documentation/ABI/stable/sysfs-block | 53 ++++++++++++++++++++++ block/blk-core.c | 19 ++++++++ block/blk-merge.c | 50 ++++++++++++++++++-- block/blk-settings.c | 88 ++++++++++++++++++++++++++++++++++++ block/blk-sysfs.c | 33 ++++++++++++++ block/blk.h | 3 ++ include/linux/blk_types.h | 8 +++- include/linux/blkdev.h | 55 ++++++++++++++++++++++ 8 files changed, 304 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block index 831f19a32e08..cea8856f798d 100644 --- a/Documentation/ABI/stable/sysfs-block +++ b/Documentation/ABI/stable/sysfs-block @@ -21,6 +21,59 @@ Description: device is offset from the internal allocation unit's natural alignment.
+What: /sys/block/<disk>/atomic_write_max_bytes +Date: February 2024 +Contact: Himanshu Madhani +Description: + [RO] This parameter specifies the maximum atomic write + size reported by the device. This parameter is relevant + for merging of writes, where a merged atomic write + operation must not exceed this number of bytes. + This parameter may be greater than the value in + atomic_write_unit_max_bytes as + atomic_write_unit_max_bytes will be rounded down to a + power-of-two and atomic_write_unit_max_bytes may also be + limited by some other queue limits, such as max_segments. + This parameter - along with atomic_write_unit_min_bytes + and atomic_write_unit_max_bytes - will not be larger than + max_hw_sectors_kb, but may be larger than max_sectors_kb. + + +What: /sys/block/<disk>/atomic_write_unit_min_bytes +Date: February 2024 +Contact: Himanshu Madhani +Description: + [RO] This parameter specifies the smallest block which can + be written atomically with an atomic write operation. All + atomic write operations must begin at an + atomic_write_unit_min boundary and must be multiples of + atomic_write_unit_min. This value must be a power-of-two. + + +What: /sys/block/<disk>/atomic_write_unit_max_bytes +Date: February 2024 +Contact: Himanshu Madhani +Description: + [RO] This parameter defines the largest block which can be + written atomically with an atomic write operation. This + value must be a multiple of atomic_write_unit_min and must + be a power-of-two. This value will not be larger than + atomic_write_max_bytes. + + +What: /sys/block/<disk>/atomic_write_boundary_bytes +Date: February 2024 +Contact: Himanshu Madhani +Description: + [RO] A device may need to internally split an atomic write I/O + which straddles a given logical block address boundary. This + parameter specifies the size in bytes of the atomic boundary if + one is reported by the device. This value must be a + power-of-two and at least as large as + atomic_write_unit_max_bytes. + Any attempt to merge atomic write I/Os must not result in a + merged I/O which crosses this boundary (if any). + What: /sys/block/<disk>/diskseq Date: February 2021 diff --git a/block/blk-core.c b/block/blk-core.c index 8d9fbd353fc7..6fc1a5a1980d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -174,6 +174,8 @@ static const struct { /* Command duration limit device-side timeout */ [BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" }, + [BLK_STS_INVAL] = { -EINVAL, "invalid" }, + /* everything else not covered above: */ [BLK_STS_IOERR] = { -EIO, "I/O" }, }; @@ -739,6 +741,18 @@ void submit_bio_noacct_nocheck(struct bio *bio) __submit_bio_noacct(bio); } +static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q, + struct bio *bio) +{ + if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q)) + return BLK_STS_INVAL; + + if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q)) + return BLK_STS_INVAL; + + return BLK_STS_OK; +} + /** * submit_bio_noacct - re-submit a bio to the block device layer for I/O * @bio: The bio describing the location in memory and on the device.
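To make the size validation just added concrete, here is a hedged, userspace-only recreation of the two tests in blk_validate_atomic_write_op_size(), using purely illustrative limits (4 KiB unit_min, 64 KiB unit_max); this is the arithmetic only, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the two checks above; a false result maps to BLK_STS_INVAL. */
static bool atomic_bio_size_ok(unsigned int size, unsigned int unit_min,
			       unsigned int unit_max)
{
	if (size > unit_max)	/* larger than atomic_write_unit_max_bytes */
		return false;
	if (size % unit_min)	/* not a multiple of atomic_write_unit_min_bytes */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", atomic_bio_size_ok(12 << 10, 4 << 10, 64 << 10)); /* 1: ok */
	printf("%d\n", atomic_bio_size_ok(80 << 10, 4 << 10, 64 << 10)); /* 0: over max */
	printf("%d\n", atomic_bio_size_ok(6 << 10, 4 << 10, 64 << 10));  /* 0: not a multiple */
	return 0;
}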
@@ -797,6 +811,11 @@ void submit_bio_noacct(struct bio *bio) switch (bio_op(bio)) { case REQ_OP_READ: case REQ_OP_WRITE: + if (bio->bi_opf & REQ_ATOMIC) { + status = blk_validate_atomic_write_op_size(q, bio); + if (status != BLK_STS_OK) + goto end_io; + } break; case REQ_OP_FLUSH: /* diff --git a/block/blk-merge.c b/block/blk-merge.c index 68969e27c831..cff20bcc0252 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -154,8 +154,16 @@ static struct bio *bio_split_write_zeroes(struct bio *bio, return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs); } -static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim) +static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim, + bool is_atomic) { + /* + * chunk_sectors must be a multiple of atomic_write_boundary_sectors if + * both non-zero. + */ + if (is_atomic && lim->atomic_write_boundary_sectors) + return lim->atomic_write_boundary_sectors; + return lim->chunk_sectors; } @@ -172,8 +180,18 @@ static inline unsigned get_max_io_size(struct bio *bio, { unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT; unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT; - unsigned boundary_sectors = blk_boundary_sectors(lim); - unsigned max_sectors = lim->max_sectors, start, end; + bool is_atomic = bio->bi_opf & REQ_ATOMIC; + unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic); + unsigned max_sectors, start, end; + + /* + * We ignore lim->max_sectors for atomic writes because it may be less + * than the actual bio size, which we cannot tolerate. + */ + if (is_atomic) + max_sectors = lim->atomic_write_max_sectors; + else + max_sectors = lim->max_sectors; if (boundary_sectors) { max_sectors = min(max_sectors, @@ -311,6 +329,11 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, *segs = nsegs; return NULL; split: + if (bio->bi_opf & REQ_ATOMIC) { + bio->bi_status = BLK_STS_INVAL; + bio_endio(bio); + return ERR_PTR(-EINVAL); + } /* * We can't sanely support splitting for a REQ_NOWAIT bio. End it * with EAGAIN if splitting is required and return an error pointer. @@ -596,11 +619,12 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, struct request_queue *q = rq->q; struct queue_limits *lim = &q->limits; unsigned int max_sectors, boundary_sectors; + bool is_atomic = rq->cmd_flags & REQ_ATOMIC; if (blk_rq_is_passthrough(rq)) return q->limits.max_hw_sectors; - boundary_sectors = blk_boundary_sectors(lim); + boundary_sectors = blk_boundary_sectors(lim, is_atomic); max_sectors = blk_queue_get_max_sectors(rq); if (!boundary_sectors || @@ -806,6 +830,18 @@ static enum elv_merge blk_try_req_merge(struct request *req, return ELEVATOR_NO_MERGE; } +static bool blk_atomic_write_mergeable_rq_bio(struct request *rq, + struct bio *bio) +{ + return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC); +} + +static bool blk_atomic_write_mergeable_rqs(struct request *rq, + struct request *next) +{ + return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC); +} + /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. @@ -829,6 +865,9 @@ static struct request *attempt_merge(struct request_queue *q, if (req->ioprio != next->ioprio) return NULL; + if (!blk_atomic_write_mergeable_rqs(req, next)) + return NULL; + /* * If we are allowed to merge, then append bio list * from next to rq and release next. 
merge_requests_fn @@ -960,6 +999,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (rq->ioprio != bio_prio(bio)) return false; + if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false) + return false; + return true; } diff --git a/block/blk-settings.c b/block/blk-settings.c index b19306804056..37fe4c8f6b6b 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -135,6 +135,92 @@ static int blk_validate_integrity_limits(struct queue_limits *lim) return 0; } +/* + * Returns max guaranteed bytes which we can fit in a bio. + * + * We request that an atomic write is an ITER_UBUF iov_iter (so a single + * vector), so we assume that we can fit at least PAGE_SIZE in a segment, + * apart from the first and last segments. + */ +static +unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim) +{ + unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments); + unsigned int length; + + length = min(max_segments, 2) * lim->logical_block_size; + if (max_segments > 2) + length += (max_segments - 2) * PAGE_SIZE; + + return length; +} + +static void blk_atomic_writes_update_limits(struct queue_limits *lim) +{ + unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT, + blk_queue_max_guaranteed_bio(lim)); + + unit_limit = rounddown_pow_of_two(unit_limit); + + lim->atomic_write_max_sectors = + min(lim->atomic_write_hw_max >> SECTOR_SHIFT, + lim->max_hw_sectors); + lim->atomic_write_unit_min = + min(lim->atomic_write_hw_unit_min, unit_limit); + lim->atomic_write_unit_max = + min(lim->atomic_write_hw_unit_max, unit_limit); + lim->atomic_write_boundary_sectors = + lim->atomic_write_hw_boundary >> SECTOR_SHIFT; +} + +static void blk_validate_atomic_write_limits(struct queue_limits *lim) +{ + unsigned int chunk_sectors = lim->chunk_sectors; + unsigned int boundary_sectors; + + if (!lim->atomic_write_hw_max) + goto unsupported; + + boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT; + + if (boundary_sectors) { + /* + * A feature of boundary support is that it disallows bios to + * be merged which would result in a merged request which + * crosses either a chunk sector or atomic write HW boundary, + * even though chunk sectors may just be set for performance. + * For simplicity, disallow atomic writes for a chunk sector + * which is non-zero and smaller than atomic write HW boundary. + * Furthermore, chunk sectors must be a multiple of atomic + * write HW boundary. Otherwise boundary support becomes + * complicated. + * Devices which do not conform to these rules can be dealt + * with if and when they show up. + */ + if (WARN_ON_ONCE(do_div(chunk_sectors, boundary_sectors))) + goto unsupported; + + /* + * The boundary size just needs to be a multiple of unit_max + * (and not necessarily a power-of-2), so the following check + * could be relaxed in the future. + * Furthermore, if needed, unit_max could even be reduced so + * that it is compliant with a !power-of-2 boundary. + */ + if (!is_power_of_2(boundary_sectors)) + goto unsupported; + } + + blk_atomic_writes_update_limits(lim); + return; + +unsupported: + lim->atomic_write_max_sectors = 0; + lim->atomic_write_boundary_sectors = 0; + lim->atomic_write_unit_min = 0; + lim->atomic_write_unit_max = 0; +} + /* * Check that the limits in lim are valid, initialize defaults for unset * values, and cap values based on others where needed.
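A hedged worked example of blk_queue_max_guaranteed_bio() and the unit clamping above, with illustrative numbers only: assume max_segments = 128 (below BIO_MAX_VECS = 256), a 4096-byte PAGE_SIZE, a 512-byte logical block size and a generous max_hw_sectors. The guaranteed bio length is then 2 * 512 + 126 * 4096 = 517120 bytes, and rounddown_pow_of_two(517120) gives a unit_limit of 262144 bytes (256 KiB), which caps both atomic_write_unit_min and atomic_write_unit_max regardless of what the hardware advertises.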
@@ -272,6 +358,8 @@ static int blk_validate_limits(struct queue_limits *lim) if (!(lim->features & BLK_FEAT_WRITE_CACHE)) lim->features &= ~BLK_FEAT_FUA; + blk_validate_atomic_write_limits(lim); + err = blk_validate_integrity_limits(lim); if (err) return err; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 59e6d111ed05..1a984179f3ac 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -118,6 +118,30 @@ static ssize_t queue_max_discard_segments_show(struct request_queue *q, return queue_var_show(queue_max_discard_segments(q), page); } +static ssize_t queue_atomic_write_max_bytes_show(struct request_queue *q, + char *page) +{ + return queue_var_show(queue_atomic_write_max_bytes(q), page); +} + +static ssize_t queue_atomic_write_boundary_show(struct request_queue *q, + char *page) +{ + return queue_var_show(queue_atomic_write_boundary_bytes(q), page); +} + +static ssize_t queue_atomic_write_unit_min_show(struct request_queue *q, + char *page) +{ + return queue_var_show(queue_atomic_write_unit_min_bytes(q), page); +} + +static ssize_t queue_atomic_write_unit_max_show(struct request_queue *q, + char *page) +{ + return queue_var_show(queue_atomic_write_unit_max_bytes(q), page); +} + static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.max_integrity_segments, page); @@ -505,6 +529,11 @@ QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes"); QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes"); QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data"); +QUEUE_RO_ENTRY(queue_atomic_write_max_bytes, "atomic_write_max_bytes"); +QUEUE_RO_ENTRY(queue_atomic_write_boundary, "atomic_write_boundary_bytes"); +QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes"); +QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes"); + QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes"); QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes"); QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes"); @@ -626,6 +655,10 @@ static struct attribute *queue_attrs[] = { &queue_discard_max_entry.attr, &queue_discard_max_hw_entry.attr, &queue_discard_zeroes_data_entry.attr, + &queue_atomic_write_max_bytes_entry.attr, + &queue_atomic_write_boundary_entry.attr, + &queue_atomic_write_unit_min_entry.attr, + &queue_atomic_write_unit_max_entry.attr, &queue_write_same_max_entry.attr, &queue_write_zeroes_max_entry.attr, &queue_zone_append_max_entry.attr, diff --git a/block/blk.h b/block/blk.h index 20c5718815e2..d0a986d8ee50 100644 --- a/block/blk.h +++ b/block/blk.h @@ -194,6 +194,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq) if (unlikely(op == REQ_OP_WRITE_ZEROES)) return q->limits.max_write_zeroes_sectors; + if (rq->cmd_flags & REQ_ATOMIC) + return q->limits.atomic_write_max_sectors; + return q->limits.max_sectors; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 781c4500491b..632edd71f8c6 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -162,6 +162,11 @@ typedef u16 blk_short_t; */ #define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17) +/* + * Invalid size or alignment. 
+ */ +#define BLK_STS_INVAL ((__force blk_status_t)19) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with @@ -370,7 +375,7 @@ enum req_flag_bits { __REQ_SWAP, /* swap I/O */ __REQ_DRV, /* for driver use */ __REQ_FS_PRIVATE, /* for file system (submitter) use */ - + __REQ_ATOMIC, /* for atomic write operations */ /* * Command specific flags, keep last: */ @@ -402,6 +407,7 @@ enum req_flag_bits { #define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP) #define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV) #define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE) +#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC) #define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index fb7d4c21bba8..4816f3b1d528 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -377,6 +377,16 @@ struct queue_limits { unsigned int discard_alignment; unsigned int zone_write_granularity; + /* atomic write limits */ + unsigned int atomic_write_hw_max; + unsigned int atomic_write_max_sectors; + unsigned int atomic_write_hw_boundary; + unsigned int atomic_write_boundary_sectors; + unsigned int atomic_write_hw_unit_min; + unsigned int atomic_write_unit_min; + unsigned int atomic_write_hw_unit_max; + unsigned int atomic_write_unit_max; + unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; @@ -1403,6 +1413,30 @@ static inline int queue_dma_alignment(const struct request_queue *q) return q ? q->limits.dma_alignment : 511; } +static inline unsigned int +queue_atomic_write_unit_max_bytes(const struct request_queue *q) +{ + return q->limits.atomic_write_unit_max; +} + +static inline unsigned int +queue_atomic_write_unit_min_bytes(const struct request_queue *q) +{ + return q->limits.atomic_write_unit_min; +} + +static inline unsigned int +queue_atomic_write_boundary_bytes(const struct request_queue *q) +{ + return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT; +} + +static inline unsigned int +queue_atomic_write_max_bytes(const struct request_queue *q) +{ + return q->limits.atomic_write_max_sectors << SECTOR_SHIFT; +} + static inline unsigned int bdev_dma_alignment(struct block_device *bdev) { return queue_dma_alignment(bdev_get_queue(bdev)); @@ -1644,6 +1678,27 @@ struct io_comp_batch { void (*complete)(struct io_comp_batch *); }; +static inline bool bdev_can_atomic_write(struct block_device *bdev) +{ + struct request_queue *bd_queue = bdev->bd_queue; + struct queue_limits *limits = &bd_queue->limits; + + if (!limits->atomic_write_unit_min) + return false; + + if (bdev_is_partition(bdev)) { + sector_t bd_start_sect = bdev->bd_start_sect; + unsigned int alignment = + max(limits->atomic_write_unit_min, + limits->atomic_write_hw_boundary); + + if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT)) + return false; + } + + return true; +} + #define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } #endif /* _LINUX_BLKDEV_H */ -- cgit v1.2.3-58-ga151 From 9abcfbd235f59fb5b6379e5bc0231dad831ebace Mon Sep 17 00:00:00 2001 From: Prasad Singamsetty Date: Thu, 20 Jun 2024 12:53:55 +0000 Subject: block: Add atomic write support for statx Extend the statx system call to return additional info for atomic write support if the specified file is a block device. Reviewed-by: Martin K. Petersen Signed-off-by: Prasad Singamsetty Signed-off-by: John Garry Reviewed-by: Keith Busch Acked-by: Darrick J.
Wong Reviewed-by: Darrick J. Wong Reviewed-by: Luis Chamberlain Link: https://lore.kernel.org/r/20240620125359.2684798-7-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/bdev.c | 36 ++++++++++++++++++++++++++---------- fs/stat.c | 16 +++++++++------- include/linux/blkdev.h | 6 ++++-- 3 files changed, 39 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/block/bdev.c b/block/bdev.c index ced4ac990ec8..1b4af2cc3b1e 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -1260,23 +1260,39 @@ void sync_bdevs(bool wait) } /* - * Handle STATX_DIOALIGN for block devices. - * - * Note that the inode passed to this is the inode of a block device node file, - * not the block device's internal inode. Therefore it is *not* valid to use - * I_BDEV() here; the block device has to be looked up by i_rdev instead. + * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices. */ -void bdev_statx_dioalign(struct inode *inode, struct kstat *stat) +void bdev_statx(struct inode *backing_inode, struct kstat *stat, + u32 request_mask) { struct block_device *bdev; - bdev = blkdev_get_no_open(inode->i_rdev); + if (!(request_mask & (STATX_DIOALIGN | STATX_WRITE_ATOMIC))) + return; + + /* + * Note that backing_inode is the inode of a block device node file, + * not the block device's internal inode. Therefore it is *not* valid + * to use I_BDEV() here; the block device has to be looked up by i_rdev + * instead. + */ + bdev = blkdev_get_no_open(backing_inode->i_rdev); if (!bdev) return; - stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; - stat->dio_offset_align = bdev_logical_block_size(bdev); - stat->result_mask |= STATX_DIOALIGN; + if (request_mask & STATX_DIOALIGN) { + stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; + stat->dio_offset_align = bdev_logical_block_size(bdev); + stat->result_mask |= STATX_DIOALIGN; + } + + if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { + struct request_queue *bd_queue = bdev->bd_queue; + + generic_fill_statx_atomic_writes(stat, + queue_atomic_write_unit_min_bytes(bd_queue), + queue_atomic_write_unit_max_bytes(bd_queue)); + } blkdev_put_no_open(bdev); } diff --git a/fs/stat.c b/fs/stat.c index 72d0e6357b91..bd0698dfd7b3 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -265,6 +265,7 @@ static int vfs_statx(int dfd, struct filename *filename, int flags, { struct path path; unsigned int lookup_flags = getname_statx_lookup_flags(flags); + struct inode *backing_inode; int error; if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH | @@ -290,13 +291,14 @@ retry: stat->attributes |= STATX_ATTR_MOUNT_ROOT; stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT; - /* Handle STATX_DIOALIGN for block devices. */ - if (request_mask & STATX_DIOALIGN) { - struct inode *inode = d_backing_inode(path.dentry); - - if (S_ISBLK(inode->i_mode)) - bdev_statx_dioalign(inode, stat); - } + /* + * If this is a block device inode, override the filesystem + * attributes with the block device specific parameters that need to be + * obtained from the bdev backing inode. 
+ */ + backing_inode = d_backing_inode(path.dentry); + if (S_ISBLK(backing_inode->i_mode)) + bdev_statx(backing_inode, stat, request_mask); path_put(&path); if (retry_estale(error, lookup_flags)) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4816f3b1d528..2800231046a9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1638,7 +1638,8 @@ int sync_blockdev(struct block_device *bdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend); int sync_blockdev_nowait(struct block_device *bdev); void sync_bdevs(bool wait); -void bdev_statx_dioalign(struct inode *inode, struct kstat *stat); +void bdev_statx(struct inode *backing_inode, struct kstat *stat, + u32 request_mask); void printk_all_partitions(void); int __init early_lookup_bdev(const char *pathname, dev_t *dev); #else @@ -1656,7 +1657,8 @@ static inline int sync_blockdev_nowait(struct block_device *bdev) static inline void sync_bdevs(bool wait) { } -static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat) +static inline void bdev_statx(struct inode *backing_inode, struct kstat *stat, + u32 request_mask) { } static inline void printk_all_partitions(void) -- cgit v1.2.3-58-ga151 From bf4ae8f2e6407a779c0368eb0f3e047a8333be17 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 20 Jun 2024 12:53:57 +0000 Subject: scsi: sd: Atomic write support Support is divided into two main areas: - reading VPD pages and setting sdev request_queue limits - supporting the WRITE ATOMIC (16) command and tracing The relevant block limits VPD page needs to be read to allow the block layer request_queue atomic write limits to be set. These VPD page limits are described in sbc4r22 section 6.6.4 - Block limits VPD page. There are five limits of interest: - MAXIMUM ATOMIC TRANSFER LENGTH - ATOMIC ALIGNMENT - ATOMIC TRANSFER LENGTH GRANULARITY - MAXIMUM ATOMIC TRANSFER LENGTH WITH BOUNDARY - MAXIMUM ATOMIC BOUNDARY SIZE MAXIMUM ATOMIC TRANSFER LENGTH is the maximum length for a WRITE ATOMIC (16) command. It will not be greater than the device MAXIMUM TRANSFER LENGTH. ATOMIC ALIGNMENT and ATOMIC TRANSFER LENGTH GRANULARITY are the minimum alignment and length values for an atomic write in terms of logical blocks. Unlike NVMe, SCSI does not specify an LBA space boundary, but does specify a per-IO boundary granularity. The maximum boundary size is specified in MAXIMUM ATOMIC BOUNDARY SIZE. When used, this boundary value is set in the WRITE ATOMIC (16) ATOMIC BOUNDARY field - the layout for the WRITE_ATOMIC_16 command can be found in sbc4r22 section 5.48. This boundary value is the granularity size at which the device may atomically write the data. A value of zero in the WRITE ATOMIC (16) ATOMIC BOUNDARY field means that all data must be atomically written together. MAXIMUM ATOMIC TRANSFER LENGTH WITH BOUNDARY is the maximum atomic write length if a non-zero boundary value is set. For atomic write support, the WRITE ATOMIC (16) boundary is not of much interest, as the block layer expects each request submitted to be executed atomically. However, the SCSI spec does leave itself open to a quirky scenario where MAXIMUM ATOMIC TRANSFER LENGTH is zero, yet MAXIMUM ATOMIC TRANSFER LENGTH WITH BOUNDARY and MAXIMUM ATOMIC BOUNDARY SIZE are both non-zero. This case will be supported. To set the block layer request_queue atomic write capabilities, sanitize the VPD page limits and set limits as follows: - atomic_write_unit_min is derived from granularity and alignment values.
If no granularity value is set, use the physical block size - atomic_write_unit_max is derived from MAXIMUM ATOMIC TRANSFER LENGTH. In the scenario where MAXIMUM ATOMIC TRANSFER LENGTH is zero and boundary limits are non-zero, use MAXIMUM ATOMIC BOUNDARY SIZE for atomic_write_unit_max. New flag scsi_disk.use_atomic_write_boundary is set for this scenario. - atomic_write_boundary_bytes is always set to zero. SCSI also supports a WRITE ATOMIC (32) command, which is for devices with type 2 protection enabled. This is not going to be supported for now, so check for T10_PI_TYPE2_PROTECTION when setting any request_queue limits. To handle an atomic write request, add support for the WRITE ATOMIC (16) command in the handler sd_setup_atomic_cmnd(). Flag use_atomic_write_boundary is checked here for encoding the ATOMIC BOUNDARY field. Trace info is also added for the WRITE_ATOMIC_16 command. Reviewed-by: Martin K. Petersen Signed-off-by: John Garry Acked-by: Darrick J. Wong Reviewed-by: Darrick J. Wong Link: https://lore.kernel.org/r/20240620125359.2684798-9-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- drivers/scsi/scsi_trace.c | 22 +++++++++++ drivers/scsi/sd.c | 93 ++++++++++++++++++++++++++++++++++++++++++++- drivers/scsi/sd.h | 8 ++++ include/scsi/scsi_proto.h | 1 + include/trace/events/scsi.h | 1 + 5 files changed, 124 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 41a950075913..3e47c4472a80 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -325,6 +325,26 @@ out: return ret; } +static const char * +scsi_trace_atomic_write16_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + unsigned int boundary_size; + unsigned int nr_blocks; + sector_t lba; + + lba = get_unaligned_be64(&cdb[2]); + boundary_size = get_unaligned_be16(&cdb[10]); + nr_blocks = get_unaligned_be16(&cdb[12]); + + trace_seq_printf(p, "lba=%llu txlen=%u boundary_size=%u", + lba, nr_blocks, boundary_size); + + trace_seq_putc(p, 0); + + return ret; +} + static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { @@ -385,6 +405,8 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_zbc_in(p, cdb, len); case ZBC_OUT: return scsi_trace_zbc_out(p, cdb, len); + case WRITE_ATOMIC_16: + return scsi_trace_atomic_write16_out(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a27f1c7f1b61..525f48c97f5e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -939,6 +939,64 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) return scsi_alloc_sgtables(cmd); } +static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim) +{ + unsigned int logical_block_size = sdkp->device->sector_size, + physical_block_size_sectors, max_atomic, unit_min, unit_max; + + if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) || + sdkp->protection_type == T10_PI_TYPE2_PROTECTION) + return; + + physical_block_size_sectors = sdkp->physical_block_size / + sdkp->device->sector_size; + + unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ? + sdkp->atomic_granularity : + physical_block_size_sectors); + + /* + * Only use atomic boundary when we have the odd scenario of + * sdkp->max_atomic == 0, which the spec does permit. 
+ */ + if (sdkp->max_atomic) { + max_atomic = sdkp->max_atomic; + unit_max = rounddown_pow_of_two(sdkp->max_atomic); + sdkp->use_atomic_write_boundary = 0; + } else { + max_atomic = sdkp->max_atomic_with_boundary; + unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary); + sdkp->use_atomic_write_boundary = 1; + } + + /* + * Ensure compliance with granularity and alignment. For now, keep it + * simple and just don't support atomic writes for values mismatched + * with max_{boundary}atomic, physical block size, and + * atomic_granularity itself. + * + * We're really being distrustful by checking unit_max also... + */ + if (sdkp->atomic_granularity > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_granularity) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_granularity) + return; + } + + if (sdkp->atomic_alignment > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_alignment) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_alignment) + return; + } + + lim->atomic_write_hw_max = max_atomic * logical_block_size; + lim->atomic_write_hw_boundary = 0; + lim->atomic_write_hw_unit_min = unit_min * logical_block_size; + lim->atomic_write_hw_unit_max = unit_max * logical_block_size; +} + static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) { @@ -1237,6 +1295,26 @@ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; } +static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd, + sector_t lba, unsigned int nr_blocks, + bool boundary, unsigned char flags) +{ + cmd->cmd_len = 16; + cmd->cmnd[0] = WRITE_ATOMIC_16; + cmd->cmnd[1] = flags; + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + if (boundary) + put_unaligned_be16(nr_blocks, &cmd->cmnd[10]); + else + put_unaligned_be16(0, &cmd->cmnd[10]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + cmd->cmnd[14] = 0; + cmd->cmnd[15] = 0; + + return BLK_STS_OK; +} + static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); @@ -1302,6 +1380,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); + } else if (rq->cmd_flags & REQ_ATOMIC && write) { + ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, + sdkp->use_atomic_write_boundary, + protect | fua); } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); @@ -3264,7 +3346,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp, sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); if (!sdkp->lbpme) - goto out; + goto config_atomic; lba_count = get_unaligned_be32(&vpd->data[20]); desc_count = get_unaligned_be32(&vpd->data[24]); @@ -3279,6 +3361,15 @@ static void sd_read_block_limits(struct scsi_disk *sdkp, get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); + +config_atomic: + sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); + sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); + sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]); + sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]); + sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]); + + sd_config_atomic(sdkp, lim); } out: diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 
7603b3c67b23..36382eca941c 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -115,6 +115,13 @@ struct scsi_disk { u32 max_unmap_blocks; u32 unmap_granularity; u32 unmap_alignment; + + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; unsigned int physical_block_size; unsigned int max_medium_access_timeouts; @@ -148,6 +155,7 @@ struct scsi_disk { unsigned security : 1; unsigned ignore_medium_access_errors : 1; unsigned rscs : 1; /* reduced stream control support */ + unsigned use_atomic_write_boundary : 1; }; #define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index 843106e1109f..70e1262b2e20 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -120,6 +120,7 @@ #define WRITE_SAME_16 0x93 #define ZBC_OUT 0x94 #define ZBC_IN 0x95 +#define WRITE_ATOMIC_16 0x9c #define SERVICE_ACTION_BIDIRECTIONAL 0x9d #define SERVICE_ACTION_IN_16 0x9e #define SERVICE_ACTION_OUT_16 0x9f diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index 8e2d9b1b0e77..05f1945ed204 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -102,6 +102,7 @@ scsi_opcode_name(WRITE_32), \ scsi_opcode_name(WRITE_SAME_32), \ scsi_opcode_name(ATA_16), \ + scsi_opcode_name(WRITE_ATOMIC_16), \ scsi_opcode_name(ATA_12)) #define scsi_hostbyte_name(result) { result, #result } -- cgit v1.2.3-58-ga151 From b6cfe2287df6e26d685af8a8a96ed1bf87bdde28 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Fri, 21 Jun 2024 12:15:05 +0900 Subject: block: Define bdev_nr_zones() as an inline function There is no need for bdev_nr_zones() to be an exported function calculating the number of zones of a block device. Instead, given that all callers use this helper with a fully initialized block device that has a gendisk, we can redefine this function as an inline helper in blkdev.h. Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240621031506.759397-3-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-zoned.c | 18 ------------------ include/linux/blkdev.h | 6 +++++- 2 files changed, 5 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 8f89705f5e1c..601c21a224c9 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -115,24 +115,6 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond) } EXPORT_SYMBOL_GPL(blk_zone_cond_str); -/** - * bdev_nr_zones - Get number of zones - * @bdev: Target device - * - * Return the total number of zones of a zoned block device. For a block - * device without zone capabilities, the number of zones is always 0. 
- */ -unsigned int bdev_nr_zones(struct block_device *bdev) -{ - sector_t zone_sectors = bdev_zone_sectors(bdev); - - if (!bdev_is_zoned(bdev)) - return 0; - return (bdev_nr_sectors(bdev) + zone_sectors - 1) >> - ilog2(zone_sectors); -} -EXPORT_SYMBOL_GPL(bdev_nr_zones); - /** * blkdev_report_zones - Get zones information * @bdev: Target block device diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2800231046a9..1078a7d51295 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -673,7 +673,6 @@ static inline bool blk_queue_is_zoned(struct request_queue *q) } #ifdef CONFIG_BLK_DEV_ZONED -unsigned int bdev_nr_zones(struct block_device *bdev); static inline unsigned int disk_nr_zones(struct gendisk *disk) { @@ -687,6 +686,11 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) return sector >> ilog2(disk->queue->limits.chunk_sectors); } +static inline unsigned int bdev_nr_zones(struct block_device *bdev) +{ + return disk_nr_zones(bdev->bd_disk); +} + static inline unsigned int bdev_max_open_zones(struct block_device *bdev) { return bdev->bd_disk->queue->limits.max_open_zones; -- cgit v1.2.3-58-ga151 From caaf7101c01a91a882d3da2f566579dda692367d Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Fri, 21 Jun 2024 12:15:06 +0900 Subject: block: Cleanup block device zone helpers There is no need to conditionally define on CONFIG_BLK_DEV_ZONED the inline helper functions bdev_nr_zones(), bdev_max_open_zones(), bdev_max_active_zones() and disk_zone_no() as these functions will return the correct value in all cases (zoned device or not, including when CONFIG_BLK_DEV_ZONED is not set). Furthermore, the disk_nr_zones() definition can be simplified as disk->nr_zones is always 0 for regular block devices. Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240621031506.759397-4-dlemoal@kernel.org Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 44 ++++++++++++-------------------------------- 1 file changed, 12 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1078a7d51295..e89003360c17 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -673,11 +673,21 @@ static inline bool blk_queue_is_zoned(struct request_queue *q) } #ifdef CONFIG_BLK_DEV_ZONED - static inline unsigned int disk_nr_zones(struct gendisk *disk) { - return blk_queue_is_zoned(disk->queue) ?
disk->nr_zones : 0; + return disk->nr_zones; +} +bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs); +#else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int disk_nr_zones(struct gendisk *disk) +{ + return 0; +} +static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) +{ + return false; } +#endif /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { @@ -701,36 +711,6 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev) return bdev->bd_disk->queue->limits.max_active_zones; } -bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs); -#else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int bdev_nr_zones(struct block_device *bdev) -{ - return 0; -} - -static inline unsigned int disk_nr_zones(struct gendisk *disk) -{ - return 0; -} -static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) -{ - return 0; -} -static inline unsigned int bdev_max_open_zones(struct block_device *bdev) -{ - return 0; -} - -static inline unsigned int bdev_max_active_zones(struct block_device *bdev) -{ - return 0; -} -static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) -{ - return false; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) -- cgit v1.2.3-58-ga151 From 44348870de4b8f292f97b84583a298d66fbaf738 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jun 2024 19:38:35 +0200 Subject: block: fix the blk_queue_nonrot polarity Take care of the inverse polarity of the BLK_FEAT_ROTATIONAL flag vs the old nonrot helper. Fixes: bd4a633b6f7c ("block: move the nonrot flag to queue_limits") Reported-by: kernel test robot Reported-by: Keith Busch Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20240624173835.76753-1-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e89003360c17..b2f1362c4681 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -617,7 +617,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) -#define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL) +#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_zone_resetall(q) \ ((q)->limits.features & BLK_FEAT_ZONE_RESETALL) -- cgit v1.2.3-58-ga151 From d89a5c6705998ddc42b104f8eabd3c4b9e8fde08 Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Mon, 3 Jun 2024 20:57:00 +0800 Subject: nvme: fix status magic numbers Replace some magic numbers for SC and SCT with enums and macros.
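As a hedged illustration of the new helpers (mask and macro values copied from the diff below; the sample status word is arbitrary), decomposing a raw NVMe completion status works like this:

#include <stdio.h>

#define NVME_SC_MASK		0x00ff	/* Status Code */
#define NVME_SCT_MASK		0x0700	/* Status Code Type */
#define NVME_SCT_SC_MASK	(NVME_SCT_MASK | NVME_SC_MASK)
#define NVME_SCT(status)	((status) >> 8 & 7)

int main(void)
{
	unsigned short status = 0x4371;	/* DNR bit | NVME_SC_HOST_ABORTED_CMD */

	printf("sct=%#x sc=%#x sct_sc=%#x\n", NVME_SCT(status),
	       status & NVME_SC_MASK, status & NVME_SCT_SC_MASK);
	/* prints: sct=0x3 sc=0x71 sct_sc=0x371 */
	return 0;
}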
Signed-off-by: Weiwen Hu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/constants.c | 2 +- drivers/nvme/host/core.c | 18 +++++++++--------- drivers/nvme/host/multipath.c | 2 +- drivers/nvme/host/nvme.h | 4 ++-- drivers/nvme/host/pr.c | 2 +- include/linux/nvme.h | 14 ++++++++++++-- 6 files changed, 26 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 6f2ebb5fcdb0..2b9e6cfaf2a8 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = { const char *nvme_get_error_status_str(u16 status) { - status &= 0x7ff; + status &= NVME_SCT_SC_MASK; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) return nvme_statuses[status]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 80d8460bb92f..744403b2d852 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -261,7 +261,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -329,8 +329,8 @@ static void nvme_log_error(struct request *req) nvme_sect_to_lba(ns->head, blk_rq_pos(req)), blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : ""); return; @@ -341,8 +341,8 @@ static void nvme_log_error(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : ""); } @@ -359,8 +359,8 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : "", nr->cmd->common.cdw10, @@ -388,7 +388,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) + if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; if (req->cmd_flags & REQ_NVME_MPATH) { @@ -1256,7 +1256,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: - * + * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. 
 */ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 4933194d00e5..8a4d0e377114 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -83,7 +83,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - u16 status = nvme_req(req)->status & 0x7ff; + u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK; unsigned long flags; struct bio *bio; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 969349068086..f3a7be42c4d0 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -689,7 +689,7 @@ static inline u32 nvme_bytes_to_numd(size_t len) static inline bool nvme_is_ana_error(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: case NVME_SC_ANA_PERSISTENT_LOSS: @@ -702,7 +702,7 @@ static inline bool nvme_is_ana_error(u16 status) static inline bool nvme_is_path_error(u16 status) { /* check for a status code type of 'path related status' */ - return (status & 0x700) == 0x300; + return (status & NVME_SCT_MASK) == NVME_SCT_PATH; } /* diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index a6db5edfab03..7347ddf85f00 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -77,7 +77,7 @@ static int nvme_status_to_pr_err(int status) if (nvme_is_path_error(status)) return PR_STS_PATH_FAILED; - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 425573202295..c8d7fdd095a1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1846,6 +1846,7 @@ enum { /* * Generic Command Status: */ + NVME_SCT_GENERIC = 0x0, NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -1893,6 +1894,7 @@ enum { /* * Command Specific Status: */ + NVME_SCT_COMMAND_SPECIFIC = 0x100, NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -1966,6 +1968,7 @@ enum { /* * Media and Data Integrity Errors: */ + NVME_SCT_MEDIA_ERROR = 0x200, NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -1978,6 +1981,7 @@ enum { /* * Path-related Errors: */ + NVME_SCT_PATH = 0x300, NVME_SC_INTERNAL_PATH_ERROR = 0x300, NVME_SC_ANA_PERSISTENT_LOSS = 0x301, NVME_SC_ANA_INACCESSIBLE = 0x302, @@ -1986,11 +1990,17 @@ enum { NVME_SC_HOST_PATH_ERROR = 0x370, NVME_SC_HOST_ABORTED_CMD = 0x371, - NVME_SC_CRD = 0x1800, + NVME_SC_MASK = 0x00ff, /* Status Code */ + NVME_SCT_MASK = 0x0700, /* Status Code Type */ + NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK, + + NVME_SC_CRD = 0x1800, /* Command Retry Delayed */ NVME_SC_MORE = 0x2000, - NVME_SC_DNR = 0x4000, + NVME_SC_DNR = 0x4000, /* Do Not Retry */ }; +#define NVME_SCT(status) ((status) >> 8 & 7) + struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: -- cgit v1.2.3-58-ga151 From dd0b0a4a2c5d7209457dc172997d1243ad269cfa Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Mon, 3 Jun 2024 20:57:01 +0800 Subject: nvme: rename CDR/MORE/DNR to NVME_STATUS_* The CDR/MORE/DNR fields do not belong to the SC field in the NVMe spec, so rename them to NVME_STATUS_* to avoid confusion.
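A hedged sketch of how the renamed bits are used, mirroring the retry logic touched in the diff below (bit values copied from the enum in the previous patch; the sample status word is arbitrary):

#include <stdio.h>

#define NVME_STATUS_CRD	0x1800	/* Command Retry Delayed */
#define NVME_STATUS_DNR	0x4000	/* Do Not Retry */

int main(void)
{
	unsigned short status = 0x0a81;	/* CRD index 1 | NVME_SC_READ_ERROR */

	if (status & NVME_STATUS_DNR) {
		puts("do not retry");
		return 0;
	}
	/* The two CRD bits select one of the controller's CRDT delay values. */
	printf("retry, crd index %u\n", (status & NVME_STATUS_CRD) >> 11);
	return 0;
}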
Signed-off-by: Weiwen Hu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 22 ++++++++-------- drivers/nvme/host/fabrics.c | 10 ++++---- drivers/nvme/host/fault_inject.c | 2 +- drivers/nvme/host/fc.c | 6 ++--- drivers/nvme/host/nvme.h | 2 +- drivers/nvme/target/admin-cmd.c | 24 +++++++++--------- drivers/nvme/target/core.c | 46 +++++++++++++++++----------------- drivers/nvme/target/discovery.c | 14 +++++------ drivers/nvme/target/fabrics-cmd-auth.c | 16 ++++++------ drivers/nvme/target/fabrics-cmd.c | 36 +++++++++++++------------- drivers/nvme/target/io-cmd-bdev.c | 12 ++++----- drivers/nvme/target/passthru.c | 10 ++++---- drivers/nvme/target/rdma.c | 10 ++++---- drivers/nvme/target/tcp.c | 4 +-- drivers/nvme/target/zns.c | 30 +++++++++++----------- include/linux/nvme.h | 6 ++--- 16 files changed, 125 insertions(+), 125 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 744403b2d852..8d8e7a3549c6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req) u16 crd; /* The mask and shift result must be <= 3 */ - crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; @@ -331,8 +331,8 @@ static void nvme_log_error(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); return; } @@ -343,8 +343,8 @@ static void nvme_log_error(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); } static void nvme_log_err_passthru(struct request *req) @@ -361,8 +361,8 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_error_status_str(nr->status), NVME_SCT(nr->status), /* Status Code Type */ nr->status & NVME_SC_MASK, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : "", + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : "", nr->cmd->common.cdw10, nr->cmd->common.cdw11, nr->cmd->common.cdw12, @@ -384,7 +384,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return COMPLETE; if (blk_noretry_request(req) || - (nvme_req(req)->status & NVME_SC_DNR) || + (nvme_req(req)->status & NVME_STATUS_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; @@ -3940,7 +3940,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { - int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, @@ -3956,7 +3956,7 @@ out: * * TODO: we should probably schedule a delayed retry here. 
*/ - if (ret > 0 && (ret & NVME_SC_DNR)) + if (ret > 0 && (ret & NVME_STATUS_DNR)) nvme_ns_remove(ns); } @@ -4147,7 +4147,7 @@ static void nvme_scan_work(struct work_struct *work) * they report) but don't actually support it. */ ret = nvme_scan_ns_list(ctrl); - if (ret > 0 && ret & NVME_SC_DNR) + if (ret > 0 && ret & NVME_STATUS_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index ceb9c0ed3120..b5a4b5fd573e 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } @@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read64); @@ -275,7 +275,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_write32); @@ -295,7 +295,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & ~NVME_SC_DNR; + int err_sctype = errval & ~NVME_STATUS_DNR; if (errval < 0) { dev_err(ctrl->device, @@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); */ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) return false; if (status == -EKEYREJECTED) diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 1ba10a5c656d..1d1b6441a339 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req) /* inject status code and DNR bit */ status = fault_inject->status; if (fault_inject->dont_retry) - status |= NVME_SC_DNR; + status |= NVME_STATUS_DNR; nvme_req(req)->status = status; } } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d5a383766b34..fee84b0055d0 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { if (portptr->port_state == FC_OBJSTATE_ONLINE) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) 
dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: reconnect failure\n", ctrl->cnum); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index f3a7be42c4d0..73442d3f504b 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -878,7 +878,7 @@ enum { NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), /* Set BLK_MQ_REQ_RESERVED when allocating request */ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), - /* Retry command when NVME_SC_DNR is not set in the result */ + /* Retry command when NVME_STATUS_DNR is not set in the result */ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), }; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index f5b7054a4a05..f7e1156ac7ec 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) @@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req) if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; out: nvmet_req_complete(req, status); @@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) pr_debug("unhandled identify cns %d on qid %d\n", req->cmd->identify.cns, req->sq->qid); req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } /* @@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) if (val32 & ~mask) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); @@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req) ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; if (ncqr == 0xffff || nsqr == 0xffff) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } nvmet_set_result(req, @@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req) status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: - status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { req->error_loc = 
offsetof(struct nvme_common_command, cdw11); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req) mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR); return; } ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; @@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 06f0c587f343..738fdfebb889 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -55,18 +55,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) return NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); - return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case -ENODATA: @@ -76,7 +76,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; } } @@ -86,7 +86,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req) req->sq->qid); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, @@ -97,7 +97,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -106,7 +106,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -115,7 +115,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) { if 
(sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -145,7 +145,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); @@ -444,7 +444,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req) req->error_loc = offsetof(struct nvme_common_command, nsid); if (nvmet_subsys_nsid_exists(subsys, nsid)) return NVME_SC_INTERNAL_PATH_ERROR; - return NVME_SC_INVALID_NS | NVME_SC_DNR; + return NVME_SC_INVALID_NS | NVME_STATUS_DNR; } percpu_ref_get(&req->ns->ref); @@ -904,7 +904,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return nvmet_parse_fabrics_io_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) @@ -966,7 +966,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -977,7 +977,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -995,7 +995,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, trace_nvmet_req_init(req, req->cmd); if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -1022,7 +1022,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1034,7 +1034,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1303,18 +1303,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req) if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got cmd %d while CC.EN == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!nvmet_check_auth_status(req))) 
{ pr_warn("qid %d not authenticated\n", req->sq->qid); - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; } return 0; } @@ -1388,7 +1388,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, int ret; u16 status; - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", @@ -1404,7 +1404,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, hostnqn, subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); - status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } @@ -1455,7 +1455,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { - status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; goto out_free_sqs; } ctrl->cntlid = ret; diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index ce54da8c6b36..28843df5fa7c 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (offset & 0x3) { req->error_loc = offsetof(struct nvme_get_log_page_command, lpo); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { req->error_loc = offsetof(struct nvme_identify, cns); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } switch (cmd->common.opcode) { @@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) default: pr_debug("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index d61b8c6ff3b2..d68c2dae5feb 100644 --- 
a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req) u8 dhchap_status; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, secp); goto done; } if (req->cmd->auth_send.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp0); goto done; } if (req->cmd->auth_send.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp1); goto done; } tl = le32_to_cpu(req->cmd->auth_send.tl); if (!tl) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, tl); goto done; @@ -438,26 +438,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) u16 status = 0; if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, secp); goto done; } if (req->cmd->auth_receive.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp0); goto done; } if (req->cmd->auth_receive.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } al = le32_to_cpu(req->cmd->auth_receive.al); if (!al) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, al); goto done; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 042b379cbb36..344ab75d8864 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvmf_property_set_command, offset); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } out: nvmet_req_complete(req, status); @@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->cap; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } else { @@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->csts; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } @@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct 
nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto err; } if (ctrl->sqs[qid] != NULL) { pr_warn("qid %u has already been created\n", qid); req->error_loc = offsetof(struct nvmf_connect_command, qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } /* for fabrics, this value applies to only the I/O Submission Queues */ @@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; } old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { pr_warn("queue already connected!\n"); req->error_loc = offsetof(struct nvmf_connect_command, opcode); - return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } /* note: convert queue size from 0's-based value to 1's-based value */ @@ -233,14 +233,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); req->error_loc = offsetof(struct nvmf_connect_command, recfmt); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } @@ -260,7 +260,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) dhchap_status); nvmet_ctrl_put(ctrl); if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) - status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); + status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR); else status = NVME_SC_INTERNAL; goto out; @@ -311,7 +311,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } @@ -320,13 +320,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req); if (!ctrl) { - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto out; } if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = 
NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } @@ -356,13 +356,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req) pr_debug("invalid command 0x%x on unconnected queue.\n", cmd->fabrics.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { pr_debug("invalid capsule type 0x%x on unconnected queue.\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->connect.qid == 0) diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index b628bc5ee998..0bda83d0fc3e 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -137,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) */ switch (blk_sts) { case BLK_STS_NOSPC: - status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET: - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: @@ -149,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; break; default: - status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case BLK_STS_MEDIUM: @@ -161,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) break; case BLK_STS_IOERR: default: - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); } @@ -358,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req) return 0; if (blkdev_issue_flush(req->ns->bdev)) - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; return 0; } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index bb4a69d538fd..b10e31ec0948 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) ns = nvme_find_get_ns(ctrl, nsid); if (unlikely(!ns)) { pr_err("failed to get passthru ns nsid:%u\n", nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) * emulated in the future if regular targets grow support for * this feature. 
*/ - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return nvmet_setup_passthru_command(req); @@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) case NVME_FEAT_RESV_PERSIST: /* No reservations, see nvmet_parse_passthru_io_cmd() */ default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } @@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; case NVME_ID_CNS_NS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; @@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; default: return nvmet_setup_passthru_command(req); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 689bb5d3cfdc..498b3ca59651 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; } /* no data command? */ @@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { @@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 380f22ee3ebb..d305d7162dde 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; if (len > cmd->req.port->inline_data_size) - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 0021d06041c1..af9e13be7678 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) 
{ req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) } if (!bdev_is_zoned(req->ns->bdev)) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_identify, nsid); goto out; } @@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; } if (out_bufsize < sizeof(struct nvme_zone_report)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.pr) { @@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.zrasf) { @@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) return NVME_SC_SUCCESS; case -EINVAL: case -EIO: - return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; default: return NVME_SC_INTERNAL; } @@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) default: /* this is needed to quiet compiler warning */ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (op == REQ_OP_LAST) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; goto out; } @@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (sect >= get_capacity(bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (zone_sectors - 1)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { 
req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) } if (total_len != nvmet_rw_data_len(req)) { - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out_put_bio; } diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c8d7fdd095a1..27faae34245d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1994,9 +1994,9 @@ enum { NVME_SCT_MASK = 0x0700, /* Status Code Type */ NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK, - NVME_SC_CRD = 0x1800, /* Command Retry Delayed */ - NVME_SC_MORE = 0x2000, - NVME_SC_DNR = 0x4000, /* Do Not Retry */ + NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */ + NVME_STATUS_MORE = 0x2000, + NVME_STATUS_DNR = 0x4000, /* Do Not Retry */ }; #define NVME_SCT(status) ((status) >> 8 & 7) -- cgit v1.2.3-58-ga151 From 99032e9dbabcfc64bf71e94a1b662c4bfe534ed1 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 27 May 2024 07:15:23 +0200 Subject: nvmet-fc: implement host_traddr() Implement a callback to display the host transport address by adding a 'host_traddr' callback to nvmet_fc_target_template. Signed-off-by: Hannes Reinecke Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: James Smart Signed-off-by: Daniel Wagner Signed-off-by: Keith Busch --- drivers/nvme/target/fc.c | 33 +++++++++++++++++++++++++++++++++ include/linux/nvme-fc-driver.h | 4 ++++ 2 files changed, 37 insertions(+) (limited to 'include') diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 337ee1cb09ae..a8fb89da7281 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -2934,6 +2934,38 @@ nvmet_fc_discovery_chg(struct nvmet_port *port) tgtport->ops->discovery_event(&tgtport->fc_target_port); } +static ssize_t +nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_size) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_fc_tgt_queue *queue = + container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); + struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; + struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; + u64 wwnn, wwpn; + ssize_t ret = 0; + + if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) + return -ENODEV; + if (!hostport || !nvmet_fc_hostport_get(hostport)) { + ret = -ENODEV; + goto out_put; + } + + if (tgtport->ops->host_traddr) { + ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); + if (ret) + goto out_put_host; + ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); + } +out_put_host: + nvmet_fc_hostport_put(hostport); +out_put: + nvmet_fc_tgtport_put(tgtport); + return ret; +} + static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_FC, @@ -2943,6 +2975,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, .discovery_chg = nvmet_fc_discovery_chg, + .host_traddr = nvmet_fc_host_traddr, }; static int __init nvmet_fc_init_module(void) diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 4109f1bd6128..89ea1ebd975a 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -920,6 +920,9 @@ struct nvmet_fc_target_port { * further references to hosthandle.
* Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host(). * + * @host_traddr: called by the transport to retrieve the node name and + * port name of the host port address. + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -975,6 +978,7 @@ struct nvmet_fc_target_template { void (*ls_abort)(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq); void (*host_release)(void *hosthandle); + int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn); u32 max_hw_queues; u16 max_sgl_segments; -- cgit v1.2.3-58-ga151 From 210b1f6576e8b367907e7ff51ef425062e1468e4 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 24 Jun 2024 08:56:17 -0700 Subject: nvme-pci: do not directly handle subsys reset fallout Scheduling reset_work after an nvme subsystem reset is expected to fail on pcie, but this also prevents any handling the platform's pcie services may provide that might successfully recover the link without re-enumeration. Examples include AER, DPC, and Power's EEH. Provide a pci-specific operation that safely initiates a subsystem reset, and instead of scheduling reset work, read back the status register to trigger a pcie read error. Since this only affects pci, the other fabrics drivers subscribe to a generic nvmf subsystem reset that is exactly the same as before. The loop fabric doesn't use it because nvmet doesn't support setting that property anyway. And since we're using the magic NSSR value in two places now, provide a symbolic define for it. Reported-by: Nilay Shroff Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/fabrics.c | 15 +++++++++++++++ drivers/nvme/host/fabrics.h | 1 + drivers/nvme/host/fc.c | 1 + drivers/nvme/host/nvme.h | 14 +++----------- drivers/nvme/host/pci.c | 36 ++++++++++++++++++++++++++++++++++++ drivers/nvme/host/rdma.c | 1 + drivers/nvme/host/tcp.c | 1 + include/linux/nvme.h | 3 +++ 8 files changed, 61 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index b5a4b5fd573e..44e342a46f39 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -280,6 +280,21 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) } EXPORT_SYMBOL_GPL(nvmf_reg_write32); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl) +{ + int ret; + + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); +} +EXPORT_SYMBOL_GPL(nvmf_subsystem_reset); + /** * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for * connect() errors.
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 602135910ae9..21d75dc4a3a0 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -217,6 +217,7 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts) int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val); int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val); int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl); int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl); int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid); int nvmf_register_transport(struct nvmf_transport_ops *ops); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index fee84b0055d0..b81af7919e94 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3382,6 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_fc_free_ctrl, .submit_async_event = nvme_fc_submit_async_event, .delete_ctrl = nvme_fc_delete_ctrl, diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 73442d3f504b..c63f2b452369 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -551,6 +551,7 @@ struct nvme_ctrl_ops { int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); void (*free_ctrl)(struct nvme_ctrl *ctrl); void (*submit_async_event)(struct nvme_ctrl *ctrl); + int (*subsystem_reset)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); void (*stop_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); @@ -649,18 +650,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl); static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) { - int ret; - - if (!ctrl->subsystem) + if (!ctrl->subsystem || !ctrl->ops->subsystem_reset) return -ENOTTY; - if (!nvme_wait_reset(ctrl)) - return -EBUSY; - - ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); - if (ret) - return ret; - - return nvme_try_sched_reset(ctrl); + return ctrl->ops->subsystem_reset(ctrl); } /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c92125b0238d..c1e1ecd0f1df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1143,6 +1143,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) spin_unlock(&nvmeq->sq_lock); } +static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + int ret = 0; + + /* + * Taking the shutdown_lock ensures the BAR mapping is not being + * altered by reset_work. Holding this lock before the RESETTING state + * change, if successful, also ensures nvme_remove won't be able to + * proceed to iounmap until we're done. + */ + mutex_lock(&dev->shutdown_lock); + if (!dev->bar_mapped_size) { + ret = -ENODEV; + goto unlock; + } + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { + ret = -EBUSY; + goto unlock; + } + + writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR); + nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE); + + /* + * Read controller status to flush the previous write and trigger a + * pcie read error. 
+ */ + readl(dev->bar + NVME_REG_CSTS); +unlock: + mutex_unlock(&dev->shutdown_lock); + return ret; +} + static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { struct nvme_command c = { }; @@ -2859,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .reg_read64 = nvme_pci_reg_read64, .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, + .subsystem_reset = nvme_pci_subsystem_reset, .get_address = nvme_pci_get_address, .print_device_info = nvme_pci_print_device_info, .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5c44c7c5c688..2eb33842f971 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2201,6 +2201,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_rdma_free_ctrl, .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 3be67c98c906..a2a47d3ab99f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2662,6 +2662,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_tcp_free_ctrl, .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 27faae34245d..57e27e48c913 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -25,6 +25,9 @@ #define NVME_NSID_ALL 0xffffffff +/* Special NSSR value, 'NVMe' */ +#define NVME_SUBSYS_RESET 0x4E564D65 + enum nvme_subsys_type { /* Referral to another discovery type target subsystem */ NVME_NQN_DISC = 1, -- cgit v1.2.3-58-ga151 From cf546dd289e0f6d2594c25e2fb4e19ee67c6d988 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 27 May 2024 17:40:10 +0200 Subject: block: change rq_integrity_vec to respect the iterator If we allocate a bio that is larger than NVMe maximum request size, attach integrity metadata to it and send it to the NVMe subsystem, the integrity metadata will be corrupted. Splitting the bio works correctly. The function bio_split will clone the bio, trim the iterator of the first bio and advance the iterator of the second bio. However, the function rq_integrity_vec has a bug - it returns the first vector of the bio's metadata and completely disregards the metadata iterator that was advanced when the bio was split. Thus, the second bio uses the same metadata as the first bio and this leads to metadata corruption. This commit changes rq_integrity_vec, so that it calls mp_bvec_iter_bvec instead of returning the first vector. mp_bvec_iter_bvec reads the iterator and uses it to build a bvec for the current position in the iterator. The "queue_max_integrity_segments(rq->q) > 1" check was removed, because the updated rq_integrity_vec function works correctly with multiple segments. 
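As an illustration of the failure mode (a sketch, not part of the patch; it assumes the bio carries integrity metadata, max_sectors stands in for the split point, and error handling is omitted):

    struct bio *first = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);

    /*
     * 'first' covers the front of the data and of the metadata, while
     * 'bio' continues with bi_integrity->bip_iter advanced past the
     * bytes that now belong to 'first'.  Returning bip_vec[0] for the
     * second half therefore reuses the first half's metadata; building
     * the bvec with mp_bvec_iter_bvec(bip_vec, bip_iter) honors the
     * advanced iterator instead.
     */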
Signed-off-by: Mikulas Patocka Reviewed-by: Anuj Gupta Reviewed-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/49d1afaa-f934-6ed2-a678-e0d428c63a65@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 6 +++--- include/linux/blk-integrity.h | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 102a9fb0c65f..5d8035218de9 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct bio_vec bv = rq_integrity_vec(req); - iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), - rq_dma_dir(req), 0); + iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) return BLK_STS_IOERR; cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); @@ -967,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); + rq_integrity_vec(req).bv_len, rq_dma_dir(req)); } if (blk_rq_nr_phys_segments(req)) diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index d201140d77a3..0fdd62e6d4b0 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -90,14 +90,13 @@ static inline bool blk_integrity_rq(struct request *rq) } /* - * Return the first bvec that contains integrity data. Only drivers that are - * limited to a single integrity segment should use this helper. + * Return the current bvec that contains the integrity data. bip_iter may be + * advanced to iterate over the integrity data. */ -static inline struct bio_vec *rq_integrity_vec(struct request *rq) +static inline struct bio_vec rq_integrity_vec(struct request *rq) { - if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) - return NULL; - return rq->bio->bi_integrity->bip_vec; + return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec, + rq->bio->bi_integrity->bip_iter); } #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline int blk_rq_count_integrity_sg(struct request_queue *q, @@ -148,7 +147,8 @@ static inline int blk_integrity_rq(struct request *rq) static inline struct bio_vec *rq_integrity_vec(struct request *rq) { - return NULL; + /* the optimizer will remove all calls to this function */ + return (struct bio_vec){ }; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ -- cgit v1.2.3-58-ga151 From ec9b1cf0b0ebfb52274971a8a0e74e0a133f64fb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 16:26:24 +0200 Subject: block: rename BLK_FEAT_MISALIGNED This is a flag for ->flags and not a feature for ->features. And fix the one place that actually incorrectly cleared it from ->features. 
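To illustrate the intended split (a sketch, not from the patch): ->features holds capabilities a driver declares, while ->flags holds state the block layer tracks internally, so after the rename the two words are set independently:

    struct queue_limits lim = { };

    lim.features |= BLK_FEAT_WRITE_CACHE;  /* capability, set by the driver */
    lim.flags |= BLK_FLAG_MISALIGNED;      /* internal state, set by limits stacking */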
Signed-off-by: Christoph Hellwig Reviewed-by: John Garry Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240626142637.300624-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 18 +++++++++--------- include/linux/blkdev.h | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index ec7dbe93d5c3..ed39a55c5bae 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -351,7 +351,7 @@ static int blk_validate_limits(struct queue_limits *lim) if (lim->alignment_offset) { lim->alignment_offset &= (lim->physical_block_size - 1); - lim->features &= ~BLK_FEAT_MISALIGNED; + lim->flags &= ~BLK_FLAG_MISALIGNED; } if (!(lim->features & BLK_FEAT_WRITE_CACHE)) @@ -564,7 +564,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, if (!(b->features & BLK_FEAT_POLL)) t->features &= ~BLK_FEAT_POLL; - t->flags |= (b->flags & BLK_FEAT_MISALIGNED); + t->flags |= (b->flags & BLK_FLAG_MISALIGNED); t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_user_sectors = min_not_zero(t->max_user_sectors, @@ -603,7 +603,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Verify that top and bottom intervals line up */ if (max(top, bottom) % min(top, bottom)) { - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } } @@ -625,28 +625,28 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Physical block size a multiple of the logical block size? */ if (t->physical_block_size & (t->logical_block_size - 1)) { t->physical_block_size = t->logical_block_size; - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } /* Minimum I/O a multiple of the physical block size? */ if (t->io_min & (t->physical_block_size - 1)) { t->io_min = t->physical_block_size; - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } /* Optimal I/O a multiple of the physical block size? */ if (t->io_opt & (t->physical_block_size - 1)) { t->io_opt = 0; - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } /* chunk_sectors a multiple of the physical block size? 
*/ if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { t->chunk_sectors = 0; - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } @@ -656,7 +656,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, /* Verify that new alignment_offset is on a logical block boundary */ if (t->alignment_offset & (t->logical_block_size - 1)) { - t->flags |= BLK_FEAT_MISALIGNED; + t->flags |= BLK_FLAG_MISALIGNED; ret = -1; } @@ -809,7 +809,7 @@ int bdev_alignment_offset(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - if (q->limits.flags & BLK_FEAT_MISALIGNED) + if (q->limits.flags & BLK_FLAG_MISALIGNED) return -1; if (bdev_is_partition(bdev)) return queue_limit_alignment_offset(&q->limits, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b2f1362c4681..1a7e9d9c16d7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -347,7 +347,7 @@ enum { BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0), /* I/O topology is misaligned */ - BLK_FEAT_MISALIGNED = (1u << 1), + BLK_FLAG_MISALIGNED = (1u << 1), }; struct queue_limits { -- cgit v1.2.3-58-ga151 From fcf865e357f80285af12c0c9a49f89d71acb7f4b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 16:26:25 +0200 Subject: block: convert features and flags to __bitwise types ... and let sparse help us catch mismatches or abuses. Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240626142637.300624-5-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 6 ++-- include/linux/blkdev.h | 85 +++++++++++++++++++++++++------------------------- 2 files changed, 46 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3a167abecdce..2e6d9b918127 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -288,7 +288,7 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page) } static ssize_t queue_feature_store(struct request_queue *q, const char *page, - size_t count, unsigned int feature) + size_t count, blk_features_t feature) { struct queue_limits lim; unsigned long val; @@ -418,7 +418,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, static ssize_t queue_poll_show(struct request_queue *q, char *page) { - return queue_var_show(q->limits.features & BLK_FEAT_POLL, page); + return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page); } static ssize_t queue_poll_store(struct request_queue *q, const char *page, @@ -492,7 +492,7 @@ static ssize_t queue_fua_show(struct request_queue *q, char *page) static ssize_t queue_dax_show(struct request_queue *q, char *page) { - return queue_var_show(blk_queue_dax(q), page); + return queue_var_show(!!blk_queue_dax(q), page); } #define QUEUE_RO_ENTRY(_prefix, _name) \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1a7e9d9c16d7..b37826b350a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -283,55 +283,56 @@ static inline bool blk_op_is_passthrough(blk_opf_t op) } /* flags set by the driver in queue_limits.features */ -enum { - /* supports a volatile write cache */ - BLK_FEAT_WRITE_CACHE = (1u << 0), +typedef unsigned int __bitwise blk_features_t; - /* supports passing on the FUA bit */ - BLK_FEAT_FUA = (1u << 1), +/* supports a volatile write cache */ +#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0)) - /* rotational device (hard drive or floppy) */ - BLK_FEAT_ROTATIONAL = (1u << 2), +/* 
supports passing on the FUA bit */ +#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1)) - /* contributes to the random number pool */ - BLK_FEAT_ADD_RANDOM = (1u << 3), +/* rotational device (hard drive or floppy) */ +#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2)) - /* do disk/partitions IO accounting */ - BLK_FEAT_IO_STAT = (1u << 4), +/* contributes to the random number pool */ +#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3)) - /* don't modify data until writeback is done */ - BLK_FEAT_STABLE_WRITES = (1u << 5), +/* do disk/partitions IO accounting */ +#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4)) - /* always completes in submit context */ - BLK_FEAT_SYNCHRONOUS = (1u << 6), +/* don't modify data until writeback is done */ +#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5)) - /* supports REQ_NOWAIT */ - BLK_FEAT_NOWAIT = (1u << 7), +/* always completes in submit context */ +#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6)) - /* supports DAX */ - BLK_FEAT_DAX = (1u << 8), +/* supports REQ_NOWAIT */ +#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7)) - /* supports I/O polling */ - BLK_FEAT_POLL = (1u << 9), +/* supports DAX */ +#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8)) - /* is a zoned device */ - BLK_FEAT_ZONED = (1u << 10), +/* supports I/O polling */ +#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9)) - /* supports Zone Reset All */ - BLK_FEAT_ZONE_RESETALL = (1u << 11), +/* is a zoned device */ +#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10)) - /* supports PCI(e) p2p requests */ - BLK_FEAT_PCI_P2PDMA = (1u << 12), +/* supports Zone Reset All */ +#define BLK_FEAT_ZONE_RESETALL ((__force blk_features_t)(1u << 11)) - /* skip this queue in blk_mq_(un)quiesce_tagset */ - BLK_FEAT_SKIP_TAGSET_QUIESCE = (1u << 13), +/* supports PCI(e) p2p requests */ +#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12)) - /* bounce all highmem pages */ - BLK_FEAT_BOUNCE_HIGH = (1u << 14), +/* skip this queue in blk_mq_(un)quiesce_tagset */ +#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13)) - /* undocumented magic for bcache */ - BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE = (1u << 15), -}; +/* bounce all highmem pages */ +#define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14)) + +/* undocumented magic for bcache */ +#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ + ((__force blk_features_t)(1u << 15)) /* * Flags automatically inherited when stacking limits. 
@@ -342,17 +343,17 @@ enum { BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE) /* internal flags in queue_limits.flags */ -enum { - /* do not send FLUSH/FUA commands despite advertising a write cache */ - BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0), +typedef unsigned int __bitwise blk_flags_t; - /* I/O topology is misaligned */ - BLK_FLAG_MISALIGNED = (1u << 1), -}; +/* do not send FLUSH/FUA commands despite advertising a write cache */ +#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0)) + +/* I/O topology is misaligned */ +#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1)) struct queue_limits { - unsigned int features; - unsigned int flags; + blk_features_t features; + blk_flags_t flags; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; -- cgit v1.2.3-58-ga151 From 73781b3b81e76583708a652c853d54d03dce031d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 16:26:27 +0200 Subject: block: remove disk_update_readahead Mark blk_apply_bdi_limits non-static and open code disk_update_readahead in the only caller. Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240626142637.300624-7-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 8 +------- block/blk.h | 2 ++ block/genhd.c | 2 +- include/linux/blkdev.h | 1 - 4 files changed, 4 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index c2221b7406d4..c692e80bb4f8 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -55,7 +55,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) } EXPORT_SYMBOL(blk_set_stacking_limits); -static void blk_apply_bdi_limits(struct backing_dev_info *bdi, +void blk_apply_bdi_limits(struct backing_dev_info *bdi, struct queue_limits *lim) { /* @@ -434,12 +434,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim) } EXPORT_SYMBOL_GPL(queue_limits_set); -void disk_update_readahead(struct gendisk *disk) -{ - blk_apply_bdi_limits(disk->bdi, &disk->queue->limits); -} -EXPORT_SYMBOL_GPL(disk_update_readahead); - /** * blk_limits_io_min - set minimum request size for a device * @limits: the queue limits diff --git a/block/blk.h b/block/blk.h index d0a986d8ee50..95e5a4f81693 100644 --- a/block/blk.h +++ b/block/blk.h @@ -358,6 +358,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio); enum elv_merge blk_try_merge(struct request *rq, struct bio *bio); int blk_set_default_limits(struct queue_limits *lim); +void blk_apply_bdi_limits(struct backing_dev_info *bdi, + struct queue_limits *lim); int blk_dev_init(void); /* diff --git a/block/genhd.c b/block/genhd.c index 8f1f3c6b4d67..4dc95a463505 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -524,7 +524,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor); } - disk_update_readahead(disk); + blk_apply_bdi_limits(disk->bdi, &disk->queue->limits); disk_add_events(disk); set_bit(GD_ADDED, &disk->state); return 0; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b37826b350a2..6b88382012e9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -973,7 +973,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q) /* * Access functions for manipulating queue properties */ -void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void 
blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); -- cgit v1.2.3-58-ga151 From abfc9d810926dfbf5645c7755c8d5ab96273f27d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 16:26:28 +0200 Subject: block: remove the fallback case in queue_dma_alignment Now that all updates go through blk_validate_limits, the default of 511 is set at initialization time. Also remove the unused NULL check, as calling this helper on a NULL queue can't happen (and doesn't make much sense to start with). Signed-off-by: Christoph Hellwig Reviewed-by: John Garry Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240626142637.300624-8-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6b88382012e9..94fcbc912312 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1394,7 +1394,7 @@ static inline bool bdev_is_zone_start(struct block_device *bdev, static inline int queue_dma_alignment(const struct request_queue *q) { - return q ? q->limits.dma_alignment : 511; + return q->limits.dma_alignment; } static inline unsigned int -- cgit v1.2.3-58-ga151 From e94b45d08b5d1c230c0f59c3eed758d28658851e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 16:26:29 +0200 Subject: block: move dma_pad_mask into queue_limits dma_pad_mask is a queue limit by any way of looking at it, so move it there and set it through the atomic queue limits APIs. Add a little helper that takes both the alignment and the pad into account to simplify the code that is touched a bit. Note that there never was any need for the > check in blk_queue_update_dma_pad; this was probably just copied and pasted from blk_queue_update_dma_alignment.
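A sketch of the resulting pattern (the foo_* names and FOO_DMA_PAD_SZ are hypothetical): a driver sets the pad through the queue_limits it is handed in ->device_configure, and consumers derive the combined mask from the helper added below:

    static int foo_device_configure(struct scsi_device *sdev,
                                    struct queue_limits *lim)
    {
        lim->dma_pad_mask = FOO_DMA_PAD_SZ - 1; /* hypothetical pad size */
        return 0;
    }

    /* consumer side: combined alignment and pad for mapping checks */
    unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);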
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240626142637.300624-9-hch@lst.de Signed-off-by: Jens Axboe --- block/bio-integrity.c | 2 +- block/blk-map.c | 2 +- block/blk-settings.c | 17 ----------------- drivers/ata/libata-scsi.c | 3 +-- drivers/ata/pata_macio.c | 4 ++-- drivers/scsi/scsi_lib.c | 4 ++-- drivers/ufs/core/ufshcd.c | 10 ++++++---- include/linux/blkdev.h | 12 ++++++++---- 8 files changed, 21 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 173ffd4d6237..356ca0d3d62f 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -312,7 +312,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes, u32 seed) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); - unsigned int align = q->dma_pad_mask | queue_dma_alignment(q); + unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits); struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages; struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec; unsigned int direction, nr_bvecs; diff --git a/block/blk-map.c b/block/blk-map.c index 71210cdb3442..bce144091128 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -634,7 +634,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, const struct iov_iter *iter, gfp_t gfp_mask) { bool copy = false, map_bvec = false; - unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); + unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits); struct bio *bio = NULL; struct iov_iter i; int ret = -EINVAL; diff --git a/block/blk-settings.c b/block/blk-settings.c index c692e80bb4f8..2e559cf97cc8 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -768,23 +768,6 @@ incompatible: } EXPORT_SYMBOL_GPL(queue_limits_stack_integrity); -/** - * blk_queue_update_dma_pad - update pad mask - * @q: the request queue for the device - * @mask: pad mask - * - * Update dma pad mask. - * - * Appending pad buffer to a request modifies the last entry of a - * scatter list such that it includes the pad buffer. 
- **/ -void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) -{ - if (mask > q->dma_pad_mask) - q->dma_pad_mask = mask; -} -EXPORT_SYMBOL(blk_queue_update_dma_pad); - /** * blk_set_queue_depth - tell the block layer about the device queue depth * @q: the request queue for the device diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index cdf29b178ddc..682971c4cbe4 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1024,7 +1024,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain); int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, struct ata_device *dev) { - struct request_queue *q = sdev->request_queue; int depth = 1; if (!ata_id_has_unload(dev->id)) @@ -1038,7 +1037,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, sdev->sector_size = ATA_SECT_SIZE; /* set DMA padding */ - blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); + lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1; /* make room for appending the drain */ lim->max_segments--; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 3cb455a32d92..1b85e8bf4ef9 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -816,7 +816,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev, /* OHare has issues with non cache aligned DMA on some chipsets */ if (priv->kind == controller_ohare) { lim->dma_alignment = 31; - blk_queue_update_dma_pad(sdev->request_queue, 31); + lim->dma_pad_mask = 31; /* Tell the world about it */ ata_dev_info(dev, "OHare alignment limits applied\n"); @@ -831,7 +831,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev, if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) { /* Allright these are bad, apply restrictions */ lim->dma_alignment = 15; - blk_queue_update_dma_pad(sdev->request_queue, 15); + lim->dma_pad_mask = 15; /* We enable MWI and hack cache line size directly here, this * is specific to this chipset and not normal values, we happen diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index e2f7bfb2b9e4..3958a6d14bf4 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1139,9 +1139,9 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) */ count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); - if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { unsigned int pad_len = - (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; last_sg->length += pad_len; cmd->extra_len += pad_len; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0cf07194bbe8..b7957a431589 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -5193,17 +5193,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) } /** - * ufshcd_slave_configure - adjust SCSI device configurations + * ufshcd_device_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device + * @lim: queue limits * * Return: 0 (success). */ -static int ufshcd_slave_configure(struct scsi_device *sdev) +static int ufshcd_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; - blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1; /* * Block runtime-pm until all consumers are added. 
@@ -8907,7 +8909,7 @@ static const struct scsi_host_template ufshcd_driver_template = { .queuecommand = ufshcd_queuecommand, .mq_poll = ufshcd_poll, .slave_alloc = ufshcd_slave_alloc, - .slave_configure = ufshcd_slave_configure, + .device_configure = ufshcd_device_configure, .slave_destroy = ufshcd_slave_destroy, .change_queue_depth = ufshcd_change_queue_depth, .eh_abort_handler = ufshcd_abort, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 94fcbc912312..a53e3434e1a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -401,6 +401,7 @@ struct queue_limits { * due to possible offsets. */ unsigned int dma_alignment; + unsigned int dma_pad_mask; struct blk_integrity integrity; }; @@ -509,8 +510,6 @@ struct request_queue { */ int id; - unsigned int dma_pad_mask; - /* * queue settings */ @@ -981,7 +980,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, sector_t offset, const char *pfx); -extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); struct blk_independent_access_ranges * @@ -1433,10 +1431,16 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev, bdev_logical_block_size(bdev) - 1); } +static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim) +{ + return lim->dma_alignment | lim->dma_pad_mask; +} + static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, unsigned int len) { - unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); + return !(addr & alignment) && !(len & alignment); } -- cgit v1.2.3-58-ga151 From 69b6517687a4b1fb250bd8c9c193a0a304c8ba17 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 26 Jun 2024 19:01:58 -0600 Subject: block: use the right type for stub rq_integrity_vec() For !CONFIG_BLK_DEV_INTEGRITY, rq_integrity_vec() wasn't updated properly. Fix it up. Fixes: cf546dd289e0 ("block: change rq_integrity_vec to respect the iterator") Signed-off-by: Jens Axboe --- include/linux/blk-integrity.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 0fdd62e6d4b0..c58634924782 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -145,7 +145,7 @@ static inline int blk_integrity_rq(struct request *rq) return 0; } -static inline struct bio_vec *rq_integrity_vec(struct request *rq) +static inline struct bio_vec rq_integrity_vec(struct request *rq) { /* the optimizer will remove all calls to this function */ return (struct bio_vec){ }; -- cgit v1.2.3-58-ga151 From 63db4a1f795a19e4e12f036a12a5f61c48b03e5c Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 27 Jun 2024 16:07:35 +0000 Subject: block: Delete blk_queue_flag_test_and_set() Since commit 70200574cc22 ("block: remove QUEUE_FLAG_DISCARD"), blk_queue_flag_test_and_set() has not been used, so delete it. 
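A minimal sketch of what any future caller would open-code instead, assuming only the existing atomic bitops on queue_flags (the helper name and the use of QUEUE_FLAG_NOMERGES are purely illustrative, not part of this patch):

    #include <linux/bitops.h>
    #include <linux/blkdev.h>

    /* the removed helper was a thin wrapper around this bitop */
    static bool example_flag_test_and_set(struct request_queue *q)
    {
            /* returns true if the flag was already set */
            return test_and_set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
    }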
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240627160735.842189-1-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-core.c | 14 -------------- include/linux/blkdev.h | 1 - 2 files changed, 15 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 6fc1a5a1980d..71b7622c523a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -94,20 +94,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) } EXPORT_SYMBOL(blk_queue_flag_clear); -/** - * blk_queue_flag_test_and_set - atomically test and set a queue flag - * @flag: flag to be set - * @q: request queue - * - * Returns the previous value of @flag - 0 if the flag was not set and 1 if - * the flag was already set. - */ -bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) -{ - return test_and_set_bit(flag, &q->queue_flags); -} -EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set); - #define REQ_OP_NAME(name) [REQ_OP_##name] = #name static const char *const blk_op_name[] = { REQ_OP_NAME(READ), diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a53e3434e1a2..53c41ef4222c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -609,7 +609,6 @@ struct request_queue { void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); -bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) -- cgit v1.2.3-58-ga151 From d19b46340b3c0ea66bef0f6c58876cc085813ba8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 26 Jun 2024 06:59:38 +0200 Subject: block: remove bio_integrity_process Move the bvec iteration into the generate/verify helpers to avoid a bit of argument passing churn. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K.
Petersen Link: https://lore.kernel.org/r/20240626045950.189758-6-hch@lst.de Signed-off-by: Jens Axboe --- block/bio-integrity.c | 47 +-------------------- block/blk.h | 7 +--- block/t10-pi.c | 97 +++++++++++++++++++++++++++++++++---------- include/linux/blk-integrity.h | 9 ---- 4 files changed, 79 insertions(+), 81 deletions(-) (limited to 'include') diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 2f8a46170034..ad296849aa2a 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -374,44 +374,6 @@ free_bvec: } EXPORT_SYMBOL_GPL(bio_integrity_map_user); -/** - * bio_integrity_process - Process integrity metadata for a bio - * @bio: bio to generate/verify integrity metadata for - * @proc_iter: iterator to process - */ -static blk_status_t bio_integrity_process(struct bio *bio, - struct bvec_iter *proc_iter) -{ - struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); - struct blk_integrity_iter iter; - struct bvec_iter bviter; - struct bio_vec bv; - struct bio_integrity_payload *bip = bio_integrity(bio); - blk_status_t ret = BLK_STS_OK; - - iter.disk_name = bio->bi_bdev->bd_disk->disk_name; - iter.interval = 1 << bi->interval_exp; - iter.seed = proc_iter->bi_sector; - iter.prot_buf = bvec_virt(bip->bip_vec); - - __bio_for_each_segment(bv, bio, bviter, *proc_iter) { - void *kaddr = bvec_kmap_local(&bv); - - iter.data_buf = kaddr; - iter.data_size = bv.bv_len; - if (bio_data_dir(bio) == WRITE) - blk_integrity_generate(&iter, bi); - else - ret = blk_integrity_verify(&iter, bi); - kunmap_local(kaddr); - - if (ret) - break; - - } - return ret; -} - /** * bio_integrity_prep - Prepare bio for integrity I/O * @bio: bio to prepare @@ -490,7 +452,7 @@ bool bio_integrity_prep(struct bio *bio) /* Auto-generate integrity metadata if this is a write */ if (bio_data_dir(bio) == WRITE) - bio_integrity_process(bio, &bio->bi_iter); + blk_integrity_generate(bio); else bip->bio_iter = bio->bi_iter; return true; @@ -516,12 +478,7 @@ static void bio_integrity_verify_fn(struct work_struct *work) container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; - /* - * At the moment verify is called bio's iterator was advanced - * during split and completion, we need to rewind iterator to - * it's original position. 
- */ - bio->bi_status = bio_integrity_process(bio, &bip->bio_iter); + blk_integrity_verify(bio); bio_integrity_free(bio); bio_endio(bio); } diff --git a/block/blk.h b/block/blk.h index 95e5a4f81693..47dadd2439b1 100644 --- a/block/blk.h +++ b/block/blk.h @@ -9,7 +9,6 @@ #include #include "blk-crypto-internal.h" -struct blk_integrity_iter; struct elevator_type; /* Max future timer expiry for timeouts */ @@ -681,10 +680,8 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops, struct file *bdev_file); int bdev_permission(dev_t dev, blk_mode_t mode, void *holder); -void blk_integrity_generate(struct blk_integrity_iter *iter, - struct blk_integrity *bi); -blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter, - struct blk_integrity *bi); +void blk_integrity_generate(struct bio *bio); +void blk_integrity_verify(struct bio *bio); void blk_integrity_prepare(struct request *rq); void blk_integrity_complete(struct request *rq, unsigned int nr_bytes); diff --git a/block/t10-pi.c b/block/t10-pi.c index cd7fa60d63ff..425e2836f3e1 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -13,6 +13,15 @@ #include #include "blk.h" +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len, unsigned char csum_type) { @@ -364,33 +373,77 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) } } -void blk_integrity_generate(struct blk_integrity_iter *iter, - struct blk_integrity *bi) +void blk_integrity_generate(struct bio *bio) { - switch (bi->csum_type) { - case BLK_INTEGRITY_CSUM_CRC64: - ext_pi_crc64_generate(iter, bi); - break; - case BLK_INTEGRITY_CSUM_CRC: - case BLK_INTEGRITY_CSUM_IP: - t10_pi_generate(iter, bi); - break; - default: - break; + struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); + struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity_iter iter; + struct bvec_iter bviter; + struct bio_vec bv; + + iter.disk_name = bio->bi_bdev->bd_disk->disk_name; + iter.interval = 1 << bi->interval_exp; + iter.seed = bio->bi_iter.bi_sector; + iter.prot_buf = bvec_virt(bip->bip_vec); + bio_for_each_segment(bv, bio, bviter) { + void *kaddr = bvec_kmap_local(&bv); + + iter.data_buf = kaddr; + iter.data_size = bv.bv_len; + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_CRC64: + ext_pi_crc64_generate(&iter, bi); + break; + case BLK_INTEGRITY_CSUM_CRC: + case BLK_INTEGRITY_CSUM_IP: + t10_pi_generate(&iter, bi); + break; + default: + break; + } + kunmap_local(kaddr); } } -blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter, - struct blk_integrity *bi) +void blk_integrity_verify(struct bio *bio) { - switch (bi->csum_type) { - case BLK_INTEGRITY_CSUM_CRC64: - return ext_pi_crc64_verify(iter, bi); - case BLK_INTEGRITY_CSUM_CRC: - case BLK_INTEGRITY_CSUM_IP: - return t10_pi_verify(iter, bi); - default: - return BLK_STS_OK; + struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); + struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity_iter iter; + struct bvec_iter bviter; + struct bio_vec bv; + + /* + * At the moment verify is called bi_iter has been advanced during split + * and completion, so use the copy created during submission here. 
+ */ + iter.disk_name = bio->bi_bdev->bd_disk->disk_name; + iter.interval = 1 << bi->interval_exp; + iter.seed = bip->bio_iter.bi_sector; + iter.prot_buf = bvec_virt(bip->bip_vec); + __bio_for_each_segment(bv, bio, bviter, bip->bio_iter) { + void *kaddr = bvec_kmap_local(&bv); + blk_status_t ret = BLK_STS_OK; + + iter.data_buf = kaddr; + iter.data_size = bv.bv_len; + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_CRC64: + ret = ext_pi_crc64_verify(&iter, bi); + break; + case BLK_INTEGRITY_CSUM_CRC: + case BLK_INTEGRITY_CSUM_IP: + ret = t10_pi_verify(&iter, bi); + break; + default: + break; + } + kunmap_local(kaddr); + + if (ret) { + bio->bi_status = ret; + return; + } } } diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index c58634924782..804f856ed3e5 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -14,15 +14,6 @@ enum blk_integrity_flags { BLK_INTEGRITY_STACKED = 1 << 4, }; -struct blk_integrity_iter { - void *prot_buf; - void *data_buf; - sector_t seed; - unsigned int data_size; - unsigned short interval; - const char *disk_name; -}; - const char *blk_integrity_profile_name(struct blk_integrity *bi); bool queue_limits_stack_integrity(struct queue_limits *t, struct queue_limits *b); -- cgit v1.2.3-58-ga151 From aa6ff4eb7c10d9a6532db3ea9e78124bf14e70ae Mon Sep 17 00:00:00 2001 From: Dongliang Cui Date: Fri, 14 Jun 2024 15:49:36 +0800 Subject: block: Add ioprio to block_rq tracepoint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes we need to track the processing order of requests with ioprio set. So the ioprio of a request can be useful information. Example: block_rq_insert: 8,0 RA 16384 () 6500840 + 32 be,0,6 [binder:815_3] block_rq_issue: 8,0 RA 16384 () 6500840 + 32 be,0,6 [binder:815_3] block_rq_complete: 8,0 RA () 6500840 + 32 be,0,6 [0] Signed-off-by: Dongliang Cui Reviewed-by: Bart Van Assche Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20240614074936.113659-1-dongliang.cui@unisoc.com Signed-off-by: Jens Axboe --- include/trace/events/block.h | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 0e128ad51460..1527d5d45e01 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -9,9 +9,17 @@ #include #include #include +#include #define RWBS_LEN 8 +#define IOPRIO_CLASS_STRINGS \ { IOPRIO_CLASS_NONE, "none" }, \ { IOPRIO_CLASS_RT, "rt" }, \ { IOPRIO_CLASS_BE, "be" }, \ { IOPRIO_CLASS_IDLE, "idle" }, \ { IOPRIO_CLASS_INVALID, "invalid"} + #ifdef CONFIG_BUFFER_HEAD DECLARE_EVENT_CLASS(block_buffer, @@ -82,6 +90,7 @@ TRACE_EVENT(block_rq_requeue, __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) + __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), @@ -90,16 +99,20 @@ TRACE_EVENT(block_rq_requeue, __entry->dev = rq->q->disk ?
disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); + __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), - TP_printk("%d,%d %s (%s) %llu + %u [%d]", + TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, 0) + (unsigned long long)__entry->sector, __entry->nr_sector, + __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), + IOPRIO_CLASS_STRINGS), + IOPRIO_PRIO_HINT(__entry->ioprio), + IOPRIO_PRIO_LEVEL(__entry->ioprio), 0) ); DECLARE_EVENT_CLASS(block_rq_completion, @@ -113,6 +126,7 @@ DECLARE_EVENT_CLASS(block_rq_completion, __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( int , error ) + __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), @@ -122,16 +136,20 @@ DECLARE_EVENT_CLASS(block_rq_completion, __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; __entry->error = blk_status_to_errno(error); + __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), - TP_printk("%d,%d %s (%s) %llu + %u [%d]", + TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->error) + (unsigned long long)__entry->sector, __entry->nr_sector, + __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), + IOPRIO_CLASS_STRINGS), + IOPRIO_PRIO_HINT(__entry->ioprio), + IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error) ); /** @@ -180,6 +198,7 @@ DECLARE_EVENT_CLASS(block_rq, __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) + __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, 1 ) @@ -190,17 +209,21 @@ DECLARE_EVENT_CLASS(block_rq, __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); + __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), - TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", + TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __entry->bytes, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + (unsigned long long)__entry->sector, __entry->nr_sector, + __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), + IOPRIO_CLASS_STRINGS), + IOPRIO_PRIO_HINT(__entry->ioprio), + IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm) ); /** -- cgit v1.2.3-58-ga151 From 5476394aa9f27d670dd2bac426fdb6ac12b12cb3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Jun 2024 13:14:01 +0200 Subject: block: simplify queue_logical_block_size queue_logical_block_size is never called with a 0 queue, and the logical_block_size field in queue_limits is always initialized for a live queue. 
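A minimal usage sketch under that assumption (the helper name is hypothetical, not from the patch): since a live queue always carries a validated, power-of-two logical_block_size, callers can use the value directly without NULL or zero checks:

    #include <linux/blkdev.h>
    #include <linux/log2.h>

    /* convert a byte count into logical blocks on a live queue */
    static u64 example_bytes_to_blocks(struct request_queue *q, u64 bytes)
    {
            /* logical_block_size is guaranteed non-zero and a power of two */
            return bytes >> ilog2(queue_logical_block_size(q));
    }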
Signed-off-by: Christoph Hellwig Reviewed-by: John Garry Link: https://lore.kernel.org/r/20240627111407.476276-2-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 53c41ef4222c..4d0d4b83bc74 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1227,12 +1227,7 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev) static inline unsigned queue_logical_block_size(const struct request_queue *q) { - int retval = 512; - - if (q && q->limits.logical_block_size) - retval = q->limits.logical_block_size; - - return retval; + return q->limits.logical_block_size; } static inline unsigned int bdev_logical_block_size(struct block_device *bdev) -- cgit v1.2.3-58-ga151 From 162e06871e6dcde861ef608e0c00a8b6a2d35d43 Mon Sep 17 00:00:00 2001 From: Anuj Gupta Date: Thu, 4 Jul 2024 11:45:15 +0530 Subject: block: t10-pi: Return correct ref tag when queue has no integrity profile Commit c6e56cf6b2e7 ("block: move integrity information into queue_limits") changed the ref tag calculation logic. It would break if there is no integrity profile. This in turn causes read/write failures for such cases. Fixes: c6e56cf6b2e7 ("block: move integrity information into queue_limits") Signed-off-by: Anuj Gupta Link: https://lore.kernel.org/r/20240704061515.282343-1-joshi.k@samsung.com Signed-off-by: Jens Axboe --- include/linux/t10-pi.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 1773610010eb..2c59fe3efcd4 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -39,8 +39,11 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { - unsigned int shift = rq->q->limits.integrity.interval_exp; + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + rq->q->limits.integrity.interval_exp) + shift = rq->q->limits.integrity.interval_exp; return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } @@ -61,8 +64,11 @@ static inline u64 lower_48_bits(u64 n) static inline u64 ext_pi_ref_tag(struct request *rq) { - unsigned int shift = rq->q->limits.integrity.interval_exp; + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + rq->q->limits.integrity.interval_exp) + shift = rq->q->limits.integrity.interval_exp; return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); } -- cgit v1.2.3-58-ga151 From 81e7706345f06e1e97a092f59697b7e20a0ee868 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Thu, 4 Jul 2024 14:28:14 +0900 Subject: dm: handle REQ_OP_ZONE_RESET_ALL This commit implements processing of the REQ_OP_ZONE_RESET_ALL operation for zoned mapped devices. Given that this operation always has a BIO sector of 0 and a 0 size, processing through the regular BIO __split_and_process_bio() function does not work because this function would always select the first target. Instead, handling of this operation is implemented using the function __send_zone_reset_all(). Similarly to the __send_empty_flush() function, the new __send_zone_reset_all() function manually goes through all targets of a mapped device table doing the following: 1) If the target can natively support REQ_OP_ZONE_RESET_ALL, __send_duplicate_bios() is used to forward the reset all operation to the target. 
This case is handled with the __send_zone_reset_all_native() function. 2) For other targets, the function __send_zone_reset_all_emulated() is executed to emulate the execution of REQ_OP_ZONE_RESET_ALL using regular REQ_OP_ZONE_RESET operations. Targets that can natively support REQ_OP_ZONE_RESET_ALL are identified using the new target field zone_reset_all_supported. This boolean is set to true for targets that have reliable zone limits, that is, targets that map all sequential write required zones of their zoned device(s). Setting this field is handled in dm_set_zones_restrictions() and device_get_zone_resource_limits(). For targets with unreliable zone limits, REQ_OP_ZONE_RESET_ALL must be emulated (case 2 above). This is implemented with __send_zone_reset_all_emulated() and is similar to the block layer function blkdev_zone_reset_all_emulated(): first a report zones is done for the zones of the target to identify zones that need reset, that is, any sequential write required zone that is not already empty. This is done using a bitmap and the function dm_zone_get_reset_bitmap() which sets to 1 the bit corresponding to a zone that needs reset. Next, this zone bitmap is inspected and a clone BIO is modified to use the REQ_OP_ZONE_RESET operation and issued for any zone with its bit set in the zone bitmap. This implementation is more efficient than what the block layer does with blkdev_zone_reset_all_emulated(), which is always used for DM zoned devices currently: as we can natively use REQ_OP_ZONE_RESET_ALL on targets mapping all sequential write required zones, resetting all zones of a zoned mapped device can be much faster compared to always emulating this operation using regular per-zone reset. In the worst case, this implementation is as efficient as the block layer emulation. This reduction in the time it takes to reset all zones of a zoned mapped device depends directly on the mapped device targets mapping (reliable zone limits or not). Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240704052816.623865-4-dlemoal@kernel.org Signed-off-by: Jens Axboe --- drivers/md/dm-zone.c | 50 +++++++++++++++- drivers/md/dm.c | 135 +++++++++++++++++++++++++++++++++++++++++- drivers/md/dm.h | 3 + include/linux/device-mapper.h | 7 +++ 4 files changed, 190 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 4d37e53b50ee..c0d41c36e06e 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -292,10 +292,12 @@ static int device_get_zone_resource_limits(struct dm_target *ti, /* * If the target does not map all sequential zones, the limits - * will not be reliable. + * will not be reliable and we cannot use REQ_OP_ZONE_RESET_ALL. */ - if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) + if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) { zlim->reliable_limits = false; + ti->zone_reset_all_supported = false; + } /* * If the target maps less sequential zones than the limit values, then @@ -353,6 +355,14 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); + /* + * Assume that the target can accept REQ_OP_ZONE_RESET_ALL. + * device_get_zone_resource_limits() may adjust this if one of + * the device used by the target does not have all its + * sequential write required zones mapped.
+ */ + ti->zone_reset_all_supported = true; + if (!ti->type->iterate_devices || ti->type->iterate_devices(ti, device_get_zone_resource_limits, &zlim)) { @@ -420,3 +430,39 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone) return; } + +static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx, + void *data) +{ + /* + * For an all-zones reset, ignore conventional, empty, read-only + * and offline zones. + */ + switch (zone->cond) { + case BLK_ZONE_COND_NOT_WP: + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_READONLY: + case BLK_ZONE_COND_OFFLINE: + return 0; + default: + set_bit(idx, (unsigned long *)data); + return 0; + } +} + +int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, + sector_t sector, unsigned int nr_zones, + unsigned long *need_reset) +{ + int ret; + + ret = dm_blk_do_report_zones(md, t, sector, nr_zones, + dm_zone_need_reset_cb, need_reset); + if (ret != nr_zones) { + DMERR("Get %s zone reset bitmap failed\n", + md->disk->disk_name); + return -EIO; + } + + return 0; +} diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0d80caccbd9e..4b1b69e576a5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1606,6 +1606,7 @@ static bool is_abnormal_io(struct bio *bio) case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: + case REQ_OP_ZONE_RESET_ALL: return true; default: return false; @@ -1774,6 +1775,119 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) { return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0); } + +static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, + struct dm_target *ti) +{ + struct bio_list blist = BIO_EMPTY_LIST; + struct mapped_device *md = ci->io->md; + unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; + unsigned long *need_reset; + unsigned int i, nr_zones, nr_reset; + unsigned int num_bios = 0; + blk_status_t sts = BLK_STS_OK; + sector_t sector = ti->begin; + struct bio *clone; + int ret; + + nr_zones = ti->len >> ilog2(zone_sectors); + need_reset = bitmap_zalloc(nr_zones, GFP_NOIO); + if (!need_reset) + return BLK_STS_RESOURCE; + + ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, + nr_zones, need_reset); + if (ret) { + sts = BLK_STS_IOERR; + goto free_bitmap; + } + + /* If we have no zone to reset, we are done. */ + nr_reset = bitmap_weight(need_reset, nr_zones); + if (!nr_reset) + goto free_bitmap; + + atomic_add(nr_zones, &ci->io->io_count); + + for (i = 0; i < nr_zones; i++) { + + if (!test_bit(i, need_reset)) { + sector += zone_sectors; + continue; + } + + if (bio_list_empty(&blist)) { + /* This may take a while, so be nice to others */ + if (num_bios) + cond_resched(); + + /* + * We may need to reset thousands of zones, so let's + * not go crazy with the clone allocation. + */ + alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32), + NULL, GFP_NOIO); + } + + /* Get a clone and change it to a regular reset operation. 
*/ + clone = bio_list_pop(&blist); + clone->bi_opf &= ~REQ_OP_MASK; + clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; + clone->bi_iter.bi_sector = sector; + clone->bi_iter.bi_size = 0; + __map_bio(clone); + + sector += zone_sectors; + num_bios++; + nr_reset--; + } + + WARN_ON_ONCE(!bio_list_empty(&blist)); + atomic_sub(nr_zones - num_bios, &ci->io->io_count); + ci->sector_count = 0; + +free_bitmap: + bitmap_free(need_reset); + + return sts; +} + +static void __send_zone_reset_all_native(struct clone_info *ci, + struct dm_target *ti) +{ + unsigned int bios; + + atomic_add(1, &ci->io->io_count); + bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO); + atomic_sub(1 - bios, &ci->io->io_count); + + ci->sector_count = 0; +} + +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + struct dm_table *t = ci->map; + blk_status_t sts = BLK_STS_OK; + + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (ti->zone_reset_all_supported) { + __send_zone_reset_all_native(ci, ti); + continue; + } + + sts = __send_zone_reset_all_emulated(ci, ti); + if (sts != BLK_STS_OK) + break; + } + + /* Release the reference that alloc_io() took for submission. */ + atomic_sub(1, &ci->io->io_count); + + return sts; +} + #else static inline bool dm_zone_bio_needs_split(struct mapped_device *md, struct bio *bio) @@ -1784,6 +1898,10 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) { return false; } +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + return BLK_STS_NOTSUPP; +} #endif /* @@ -1797,9 +1915,14 @@ static void dm_split_and_process_bio(struct mapped_device *md, blk_status_t error = BLK_STS_OK; bool is_abnormal, need_split; - need_split = is_abnormal = is_abnormal_io(bio); - if (static_branch_unlikely(&zoned_enabled)) - need_split = is_abnormal || dm_zone_bio_needs_split(md, bio); + is_abnormal = is_abnormal_io(bio); + if (static_branch_unlikely(&zoned_enabled)) { + /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. 
*/ + need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) && + (is_abnormal || dm_zone_bio_needs_split(md, bio)); + } else { + need_split = is_abnormal; + } if (unlikely(need_split)) { /* @@ -1840,6 +1963,12 @@ static void dm_split_and_process_bio(struct mapped_device *md, goto out; } + if (static_branch_unlikely(&zoned_enabled) && + (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { + error = __send_zone_reset_all(&ci); + goto out; + } + error = __split_and_process_bio(&ci); if (error || !ci.sector_count) goto out; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index c984ecb64b1e..cc466ad5cb1d 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -110,6 +110,9 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); bool dm_is_zone_write(struct mapped_device *md, struct bio *bio); int dm_zone_map_bio(struct dm_target_io *io); +int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, + sector_t sector, unsigned int nr_zones, + unsigned long *need_reset); #else #define dm_blk_report_zones NULL static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 82b2195efaca..15d28164bbbd 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -357,6 +357,13 @@ struct dm_target { */ bool discards_supported:1; + /* + * Automatically set by dm-core if this target supports + * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated + * using REQ_OP_ZONE_RESET. Target drivers must not set this manually. + */ + bool zone_reset_all_supported:1; + /* * Set if this target requires that discards be split on * 'max_discard_sectors' boundaries. -- cgit v1.2.3-58-ga151 From f2a7bea23710fceb99dac6da4ef82c3cc8932f7f Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Thu, 4 Jul 2024 14:28:15 +0900 Subject: block: Remove REQ_OP_ZONE_RESET_ALL emulation Now that device mapper can handle resetting all zones of a mapped zoned device using REQ_OP_ZONE_RESET_ALL, all zoned block device drivers support this operation. With this, the request queue feature BLK_FEAT_ZONE_RESETALL is not necessary and the emulation code in blk-zone.c can be removed. Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. 
Petersen Link: https://lore.kernel.org/r/20240704052816.623865-5-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-core.c | 5 +-- block/blk-zoned.c | 76 ++---------------------------------------- drivers/block/null_blk/zoned.c | 2 +- drivers/block/ublk_drv.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/nvme/host/zns.c | 2 +- drivers/scsi/sd_zbc.c | 2 +- include/linux/blkdev.h | 5 --- 8 files changed, 9 insertions(+), 87 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 71b7622c523a..02bceeb36f2c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -830,11 +830,8 @@ void submit_bio_noacct(struct bio *bio) case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_FINISH: - if (!bdev_is_zoned(bio->bi_bdev)) - goto not_supported; - break; case REQ_OP_ZONE_RESET_ALL: - if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q)) + if (!bdev_is_zoned(bio->bi_bdev)) goto not_supported; break; case REQ_OP_DRV_IN: diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 601c21a224c9..0007ef9cd5ca 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -157,70 +157,6 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node, GFP_NOIO, node); } -static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx, - void *data) -{ - /* - * For an all-zones reset, ignore conventional, empty, read-only - * and offline zones. - */ - switch (zone->cond) { - case BLK_ZONE_COND_NOT_WP: - case BLK_ZONE_COND_EMPTY: - case BLK_ZONE_COND_READONLY: - case BLK_ZONE_COND_OFFLINE: - return 0; - default: - set_bit(idx, (unsigned long *)data); - return 0; - } -} - -static int blkdev_zone_reset_all_emulated(struct block_device *bdev) -{ - struct gendisk *disk = bdev->bd_disk; - sector_t capacity = bdev_nr_sectors(bdev); - sector_t zone_sectors = bdev_zone_sectors(bdev); - unsigned long *need_reset; - struct bio *bio = NULL; - sector_t sector = 0; - int ret; - - need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones); - if (!need_reset) - return -ENOMEM; - - ret = disk->fops->report_zones(disk, 0, disk->nr_zones, - blk_zone_need_reset_cb, need_reset); - if (ret < 0) - goto out_free_need_reset; - - ret = 0; - while (sector < capacity) { - if (!test_bit(disk_zone_no(disk, sector), need_reset)) { - sector += zone_sectors; - continue; - } - - bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC, - GFP_KERNEL); - bio->bi_iter.bi_sector = sector; - sector += zone_sectors; - - /* This may take a while, so be nice to others */ - cond_resched(); - } - - if (bio) { - ret = submit_bio_wait(bio); - bio_put(bio); - } - -out_free_need_reset: - kfree(need_reset); - return ret; -} - static int blkdev_zone_reset_all(struct block_device *bdev) { struct bio bio; @@ -247,7 +183,6 @@ static int blkdev_zone_reset_all(struct block_device *bdev) int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sector, sector_t nr_sectors) { - struct request_queue *q = bdev_get_queue(bdev); sector_t zone_sectors = bdev_zone_sectors(bdev); sector_t capacity = bdev_nr_sectors(bdev); sector_t end_sector = sector + nr_sectors; @@ -275,16 +210,11 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, return -EINVAL; /* - * In the case of a zone reset operation over all zones, - * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this - * command. For other devices, we emulate this command behavior by - * identifying the zones needing a reset. 
+ * In the case of a zone reset operation over all zones, use + * REQ_OP_ZONE_RESET_ALL. */ - if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) { - if (!blk_queue_zone_resetall(q)) - return blkdev_zone_reset_all_emulated(bdev); + if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) return blkdev_zone_reset_all(bdev); - } while (sector < end_sector) { bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL); diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 7996e2e7dce2..9bc768b2ca56 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -164,7 +164,7 @@ int null_init_zoned_dev(struct nullb_device *dev, sector += dev->zone_size_sects; } - lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; + lim->features |= BLK_FEAT_ZONED; lim->chunk_sectors = dev->zone_size_sects; lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4fdff13fc23b..d10a2ea07292 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2194,7 +2194,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return -EOPNOTSUPP; - lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; + lim.features |= BLK_FEAT_ZONED; lim.max_active_zones = p->max_active_zones; lim.max_open_zones = p->max_open_zones; lim.max_zone_append_sectors = p->max_zone_append_sectors; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6c64a67ab9c9..84c3efd0c611 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); - lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; + lim->features |= BLK_FEAT_ZONED; virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 99bb89c2495a..9a06f9d98cd6 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -108,7 +108,7 @@ free_data: void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, struct nvme_zone_info *zi) { - lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; + lim->features |= BLK_FEAT_ZONED; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; lim->max_zone_append_sectors = ns->ctrl->max_zone_append; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index f7067afac79c..c8b9654d30f0 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -599,7 +599,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, if (sdkp->device->type != TYPE_ZBC) return 0; - lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL; + lim->features |= BLK_FEAT_ZONED; /* * Per ZBC and ZAC specifications, writes in sequential write required diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d0d4b83bc74..dc250d8070d2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -318,9 +318,6 @@ typedef unsigned int __bitwise blk_features_t; /* is a zoned device */ #define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10)) -/* supports Zone Reset All */ -#define BLK_FEAT_ZONE_RESETALL ((__force blk_features_t)(1u << 11)) - /* supports PCI(e) p2p requests */ #define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12)) @@ -618,8 
+615,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) -#define blk_queue_zone_resetall(q) \ - ((q)->limits.features & BLK_FEAT_ZONE_RESETALL) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME -- cgit v1.2.3-58-ga151 From bf86bcdb40123ee99669ee91b67e023669433a1a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 1 Jul 2024 18:51:20 +0200 Subject: blk-lib: check for kill signal in ioctl BLKZEROOUT Zeroout can access a significant capacity and take longer than the user expected. A user may change their mind about wanting to run that command and attempt to kill the process and do something else with their device. But since the task is uninterruptible, they have to wait for it to finish, which could be many hours. Add a new BLKDEV_ZERO_KILLABLE flag for blkdev_issue_zeroout that checks for a fatal signal at each iteration so the user doesn't have to wait for their regretted operation to complete naturally. Heavily based on an earlier patch from Keith Busch. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240701165219.1571322-11-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-lib.c | 66 ++++++++++++++++++++++++++++++++------------ block/ioctl.c | 2 +- include/linux/blkdev.h | 1 + 3 files changed, 45 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/block/blk-lib.c b/block/blk-lib.c index 2384acd913d2..9f735efa6c94 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -115,24 +115,27 @@ static void __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) { - struct bio *bio = *biop; - while (nr_sects) { unsigned int len = min_t(sector_t, nr_sects, bio_write_zeroes_limit(bdev)); + struct bio *bio; + + if ((flags & BLKDEV_ZERO_KILLABLE) && + fatal_signal_pending(current)) + break; - bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); + bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); bio->bi_iter.bi_sector = sector; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; bio->bi_iter.bi_size = len << SECTOR_SHIFT; + *biop = bio_chain_and_submit(*biop, bio); + nr_sects -= len; sector += len; cond_resched(); } - - *biop = bio; } static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, @@ -145,6 +148,12 @@ static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, blk_start_plug(&plug); __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { + if ((flags & BLKDEV_ZERO_KILLABLE) && + fatal_signal_pending(current)) { + bio_await_chain(bio); + blk_finish_plug(&plug); + return -EINTR; + } ret = submit_bio_wait(bio); bio_put(bio); } @@ -176,29 +185,34 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) static void __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, - struct bio **biop) + struct bio **biop, unsigned int flags) { - struct bio *bio = *biop; - int bi_size = 0; - unsigned int sz; + while (nr_sects) { + unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); + struct bio *bio; - while (nr_sects != 0) { - bio =
blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects), - REQ_OP_WRITE, gfp_mask); + bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); bio->bi_iter.bi_sector = sector; - while (nr_sects != 0) { - sz = min((sector_t) PAGE_SIZE, nr_sects << 9); - bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); - nr_sects -= bi_size >> 9; - sector += bi_size >> 9; - if (bi_size < sz) + if ((flags & BLKDEV_ZERO_KILLABLE) && + fatal_signal_pending(current)) + break; + + do { + unsigned int len, added; + + len = min_t(sector_t, + PAGE_SIZE, nr_sects << SECTOR_SHIFT); + added = bio_add_page(bio, ZERO_PAGE(0), len, 0); + if (added < len) break; - } + nr_sects -= added >> SECTOR_SHIFT; + sector += added >> SECTOR_SHIFT; + } while (nr_sects); + + *biop = bio_chain_and_submit(*biop, bio); cond_resched(); } - - *biop = bio; } static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, @@ -212,8 +226,14 @@ static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, return -EOPNOTSUPP; blk_start_plug(&plug); - __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio); + __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { + if ((flags & BLKDEV_ZERO_KILLABLE) && + fatal_signal_pending(current)) { + bio_await_chain(bio); + blk_finish_plug(&plug); + return -EINTR; + } ret = submit_bio_wait(bio); bio_put(bio); } @@ -255,7 +275,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, - biop); + biop, flags); } return 0; } diff --git a/block/ioctl.c b/block/ioctl.c index d570e1695896..e8e4a4190f18 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -224,7 +224,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, goto fail; err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, - BLKDEV_ZERO_NOUNMAP); + BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE); fail: filemap_invalidate_unlock(bdev->bd_mapping); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index dc250d8070d2..02e04df27282 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1095,6 +1095,7 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ +#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */ extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, -- cgit v1.2.3-58-ga151 From 25f76c3db2f08428b5acd082a52787164001eb6e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 6 Jul 2024 09:52:17 +0200 Subject: block: add a bvec_phys helper Get callers out of poking into bvec internals a bit more. Not a huge win right now, but with the proposed new DMA mapping API we might end up with a lot more of this otherwise. 
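As a usage sketch (hypothetical driver code, not taken from the patch), a per-segment loop handing physical addresses to hardware would now read:

    #include <linux/bio.h>
    #include <linux/bvec.h>

    static void example_map_segments(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bv;

            bio_for_each_segment(bv, bio, iter) {
                    /* was: page_to_phys(bv.bv_page) + bv.bv_offset */
                    phys_addr_t paddr = bvec_phys(&bv);

                    /* hand (paddr, bv.bv_len) to the DMA engine here */
                    pr_debug("segment %pap + %u\n", &paddr, bv.bv_len);
            }
    }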
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20240706075228.2350978-2-hch@lst.de Signed-off-by: Jens Axboe --- arch/m68k/emu/nfblock.c | 2 +- block/bio.c | 2 +- block/blk.h | 4 ++-- include/linux/bvec.h | 14 ++++++++++++++ 4 files changed, 18 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 8eea7ef91151..874fe9588773 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -71,7 +71,7 @@ static void nfhd_submit_bio(struct bio *bio) len = bvec.bv_len; len >>= 9; nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift, - page_to_phys(bvec.bv_page) + bvec.bv_offset); + bvec_phys(&bvec)); sec += len; } bio_endio(bio); diff --git a/block/bio.c b/block/bio.c index e9e809a63c59..a3b1b2266c50 100644 --- a/block/bio.c +++ b/block/bio.c @@ -953,7 +953,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, bool *same_page) { unsigned long mask = queue_segment_boundary(q); - phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; + phys_addr_t addr1 = bvec_phys(bv); phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; if ((addr1 | mask) != (addr2 | mask)) diff --git a/block/blk.h b/block/blk.h index 47dadd2439b1..8e8936e97307 100644 --- a/block/blk.h +++ b/block/blk.h @@ -98,8 +98,8 @@ static inline bool biovec_phys_mergeable(struct request_queue *q, struct bio_vec *vec1, struct bio_vec *vec2) { unsigned long mask = queue_segment_boundary(q); - phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset; - phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset; + phys_addr_t addr1 = bvec_phys(vec1); + phys_addr_t addr2 = bvec_phys(vec2); /* * Merging adjacent physical pages may not work correctly under KMSAN diff --git a/include/linux/bvec.h b/include/linux/bvec.h index bd1e361b351c..f41c7f0ef91e 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -280,4 +280,18 @@ static inline void *bvec_virt(struct bio_vec *bvec) return page_address(bvec->bv_page) + bvec->bv_offset; } +/** + * bvec_phys - return the physical address for a bvec + * @bvec: bvec to return the physical address for + */ +static inline phys_addr_t bvec_phys(const struct bio_vec *bvec) +{ + /* + * Note this open codes page_to_phys because page_to_phys is defined in + * , which we don't want to pull in here. If it ever moves to + * a sensible place we should start using it. + */ + return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset; +} + #endif /* __LINUX_BVEC_H */ -- cgit v1.2.3-58-ga151 From fe3d508ba95bc63adba661355115be340275c0d1 Mon Sep 17 00:00:00 2001 From: John Garry Date: Mon, 8 Jul 2024 09:16:48 +0000 Subject: block: Validate logical block size in blk_validate_limits() Some drivers validate their own logical block size. There is no harm in always doing this, so validate in blk_validate_limits(). This allows us to remove the validation in most of those drivers. Add a comment to blk_validate_block_size() to inform users that self-validation of LBS is usually unnecessary.
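A sketch of the resulting driver-side simplification, assuming the atomic queue limits API and a hypothetical foo_block_size value: the driver no longer calls blk_validate_block_size() itself, because an invalid size now fails at commit time:

    struct queue_limits lim = queue_limits_start_update(q);
    int err;

    lim.logical_block_size = foo_block_size;   /* checked by blk_validate_limits() */
    err = queue_limits_commit_update(q, &lim); /* -EINVAL on a bogus size */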
Signed-off-by: John Garry Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240708091651.177447-3-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- block/blk-settings.c | 4 ++++ include/linux/blkdev.h | 1 + 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 9fa4eed4df06..cd8a8eabc9a5 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -235,6 +235,10 @@ static int blk_validate_limits(struct queue_limits *lim) */ if (!lim->logical_block_size) lim->logical_block_size = SECTOR_SIZE; + else if (blk_validate_block_size(lim->logical_block_size)) { + pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size); + return -EINVAL; + } if (lim->physical_block_size < lim->logical_block_size) lim->physical_block_size = lim->logical_block_size; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 02e04df27282..dce4a6bf7307 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -268,6 +268,7 @@ static inline dev_t disk_devt(struct gendisk *disk) return MKDEV(disk->major, disk->first_minor); } +/* blk_validate_limits() validates bsize, so drivers don't usually need to */ static inline int blk_validate_block_size(unsigned long bsize) { if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) -- cgit v1.2.3-58-ga151