author     Linus Torvalds <torvalds@linux-foundation.org>   2024-01-11 13:58:04 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-01-11 13:58:04 -0800
commit     01d550f0fcc06c7292f79a6f1453aac122d1d2c8 (patch)
tree       58b58ac1cb833af0469b1942774a382633bc6cda /drivers/block
parent     d05e626603d57936314816433db8bf1d34b5a504 (diff)
parent     587371ed783b046f22ba7a5e1cc9a19ae35123b4 (diff)
Merge tag 'for-6.8/block-2024-01-08' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
"Pretty quiet round this time around. This contains:
- NVMe updates via Keith:
- nvme fabrics spec updates (Guixin, Max)
- nvme target updates (Guixin, Evan)
- nvme attribute refactoring (Daniel)
- nvme-fc numa fix (Keith)
- MD updates via Song:
- Fix/Cleanup RCU usage from conf->disks[i].rdev (Yu Kuai)
- Fix raid5 hang issue (Junxiao Bi)
- Add Yu Kuai as Reviewer of the md subsystem
- Remove deprecated flavors (Song Liu)
- raid1 read error check support (Li Nan)
- Better handle events off-by-1 case (Alex Lyakas)
- Efficiency improvements for passthrough (Kundan)
- Support for mapping integrity data directly (Keith)
- Zoned write fix (Damien)
- rnbd fixes (Kees, Santosh, Supriti)
- Default to a sane discard size granularity (Christoph)
- Make the default max transfer size naming less confusing
(Christoph)
- Remove support for deprecated host aware zoned model (Christoph)
- Misc fixes (me, Li, Matthew, Min, Ming, Randy, liyouhong, Daniel,
Bart, Christoph)"
* tag 'for-6.8/block-2024-01-08' of git://git.kernel.dk/linux: (78 commits)
block: Treat sequential write preferred zone type as invalid
block: remove disk_clear_zoned
sd: remove the !ZBC && blk_queue_is_zoned case in sd_read_block_characteristics
drivers/block/xen-blkback/common.h: Fix spelling typo in comment
blk-cgroup: fix rcu lockdep warning in blkg_lookup()
blk-cgroup: don't use removal safe list iterators
block: floor the discard granularity to the physical block size
mtd_blkdevs: use the default discard granularity
bcache: use the default discard granularity
zram: use the default discard granularity
null_blk: use the default discard granularity
nbd: use the default discard granularity
ubd: use the default discard granularity
block: default the discard granularity to sector size
bcache: discard_granularity should not be smaller than a sector
block: remove two comments in bio_split_discard
block: rename and document BLK_DEF_MAX_SECTORS
loop: don't abuse BLK_DEF_MAX_SECTORS
aoe: don't abuse BLK_DEF_MAX_SECTORS
null_blk: don't cap max_hw_sectors to BLK_DEF_MAX_SECTORS
...
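Several of the entries above ("use the default discard granularity", "default the discard granularity to sector size", "floor the discard granularity to the physical block size") boil down to one driver-facing change: drivers stop writing q->limits.discard_granularity themselves and only advertise how much can be discarded. A minimal sketch of the resulting pattern, using a hypothetical driver "foo" that is not part of this series:

#include <linux/blkdev.h>

/*
 * Sketch only, for a hypothetical driver "foo": after this series the
 * block core defaults discard_granularity to the logical block size and
 * floors it to the physical block size, so the driver no longer sets
 *	q->limits.discard_granularity = blksize;
 * and only advertises the maximum discardable range.
 */
static void foo_config_discard(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
}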
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoeblk.c           3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c    16
-rw-r--r--  drivers/block/loop.c                 5
-rw-r--r--  drivers/block/nbd.c                  6
-rw-r--r--  drivers/block/null_blk/main.c       13
-rw-r--r--  drivers/block/null_blk/zoned.c       2
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c       13
-rw-r--r--  drivers/block/rnbd/rnbd-proto.h     14
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c       44
-rw-r--r--  drivers/block/ublk_drv.c             2
-rw-r--r--  drivers/block/virtio_blk.c          78
-rw-r--r--  drivers/block/xen-blkback/common.h   2
-rw-r--r--  drivers/block/zram/zram_drv.c        1
13 files changed, 84 insertions, 115 deletions
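Many of the rnbd hunks below convert error messages from printing PTR_ERR() with %ld (or a plain %d) to the %pe printk format, which decodes an ERR_PTR() value symbolically (for example "-ENOMEM" instead of "-12"). A minimal sketch of the pattern, with struct foo and foo_create() as hypothetical stand-ins for the real rnbd constructors:

#include <linux/err.h>
#include <linux/printk.h>

struct foo;
struct foo *foo_create(void);	/* hypothetical constructor, returns ERR_PTR() on failure */

static int foo_probe(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f)) {
		/* old style: numeric errno via PTR_ERR() and %ld */
		pr_err("foo_create failed, err: %ld\n", PTR_ERR(f));
		/* new style: %pe prints the error pointer symbolically, e.g. "-ENOMEM" */
		pr_err("foo_create failed, err: %pe\n", f);
		return PTR_ERR(f);
	}
	return 0;
}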
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index cf6883756155..d2dbf8aaccb5 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -383,7 +383,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->flags & DEVFL_TKILL);
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
-	blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
+	/* random number picked from the history block max_sectors cap */
+	blk_queue_max_hw_sectors(gd->queue, 2560u);
 	blk_queue_io_opt(gd->queue, SZ_2M);
 	d->bufpool = mp;
 	d->blkq = gd->queue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 64b3a1c76f03..742b2908ff68 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -838,8 +838,8 @@ static bool plausible_request_size(int size)
 }
 /* clear the bit corresponding to the piece of storage in question:
- * size byte of data starting from sector. Only clear a bits of the affected
- * one ore more _aligned_ BM_BLOCK_SIZE blocks.
+ * size byte of data starting from sector. Only clear bits of the affected
+ * one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
@@ -957,7 +957,9 @@ static int _is_in_al(struct drbd_device *device, unsigned int enr)
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
- * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
+ * This functions sleeps on al_wait.
+ *
+ * Returns: %0 on success, -EINTR if interrupted.
 */
 int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
 {
@@ -1004,11 +1006,13 @@ retry:
 /**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @device:	DRBD device.
+ * @peer_device: DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
- * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
+ * tries to set it to BME_LOCKED.
+ *
+ * Returns: %0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
 int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
@@ -1190,7 +1194,7 @@ void drbd_rs_cancel_all(struct drbd_device *device)
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
- * Returns 0 upon success, -EAGAIN if at least one reference count was
+ * Returns: %0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
 int drbd_rs_del_all(struct drbd_device *device)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8a8cd4fc9238..146b32fa7b47 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1301,8 +1301,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 		loop_set_size(lo, new_size);
 	}
-	loop_config_discard(lo);
-
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
@@ -2036,7 +2034,8 @@ static int loop_add(int i)
 	}
 	lo->lo_queue = lo->lo_disk->queue;
-	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+	/* random number picked from the history block max_sectors cap */
+	blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
 	/*
 	 * By default, we do buffer IO, so it doesn't make sense to enable
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b6414e1e645b..4e72ec4e25ac 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -334,10 +334,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
 	if (!nbd->pid)
 		return 0;
-	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
-		nbd->disk->queue->limits.discard_granularity = blksize;
+	if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
-	}
 	blk_queue_logical_block_size(nbd->disk->queue, blksize);
 	blk_queue_physical_block_size(nbd->disk->queue, blksize);
@@ -1357,7 +1355,6 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 		nbd->tag_set.timeout = 0;
-		nbd->disk->queue->limits.discard_granularity = 0;
 		blk_queue_max_discard_sectors(nbd->disk->queue, 0);
 		mutex_unlock(&nbd->config_lock);
@@ -1850,7 +1847,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
 	 * Tell the block layer that we are not a rotational device
 	 */
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-	disk->queue->limits.discard_granularity = 0;
 	blk_queue_max_discard_sectors(disk->queue, 0);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
 	blk_queue_max_segments(disk->queue, USHRT_MAX);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3021d58ca51c..9f7695f00c2d 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1880,7 +1880,6 @@ static void null_config_discard(struct nullb *nullb)
 		return;
 	}
-	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
 }
@@ -2186,10 +2185,8 @@ static int null_add_dev(struct nullb_device *dev)
 	blk_queue_logical_block_size(nullb->q, dev->blocksize);
 	blk_queue_physical_block_size(nullb->q, dev->blocksize);
-	if (!dev->max_sectors)
-		dev->max_sectors = queue_max_hw_sectors(nullb->q);
-	dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
-	blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+	if (dev->max_sectors)
+		blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
 	if (dev->virt_boundary)
 		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
@@ -2289,12 +2286,6 @@ static int __init null_init(void)
 		g_bs = PAGE_SIZE;
 	}
-	if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
-		pr_warn("invalid max sectors\n");
-		pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
-		g_max_sectors = BLK_DEF_MAX_SECTORS;
-	}
-
 	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
 		pr_err("invalid home_node value\n");
 		g_home_node = NUMA_NO_NODE;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 55c5b48bc276..6f5e0994862e 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
 	struct nullb_device *dev = nullb->dev;
 	struct request_queue *q = nullb->q;
-	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
+	disk_set_zoned(nullb->disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
 	blk_queue_chunk_sectors(q, dev->zone_size_sects);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b0550b68645d..4044c369d22a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1006,10 +1006,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 	msg.prio = cpu_to_le16(req_get_ioprio(rq));
 	/*
-	 * We only support discards with single segment for now.
+	 * We only support discards/WRITE_ZEROES with single segment for now.
 	 * See queue limits.
 	 */
-	if (req_op(rq) != REQ_OP_DISCARD)
+	if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
 		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
 	if (sg_cnt == 0)
@@ -1362,6 +1362,8 @@ static void setup_request_queue(struct rnbd_clt_dev *dev,
 	blk_queue_write_cache(dev->queue, !!(rsp->cache_policy & RNBD_WRITEBACK), !!(rsp->cache_policy & RNBD_FUA));
+	blk_queue_max_write_zeroes_sectors(dev->queue,
+					   le32_to_cpu(rsp->max_write_zeroes_sectors));
 }
@@ -1567,8 +1569,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 	dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
 	if (IS_ERR(dev)) {
-		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
-		       pathname, sess->sessname, PTR_ERR(dev));
+		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %pe\n",
+		       pathname, sess->sessname, dev);
 		ret = PTR_ERR(dev);
 		goto put_sess;
 	}
@@ -1626,10 +1628,11 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 	}
 	rnbd_clt_info(dev,
-		      "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+		      "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_write_zeroes_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
 		      dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
 		      le16_to_cpu(rsp->logical_block_size),
 		      le16_to_cpu(rsp->physical_block_size),
+		      le32_to_cpu(rsp->max_write_zeroes_sectors),
 		      le32_to_cpu(rsp->max_discard_sectors),
 		      le32_to_cpu(rsp->discard_granularity),
 		      le32_to_cpu(rsp->discard_alignment),
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index e32f8f2c868a..f35be51d213c 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
 * @device_id: device_id on server side to identify the device
 * @nsectors: number of sectors in the usual 512b unit
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
- * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_write_zeroes_sectors: max sectors for WRITE ZEROES in the 512b unit
 * @max_discard_sectors: max. sectors that can be discarded at once in 512b
 *	unit.
 * @discard_granularity: size of the internal discard allocation unit in bytes
@@ -145,7 +145,7 @@ struct rnbd_msg_open_rsp {
 	__le32	device_id;
 	__le64	nsectors;
 	__le32	max_hw_sectors;
-	__le32	max_write_same_sectors;
+	__le32	max_write_zeroes_sectors;
 	__le32	max_discard_sectors;
 	__le32	discard_granularity;
 	__le32	discard_alignment;
@@ -186,7 +186,7 @@ struct rnbd_msg_io {
 * @RNBD_OP_FLUSH: flush the volatile write cache
 * @RNBD_OP_DISCARD: discard sectors
 * @RNBD_OP_SECURE_ERASE: securely erase sectors
- * @RNBD_OP_WRITE_SAME: write the same sectors many times
+ * @RNBD_OP_WRITE_ZEROES: write zeroes sectors
 * @RNBD_F_SYNC: request is sync (sync write or read)
 * @RNBD_F_FUA: forced unit access
@@ -199,7 +199,7 @@ enum rnbd_io_flags {
 	RNBD_OP_FLUSH = 2,
 	RNBD_OP_DISCARD = 3,
 	RNBD_OP_SECURE_ERASE = 4,
-	RNBD_OP_WRITE_SAME = 5,
+	RNBD_OP_WRITE_ZEROES = 5,
 	/* Flags */
 	RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
@@ -236,6 +236,9 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
 	case RNBD_OP_SECURE_ERASE:
 		bio_opf = REQ_OP_SECURE_ERASE;
 		break;
+	case RNBD_OP_WRITE_ZEROES:
+		bio_opf = REQ_OP_WRITE_ZEROES;
+		break;
 	default:
 		WARN(1, "Unknown RNBD type: %d (flags %d)\n",
 		     rnbd_op(rnbd_opf), rnbd_opf);
@@ -268,6 +271,9 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
 	case REQ_OP_SECURE_ERASE:
 		rnbd_opf = RNBD_OP_SECURE_ERASE;
 		break;
+	case REQ_OP_WRITE_ZEROES:
+		rnbd_opf = RNBD_OP_WRITE_ZEROES;
+		break;
 	case REQ_OP_FLUSH:
 		rnbd_opf = RNBD_OP_FLUSH;
 		break;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 65de51f3dfd9..3a0d5dcec6f2 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -136,8 +136,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
 	sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
 	if (IS_ERR(sess_dev)) {
-		pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
-				   srv_sess->sessname, dev_id);
+		pr_err_ratelimited("Got I/O request on session %s for unknown device id %d: %pe\n",
+				   srv_sess->sessname, dev_id, sess_dev);
 		err = -ENOTCONN;
 		goto err;
 	}
@@ -544,7 +544,8 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 	rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
 	rsp->max_hw_sectors = cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
-	rsp->max_write_same_sectors = 0;
+	rsp->max_write_zeroes_sectors =
+		cpu_to_le32(bdev_write_zeroes_sectors(bdev));
 	rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
 	rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
 	rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
@@ -585,6 +586,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
 {
 	char *full_path;
 	char *a, *b;
+	int len;
 	full_path = kmalloc(PATH_MAX, GFP_KERNEL);
 	if (!full_path)
@@ -596,19 +598,19 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
 	 */
 	a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
 	if (a) {
-		int len = a - dev_search_path;
+		len = a - dev_search_path;
 		len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
			       dev_search_path, srv_sess->sessname, dev_name);
-		if (len >= PATH_MAX) {
-			pr_err("Too long path: %s, %s, %s\n",
-			       dev_search_path, srv_sess->sessname, dev_name);
-			kfree(full_path);
-			return ERR_PTR(-EINVAL);
-		}
 	} else {
-		snprintf(full_path, PATH_MAX, "%s/%s",
-			 dev_search_path, dev_name);
+		len = snprintf(full_path, PATH_MAX, "%s/%s",
+			       dev_search_path, dev_name);
+	}
+	if (len >= PATH_MAX) {
+		pr_err("Too long path: %s, %s, %s\n",
+		       dev_search_path, srv_sess->sessname, dev_name);
+		kfree(full_path);
+		return ERR_PTR(-EINVAL);
 	}
 	/* eliminitate duplicated slashes */
@@ -709,24 +711,24 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
 	full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
 	if (IS_ERR(full_path)) {
 		ret = PTR_ERR(full_path);
-		pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
-		       open_msg->dev_name, srv_sess->sessname, ret);
+		pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %pe\n",
+		       open_msg->dev_name, srv_sess->sessname, full_path);
 		goto reject;
 	}
 	bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
 	if (IS_ERR(bdev_handle)) {
 		ret = PTR_ERR(bdev_handle);
-		pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
-		       full_path, srv_sess->sessname, ret);
+		pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
+		       full_path, srv_sess->sessname, bdev_handle);
 		goto free_path;
 	}
 	srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
						 open_msg->access_mode);
 	if (IS_ERR(srv_dev)) {
-		pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
-		       full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+		pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
+		       full_path, srv_sess->sessname, srv_dev);
 		ret = PTR_ERR(srv_dev);
 		goto blkdev_put;
 	}
@@ -736,8 +738,8 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
					open_msg->access_mode == RNBD_ACCESS_RO,
					srv_dev);
 	if (IS_ERR(srv_sess_dev)) {
-		pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
-		       full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+		pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %pe\n",
+		       full_path, srv_sess->sessname, srv_sess_dev);
 		ret = PTR_ERR(srv_sess_dev);
 		goto srv_dev_put;
 	}
@@ -818,7 +820,7 @@ static int __init rnbd_srv_init_module(void)
 	};
 	rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
 	if (IS_ERR(rtrs_ctx)) {
-		pr_err("rtrs_srv_open(), err: %d\n", err);
+		pr_err("rtrs_srv_open(), err: %pe\n", rtrs_ctx);
 		return PTR_ERR(rtrs_ctx);
 	}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 3eaf02ebeebe..5aa729069bc1 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -250,7 +250,7 @@ static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
 {
 	const struct ublk_param_zoned *p = &ub->params.zoned;
-	disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
+	disk_set_zoned(ub->ub_disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
 	blk_queue_required_elevator_features(ub->ub_disk->queue,
					     ELEVATOR_F_ZBD_SEQ_WRITE);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 47556d8ccc32..3b6b9abb8ce1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -722,52 +722,15 @@ fail_report:
 	return ret;
 }
-static void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
-	u8 model;
-
-	virtio_cread(vblk->vdev, struct virtio_blk_config,
-		     zoned.model, &model);
-	switch (model) {
-	default:
-		dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
-		fallthrough;
-	case VIRTIO_BLK_Z_NONE:
-	case VIRTIO_BLK_Z_HA:
-		disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
-		return;
-	case VIRTIO_BLK_Z_HM:
-		WARN_ON_ONCE(!vblk->zone_sectors);
-		if (!blk_revalidate_disk_zones(vblk->disk, NULL))
-			set_capacity_and_notify(vblk->disk, 0);
-	}
-}
-
 static int virtblk_probe_zoned_device(struct virtio_device *vdev,
		struct virtio_blk *vblk, struct request_queue *q)
 {
 	u32 v, wg;
-	u8 model;
-
-	virtio_cread(vdev, struct virtio_blk_config,
-		     zoned.model, &model);
-
-	switch (model) {
-	case VIRTIO_BLK_Z_NONE:
-	case VIRTIO_BLK_Z_HA:
-		/* Present the host-aware device as non-zoned */
-		return 0;
-	case VIRTIO_BLK_Z_HM:
-		break;
-	default:
-		dev_err(&vdev->dev, "unsupported zone model %d\n", model);
-		return -EINVAL;
-	}
 	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
-	disk_set_zoned(vblk->disk, BLK_ZONED_HM);
+	disk_set_zoned(vblk->disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 	virtio_cread(vdev, struct virtio_blk_config,
@@ -839,23 +802,12 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 */
 #define virtblk_report_zones	NULL
-static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
-}
-
 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
			struct virtio_blk *vblk, struct request_queue *q)
 {
-	u8 model;
-
-	virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
-	if (model == VIRTIO_BLK_Z_HM) {
-		dev_err(&vdev->dev,
-			"virtio_blk: zoned devices are not supported");
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
+	dev_err(&vdev->dev,
+		"virtio_blk: zoned devices are not supported");
+	return -EOPNOTSUPP;
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
@@ -1005,7 +957,6 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct virtio_blk *vblk = container_of(work, struct virtio_blk, config_work);
-	virtblk_revalidate_zones(vblk);
 	virtblk_update_capacity(vblk, true);
 }
@@ -1570,9 +1521,26 @@ static int virtblk_probe(struct virtio_device *vdev)
	 * placed after the virtio_device_ready() call above.
	 */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
-		err = virtblk_probe_zoned_device(vdev, vblk, q);
-		if (err)
+		u8 model;
+
+		virtio_cread(vdev, struct virtio_blk_config, zoned.model,
+			     &model);
+		switch (model) {
+		case VIRTIO_BLK_Z_NONE:
+		case VIRTIO_BLK_Z_HA:
+			/* Present the host-aware device as non-zoned */
+			break;
+		case VIRTIO_BLK_Z_HM:
+			err = virtblk_probe_zoned_device(vdev, vblk, q);
+			if (err)
+				goto out_cleanup_disk;
+			break;
+		default:
+			dev_err(&vdev->dev, "unsupported zone model %d\n",
+				model);
+			err = -EINVAL;
			goto out_cleanup_disk;
+		}
 	}
 	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 5ff50e76cee5..1432c83183d0 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -132,7 +132,7 @@ struct blkif_x86_32_request {
 struct blkif_x86_64_request_rw {
	uint8_t nr_segments;	/* number of segments */
	blkif_vdev_t handle;	/* only for read/write requests */
-	uint32_t _pad1;		/* offsetof(blkif_reqest..,u.rw.id)==8 */
+	uint32_t _pad1;		/* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2b1d82473be8..6772e0c654fa 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2226,7 +2226,6 @@ static int zram_add(void)
					 ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
-	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	/*
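For context on the rnbd WRITE_ZEROES hunks above: a driver advertises support via blk_queue_max_write_zeroes_sectors(), and REQ_OP_WRITE_ZEROES requests, like discards, describe only a sector range and carry no data pages, which is why the client skips scatterlist mapping for both. A hedged sketch of that check, with foo_map_request() standing in for a fragment of a hypothetical blk-mq ->queue_rq() handler:

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: discard and write-zeroes requests have no payload to map,
 * mirroring the check added to rnbd_client_xfer_request() above.
 */
static int foo_map_request(struct request_queue *q, struct request *rq,
			   struct scatterlist *sgl)
{
	int sg_cnt = 0;

	if (req_op(rq) != REQ_OP_DISCARD && req_op(rq) != REQ_OP_WRITE_ZEROES)
		sg_cnt = blk_rq_map_sg(q, rq, sgl);	/* data-carrying ops only */

	return sg_cnt;	/* zero segments is valid for range-only operations */
}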