author		Mike Snitzer <snitzer@kernel.org>	2023-03-01 12:48:43 -0500
committer	Mike Snitzer <snitzer@kernel.org>	2023-03-30 15:57:50 -0400
commit		06961c487a33a222fd3d84998dc6398ed0449373 (patch)
tree		ce2512c7db900e8f3a92fc1e13502162701ba079 /drivers/md/dm.c
parent		bb46c56165faf284cf42c197317bff24f899835a (diff)
dm: split discards further if target sets max_discard_granularity
The block core (bio_split_discard) will already split discards based
on the 'discard_granularity' and 'max_discard_sectors' queue_limits.
But the DM thin target also needs to ensure that it doesn't receive a
discard that spans a 'max_discard_sectors' boundary.

Introduce a dm_target 'max_discard_granularity' flag that if set will
cause DM core to split discard bios relative to 'max_discard_sectors'.
This treats 'discard_granularity' as a "min_discard_granularity" and
'max_discard_sectors' as a "max_discard_granularity".

Requested-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
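To see what the new splitting amounts to, it helps to run the capping arithmetic in isolation. The following is a minimal user-space model of the __max_io_len() logic from this patch, assuming a power-of-2 max_discard_sectors (the block core's blk_chunk_sectors_left() falls back to sector_div() for non-power-of-2 values); chunk_sectors_left() and the numbers are illustrative stand-ins, not kernel code.

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Model of blk_chunk_sectors_left() for the power-of-2 case: how many
 * sectors remain before 'offset' crosses the next 'chunk_sectors'
 * boundary.
 */
static unsigned int chunk_sectors_left(sector_t offset,
				       unsigned int chunk_sectors)
{
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

int main(void)
{
	sector_t target_offset = 1000;		/* discard's offset within the target */
	unsigned int max_discard_sectors = 128;	/* hypothetical queue_limits value */
	unsigned int discard_len = 512;		/* incoming discard size, in sectors */

	/* Cap the discard so it never spans a max_discard_sectors boundary. */
	unsigned int left = chunk_sectors_left(target_offset, max_discard_sectors);
	unsigned int len = discard_len < left ? discard_len : left;

	/* 1000 & 127 == 104, so only 24 sectors remain before the boundary. */
	printf("split discard to %u sectors\n", len);
	return 0;
}

With these inputs the 512-sector discard is first trimmed to 24 sectors; DM core then resubmits the remainder, so no single bio handed to the target spans a boundary.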
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfde0088147a..20c6b72a0245 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1162,7 +1162,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
 	return ti->len - target_offset;
 }
 
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+			     unsigned int max_granularity)
 {
 	sector_t target_offset = dm_target_offset(ti, sector);
 	sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1173,11 +1174,16 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	 * explains why stacked chunk_sectors based splitting via
 	 * bio_split_to_limits() isn't possible here.
	 */
-	if (!ti->max_io_len)
+	if (!max_granularity)
 		return len;
 	return min_t(sector_t, len,
 		     min(queue_max_sectors(ti->table->md->queue),
-			 blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+			 blk_chunk_sectors_left(target_offset, max_granularity)));
+}
+
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+	return __max_io_len(ti, sector, ti->max_io_len);
 }
 
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
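The hunk above generalizes the existing ti->max_io_len capping: max_io_len() becomes a wrapper that passes ti->max_io_len as the granularity, so the pre-existing behavior is unchanged. For context, a target establishes ti->max_io_len by calling dm_set_target_max_io_len() (declared just above) from its constructor, as dm-thin does with its block size. A sketch of such a constructor fragment follows; the function name and block size are hypothetical, not part of this patch.

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Hypothetical block size; dm-thin passes pool->sectors_per_block. */
	sector_t sectors_per_block = 128;
	int r;

	/* Ask DM core to split I/O so it never spans a block boundary. */
	r = dm_set_target_max_io_len(ti, sectors_per_block);
	if (r)
		return r;

	return 0;
}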
@@ -1565,12 +1571,13 @@ static void __send_empty_flush(struct clone_info *ci)
 }
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-					unsigned int num_bios)
+					unsigned int num_bios,
+					unsigned int max_granularity)
 {
 	unsigned int len, bios;
 
 	len = min_t(sector_t, ci->sector_count,
-		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+		    __max_io_len(ti, ci->sector, max_granularity));
 
 	atomic_add(num_bios, &ci->io->io_count);
 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1606,10 +1613,16 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 					  struct dm_target *ti)
 {
 	unsigned int num_bios = 0;
+	unsigned int max_granularity = 0;
 
 	switch (bio_op(ci->bio)) {
 	case REQ_OP_DISCARD:
 		num_bios = ti->num_discard_bios;
+		if (ti->max_discard_granularity) {
+			struct queue_limits *limits =
+				dm_get_queue_limits(ti->table->md);
+			max_granularity = limits->max_discard_sectors;
+		}
 		break;
 	case REQ_OP_SECURE_ERASE:
 		num_bios = ti->num_secure_erase_bios;
@@ -1630,7 +1643,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 	if (unlikely(!num_bios))
 		return BLK_STS_NOTSUPP;
 
-__send_changing_extent_only(ci, ti, num_bios);
+	__send_changing_extent_only(ci, ti, num_bios, max_granularity);
 
 	return BLK_STS_OK;
 }