author     Jens Axboe <axboe@kernel.dk>  2024-06-20 06:54:53 -0600
committer  Jens Axboe <axboe@kernel.dk>  2024-06-20 06:55:20 -0600
commit     e821bcecdf82c7293322df15d8172ff274f84ef4 (patch)
tree       d77821a541205f2e600da887cac72db191c7b005 /block
parent     5ddb88f22eb97218d9295e69c39e0ff7cc64e09c (diff)
parent     7d4dec525f5fd555037486af4d02dd3682655ba1 (diff)
Merge branch 'for-6.11/block-limits' into for-6.11/block
Merge in queue limits cleanups.

* for-6.11/block-limits:
  block: move the raid_partial_stripes_expensive flag into the features field
  block: remove the discard_alignment flag
  block: move the misaligned flag into the features field
  block: renumber and rename the cache disabled flag
  block: fix spelling and grammar for in writeback_cache_control.rst
  block: remove the unused blk_bounce enum
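The common theme of these cleanups is replacing the remaining bool members of struct queue_limits (misaligned, raid_partial_stripes_expensive) with bits in the features and flags words, and tidying the existing flag names, with updates going through the queue_limits_start_update()/queue_limits_commit_update() pair shown below. A minimal sketch of how a driver might advertise a volatile write cache under this model (the helper name example_enable_write_cache is invented for illustration, not part of this series):

#include <linux/blkdev.h>

/* Hypothetical helper: advertise a volatile write cache by setting a
 * BLK_FEAT_* bit in the limits instead of a dedicated bool member. */
static int example_enable_write_cache(struct request_queue *q)
{
        struct queue_limits lim;

        lim = queue_limits_start_update(q);
        lim.features |= BLK_FEAT_WRITE_CACHE;
        return queue_limits_commit_update(q, &lim);
}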
Diffstat (limited to 'block')
-rw-r--r--  block/blk-settings.c  34
-rw-r--r--  block/blk-sysfs.c      6
2 files changed, 13 insertions, 27 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9c7f101aaf52..b19306804056 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -266,7 +266,7 @@ static int blk_validate_limits(struct queue_limits *lim)
if (lim->alignment_offset) {
lim->alignment_offset &= (lim->physical_block_size - 1);
- lim->misaligned = 0;
+ lim->flags &= ~BLK_FEAT_MISALIGNED;
}
if (!(lim->features & BLK_FEAT_WRITE_CACHE))
@@ -477,6 +477,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
if (!(b->features & BLK_FEAT_POLL))
t->features &= ~BLK_FEAT_POLL;
+ t->flags |= (b->flags & BLK_FEAT_MISALIGNED);
+
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_user_sectors = min_not_zero(t->max_user_sectors,
b->max_user_sectors);
@@ -501,8 +503,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
- t->misaligned |= b->misaligned;
-
alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is
@@ -516,7 +516,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
/* Verify that top and bottom intervals line up */
if (max(top, bottom) % min(top, bottom)) {
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
}
@@ -538,42 +538,38 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* Minimum I/O a multiple of the physical block size? */
if (t->io_min & (t->physical_block_size - 1)) {
t->io_min = t->physical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* Optimal I/O a multiple of the physical block size? */
if (t->io_opt & (t->physical_block_size - 1)) {
t->io_opt = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* chunk_sectors a multiple of the physical block size? */
if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
t->chunk_sectors = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
- t->raid_partial_stripes_expensive =
- max(t->raid_partial_stripes_expensive,
- b->raid_partial_stripes_expensive);
-
/* Find lowest common alignment_offset */
t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
if (t->alignment_offset & (t->logical_block_size - 1)) {
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
@@ -585,16 +581,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
- if (t->discard_granularity != 0 &&
- t->discard_alignment != alignment) {
- top = t->discard_granularity + t->discard_alignment;
- bottom = b->discard_granularity + alignment;
-
- /* Verify that top and bottom intervals line up */
- if ((max(top, bottom) % min(top, bottom)) != 0)
- t->discard_misaligned = 1;
- }
-
t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
b->max_discard_sectors);
t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
@@ -736,7 +722,7 @@ int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q->limits.misaligned)
+ if (q->limits.flags & BLK_FEAT_MISALIGNED)
return -1;
if (bdev_is_partition(bdev))
return queue_limit_alignment_offset(&q->limits,
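With the misaligned bool gone, blk_stack_limits() propagates misalignment as BLK_FEAT_MISALIGNED in the flags word (t->flags |= (b->flags & BLK_FEAT_MISALIGNED)), and bdev_alignment_offset() keeps returning -1 when the bit is set. A sketch of how a hypothetical stacking driver (the example_* names are invented) would combine two bottom devices and consume the stacked state:

#include <linux/blkdev.h>

/* Hypothetical stacking path: merge the limits of two bottom devices
 * and report whether the resulting I/O topology is misaligned. */
static int example_stack_two(struct queue_limits *t,
                             struct block_device *bdev_a, sector_t start_a,
                             struct block_device *bdev_b, sector_t start_b)
{
        int ret = 0;

        ret |= blk_stack_limits(t, &bdev_get_queue(bdev_a)->limits, start_a);
        ret |= blk_stack_limits(t, &bdev_get_queue(bdev_b)->limits, start_b);

        /* Misalignment now lives in t->flags rather than a bool member. */
        if (t->flags & BLK_FEAT_MISALIGNED)
                pr_warn("example: stacked I/O topology is misaligned\n");

        return ret;
}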
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index da4e96d686f9..59e6d111ed05 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -429,7 +429,7 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
- if (q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED)
+ if (q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED)
return sprintf(page, "write through\n");
return sprintf(page, "write back\n");
}
@@ -452,9 +452,9 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
lim = queue_limits_start_update(q);
if (disable)
- lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
+ lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
else
- lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
+ lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
err = queue_limits_commit_update(q, &lim);
if (err)
return err;
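On the sysfs side, the rename means the queue/write_cache attribute is now backed solely by BLK_FLAG_WRITE_CACHE_DISABLED in the limits flags; nothing changes for userspace, which still toggles the cache mode by writing the attribute. A userspace sketch, assuming the device is sda:

#include <stdio.h>

/* Switch sda to write-through via the sysfs attribute served by
 * queue_wc_show()/queue_wc_store() above (requires root). */
int main(void)
{
        FILE *f = fopen("/sys/block/sda/queue/write_cache", "w");

        if (!f) {
                perror("write_cache");
                return 1;
        }
        fputs("write through\n", f);    /* "write back" re-enables it */
        return fclose(f) ? 1 : 0;
}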