path: root/fs/iomap
author    Ritesh Harjani (IBM) <ritesh.list@gmail.com>    2023-07-10 14:12:22 -0700
committer Ritesh Harjani (IBM) <ritesh.list@gmail.com>    2023-07-25 10:55:55 +0530
commit    cc86181a3b7605f394182cdc38b6a87632511a88 (patch)
tree      ec6219814ea8021551996cf28bd46d7952b01f26 /fs/iomap
parent    3ea5c76cadeedcc0e40e180d2c4e37dc289fee22 (diff)
iomap: Add some uptodate state handling helpers for ifs state bitmap
This patch adds two helper routines, ifs_is_fully_uptodate() and ifs_block_is_uptodate(), for querying the uptodate state in the "ifs" state bitmap. In later patches the ifs state bitmap array will also track the dirty state of all blocks of a folio, hence this patch introduces these helpers for handling the uptodate state of the ifs state bitmap.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Diffstat (limited to 'fs/iomap')
-rw-r--r--  fs/iomap/buffered-io.c | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 6fff25f955e8..85159e5b3d1d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -36,6 +36,20 @@ struct iomap_folio_state {
static struct bio_set iomap_ioend_bioset;
+static inline bool ifs_is_fully_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs)
+{
+ struct inode *inode = folio->mapping->host;
+
+ return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
+}
+
+static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
+ unsigned int block)
+{
+ return test_bit(block, ifs->state);
+}
+
static void ifs_set_range_uptodate(struct folio *folio,
struct iomap_folio_state *ifs, size_t off, size_t len)
{
@@ -47,7 +61,7 @@ static void ifs_set_range_uptodate(struct folio *folio,
spin_lock_irqsave(&ifs->state_lock, flags);
bitmap_set(ifs->state, first_blk, nr_blks);
- if (bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)))
+ if (ifs_is_fully_uptodate(folio, ifs))
folio_mark_uptodate(folio);
spin_unlock_irqrestore(&ifs->state_lock, flags);
}
@@ -92,14 +106,12 @@ static struct iomap_folio_state *ifs_alloc(struct inode *inode,
static void ifs_free(struct folio *folio)
{
struct iomap_folio_state *ifs = folio_detach_private(folio);
- struct inode *inode = folio->mapping->host;
- unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
if (!ifs)
return;
WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
- WARN_ON_ONCE(bitmap_full(ifs->state, nr_blocks) !=
+ WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
folio_test_uptodate(folio));
kfree(ifs);
}
@@ -130,7 +142,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
/* move forward for each leading block marked uptodate */
for (i = first; i <= last; i++) {
- if (!test_bit(i, ifs->state))
+ if (!ifs_block_is_uptodate(ifs, i))
break;
*pos += block_size;
poff += block_size;
@@ -140,7 +152,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
/* truncate len if we find any trailing uptodate block(s) */
for ( ; i <= last; i++) {
- if (test_bit(i, ifs->state)) {
+ if (ifs_block_is_uptodate(ifs, i)) {
plen -= (last - i + 1) * block_size;
last = i - 1;
break;
@@ -444,7 +456,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
last = (from + count - 1) >> inode->i_blkbits;
for (i = first; i <= last; i++)
- if (!test_bit(i, ifs->state))
+ if (!ifs_block_is_uptodate(ifs, i))
return false;
return true;
}
@@ -1621,7 +1633,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* invalid, grab a new one.
*/
for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
- if (ifs && !test_bit(i, ifs->state))
+ if (ifs && !ifs_block_is_uptodate(ifs, i))
continue;
error = wpc->ops->map_blocks(wpc, inode, pos);
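
The commit message notes that later patches will extend the same ifs state bitmap to track per-block dirty state. As a rough illustration only (the helper name and the bit layout below are assumptions, not part of this commit), a dirty-state counterpart to ifs_block_is_uptodate() could follow the same pattern, for example by keeping the dirty bits in a second region of the bitmap directly after the uptodate bits:

/*
 * Illustrative sketch only -- not part of this commit. Assumes the ifs
 * state bitmap is allocated with 2 * blks_per_folio bits, uptodate bits
 * first and dirty bits second; the helper name is hypothetical.
 */
static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, unsigned int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	/* Dirty bit for @block lives blks_per_folio bits past its uptodate bit. */
	return test_bit(block + blks_per_folio, ifs->state);
}

Keeping both states in one bitmap would let ifs_set_range_uptodate() and a future dirty-range helper share the same state_lock and allocation, which is consistent with the direction described in the commit message.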