commit d151e8bea1509a6f72a8929882d9ecb66e936b09
tree   2d79a8afadf4f2e36d5df6deb485e471b6ac9b92 /fs
parent 8762069330316392331e693befd8a5b632833618
parent 471859f57d42537626a56312cfb50cd6acee09ae
author    Linus Torvalds <torvalds@linux-foundation.org> 2023-02-22 13:50:13 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2023-02-22 13:50:13 -0800
Merge tag 'iomap-6.3-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull iomap updates from Darrick Wong:
"This is mostly rearranging things to make life easier for gfs2,
nothing all that mindblowing for this release.
- Change when the iomap page_done function is called so that we still
have a locked folio in the success case. This fixes a writeback
race in gfs2
- Change when the iomap page_prepare function is called so that gfs2
can recover from OOM scenarios more gracefully
- Rename the iomap page_ops to folio_ops, since they operate on
folios now"
* tag 'iomap-6.3-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
iomap: Rename page_ops to folio_ops
iomap: Rename page_prepare handler to get_folio
iomap: Add __iomap_get_folio helper
iomap/gfs2: Get page in page_prepare handler
iomap: Add iomap_get_folio helper
iomap: Rename page_done handler to put_folio
iomap/gfs2: Unlock and put folio in page_done handler
iomap: Add __iomap_put_folio helper
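
For orientation before the diffstat and patch below, here is a minimal, hypothetical sketch of how a filesystem wires up the renamed hooks after this series. The "myfs_*" names are invented for illustration; iomap_get_folio(), folio_unlock()/folio_put(), and the struct iomap_folio_ops members follow the patches shown below. The key contract change: ->get_folio returns a locked folio (or an ERR_PTR), and ->put_folio is now responsible for unlocking and releasing it.

/* Hypothetical filesystem glue -- a sketch, not part of this series. */
static struct folio *myfs_get_folio(struct iomap_iter *iter, loff_t pos,
				    unsigned len)
{
	/* Per-fs preparation (transactions, reservations) would go here. */

	/* Returns a locked folio on success or an ERR_PTR on failure. */
	return iomap_get_folio(iter, pos);
}

static void myfs_put_folio(struct inode *inode, loff_t pos, unsigned copied,
			   struct folio *folio)
{
	/* The handler receives the still-locked folio and must release it. */
	folio_unlock(folio);
	folio_put(folio);

	/* Per-fs teardown (end transaction, dirty accounting) would go here. */
}

static const struct iomap_folio_ops myfs_iomap_folio_ops = {
	.get_folio	= myfs_get_folio,
	.put_folio	= myfs_put_folio,
};

A filesystem points iomap->folio_ops at such a table from its ->iomap_begin handler, as gfs2 and xfs do in the diff below; when no ->get_folio/->put_folio is supplied, the iomap core falls back to iomap_get_folio() and folio_unlock()/folio_put() itself.
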
Diffstat (limited to 'fs')
 fs/gfs2/bmap.c         | 38
 fs/iomap/buffered-io.c | 91
 fs/xfs/xfs_iomap.c     |  4
 3 files changed, 88 insertions(+), 45 deletions(-)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e7537fd305dd..e191ecfb1fde 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -956,26 +956,40 @@ hole_found:
 	goto out;
 }
 
-static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
-				   unsigned len)
+static struct folio *
+gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
 {
+	struct inode *inode = iter->inode;
 	unsigned int blockmask = i_blocksize(inode) - 1;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	unsigned int blocks;
+	struct folio *folio;
+	int status;
 
 	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
-	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+	status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+	if (status)
+		return ERR_PTR(status);
+
+	folio = iomap_get_folio(iter, pos);
+	if (IS_ERR(folio))
+		gfs2_trans_end(sdp);
+	return folio;
 }
 
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-				 unsigned copied, struct page *page)
+static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
+				 unsigned copied, struct folio *folio)
 {
 	struct gfs2_trans *tr = current->journal_info;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 
-	if (page && !gfs2_is_stuffed(ip))
-		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+	if (!gfs2_is_stuffed(ip))
+		gfs2_page_add_databufs(ip, &folio->page, offset_in_page(pos),
+				       copied);
+
+	folio_unlock(folio);
+	folio_put(folio);
 
 	if (tr->tr_num_buf_new)
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -983,9 +997,9 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
 	gfs2_trans_end(sdp);
 }
 
-static const struct iomap_page_ops gfs2_iomap_page_ops = {
-	.page_prepare = gfs2_iomap_page_prepare,
-	.page_done = gfs2_iomap_page_done,
+static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+	.get_folio = gfs2_iomap_get_folio,
+	.put_folio = gfs2_iomap_put_folio,
 };
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
@@ -1061,7 +1075,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 	}
 
 	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
-		iomap->page_ops = &gfs2_iomap_page_ops;
+		iomap->folio_ops = &gfs2_iomap_folio_ops;
 	return 0;
 
 out_trans_end:
@@ -1277,7 +1291,7 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
 /*
  * NOTE: Never call gfs2_block_zero_range with an open transaction because it
  * uses iomap write to perform its actions, which begin their own transactions
- * (iomap_begin, page_prepare, etc.)
+ * (iomap_begin, get_folio, etc.)
  */
 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
 				 unsigned int length)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 356193e44cf0..d3c300563eb8 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -457,6 +457,33 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 }
 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 
+/**
+ * iomap_get_folio - get a folio reference for writing
+ * @iter: iteration structure
+ * @pos: start offset of write
+ *
+ * Returns a locked reference to the folio at @pos, or an error pointer if the
+ * folio could not be obtained.
+ */
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
+{
+	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
+	struct folio *folio;
+
+	if (iter->flags & IOMAP_NOWAIT)
+		fgp |= FGP_NOWAIT;
+
+	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+			fgp, mapping_gfp_mask(iter->inode->i_mapping));
+	if (folio)
+		return folio;
+
+	if (iter->flags & IOMAP_NOWAIT)
+		return ERR_PTR(-EAGAIN);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(iomap_get_folio);
+
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
 	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
@@ -575,6 +602,30 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	return 0;
 }
 
+static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
+		size_t len)
+{
+	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+
+	if (folio_ops && folio_ops->get_folio)
+		return folio_ops->get_folio(iter, pos, len);
+	else
+		return iomap_get_folio(iter, pos);
+}
+
+static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
+		struct folio *folio)
+{
+	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+
+	if (folio_ops && folio_ops->put_folio) {
+		folio_ops->put_folio(iter->inode, pos, ret, folio);
+	} else {
+		folio_unlock(folio);
+		folio_put(folio);
+	}
+}
+
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
 		struct folio *folio)
 {
@@ -587,15 +638,11 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 		size_t len, struct folio **foliop)
 {
-	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct folio *folio;
-	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;
 
-	if (iter->flags & IOMAP_NOWAIT)
-		fgp |= FGP_NOWAIT;
-
 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
 	if (srcmap != &iter->iomap)
 		BUG_ON(pos + len > srcmap->offset + srcmap->length);
@@ -606,18 +653,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-	if (page_ops && page_ops->page_prepare) {
-		status = page_ops->page_prepare(iter->inode, pos, len);
-		if (status)
-			return status;
-	}
-
-	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
-			fgp, mapping_gfp_mask(iter->inode->i_mapping));
-	if (!folio) {
-		status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
-		goto out_no_page;
-	}
+	folio = __iomap_get_folio(iter, pos, len);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
 	/*
 	 * Now we have a locked folio, before we do anything with it we need to
@@ -629,9 +667,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
 	 */
-	if (page_ops && page_ops->iomap_valid) {
-		bool iomap_valid = page_ops->iomap_valid(iter->inode,
-							&iter->iomap);
+	if (folio_ops && folio_ops->iomap_valid) {
+		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
+							 &iter->iomap);
 		if (!iomap_valid) {
 			iter->iomap.flags |= IOMAP_F_STALE;
 			status = 0;
@@ -656,13 +694,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	return 0;
 
 out_unlock:
-	folio_unlock(folio);
-	folio_put(folio);
+	__iomap_put_folio(iter, pos, 0, folio);
 	iomap_write_failed(iter->inode, pos, len);
 
-out_no_page:
-	if (page_ops && page_ops->page_done)
-		page_ops->page_done(iter->inode, pos, 0, NULL);
 	return status;
 }
 
@@ -712,7 +746,6 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
-	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	loff_t old_size = iter->inode->i_size;
 	size_t ret;
@@ -735,14 +768,10 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		i_size_write(iter->inode, pos + ret);
 		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
 	}
-	folio_unlock(folio);
+	__iomap_put_folio(iter, pos, ret, folio);
 
 	if (old_size < pos)
 		pagecache_isize_extended(iter->inode, old_size, pos);
-	if (page_ops && page_ops->page_done)
-		page_ops->page_done(iter->inode, pos, ret, &folio->page);
-	folio_put(folio);
-
 	if (ret < len)
 		iomap_write_failed(iter->inode, pos + ret, len - ret);
 	return ret;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index fc1946f80a4a..69dbe7814128 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -83,7 +83,7 @@ xfs_iomap_valid(
 	return true;
 }
 
-static const struct iomap_page_ops xfs_iomap_page_ops = {
+static const struct iomap_folio_ops xfs_iomap_folio_ops = {
 	.iomap_valid		= xfs_iomap_valid,
 };
 
@@ -133,7 +133,7 @@ xfs_bmbt_to_iomap(
 		iomap->flags |= IOMAP_F_DIRTY;
 
 	iomap->validity_cookie = sequence_cookie;
-	iomap->page_ops = &xfs_iomap_page_ops;
+	iomap->folio_ops = &xfs_iomap_folio_ops;
 	return 0;
 }