path: root/fs/btrfs/inode.c
author	Naohiro Aota <naohiro.aota@wdc.com>	2022-06-21 15:41:01 +0900
committer	David Sterba <dsterba@suse.com>	2022-07-25 17:45:38 +0200
commit	71aa147b4d9d81fa65afa6016f50d7818b64a54f (patch)
tree	48080a79554a26b3aa92c316fde0a6fa79602bfc /fs/btrfs/inode.c
parent	99826e4cab979aed35993ac855b354f91f7474d9 (diff)
btrfs: fix error handling of fallback uncompress write
When cow_file_range() fails in the middle of the allocation loop, it
unlocks the pages but leaves the ordered extents intact. Thus, we need
to call btrfs_cleanup_ordered_extents() to finish the created ordered
extents.

Also, we need to call end_extent_writepage() if locked_page is
available, because btrfs_cleanup_ordered_extents() never processes the
region on the locked_page.

Furthermore, we need to set the mapping as error if locked_page is
unavailable before unlocking the pages, so that the errno is properly
propagated to user space.

CC: stable@vger.kernel.org # 5.18+
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
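As an illustrative-only summary of the error path described above (not the literal patch), the cleanup sequence can be sketched as below. The sketch folds both hunks that follow into one place; in the real patch the mapping_set_error() call is added to cow_file_range()'s out_unlock path rather than to submit_uncompressed_range(), and @inode, @locked_page, @start, @end and @ret come from the surrounding functions.

	/* Illustrative sketch only, not the literal patch. */
	if (ret < 0) {
		/* Finish and mark as failed the ordered extents already created. */
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);

		if (locked_page) {
			/*
			 * btrfs_cleanup_ordered_extents() never touches the range
			 * covered by @locked_page, so flag the error on the page and
			 * end its writeback explicitly before unlocking it.
			 */
			const u64 page_start = page_offset(locked_page);
			const u64 page_end = page_start + PAGE_SIZE - 1;

			btrfs_page_set_error(inode->root->fs_info, locked_page,
					     page_start, PAGE_SIZE);
			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			end_extent_writepage(locked_page, ret, page_start, page_end);
			unlock_page(locked_page);
		} else {
			/*
			 * Without a locked page to carry the error, record it on the
			 * mapping so a later fsync()/close() reports it to user space.
			 */
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		}
	}

Presumably the locked_page branch already propagates the error through the writeback completion path, which is why the patch only sets the mapping error when there is no locked page.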
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--	fs/btrfs/inode.c	17
1 file changed, 15 insertions, 2 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 11ff5bb40153..90b1c1df0943 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -928,8 +928,18 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
 		goto out;
 	}
 	if (ret < 0) {
-		if (locked_page)
+		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
+		if (locked_page) {
+			const u64 page_start = page_offset(locked_page);
+			const u64 page_end = page_start + PAGE_SIZE - 1;
+
+			btrfs_page_set_error(inode->root->fs_info, locked_page,
+					     page_start, PAGE_SIZE);
+			set_page_writeback(locked_page);
+			end_page_writeback(locked_page);
+			end_extent_writepage(locked_page, ret, page_start, page_end);
 			unlock_page(locked_page);
+		}
 		goto out;
 	}
@@ -1378,9 +1388,12 @@ out_unlock:
 	 * However, in case of unlock == 0, we still need to unlock the pages
 	 * (except @locked_page) to ensure all the pages are unlocked.
 	 */
-	if (!unlock && orig_start < start)
+	if (!unlock && orig_start < start) {
+		if (!locked_page)
+			mapping_set_error(inode->vfs_inode.i_mapping, ret);
 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
 					     locked_page, 0, page_ops);
+	}
 
 	/*
 	 * For the range (2). If we reserved an extent for our delalloc range