path: root/fs/bcachefs/subvolume.c
author     Kent Overstreet <kent.overstreet@linux.dev>   2023-07-16 23:21:17 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-22 17:10:08 -0400
commit     ae2e13d7809d79ea4d7c0cd8ee060b7911012e37 (patch)
tree       e12ed129dcec1d25552431f3658d1f8469daa55c /fs/bcachefs/subvolume.c
parent     ef1634f0f19d676483888c2a05d7e406b366d2db (diff)
bcachefs: bch2_run_explicit_recovery_pass()
This introduces bch2_run_explicit_recovery_pass() and uses it for when fsck
detects that we need to re-run dead snapshots cleanup, and makes dead snapshot
cleanup more like a normal recovery pass.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
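The hunks below only request the new pass: bch2_mark_snapshot() runs from a
transaction commit hook, so instead of calling into recovery it sets the pass
bit in c->recovery_passes_explicit directly. The helper named in the subject
line lives outside this file (in the recovery code); the following is a
minimal sketch of the pattern it implements, assuming it records the pass in
the explicit-passes bitmask and rewinds recovery if that pass has already
run. The curr_recovery_pass field and the BCH_ERR_restart_recovery error are
assumptions drawn from bcachefs conventions, not shown in this diff.

/*
 * Sketch only: the real bch2_run_explicit_recovery_pass() may differ
 * in detail from this illustration.
 */
static int run_explicit_recovery_pass_sketch(struct bch_fs *c,
					     enum bch_recovery_pass pass)
{
	/* Remember that this pass must run even if not normally scheduled. */
	c->recovery_passes_explicit |= BIT_ULL(pass);

	/*
	 * If recovery has already moved past this pass, rewind and ask the
	 * caller to restart from it (hypothetical error code).
	 */
	if (c->curr_recovery_pass > pass) {
		c->curr_recovery_pass = pass;
		return -BCH_ERR_restart_recovery;
	}

	return 0;
}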
Diffstat (limited to 'fs/bcachefs/subvolume.c')
-rw-r--r--   fs/bcachefs/subvolume.c   10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 287492c29bcc..7de6fe0cdd43 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -331,8 +331,10 @@ int bch2_mark_snapshot(struct btree_trans *trans,
 		    parent - id - 1 < IS_ANCESTOR_BITMAP)
 			__set_bit(parent - id - 1, t->is_ancestor);
 
-		if (BCH_SNAPSHOT_DELETED(s.v))
+		if (BCH_SNAPSHOT_DELETED(s.v)) {
 			set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
+			c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots);
+		}
 	} else {
 		memset(t, 0, sizeof(*t));
 	}
@@ -1302,9 +1304,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
 	u32 i, id;
 	int ret = 0;
 
-	if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
-		return 0;
-
 	if (!test_bit(BCH_FS_STARTED, &c->flags)) {
 		ret = bch2_fs_read_write_early(c);
 		if (ret) {
@@ -1399,7 +1398,8 @@ static void bch2_delete_dead_snapshots_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
 
-	bch2_delete_dead_snapshots(c);
+	if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
+		bch2_delete_dead_snapshots(c);
 	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
 }