author | Kent Overstreet <kent.overstreet@gmail.com> | 2021-03-24 18:02:16 -0400
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:08:57 -0400
commit | e751c01a8ee1ca934cc0953e2e77ad4ea3e64d5e (patch)
tree | 9930602caa160b05f2e62925c86192ae1ab9bc31 /fs/bcachefs/bkey_methods.c
parent | 4cf91b0270dc16a6637db4c200c7fb745b941065 (diff)
bcachefs: Start using bpos.snapshot field
This patch starts treating the bpos.snapshot field like part of the key
in the btree code:
* bpos_successor() and bpos_predecessor() now include the snapshot field
* Keys in btrees that will be using snapshots (extents, inodes, dirents
and xattrs) now always have their snapshot field set to U32_MAX
The btree iterator code gets a new flag, BTREE_ITER_ALL_SNAPSHOTS, that
determines whether we're iterating over keys in all snapshots or not -
internally, this controls whether bkey_(successor|predecessor)
increment/decrement the snapshot field, or only the higher bits of the
key.
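As a rough illustration (a sketch of the carry logic only - the actual helpers
live in the btree iterator code and may differ in detail), the two behaviours
look like this:

	/* Single-snapshot iteration: only the higher bits (offset, then inode) carry. */
	static inline struct bpos bkey_successor_sketch(struct bpos p)
	{
		if (!++p.offset &&
		    !++p.inode)
			BUG();
		return p;
	}

	/* BTREE_ITER_ALL_SNAPSHOTS: the snapshot field joins the carry chain. */
	static inline struct bpos bpos_successor_sketch(struct bpos p)
	{
		if (!++p.snapshot &&
		    !++p.offset &&
		    !++p.inode)
			BUG();
		return p;
	}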
We add a new member to struct btree_iter, iter->snapshot: when
BTREE_ITER_ALL_SNAPSHOTS is not set, iter->pos.snapshot should always
equal iter->snapshot, which will be 0 for btrees that don't use
snapshots, and always U32_MAX for btrees that will use snapshots
(until we enable snapshot creation).
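A sketch of that invariant as applied when an iterator's search position is set
up (btree_iter_pos_sketch() is a hypothetical helper, not the real bcachefs
API; it only illustrates the rule above):

	static inline struct bpos btree_iter_pos_sketch(struct btree_iter *iter,
							struct bpos pos)
	{
		/* Without BTREE_ITER_ALL_SNAPSHOTS the snapshot field is pinned
		 * to iter->snapshot: 0 or U32_MAX depending on the btree. */
		if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
			pos.snapshot = iter->snapshot;
		return pos;
	}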
This patch also introduces a new metadata version number, and compat
code for reading from/writing to older versions - this isn't a forced
upgrade (yet).
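Distilled from the hunk below, the compat rule for an unpacked key is roughly
the following sketch (hypothetical helper; the packed-key path in the real
patch additionally has to stay within the format's BKEY_FIELD_SNAPSHOT width):

	static void bkey_compat_snapshot_sketch(struct bkey_i *u, unsigned version,
						unsigned level, enum btree_id btree_id,
						int write)
	{
		if (version < bcachefs_metadata_version_snapshot &&
		    (level || btree_type_has_snapshots(btree_id)))
			/* writing old-format metadata: strip the field back to 0;
			 * reading old-format metadata: treat keys as snapshot U32_MAX */
			u->k.p.snapshot = write ? 0 : U32_MAX;
	}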
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/bkey_methods.c')
-rw-r--r-- | fs/bcachefs/bkey_methods.c | 36
1 file changed, 33 insertions, 3 deletions
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 5e7eadeb3b57..6fe95b802e13 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -119,10 +119,17 @@ const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 		return "nonzero size field";
 	}
 
-	if (k.k->p.snapshot)
+	if (type != BKEY_TYPE_btree &&
+	    !btree_type_has_snapshots(type) &&
+	    k.k->p.snapshot)
 		return "nonzero snapshot";
 
 	if (type != BKEY_TYPE_btree &&
+	    btree_type_has_snapshots(type) &&
+	    k.k->p.snapshot != U32_MAX)
+		return "invalid snapshot field";
+
+	if (type != BKEY_TYPE_btree &&
 	    !bkey_cmp(k.k->p, POS_MAX))
 		return "POS_MAX key";
 
@@ -310,14 +317,15 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 	const struct bkey_ops *ops;
 	struct bkey uk;
 	struct bkey_s u;
+	unsigned nr_compat = 5;
 	int i;
 
 	/*
 	 * Do these operations in reverse order in the write path:
 	 */
 
-	for (i = 0; i < 4; i++)
-	switch (!write ? i : 3 - i) {
+	for (i = 0; i < nr_compat; i++)
+	switch (!write ? i : nr_compat - 1 - i) {
 	case 0:
 		if (big_endian != CPU_BIG_ENDIAN)
 			bch2_bkey_swab_key(f, k);
@@ -351,6 +359,28 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 		}
 		break;
 	case 3:
+		if (version < bcachefs_metadata_version_snapshot &&
+		    (level || btree_type_has_snapshots(btree_id))) {
+			struct bkey_i *u = packed_to_bkey(k);
+
+			if (u) {
+				u->k.p.snapshot = write
+					? 0 : U32_MAX;
+			} else {
+				u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
+				u64 max_packed = min_packed +
+					~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
+
+				uk = __bch2_bkey_unpack_key(f, k);
+				uk.p.snapshot = write
+					? min_packed : min_t(u64, U32_MAX, max_packed);
+
+				BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
+			}
+		}
+
+		break;
+	case 4:
 		if (!bkey_packed(k)) {
 			u = bkey_i_to_s(packed_to_bkey(k));
 		} else {