summaryrefslogtreecommitdiff
path: root/fs/bcachefs/btree_update_interior.h
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-08-30 15:18:31 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:09:11 -0400
commit67e0dd8f0d8b4bf09098c4692abcb43a20089dff (patch)
tree8ba50f2d86b09cae23a39a02982abff3524e2f45 /fs/bcachefs/btree_update_interior.h
parent8f54337dc6825f323f7761c182d98efdd180ce70 (diff)
bcachefs: btree_path
This splits btree_iter into two components: btree_iter is now the externally visible componont, and it points to a btree_path which is now reference counted. This means we no longer have to clone iterators up front if they might be mutated - btree_path can be shared by multiple iterators, and cloned if an iterator would mutate a shared btree_path. This will help us use iterators more efficiently, as well as slimming down the main long lived state in btree_trans, and significantly cleans up the logic for iterator lifetimes. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_update_interior.h')
-rw-r--r--fs/bcachefs/btree_update_interior.h20
1 file changed, 10 insertions, 10 deletions
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 13b3a1bf0f4f..c06cfcc66db7 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -117,39 +117,39 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
struct btree *,
struct bkey_format);
-int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
+int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);
-int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
+int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
unsigned, unsigned, enum btree_node_sibling);
static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
- struct btree_iter *iter,
+ struct btree_path *path,
unsigned level, unsigned flags,
enum btree_node_sibling sib)
{
struct btree *b;
- if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
+ if (path->uptodate >= BTREE_ITER_NEED_TRAVERSE)
return 0;
- if (!bch2_btree_node_relock(trans, iter, level))
+ if (!bch2_btree_node_relock(trans, path, level))
return 0;
- b = iter->l[level].b;
+ b = path->l[level].b;
if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
return 0;
- return __bch2_foreground_maybe_merge(trans, iter, level, flags, sib);
+ return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
}
static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
- struct btree_iter *iter,
+ struct btree_path *path,
unsigned level,
unsigned flags)
{
- return bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
+ return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_prev_sib) ?:
- bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
+ bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_next_sib);
}