author		Kent Overstreet <kent.overstreet@gmail.com>	2020-11-09 13:01:52 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:46 -0400
commit		6a747c4683803abb01ce246ac2faf7f171cb3872 (patch)
tree		365dc5fa2987b2aa0efb61c1965c41fe4170c131 /fs/bcachefs/btree_io.h
parent		01819cfe37e864a0e7d6f208c2e5b4635c66f974 (diff)
bcachefs: Add accounting for dirty btree nodes/keys
This lets us improve journal reclaim, so that it now tries to make sure
no more than 3/4s of the btree node cache and btree key cache are
dirty - ensuring the shrinkers can free memory.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
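As a rough illustration of what this accounting enables, journal reclaim
can now compare the dirty counter against the size of the cache. The
helper below is a minimal sketch, assuming the btree cache keeps a total
node count in bc->used; the function name and that field are assumptions
for illustration, not the actual reclaim code added by this patch:

	/*
	 * Sketch only: the kind of 3/4-dirty check the new counter
	 * makes possible. btree_cache_mostly_dirty() and the use of
	 * bc->used as the denominator are hypothetical.
	 */
	static inline bool btree_cache_mostly_dirty(struct bch_fs *c)
	{
		struct btree_cache *bc = &c->btree_cache;

		return atomic_read(&bc->dirty) > bc->used * 3 / 4;
	}

When a check like this fires, reclaim would flush journal pins until
enough btree nodes have been written back and marked clean for the
shrinkers to make progress again.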
Diffstat (limited to 'fs/bcachefs/btree_io.h')
-rw-r--r--	fs/bcachefs/btree_io.h	17
1 file changed, 17 insertions, 0 deletions
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 626d0f071b70..1a4b11e99cc4 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -14,6 +14,23 @@ struct btree_write;
 struct btree;
 struct btree_iter;
 
+static inline bool btree_node_dirty(struct btree *b)
+{
+	return test_bit(BTREE_NODE_dirty, &b->flags);
+}
+
+static inline void set_btree_node_dirty(struct bch_fs *c, struct btree *b)
+{
+	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
+		atomic_inc(&c->btree_cache.dirty);
+}
+
+static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b)
+{
+	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
+		atomic_dec(&c->btree_cache.dirty);
+}
+
 struct btree_read_bio {
 	struct bch_fs *c;
 	u64 start_time;
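The helpers above are meant to be called in pairs across a node's dirty
lifetime. The callers in this sketch are hypothetical; only
set_btree_node_dirty()/clear_btree_node_dirty() come from the patch:

	/* Hypothetical caller: mark a node dirty after modifying it. */
	void example_insert_done(struct bch_fs *c, struct btree *b)
	{
		/* only the clean->dirty transition bumps the counter */
		set_btree_node_dirty(c, b);
	}

	/* Hypothetical caller: node written back, now clean on disk. */
	void example_write_done(struct bch_fs *c, struct btree *b)
	{
		clear_btree_node_dirty(c, b);
	}

Because both helpers use test_and_set_bit()/test_and_clear_bit(),
repeated calls are idempotent: btree_cache.dirty changes by exactly one
per dirty<->clean transition, so it always matches the number of nodes
with BTREE_NODE_dirty set.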