summaryrefslogtreecommitdiff
path: root/fs
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2019-10-28 19:35:13 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:30 -0400
commitb7ba66c8450a58649393b47bc8975926b1e80814 (patch)
tree995a557869e367e2d60bb5adeac4ffceab4dcca7 /fs
parentff929515cc52ed693ff2116be3af9f32122e9b54 (diff)
bcachefs: Inline more of bch2_trans_commit hot path
The main optimization here is that if we let bch2_replicas_delta_list_apply() fail, we can completely skip calling bch2_bkey_replicas_marked_locked(). And assorted other small optimizations.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs')
-rw-r--r--fs/bcachefs/btree_iter.c14
-rw-r--r--fs/bcachefs/btree_locking.h18
-rw-r--r--fs/bcachefs/btree_update_interior.h6
-rw-r--r--fs/bcachefs/btree_update_leaf.c104
-rw-r--r--fs/bcachefs/buckets.c48
-rw-r--r--fs/bcachefs/buckets.h6
6 files changed, 99 insertions, 97 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 25ad6b69b6bd..c264b927f992 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -64,21 +64,9 @@ static inline int btree_iter_pos_cmp(struct btree_iter *iter,
/* Btree node locking: */
-/*
- * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
- * succeed:
- */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
- struct btree_iter *linked;
-
- EBUG_ON(iter->l[b->c.level].b != b);
- EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
-
- trans_for_each_iter_with_node(iter->trans, b, linked)
- linked->l[b->c.level].lock_seq += 2;
-
- six_unlock_write(&b->c.lock);
+ bch2_btree_node_unlock_write_inlined(b, iter);
}
void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index aa5882cc4852..a164924ca656 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -202,6 +202,24 @@ static inline bool bch2_btree_node_relock(struct btree_iter *iter,
__bch2_btree_node_relock(iter, level);
}
+/*
+ * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
+ * succeed:
+ */
+static inline void
+bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
+{
+ struct btree_iter *linked;
+
+ EBUG_ON(iter->l[b->c.level].b != b);
+ EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+
+ trans_for_each_iter_with_node(iter->trans, b, linked)
+ linked->l[b->c.level].lock_seq += 2;
+
+ six_unlock_write(&b->c.lock);
+}
+
void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index f9e092bf69aa..85f1320fa7b1 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -284,17 +284,17 @@ static inline unsigned btree_write_set_buffer(struct btree *b)
static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
struct btree *b)
{
- struct bset *i = btree_bset_last(b);
+ struct bset_tree *t = bset_tree_last(b);
struct btree_node_entry *bne = max(write_block(b),
(void *) btree_bkey_last(b, bset_tree_last(b)));
ssize_t remaining_space =
__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
- if (unlikely(bset_written(b, i))) {
+ if (unlikely(bset_written(b, bset(b, t)))) {
if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
return bne;
} else {
- if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
+ if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
return bne;
}
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 38a27d3a3b40..85580e63b5ca 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -17,6 +17,7 @@
#include "replicas.h"
#include "trace.h"
+#include <linux/prefetch.h>
#include <linux/sort.h>
static inline bool same_leaf_as_prev(struct btree_trans *trans,
@@ -50,23 +51,6 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
bch2_btree_init_next(c, b, iter);
}
-static void btree_trans_lock_write(struct btree_trans *trans, bool lock)
-{
- struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
- unsigned iter;
-
- trans_for_each_update_sorted(trans, i, iter) {
- if (same_leaf_as_prev(trans, iter))
- continue;
-
- if (lock)
- bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
- else
- bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
- }
-}
-
static inline void btree_trans_sort_updates(struct btree_trans *trans)
{
struct btree_insert_entry *l, *r;
@@ -377,29 +361,6 @@ btree_key_can_insert(struct btree_trans *trans,
return BTREE_INSERT_OK;
}
-static int btree_trans_check_can_insert(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
-{
- struct btree_insert_entry *i;
- unsigned iter, u64s = 0;
- int ret;
-
- trans_for_each_update_sorted(trans, i, iter) {
- /* Multiple inserts might go to same leaf: */
- if (!same_leaf_as_prev(trans, iter))
- u64s = 0;
-
- u64s += i->k->k.u64s;
- ret = btree_key_can_insert(trans, i, &u64s);
- if (ret) {
- *stopped_at = i;
- return ret;
- }
- }
-
- return 0;
-}
-
static inline void do_btree_insert_one(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
@@ -450,6 +411,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
? BCH_BUCKET_MARK_BUCKET_INVALIDATE
: 0;
+ unsigned iter, u64s = 0;
+ bool marking = false;
int ret;
if (race_fault()) {
@@ -462,25 +425,28 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
* held, otherwise another thread could write the node changing the
* amount of space available:
*/
- ret = btree_trans_check_can_insert(trans, stopped_at);
- if (ret)
- return ret;
- trans_for_each_update(trans, i) {
- if (!btree_node_type_needs_gc(i->iter->btree_id))
- continue;
+ prefetch(&trans->c->journal.flags);
- if (!fs_usage) {
- percpu_down_read(&c->mark_lock);
- fs_usage = bch2_fs_usage_scratch_get(c);
- }
+ trans_for_each_update_sorted(trans, i, iter) {
+ /* Multiple inserts might go to same leaf: */
+ if (!same_leaf_as_prev(trans, iter))
+ u64s = 0;
- /* Must be called under mark_lock: */
- if (!bch2_bkey_replicas_marked_locked(c,
- bkey_i_to_s_c(i->k), true)) {
- ret = BTREE_INSERT_NEED_MARK_REPLICAS;
- goto err;
+ u64s += i->k->k.u64s;
+ ret = btree_key_can_insert(trans, i, &u64s);
+ if (ret) {
+ *stopped_at = i;
+ return ret;
}
+
+ if (btree_node_type_needs_gc(i->iter->btree_id))
+ marking = true;
+ }
+
+ if (marking) {
+ percpu_down_read(&c->mark_lock);
+ fs_usage = bch2_fs_usage_scratch_get(c);
}
/*
@@ -508,16 +474,20 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
i->k->k.version = MAX_VERSION;
}
+ /* Must be called under mark_lock: */
+ if (marking && trans->fs_usage_deltas &&
+ bch2_replicas_delta_list_apply(c, &fs_usage->u,
+ trans->fs_usage_deltas)) {
+ ret = BTREE_INSERT_NEED_MARK_REPLICAS;
+ goto err;
+ }
+
trans_for_each_update(trans, i)
if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
update_has_nontrans_triggers(i))
bch2_mark_update(trans, i, &fs_usage->u, mark_flags);
- if (fs_usage && trans->fs_usage_deltas)
- bch2_replicas_delta_list_apply(c, &fs_usage->u,
- trans->fs_usage_deltas);
-
- if (fs_usage)
+ if (marking)
bch2_trans_fs_usage_apply(trans, fs_usage);
if (unlikely(c->gc_pos.phase))
@@ -526,7 +496,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
trans_for_each_update(trans, i)
do_btree_insert_one(trans, i);
err:
- if (fs_usage) {
+ if (marking) {
bch2_fs_usage_scratch_put(c, fs_usage);
percpu_up_read(&c->mark_lock);
}
@@ -609,9 +579,17 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
*/
btree_trans_sort_updates(trans);
- btree_trans_lock_write(trans, true);
+ trans_for_each_update_sorted(trans, i, idx)
+ if (!same_leaf_as_prev(trans, idx))
+ bch2_btree_node_lock_for_insert(trans->c,
+ i->iter->l[0].b, i->iter);
+
ret = bch2_trans_commit_write_locked(trans, stopped_at);
- btree_trans_lock_write(trans, false);
+
+ trans_for_each_update_sorted(trans, i, idx)
+ if (!same_leaf_as_prev(trans, idx))
+ bch2_btree_node_unlock_write_inlined(i->iter->l[0].b,
+ i->iter);
/*
* Drop journal reservation after dropping write locks, since dropping
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 0c2ca7601fde..c90c2d1b7706 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -498,14 +498,18 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c)
}
}
-static inline void update_replicas(struct bch_fs *c,
- struct bch_fs_usage *fs_usage,
- struct bch_replicas_entry *r,
- s64 sectors)
+static inline int update_replicas(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
+ struct bch_replicas_entry *r,
+ s64 sectors)
{
int idx = bch2_replicas_entry_idx(c, r);
- BUG_ON(idx < 0);
+ if (idx < 0)
+ return -1;
+
+ if (!fs_usage)
+ return 0;
switch (r->data_type) {
case BCH_DATA_BTREE:
@@ -519,6 +523,7 @@ static inline void update_replicas(struct bch_fs *c,
break;
}
fs_usage->replicas[idx] += sectors;
+ return 0;
}
static inline void update_cached_sectors(struct bch_fs *c,
@@ -579,14 +584,29 @@ static inline void update_cached_sectors_list(struct btree_trans *trans,
update_replicas_list(trans, &r.e, sectors);
}
-void bch2_replicas_delta_list_apply(struct bch_fs *c,
- struct bch_fs_usage *fs_usage,
- struct replicas_delta_list *r)
+static inline struct replicas_delta *
+replicas_delta_next(struct replicas_delta *d)
+{
+ return (void *) d + replicas_entry_bytes(&d->r) + 8;
+}
+
+int bch2_replicas_delta_list_apply(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
+ struct replicas_delta_list *r)
{
struct replicas_delta *d = r->d;
struct replicas_delta *top = (void *) r->d + r->used;
unsigned i;
+ for (d = r->d; d != top; d = replicas_delta_next(d))
+ if (update_replicas(c, fs_usage, &d->r, d->delta)) {
+ top = d;
+ goto unwind;
+ }
+
+ if (!fs_usage)
+ return 0;
+
fs_usage->nr_inodes += r->nr_inodes;
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
@@ -594,13 +614,11 @@ void bch2_replicas_delta_list_apply(struct bch_fs *c,
fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
}
- while (d != top) {
- BUG_ON((void *) d > (void *) top);
-
- update_replicas(c, fs_usage, &d->r, d->delta);
-
- d = (void *) d + replicas_entry_bytes(&d->r) + 8;
- }
+ return 0;
+unwind:
+ for (d = r->d; d != top; d = replicas_delta_next(d))
+ update_replicas(c, fs_usage, &d->r, -d->delta);
+ return -1;
}
#define do_mark_fn(fn, c, pos, flags, ...) \
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 5f91a57abc70..336729f763e1 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -279,9 +279,9 @@ int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
struct bch_fs_usage *, unsigned);
-void bch2_replicas_delta_list_apply(struct bch_fs *,
- struct bch_fs_usage *,
- struct replicas_delta_list *);
+int bch2_replicas_delta_list_apply(struct bch_fs *,
+ struct bch_fs_usage *,
+ struct replicas_delta_list *);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
unsigned, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *,