-rw-r--r--  fs/bcachefs/btree_cache.c           |  40
-rw-r--r--  fs/bcachefs/btree_cache.h           |   5
-rw-r--r--  fs/bcachefs/btree_iter.c            | 155
-rw-r--r--  fs/bcachefs/btree_iter.h            |  10
-rw-r--r--  fs/bcachefs/btree_key_cache.c       |  13
-rw-r--r--  fs/bcachefs/btree_key_cache.h       |   2
-rw-r--r--  fs/bcachefs/btree_locking.h         |  30
-rw-r--r--  fs/bcachefs/btree_update_interior.c |   2
-rw-r--r--  fs/bcachefs/btree_update_interior.h |   2
-rw-r--r--  fs/bcachefs/btree_update_leaf.c     |   5
-rw-r--r--  fs/bcachefs/recovery.c              |   4
11 files changed, 136 insertions(+), 132 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 5c12897964b6..354c75f59730 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -639,6 +639,7 @@ err:
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
 static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
+				struct btree_trans *trans,
 				struct btree_iter *iter,
 				const struct bkey_i *k,
 				enum btree_id btree_id,
@@ -655,8 +656,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
	 * Parent node must be locked, else we could read in a btree node that's
	 * been freed:
	 */
-	if (iter && !bch2_btree_node_relock(iter, level + 1)) {
-		btree_trans_restart(iter->trans);
+	if (trans && !bch2_btree_node_relock(trans, iter, level + 1)) {
+		btree_trans_restart(trans);
		return ERR_PTR(-EINTR);
	}
 
@@ -687,23 +688,23 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
	six_unlock_intent(&b->c.lock);
 
	/* Unlock before doing IO: */
-	if (iter && sync)
-		bch2_trans_unlock(iter->trans);
+	if (trans && sync)
+		bch2_trans_unlock(trans);
 
	bch2_btree_node_read(c, b, sync);
 
	if (!sync)
		return NULL;
 
-	if (iter &&
-	    (!bch2_trans_relock(iter->trans) ||
-	     !bch2_btree_iter_relock_intent(iter))) {
-		BUG_ON(!iter->trans->restarted);
+	if (trans &&
+	    (!bch2_trans_relock(trans) ||
+	     !bch2_btree_iter_relock_intent(trans, iter))) {
+		BUG_ON(!trans->restarted);
		return ERR_PTR(-EINTR);
	}
 
	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-		btree_trans_restart(iter->trans);
+		btree_trans_restart(trans);
		return ERR_PTR(-EINTR);
	}
 
@@ -786,7 +787,7 @@ retry:
		 * else we could read in a btree node from disk that's been
		 * freed:
		 */
-		b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+		b = bch2_btree_node_fill(c, trans, iter, k, iter->btree_id,
					 level, lock_type, true);
 
		/* We raced and found the btree node in the cache */
@@ -828,7 +829,7 @@ lock_node:
		if (btree_node_read_locked(iter, level + 1))
			btree_node_unlock(iter, level + 1);
 
-		if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+		if (!btree_node_lock(trans, iter, b, k->k.p, level, lock_type,
				     lock_node_check_fn, (void *) k, trace_ip)) {
			if (!trans->restarted)
				goto retry;
@@ -839,7 +840,7 @@ lock_node:
			     b->c.level != level ||
			     race_fault())) {
			six_unlock_type(&b->c.lock, lock_type);
-			if (bch2_btree_node_relock(iter, level + 1))
+			if (bch2_btree_node_relock(trans, iter, level + 1))
				goto retry;
 
			trace_trans_restart_btree_node_reused(trans->ip,
@@ -863,9 +864,9 @@ lock_node:
	 * should_be_locked is not set on this iterator yet, so we need
	 * to relock it specifically:
	 */
-	if (iter &&
+	if (trans &&
	    (!bch2_trans_relock(trans) ||
-	     !bch2_btree_iter_relock_intent(iter))) {
+	     !bch2_btree_iter_relock_intent(trans, iter))) {
		BUG_ON(!trans->restarted);
		return ERR_PTR(-EINTR);
	}
@@ -924,7 +925,7 @@ retry:
		if (nofill)
			goto out;
 
-		b = bch2_btree_node_fill(c, NULL, k, btree_id,
+		b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
					 level, SIX_LOCK_read, true);
 
		/* We raced and found the btree node in the cache */
@@ -982,21 +983,24 @@ out:
	return b;
 }
 
-int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_node_prefetch(struct bch_fs *c,
+			     struct btree_trans *trans,
+			     struct btree_iter *iter,
			     const struct bkey_i *k,
			     enum btree_id btree_id, unsigned level)
 {
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;
 
-	BUG_ON(iter && !btree_node_locked(iter, level + 1));
+	BUG_ON(trans && !btree_node_locked(iter, level + 1));
	BUG_ON(level >= BTREE_MAX_DEPTH);
 
	b = btree_cache_find(bc, k);
	if (b)
		return 0;
 
-	b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+	b = bch2_btree_node_fill(c, trans, iter, k, btree_id,
+				 level, SIX_LOCK_read, false);
	return PTR_ERR_OR_ZERO(b);
 }
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index eb57dc3c70b7..3b671cf0056d 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -27,8 +27,9 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
 struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
					 enum btree_id, unsigned, bool);
 
-int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
-			     const struct bkey_i *, enum btree_id, unsigned);
+int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *,
+			     struct btree_iter *, const struct bkey_i *,
+			     enum btree_id, unsigned);
 
 void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
 
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 70995d61dd49..16b9f6a986f4 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -107,17 +107,14 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
	bch2_btree_node_unlock_write_inlined(trans, iter, b);
 }
 
-void __bch2_btree_node_lock_write(struct btree_trans *trans,
-				  struct btree_iter *iter, struct btree *b)
+void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 {
-	struct btree_iter *linked;
+	struct btree_iter *iter;
	unsigned readers = 0;
 
-	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
-
-	trans_for_each_iter(trans, linked)
-		if (linked->l[b->c.level].b == b &&
-		    btree_node_read_locked(linked, b->c.level))
+	trans_for_each_iter(trans, iter)
+		if (iter->l[b->c.level].b == b &&
+		    btree_node_read_locked(iter, b->c.level))
			readers++;
 
	/*
@@ -141,7 +138,8 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans,
		this_cpu_add(*b->c.lock.readers, readers);
 }
 
-bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+			      struct btree_iter *iter, unsigned level)
 {
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);
@@ -154,7 +152,7 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
 
	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
-	     btree_node_lock_increment(iter->trans, b, level, want))) {
+	     btree_node_lock_increment(trans, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	} else {
@@ -162,7 +160,8 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
	}
 }
 
-static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
+static bool bch2_btree_node_upgrade(struct btree_trans *trans,
+				    struct btree_iter *iter, unsigned level)
 {
	struct btree *b = iter->l[level].b;
 
@@ -183,7 +182,7 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
		goto success;
 
	if (btree_node_lock_seq_matches(iter, b, level) &&
-	    btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}
@@ -206,8 +205,8 @@ static inline bool btree_iter_get_locks(struct btree_trans *trans,
			break;
 
		if (!(upgrade
-		      ? bch2_btree_node_upgrade(iter, l)
-		      : bch2_btree_node_relock(iter, l))) {
+		      ? bch2_btree_node_upgrade(trans, iter, l)
+		      : bch2_btree_node_relock(trans, iter, l))) {
			(upgrade
			 ? trace_node_upgrade_fail
			 : trace_node_relock_fail)(trans->ip, trace_ip,
@@ -255,13 +254,13 @@ static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
 }
 
 /* Slowpath: */
-bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
-			    unsigned level, struct btree_iter *iter,
+bool __bch2_btree_node_lock(struct btree_trans *trans,
+			    struct btree_iter *iter,
+			    struct btree *b, struct bpos pos, unsigned level,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
 {
-	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked, *deadlock_iter = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;
@@ -367,16 +366,10 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 
 /* Btree iterator locking: */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
-static void bch2_btree_iter_verify_locks(struct btree_trans *trans,
-					 struct btree_iter *iter)
+static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
	unsigned l;
 
-	if (!(trans->iters_linked & (1ULL << iter->idx))) {
-		BUG_ON(iter->nodes_locked);
-		return;
-	}
-
	for (l = 0; btree_iter_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
@@ -392,25 +385,24 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
	struct btree_iter *iter;
 
	trans_for_each_iter(trans, iter)
-		bch2_btree_iter_verify_locks(trans, iter);
+		bch2_btree_iter_verify_locks(iter);
 }
 #else
-static inline void bch2_btree_iter_verify_locks(struct btree_trans *trans,
-						struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif
 
 /*
  * Only for btree_cache.c - only relocks intent locks
  */
-bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
+bool bch2_btree_iter_relock_intent(struct btree_trans *trans,
+				   struct btree_iter *iter)
 {
-	struct btree_trans *trans = iter->trans;
	unsigned l;
 
	for (l = iter->level;
	     l < iter->locks_want && btree_iter_node(iter, l);
	     l++) {
-		if (!bch2_btree_node_relock(iter, l)) {
+		if (!bch2_btree_node_relock(trans, iter, l)) {
			trace_node_relock_fail(trans->ip, _RET_IP_,
					btree_iter_type(iter) == BTREE_ITER_CACHED,
					iter->btree_id, &iter->real_pos,
@@ -441,10 +433,10 @@ static bool bch2_btree_iter_relock(struct btree_trans *trans,
	return ret;
 }
 
-bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
+bool __bch2_btree_iter_upgrade(struct btree_trans *trans,
+			       struct btree_iter *iter,
			       unsigned new_locks_want)
 {
-	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked;
 
	EBUG_ON(iter->locks_want >= new_locks_want);
@@ -509,7 +501,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
		}
	}
 
-	bch2_btree_trans_verify_locks(iter->trans);
+	bch2_btree_iter_verify_locks(iter);
 }
 
 void bch2_trans_downgrade(struct btree_trans *trans)
@@ -558,12 +550,13 @@ void bch2_trans_unlock(struct btree_trans *trans)
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
-static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
+static void bch2_btree_iter_verify_cached(struct btree_trans *trans,
+					  struct btree_iter *iter)
 {
	struct bkey_cached *ck;
	bool locked = btree_node_locked(iter, 0);
 
-	if (!bch2_btree_node_relock(iter, 0))
+	if (!bch2_btree_node_relock(trans, iter, 0))
		return;
 
	ck = (void *) iter->l[0].b;
@@ -574,8 +567,8 @@ static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
		btree_node_unlock(iter, 0);
 }
 
-static void bch2_btree_iter_verify_level(struct btree_iter *iter,
-					 unsigned level)
+static void bch2_btree_iter_verify_level(struct btree_trans *trans,
+					 struct btree_iter *iter, unsigned level)
 {
	struct btree_iter_level *l;
	struct btree_node_iter tmp;
@@ -593,7 +586,7 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
 
	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
-			bch2_btree_iter_verify_cached(iter);
+			bch2_btree_iter_verify_cached(trans, iter);
		return;
	}
 
@@ -602,7 +595,7 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
	if (!btree_iter_node(iter, level))
		return;
 
-	if (!bch2_btree_node_relock(iter, level))
+	if (!bch2_btree_node_relock(trans, iter, level))
		return;
 
	BUG_ON(!btree_iter_pos_in_node(iter, l->b));
@@ -692,10 +685,10 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
			break;
		}
 
-		bch2_btree_iter_verify_level(iter, i);
+		bch2_btree_iter_verify_level(trans, iter, i);
	}
 
-	bch2_btree_iter_verify_locks(trans, iter);
+	bch2_btree_iter_verify_locks(iter);
 }
 
 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
@@ -719,12 +712,13 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
		return;
 
	trans_for_each_iter_with_node(trans, b, iter)
-		bch2_btree_iter_verify_level(iter, b->c.level);
+		bch2_btree_iter_verify_level(trans, iter, b->c.level);
 }
 
 #else
 
-static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
+static inline void bch2_btree_iter_verify_level(struct btree_trans *trans,
+						struct btree_iter *iter, unsigned l) {}
 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
 
@@ -771,7 +765,7 @@ void bch2_btree_iter_fix_key_modified(struct btree_trans *trans,
 
	trans_for_each_iter_with_node(trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
-		bch2_btree_iter_verify_level(linked, b->c.level);
+		bch2_btree_iter_verify_level(trans, linked, b->c.level);
	}
 }
 
@@ -896,7 +890,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans,
			__bch2_btree_node_iter_fix(linked, b,
					&linked->l[b->c.level].iter, t,
					where, clobber_u64s, new_u64s);
 
-		bch2_btree_iter_verify_level(linked, b->c.level);
+		bch2_btree_iter_verify_level(trans, linked, b->c.level);
	}
 }
 
@@ -983,7 +977,8 @@ static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
 /*
  * Verify that iterator for parent node points to child node:
  */
-static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
+static void btree_iter_verify_new_node(struct btree_trans *trans,
+				       struct btree_iter *iter, struct btree *b)
 {
	struct btree_iter_level *l;
	unsigned plevel;
@@ -999,7 +994,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 
	parent_locked = btree_node_locked(iter, plevel);
 
-	if (!bch2_btree_node_relock(iter, plevel))
+	if (!bch2_btree_node_relock(trans, iter, plevel))
		return;
 
	l = &iter->l[plevel];
@@ -1013,7 +1008,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
		char buf4[100];
		struct bkey uk = bkey_unpack_key(b, k);
 
-		bch2_dump_btree_node(iter->trans->c, l->b);
+		bch2_dump_btree_node(trans->c, l->b);
		bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
@@ -1030,8 +1025,8 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
		btree_node_unlock(iter, b->c.level + 1);
 }
 
-static inline void __btree_iter_init(struct btree_iter *iter,
-				     unsigned level)
+static inline void __btree_iter_level_init(struct btree_iter *iter,
+					   unsigned level)
 {
	struct btree_iter_level *l = &iter->l[level];
 
@@ -1047,19 +1042,20 @@ static inline void __btree_iter_init(struct btree_iter *iter,
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
-static inline void btree_iter_node_set(struct btree_iter *iter,
-				       struct btree *b)
+static inline void btree_iter_level_init(struct btree_trans *trans,
+					 struct btree_iter *iter,
+					 struct btree *b)
 {
	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
 
-	btree_iter_verify_new_node(iter, b);
+	btree_iter_verify_new_node(trans, iter, b);
 
	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);
 
	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
-	__btree_iter_init(iter, b->c.level);
+	__btree_iter_level_init(iter, b->c.level);
 }
 
 /*
@@ -1088,7 +1084,7 @@ void bch2_btree_iter_node_replace(struct btree_trans *trans,
			mark_btree_node_locked(linked, b->c.level,
					       (enum six_lock_type) t);
		}
 
-		btree_iter_node_set(linked, b);
+		btree_iter_level_init(trans, linked, b);
	}
 }
 
@@ -1115,7 +1111,7 @@ void bch2_btree_iter_reinit_node(struct btree_trans *trans,
	struct btree_iter *linked;
 
	trans_for_each_iter_with_node(trans, b, linked)
-		__btree_iter_init(linked, b->c.level);
+		__btree_iter_level_init(linked, b->c.level);
 }
 
 static int lock_root_check_fn(struct six_lock *lock, void *p)
@@ -1156,8 +1152,8 @@ static inline int btree_iter_lock_root(struct btree_trans *trans,
		}
 
		lock_type = __btree_lock_want(iter, iter->level);
-		if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
-					      iter, lock_type,
+		if (unlikely(!btree_node_lock(trans, iter, b, SPOS_MAX,
+					      iter->level, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip))) {
			if (trans->restarted)
@@ -1175,7 +1171,7 @@ static inline int btree_iter_lock_root(struct btree_trans *trans,
				iter->l[i].b = NULL;
 
			mark_btree_node_locked(iter, iter->level, lock_type);
-			btree_iter_node_set(iter, b);
+			btree_iter_level_init(trans, iter, b);
			return 0;
		}
 
@@ -1200,7 +1196,7 @@ static int btree_iter_prefetch(struct btree_trans *trans, struct btree_iter *iter)
 
	bch2_bkey_buf_init(&tmp);
 
	while (nr && !ret) {
-		if (!bch2_btree_node_relock(iter, iter->level))
+		if (!bch2_btree_node_relock(trans, iter, iter->level))
			break;
 
		bch2_btree_node_iter_advance(&node_iter, l->b);
@@ -1209,8 +1205,8 @@ static int btree_iter_prefetch(struct btree_trans *trans, struct btree_iter *iter)
			break;
 
		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
-		ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
-					       iter->level - 1);
+		ret = bch2_btree_node_prefetch(c, trans, iter, tmp.k,
					       iter->btree_id, iter->level - 1);
	}
 
	if (!was_locked)
@@ -1220,7 +1216,8 @@ static int btree_iter_prefetch(struct btree_trans *trans, struct btree_iter *iter)
	return ret;
 }
 
-static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
+					    struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
 {
	struct btree_iter_level *l = &iter->l[plevel];
@@ -1228,7 +1225,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;
 
-	if (!bch2_btree_node_relock(iter, plevel))
+	if (!bch2_btree_node_relock(trans, iter, plevel))
		return;
 
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
@@ -1265,11 +1262,11 @@ static __always_inline int btree_iter_down(struct btree_trans *trans,
		goto err;
 
	mark_btree_node_locked(iter, level, lock_type);
-	btree_iter_node_set(iter, b);
+	btree_iter_level_init(trans, iter, b);
 
	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
-		btree_node_mem_ptr_set(iter, level + 1, b);
+		btree_node_mem_ptr_set(trans, iter, level + 1, b);
 
	if (iter->flags & BTREE_ITER_PREFETCH)
		ret = btree_iter_prefetch(trans, iter);
@@ -1278,7 +1275,7 @@ static __always_inline int btree_iter_down(struct btree_trans *trans,
		btree_node_unlock(iter, level + 1);
	iter->level = level;
 
-	bch2_btree_iter_verify_locks(trans, iter);
+	bch2_btree_iter_verify_locks(iter);
 err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
@@ -1310,9 +1307,9 @@ retry_all:
		if (prev) {
			if (iter->btree_id == prev->btree_id &&
			    iter->locks_want < prev->locks_want)
-				__bch2_btree_iter_upgrade(iter, prev->locks_want);
+				__bch2_btree_iter_upgrade(trans, iter, prev->locks_want);
			else if (!iter->locks_want && prev->locks_want)
-				__bch2_btree_iter_upgrade(iter, 1);
+				__bch2_btree_iter_upgrade(trans, iter, 1);
		}
 
		prev = iter;
@@ -1377,11 +1374,12 @@ static int bch2_btree_iter_traverse_all(struct btree_trans *trans)
	return __btree_iter_traverse_all(trans, 0, _RET_IP_);
 }
 
-static inline bool btree_iter_good_node(struct btree_iter *iter,
+static inline bool btree_iter_good_node(struct btree_trans *trans,
+					struct btree_iter *iter,
					unsigned l, int check_pos)
 {
	if (!is_btree_node(iter, l) ||
-	    !bch2_btree_node_relock(iter, l))
+	    !bch2_btree_node_relock(trans, iter, l))
		return false;
 
	if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
@@ -1391,13 +1389,14 @@ static inline bool btree_iter_good_node(struct btree_iter *iter,
	return true;
 }
 
-static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
+static inline unsigned btree_iter_up_until_good_node(struct btree_trans *trans,
+						     struct btree_iter *iter,
						     int check_pos)
 {
	unsigned l = iter->level;
 
	while (btree_iter_node(iter, l) &&
-	       !btree_iter_good_node(iter, l, check_pos)) {
+	       !btree_iter_good_node(trans, iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
@@ -1432,20 +1431,20 @@ static int btree_iter_traverse_one(struct btree_trans *trans,
	}
 
	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
-		ret = bch2_btree_iter_traverse_cached(iter);
+		ret = bch2_btree_iter_traverse_cached(trans, iter);
		goto out;
	}
 
	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		goto out;
 
-	iter->level = btree_iter_up_until_good_node(iter, 0);
+	iter->level = btree_iter_up_until_good_node(trans, iter, 0);
 
	/* If we need intent locks, take them too: */
	for (l = iter->level + 1;
	     l < iter->locks_want && btree_iter_node(iter, l);
	     l++)
-		if (!bch2_btree_node_relock(iter, l))
+		if (!bch2_btree_node_relock(trans, iter, l))
			while (iter->level <= l) {
				btree_node_unlock(iter, iter->level);
				iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
@@ -1657,7 +1656,7 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
		return;
	}
 
-	l = btree_iter_up_until_good_node(iter, cmp);
+	l = btree_iter_up_until_good_node(trans, iter, cmp);
 
	if (btree_iter_node(iter, l)) {
		/*
@@ -1668,7 +1667,7 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
-			__btree_iter_init(iter, l);
+			__btree_iter_level_init(iter, l);
 
		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index ea129387ebb7..a175eb5f26fd 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -141,7 +141,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_iter *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);
 
-bool bch2_btree_iter_relock_intent(struct btree_iter *);
+bool bch2_btree_iter_relock_intent(struct btree_trans *, struct btree_iter *);
 
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
@@ -154,15 +154,17 @@ static inline int btree_trans_restart(struct btree_trans *trans)
	return -EINTR;
 }
 
-bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
+bool __bch2_btree_iter_upgrade(struct btree_trans *,
+			       struct btree_iter *, unsigned);
 
-static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
+static inline bool bch2_btree_iter_upgrade(struct btree_trans *trans,
+					   struct btree_iter *iter,
					   unsigned new_locks_want)
 {
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
 
	return iter->locks_want < new_locks_want
-		? __bch2_btree_iter_upgrade(iter, new_locks_want)
+		? __bch2_btree_iter_upgrade(trans, iter, new_locks_want)
		: iter->uptodate <= BTREE_ITER_NEED_PEEK;
 }
 
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index c7d223f91bf6..ba03581c5290 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -213,7 +213,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
	if (ret)
		goto err;
 
-	if (!bch2_btree_node_relock(ck_iter, 0)) {
+	if (!bch2_btree_node_relock(trans, ck_iter, 0)) {
		trace_transaction_restart_ip(trans->ip, _THIS_IP_);
		ret = btree_trans_restart(trans);
		goto err;
@@ -266,9 +266,8 @@ static int bkey_cached_check_fn(struct six_lock *lock, void *p)
 }
 
 __flatten
-int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
+int bch2_btree_iter_traverse_cached(struct btree_trans *trans, struct btree_iter *iter)
 {
-	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;
@@ -277,7 +276,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 
	iter->l[1].b = NULL;
 
-	if (bch2_btree_node_relock(iter, 0)) {
+	if (bch2_btree_node_relock(trans, iter, 0)) {
		ck = (void *) iter->l[0].b;
		goto fill;
	}
@@ -302,7 +301,7 @@ retry:
	} else {
		enum six_lock_type lock_want = __btree_lock_want(iter, 0);
 
-		if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
+		if (!btree_node_lock(trans, iter, (void *) ck, iter->pos, 0, lock_want,
				     bkey_cached_check_fn, iter, _THIS_IP_)) {
			if (!trans->restarted)
				goto retry;
@@ -326,7 +325,7 @@ retry:
 fill:
	if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
		if (!iter->locks_want &&
-		    !!__bch2_btree_iter_upgrade(iter, 1)) {
+		    !!__bch2_btree_iter_upgrade(trans, iter, 1)) {
			trace_transaction_restart_ip(trans->ip, _THIS_IP_);
			BUG_ON(!trans->restarted);
			ret = -EINTR;
@@ -344,7 +343,7 @@ fill:
	iter->uptodate = BTREE_ITER_NEED_PEEK;
 
	if ((iter->flags & BTREE_ITER_INTENT) &&
-	    !bch2_btree_iter_upgrade(iter, 1)) {
+	    !bch2_btree_iter_upgrade(trans, iter, 1)) {
		BUG_ON(!trans->restarted);
		ret = -EINTR;
	}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index 7e2b0a08f745..d890632e4425 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -26,7 +26,7 @@ int bch2_btree_key_cache_journal_flush(struct journal *,
 struct bkey_cached *
 bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);
 
-int bch2_btree_iter_traverse_cached(struct btree_iter *);
+int bch2_btree_iter_traverse_cached(struct btree_trans *, struct btree_iter *);
 
 bool bch2_btree_insert_key_cached(struct btree_trans *,
			struct btree_iter *, struct bkey_i *);
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 0acc731df8e9..b490e4808631 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -166,40 +166,38 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
	return false;
 }
 
-bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
-			    struct btree_iter *, enum six_lock_type,
-			    six_lock_should_sleep_fn, void *,
-			    unsigned long);
+bool __bch2_btree_node_lock(struct btree_trans *, struct btree_iter *,
+			    struct btree *, struct bpos, unsigned,
+			    enum six_lock_type, six_lock_should_sleep_fn,
+			    void *, unsigned long);
 
-static inline bool btree_node_lock(struct btree *b,
-				   struct bpos pos, unsigned level,
+static inline bool btree_node_lock(struct btree_trans *trans,
				   struct btree_iter *iter,
+				   struct btree *b, struct bpos pos, unsigned level,
				   enum six_lock_type type,
				   six_lock_should_sleep_fn should_sleep_fn, void *p,
				   unsigned long ip)
 {
-	struct btree_trans *trans = iter->trans;
-
	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
 
	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
-		__bch2_btree_node_lock(b, pos, level, iter, type,
+		__bch2_btree_node_lock(trans, iter, b, pos, level, type,
				       should_sleep_fn, p, ip);
 }
 
-bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_iter *, unsigned);
 
-static inline bool bch2_btree_node_relock(struct btree_iter *iter,
-					  unsigned level)
+static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_iter *iter, unsigned level)
 {
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));
 
	return likely(btree_node_locked(iter, level)) ||
-		__bch2_btree_node_relock(iter, level);
+		__bch2_btree_node_relock(trans, iter, level);
 }
 
 /*
@@ -224,8 +222,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_iter *iter,
 void bch2_btree_node_unlock_write(struct btree_trans *, struct btree_iter *,
				  struct btree *);
 
-void __bch2_btree_node_lock_write(struct btree_trans *,
-				  struct btree_iter *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
 
 static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
					      struct btree_iter *iter,
@@ -233,9 +230,10 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 {
	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
+	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
 
	if (unlikely(!six_trylock_write(&b->c.lock)))
-		__bch2_btree_node_lock_write(trans, iter, b);
+		__bch2_btree_node_lock_write(trans, b);
 }
 
 #endif /* _BCACHEFS_BTREE_LOCKING_H */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 4acd49900611..c1d4227738cf 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -937,7 +937,7 @@ retry:
	 * XXX: figure out how far we might need to split,
	 * instead of locking/reserving all the way to the root:
	 */
-	if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
+	if (!bch2_btree_iter_upgrade(trans, iter, U8_MAX)) {
		trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_,
						 iter->btree_id,
						 &iter->real_pos);
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 07046dab614b..13b3a1bf0f4f 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -132,7 +132,7 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
		return 0;
 
-	if (!bch2_btree_node_relock(iter, level))
+	if (!bch2_btree_node_relock(trans, iter, level))
		return 0;
 
	b = iter->l[level].b;
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 92b6b5cec2ae..e93db33fcfb7 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -561,7 +561,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
	 */
	trans_for_each_iter(trans, iter)
		if (iter->nodes_locked != iter->nodes_intent_locked &&
-		    !bch2_btree_iter_upgrade(iter, 1)) {
+		    !bch2_btree_iter_upgrade(trans, iter, 1)) {
			trace_trans_restart_upgrade(trans->ip, trace_ip,
						    iter->btree_id,
						    &iter->real_pos);
@@ -783,7 +783,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
	trans_for_each_update(trans, i) {
		BUG_ON(!i->iter->should_be_locked);
 
-		if (unlikely(!bch2_btree_iter_upgrade(i->iter, i->level + 1))) {
+		if (unlikely(!bch2_btree_iter_upgrade(trans, i->iter,
+						      i->level + 1))) {
			trace_trans_restart_upgrade(trans->ip, _RET_IP_,
						    i->iter->btree_id,
						    &i->iter->pos);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index b02af94f4037..71b0f14f41f3 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -326,8 +326,8 @@ static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);
 
-		bch2_btree_node_prefetch(c, NULL, tmp.k,
-					 b->c.btree_id, b->c.level - 1);
+		bch2_btree_node_prefetch(c, NULL, NULL, tmp.k,
					 b->c.btree_id, b->c.level - 1);
 
		bch2_btree_and_journal_iter_advance(&iter);
		i++;
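The change is mechanical throughout: helpers that previously recovered the transaction through the iterator's back-pointer (iter->trans) now receive the struct btree_trans * as an explicit leading argument. A minimal sketch of the before/after calling convention, using the bch2_btree_node_relock() and btree_trans_restart() signatures from this patch; the relock_parent wrapper itself is a hypothetical example, not a function in this series:

	/* Before: the transaction is reached through the iterator. */
	static bool relock_parent_old(struct btree_iter *iter, unsigned level)
	{
		struct btree_trans *trans = iter->trans;	/* hidden dependency */

		if (!bch2_btree_node_relock(iter, level + 1)) {
			btree_trans_restart(trans);		/* flag -EINTR restart */
			return false;
		}
		return true;
	}

	/*
	 * After: the transaction is threaded through explicitly, matching
	 * the new bch2_btree_node_relock(trans, iter, level) signature; a
	 * step toward dropping the iter->trans back-pointer entirely.
	 */
	static bool relock_parent_new(struct btree_trans *trans,
				      struct btree_iter *iter, unsigned level)
	{
		if (!bch2_btree_node_relock(trans, iter, level + 1)) {
			btree_trans_restart(trans);
			return false;
		}
		return true;
	}

Callers that have no transaction, such as the recovery-time prefetch in btree_and_journal_iter_prefetch(), now pass NULL for both trans and iter, and bch2_btree_node_fill() checks trans rather than iter before attempting to relock or restart.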