author     Kent Overstreet <kent.overstreet@linux.dev>   2024-04-19 22:44:12 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2024-05-08 17:29:20 -0400
commit     103304021e54bfb5cab9ba04cd5ef0dc2bf33888 (patch)
tree       290f599143bec9f28307836245a152dd19db992b
parent     b25fd02ab4edb0193e60754626704adacbfbeefb (diff)
bcachefs: Move gc of bucket.oldest_gen to workqueue
This is a nice cleanup - and we've also been having problems with
kthread creation in the mount path.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
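For context, a minimal, self-contained sketch of the pattern this patch switches to: instead of a dedicated kthread that sleeps until kicked, the trigger path takes a "filesystem still writable" reference and queues a work item, and the worker drops that reference when it finishes. This is not bcachefs code — the demo_* names are hypothetical, and percpu_ref is used here as a stand-in for bcachefs's internal write refs (bch2_write_ref_tryget/put).

/*
 * Sketch of kicking a background job via workqueue + liveness reference.
 * Assumes c->wq was created with alloc_workqueue() and c->writes was set up
 * with percpu_ref_init() elsewhere; both are hypothetical fields.
 */
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

struct demo_fs {
	struct percpu_ref	writes;		/* killed when going read-only */
	struct workqueue_struct	*wq;
	struct work_struct	gc_gens_work;
};

static void demo_gc_gens(struct demo_fs *c)
{
	/* ... walk buckets and update oldest_gen here ... */
}

static void demo_gc_gens_work(struct work_struct *work)
{
	struct demo_fs *c = container_of(work, struct demo_fs, gc_gens_work);

	demo_gc_gens(c);
	percpu_ref_put(&c->writes);	/* pairs with tryget in _async() */
}

/*
 * Safe to call from any context: if the fs is going read-only the tryget
 * fails and nothing is queued; if the work is already pending, queue_work()
 * returns false and the reference taken for this kick is dropped again.
 */
static void demo_gc_gens_async(struct demo_fs *c)
{
	if (percpu_ref_tryget_live(&c->writes) &&
	    !queue_work(c->wq, &c->gc_gens_work))
		percpu_ref_put(&c->writes);
}

static void demo_fs_gc_init(struct demo_fs *c)
{
	INIT_WORK(&c->gc_gens_work, demo_gc_gens_work);
}

The design point is that at most one reference is held per queued run, and going read-only only has to wait for already-queued work to drain rather than stopping and joining a long-lived thread.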
-rw-r--r--   fs/bcachefs/alloc_background.c |  2
-rw-r--r--   fs/bcachefs/alloc_foreground.c |  2
-rw-r--r--   fs/bcachefs/bcachefs.h         |  5
-rw-r--r--   fs/bcachefs/btree_gc.c         | 82
-rw-r--r--   fs/bcachefs/btree_gc.h         | 12
-rw-r--r--   fs/bcachefs/super.c            | 10
-rw-r--r--   fs/bcachefs/sysfs.c            | 11
7 files changed, 22 insertions, 102 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index f07373b78174..6fa51ee16cc3 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -874,7 +874,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			bch2_do_invalidates(c);
 
 		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
-			bch2_do_gc_gens(c);
+			bch2_gc_gens_async(c);
 	}
 
 	if ((flags & BTREE_TRIGGER_gc) &&
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index fb8825c4e7ad..6cb878f5e5eb 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -541,7 +541,7 @@ again:
 		bch2_do_discards(c);
 
 	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
-		bch2_do_gc_gens(c);
+		bch2_gc_gens_async(c);
 
 	if (should_invalidate_buckets(ca, *usage))
 		bch2_do_invalidates(c);
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 7562446f2d2a..6e324c1fe924 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -718,6 +718,7 @@ struct btree_trans_buf {
 	x(discard_fast)			\
 	x(invalidate)			\
 	x(delete_dead_snapshots)	\
+	x(gc_gens)			\
 	x(snapshot_delete_pagecache)	\
 	x(sysfs)			\
 	x(btree_write_buffer)
@@ -960,8 +961,7 @@ struct bch_fs {
 	struct work_struct	discard_fast_work;
 
 	/* GARBAGE COLLECTION */
-	struct task_struct	*gc_thread;
-	atomic_t		kick_gc;
+	struct work_struct	gc_gens_work;
 	unsigned long		gc_count;
 
 	enum btree_id		gc_gens_btree;
@@ -1118,7 +1118,6 @@ struct bch_fs {
 	u64			counters_on_mount[BCH_COUNTER_NR];
 	u64 __percpu		*counters;
 
-	unsigned		btree_gc_periodic:1;
 	unsigned		copy_gc_enabled:1;
 	bool			promote_whole_extents;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 7549e806a6f4..919fa1b9fc2e 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1669,6 +1669,9 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 	struct bkey_i *u;
 	int ret;
 
+	if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
+		return -EROFS;
+
 	percpu_down_read(&c->mark_lock);
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
@@ -1802,80 +1805,23 @@ err:
 	return ret;
 }
 
-static int bch2_gc_thread(void *arg)
+static void bch2_gc_gens_work(struct work_struct *work)
 {
-	struct bch_fs *c = arg;
-	struct io_clock *clock = &c->io_clock[WRITE];
-	unsigned long last = atomic64_read(&clock->now);
-	unsigned last_kick = atomic_read(&c->kick_gc);
-
-	set_freezable();
-
-	while (1) {
-		while (1) {
-			set_current_state(TASK_INTERRUPTIBLE);
-
-			if (kthread_should_stop()) {
-				__set_current_state(TASK_RUNNING);
-				return 0;
-			}
-
-			if (atomic_read(&c->kick_gc) != last_kick)
-				break;
-
-			if (c->btree_gc_periodic) {
-				unsigned long next = last + c->capacity / 16;
-
-				if (atomic64_read(&clock->now) >= next)
-					break;
-
-				bch2_io_clock_schedule_timeout(clock, next);
-			} else {
-				schedule();
-			}
-
-			try_to_freeze();
-		}
-		__set_current_state(TASK_RUNNING);
-
-		last = atomic64_read(&clock->now);
-		last_kick = atomic_read(&c->kick_gc);
-
-		bch2_gc_gens(c);
-		debug_check_no_locks_held();
-	}
-
-	return 0;
+	struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
+	bch2_gc_gens(c);
+	bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-void bch2_gc_thread_stop(struct bch_fs *c)
+void bch2_gc_gens_async(struct bch_fs *c)
 {
-	struct task_struct *p;
-
-	p = c->gc_thread;
-	c->gc_thread = NULL;
-
-	if (p) {
-		kthread_stop(p);
-		put_task_struct(p);
-	}
+	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
+	    !queue_work(c->write_ref_wq, &c->gc_gens_work))
+		bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
 }
 
-int bch2_gc_thread_start(struct bch_fs *c)
+void bch2_fs_gc_init(struct bch_fs *c)
 {
-	struct task_struct *p;
-
-	if (c->gc_thread)
-		return 0;
+	seqcount_init(&c->gc_pos_lock);
 
-	p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
-	if (IS_ERR(p)) {
-		bch_err_fn(c, PTR_ERR(p));
-		return PTR_ERR(p);
-	}
-
-	get_task_struct(p);
-	c->gc_thread = p;
-	wake_up_process(p);
-	return 0;
+	INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
 }
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index 0d6c0a2df613..15315aab93bd 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -7,9 +7,6 @@
 
 int bch2_check_topology(struct bch_fs *);
 int bch2_check_allocations(struct bch_fs *);
-int bch2_gc_gens(struct bch_fs *);
-void bch2_gc_thread_stop(struct bch_fs *);
-int bch2_gc_thread_start(struct bch_fs *);
 
 /*
  * For concurrent mark and sweep (with other index updates), we define a total
@@ -104,11 +101,8 @@ static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
 	return ret;
 }
 
-static inline void bch2_do_gc_gens(struct bch_fs *c)
-{
-	atomic_inc(&c->kick_gc);
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
-}
+int bch2_gc_gens(struct bch_fs *);
+void bch2_gc_gens_async(struct bch_fs *);
+void bch2_fs_gc_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_GC_H */
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index adad2a703629..36833f86d50d 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -264,7 +264,6 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 	bch2_open_buckets_stop(c, NULL, true);
 	bch2_rebalance_stop(c);
 	bch2_copygc_stop(c);
-	bch2_gc_thread_stop(c);
 	bch2_fs_ec_flush(c);
 
 	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
@@ -486,12 +485,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 	}
 #endif
 
-	ret = bch2_gc_thread_start(c);
-	if (ret) {
-		bch_err(c, "error starting gc thread");
-		return ret;
-	}
-
 	ret = bch2_journal_reclaim_start(&c->journal);
 	if (ret)
 		goto err;
@@ -780,6 +773,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	for (i = 0; i < BCH_TIME_STAT_NR; i++)
 		bch2_time_stats_init(&c->times[i]);
 
+	bch2_fs_gc_init(c);
 	bch2_fs_copygc_init(c);
 	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
 	bch2_fs_btree_iter_init_early(c);
@@ -810,8 +804,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	INIT_LIST_HEAD(&c->fsck_error_msgs);
 	mutex_init(&c->fsck_error_msgs_lock);
 
-	seqcount_init(&c->gc_pos_lock);
-
 	seqcount_init(&c->usage_lock);
 
 	sema_init(&c->io_in_flight, 128);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 24d4c8ef25a5..43edda74d3cb 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -142,7 +142,6 @@ write_attribute(trigger_invalidates);
 write_attribute(trigger_journal_flush);
 write_attribute(prune_cache);
 write_attribute(btree_wakeup);
-rw_attribute(btree_gc_periodic);
 rw_attribute(gc_gens_pos);
 
 read_attribute(uuid);
@@ -408,8 +407,6 @@ SHOW(bch2_fs)
 	if (attr == &sysfs_btree_write_stats)
 		bch2_btree_write_stats_to_text(out, c);
 
-	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
-
 	if (attr == &sysfs_gc_gens_pos)
 		bch2_gc_gens_pos_to_text(out, c);
 
@@ -485,14 +482,6 @@ STORE(bch2_fs)
 {
 	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 
-	if (attr == &sysfs_btree_gc_periodic) {
-		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
-			?: (ssize_t) size;
-
-		wake_up_process(c->gc_thread);
-		return ret;
-	}
-
 	if (attr == &sysfs_copy_gc_enabled) {
 		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;