author	Yu Kuai <yukuai3@huawei.com>	2024-06-11 21:22:45 +0800
committer	Song Liu <song@kernel.org>	2024-06-12 16:27:50 +0000
commit	df79234bdc3f441bec99dfc8199b6f2c673203ed (patch)
tree	b872d79565bc1982cabc19e09f53541da32a6469 /drivers/md
parent	c8ecfe680c371db2a6d125de5d6bc2398950e9cf (diff)
md: remove parameter check_seq for stop_sync_thread()
The caller always sets MD_RECOVERY_FROZEN if check_seq is false, and always
clears MD_RECOVERY_FROZEN if check_seq is true, hence replace the parameter
with a test_bit() check on MD_RECOVERY_FROZEN to make the code cleaner.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240611132251.1967786-7-yukuai1@huaweicloud.com
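
The invariant the patch leans on can be sketched outside the kernel: every caller that used to pass check_seq as false sets MD_RECOVERY_FROZEN just before the call, and every caller that passed true clears it, so stop_sync_thread() can derive the same answer from the flag itself. The user-space C model below only illustrates that equivalence; the struct, the flag helper, and both stop_sync_thread_* functions are simplified stand-ins, not the real drivers/md/md.c code.

```c
/* Minimal user-space model of the check_seq -> MD_RECOVERY_FROZEN rewrite.
 * Illustrative sketch only: the struct, flag helper and both functions are
 * simplified stand-ins for the kernel code, not drivers/md/md.c itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define MD_RECOVERY_FROZEN (1u << 0)

struct mddev {
	unsigned int recovery;	/* stand-in for the recovery flag word */
	int sync_seq;		/* stand-in for atomic_t sync_seq */
};

static bool test_frozen(const struct mddev *mddev)
{
	return mddev->recovery & MD_RECOVERY_FROZEN;
}

/* Old shape: the caller states explicitly whether to watch sync_seq. */
static void stop_sync_thread_old(struct mddev *mddev, bool locked, bool check_seq)
{
	(void)mddev;
	(void)locked;
	printf("old: explicit check_seq=%d\n", check_seq);
}

/* New shape: derive the same answer from MD_RECOVERY_FROZEN, which every
 * caller already set or cleared right before the call. */
static void stop_sync_thread_new(struct mddev *mddev, bool locked)
{
	bool check_seq = !test_frozen(mddev);

	(void)locked;
	printf("new: derived  check_seq=%d\n", check_seq);
}

int main(void)
{
	struct mddev mddev = { 0 };

	/* "idle" path: flag cleared, old code passed check_seq=true. */
	mddev.recovery &= ~MD_RECOVERY_FROZEN;
	stop_sync_thread_old(&mddev, true, true);
	stop_sync_thread_new(&mddev, true);

	/* "frozen" path: flag set, old code passed check_seq=false. */
	mddev.recovery |= MD_RECOVERY_FROZEN;
	stop_sync_thread_old(&mddev, true, false);
	stop_sync_thread_new(&mddev, true);
	return 0;
}
```

Compiling and running this prints the explicit check_seq next to the derived one for the idle and frozen paths; they match in both cases, which is the point of the refactor: the extra parameter carried no information that the flag did not already carry.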
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/md.c | 26
1 file changed, 11 insertions, 15 deletions
```diff
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d035cd52e49a..44cb18ec1c52 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4961,15 +4961,10 @@ action_show(struct mddev *mddev, char *page)
  * @locked: if set, reconfig_mutex will still be held after this function
  *	return; if not set, reconfig_mutex will be released after this
  *	function return.
- * @check_seq: if set, only wait for curent running sync_thread to stop, noted
- *	that new sync_thread can still start.
  */
-static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
+static void stop_sync_thread(struct mddev *mddev, bool locked)
 {
-	int sync_seq;
-
-	if (check_seq)
-		sync_seq = atomic_read(&mddev->sync_seq);
+	int sync_seq = atomic_read(&mddev->sync_seq);
 
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
 		if (!locked)
@@ -4990,7 +4985,8 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
 
 	wait_event(resync_wait,
 		   !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-		   (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+		   (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) &&
+		    sync_seq != atomic_read(&mddev->sync_seq)));
 
 	if (locked)
 		mddev_lock_nointr(mddev);
@@ -5001,7 +4997,7 @@ void md_idle_sync_thread(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, true);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_idle_sync_thread);
 
@@ -5010,7 +5006,7 @@ void md_frozen_sync_thread(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 }
 EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
 
@@ -5035,7 +5031,7 @@ static void idle_sync_thread(struct mddev *mddev)
 		return;
 	}
 
-	stop_sync_thread(mddev, false, true);
+	stop_sync_thread(mddev, false);
 	mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -5049,7 +5045,7 @@ static void frozen_sync_thread(struct mddev *mddev)
 		return;
 	}
 
-	stop_sync_thread(mddev, false, false);
+	stop_sync_thread(mddev, false);
 	mutex_unlock(&mddev->sync_mutex);
 }
 
@@ -6544,7 +6540,7 @@ void md_stop_writes(struct mddev *mddev)
 {
 	mddev_lock_nointr(mddev);
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 	__md_stop_writes(mddev);
 	mddev_unlock(mddev);
 }
@@ -6612,7 +6608,7 @@ static int md_set_readonly(struct mddev *mddev)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
 
-	stop_sync_thread(mddev, false, false);
+	stop_sync_thread(mddev, false);
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	mddev_lock_nointr(mddev);
@@ -6658,7 +6654,7 @@ static int do_md_stop(struct mddev *mddev, int mode)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
 
-	stop_sync_thread(mddev, true, false);
+	stop_sync_thread(mddev, true);
 
 	if (mddev->sysfs_active ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
```