author      Logan Gunthorpe <logang@deltatee.com>    2022-07-27 15:05:56 -0600
committer   Jens Axboe <axboe@kernel.dk>             2022-08-02 21:08:53 -0600
commit      5165ed40a1f0a3bf03526aad96df736556fbe64f (patch)
tree        196364faeaaa36f25159d7be76e055daf4ce4550 /drivers/md/raid5.c
parent      c55ddd9082f757050183bd0e215aab3af3fc225f (diff)
md/raid5: Refactor raid5_get_active_stripe()
Refactor raid5_get_active_stripe() so that it reads more linearly, in
the order it is typically executed.
Call init_stripe() as soon as a free stripe is found and exit the
function early; this removes a lot of if (sh) checks and unindents the
following code.
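In outline, the refactored allocation path looks like the excerpt below
(taken from the diff that follows; the surrounding locking and the wait
path are elided):

        sh = get_free_stripe(conf, hash);
        if (sh) {
                r5c_check_stripe_cache_usage(conf);
                init_stripe(sh, sector, previous);
                atomic_inc(&sh->count);
                goto out;   /* stripe ready: no further if (sh) checks needed */
        }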
Remove the while loop in favour of the 'goto retry' pattern, which
reduces indentation further, and use a 'goto wait_for_stripe' instead
of an additional indent level, seeing as it is the unusual path; this
makes the code easier to read.
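Roughly, the resulting control flow is (arguments and the allocation
details elided, see the full diff below):

        retry:
                sh = find_get_stripe(...);
                if (sh)
                        goto out;
                /* try to get a free stripe; on success, goto out */

        wait_for_stripe:
                if (noblock)
                        goto out;
                /* block until stripes are released, then look again */
                wait_event_lock_irq(...);
                goto retry;

        out:
                spin_unlock_irq(conf->hash_locks + hash);
                return sh;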
No functional changes are intended. This will make the changes in
subsequent patches easier to understand.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--   drivers/md/raid5.c   67
1 file changed, 36 insertions, 31 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9270a714cceb..97e8d4baf3fc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -766,41 +766,46 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 
         spin_lock_irq(conf->hash_locks + hash);
 
-        do {
-                wait_event_lock_irq(conf->wait_for_quiescent,
-                                    conf->quiesce == 0 || noquiesce,
-                                    *(conf->hash_locks + hash));
-                sh = find_get_stripe(conf, sector, conf->generation - previous,
-                                     hash);
-                if (sh)
-                        break;
+retry:
+        wait_event_lock_irq(conf->wait_for_quiescent,
+                            conf->quiesce == 0 || noquiesce,
+                            *(conf->hash_locks + hash));
+        sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+        if (sh)
+                goto out;
 
-                if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
-                        sh = get_free_stripe(conf, hash);
-                        if (!sh && !test_bit(R5_DID_ALLOC, &conf->cache_state))
-                                set_bit(R5_ALLOC_MORE, &conf->cache_state);
-                }
-                if (noblock && !sh)
-                        break;
+        if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+                goto wait_for_stripe;
 
+        sh = get_free_stripe(conf, hash);
+        if (sh) {
                 r5c_check_stripe_cache_usage(conf);
-                if (!sh) {
-                        set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-                        r5l_wake_reclaim(conf->log, 0);
-                        wait_event_lock_irq(conf->wait_for_stripe,
-                                            !list_empty(conf->inactive_list + hash) &&
-                                            (atomic_read(&conf->active_stripes)
-                                             < (conf->max_nr_stripes * 3 / 4)
-                                             || !test_bit(R5_INACTIVE_BLOCKED,
-                                                          &conf->cache_state)),
-                                            *(conf->hash_locks + hash));
-                        clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-                } else {
-                        init_stripe(sh, sector, previous);
-                        atomic_inc(&sh->count);
-                }
-        } while (sh == NULL);
+                init_stripe(sh, sector, previous);
+                atomic_inc(&sh->count);
+                goto out;
+        }
+        if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+                set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+        if (noblock)
+                goto out;
+
+        r5c_check_stripe_cache_usage(conf);
+        set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+        r5l_wake_reclaim(conf->log, 0);
+        wait_event_lock_irq(conf->wait_for_stripe,
+                            !list_empty(conf->inactive_list + hash) &&
+                            (atomic_read(&conf->active_stripes)
+                             < (conf->max_nr_stripes * 3 / 4)
+                             || !test_bit(R5_INACTIVE_BLOCKED,
+                                          &conf->cache_state)),
+                            *(conf->hash_locks + hash));
+        clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+        goto retry;
+
+out:
         spin_unlock_irq(conf->hash_locks + hash);
         return sh;
 }