author    | Christoph Hellwig <hch@lst.de>           | 2024-04-22 13:20:12 +0200
committer | Chandan Babu R <chandanbabu@kernel.org>  | 2024-04-22 18:00:47 +0530
commit    | f30f656e25eb72c4309e76b16fa45062e183a2ee
tree      | b097332fb140ac64fbaa8f7a0d5a66358e8c83e2 /fs/xfs/xfs_mount.c
parent    | 5e1e4d4fc79c6e5f80864860208ceae27dead8a2
xfs: split xfs_mod_freecounter
xfs_mod_freecounter has two entirely separate code paths for adding to or
subtracting from the free counters. Only the subtract case looks at the
rsvd flag and can return an error.

Split xfs_mod_freecounter into separate helpers for subtracting from and
adding to the free counter, and remove the unreachable error handling for
the addition case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
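
As a rough illustration of what the split means for callers: the two example_* wrappers below are hypothetical and not part of this patch; only the xfs_add_freecounter()/xfs_dec_freecounter() names and signatures come from the diff to fs/xfs/xfs_mount.c shown further down.

```c
/*
 * Hypothetical caller-side sketch, not taken from the patch: with the split,
 * giving blocks back is unconditional and returns void, while taking blocks
 * away keeps the error return and the reserve-pool (rsvd) fallback.
 */
static int example_alloc_blocks(struct xfs_mount *mp, uint64_t len, bool rsvd)
{
	/* previously: xfs_mod_freecounter(mp, &mp->m_fdblocks, -len, rsvd) */
	return xfs_dec_freecounter(mp, &mp->m_fdblocks, len, rsvd);
}

static void example_free_blocks(struct xfs_mount *mp, uint64_t len)
{
	/*
	 * previously: xfs_mod_freecounter(mp, &mp->m_fdblocks, len, false),
	 * whose error return could never trigger for an addition.
	 */
	xfs_add_freecounter(mp, &mp->m_fdblocks, len);
}
```

Because only the subtraction path consults rsvd, the addition helper can drop both that flag and the return value, which is what lets the unreachable error handling go away.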
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r-- | fs/xfs/xfs_mount.c | 67
1 file changed, 35 insertions, 32 deletions
```diff
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d37ba10f5fa3..b9df7cc9c473 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1136,16 +1136,44 @@ xfs_fs_writable(
 	return true;
 }
 
-/* Adjust m_fdblocks or m_frextents. */
+void
+xfs_add_freecounter(
+	struct xfs_mount	*mp,
+	struct percpu_counter	*counter,
+	uint64_t		delta)
+{
+	bool			has_resv_pool = (counter == &mp->m_fdblocks);
+	uint64_t		res_used;
+
+	/*
+	 * If the reserve pool is depleted, put blocks back into it first.
+	 * Most of the time the pool is full.
+	 */
+	if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) {
+		percpu_counter_add(counter, delta);
+		return;
+	}
+
+	spin_lock(&mp->m_sb_lock);
+	res_used = mp->m_resblks - mp->m_resblks_avail;
+	if (res_used > delta) {
+		mp->m_resblks_avail += delta;
+	} else {
+		delta -= res_used;
+		mp->m_resblks_avail = mp->m_resblks;
+		percpu_counter_add(counter, delta);
+	}
+	spin_unlock(&mp->m_sb_lock);
+}
+
 int
-xfs_mod_freecounter(
+xfs_dec_freecounter(
 	struct xfs_mount	*mp,
 	struct percpu_counter	*counter,
-	int64_t			delta,
+	uint64_t		delta,
 	bool			rsvd)
 {
 	int64_t			lcounter;
-	long long		res_used;
 	uint64_t		set_aside = 0;
 	s32			batch;
 	bool			has_resv_pool;
@@ -1155,31 +1183,6 @@ xfs_mod_freecounter(
 	if (rsvd)
 		ASSERT(has_resv_pool);
 
-	if (delta > 0) {
-		/*
-		 * If the reserve pool is depleted, put blocks back into it
-		 * first. Most of the time the pool is full.
-		 */
-		if (likely(!has_resv_pool ||
-			   mp->m_resblks == mp->m_resblks_avail)) {
-			percpu_counter_add(counter, delta);
-			return 0;
-		}
-
-		spin_lock(&mp->m_sb_lock);
-		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
-		if (res_used > delta) {
-			mp->m_resblks_avail += delta;
-		} else {
-			delta -= res_used;
-			mp->m_resblks_avail = mp->m_resblks;
-			percpu_counter_add(counter, delta);
-		}
-		spin_unlock(&mp->m_sb_lock);
-		return 0;
-	}
-
 	/*
 	 * Taking blocks away, need to be more accurate the closer we
 	 * are to zero.
@@ -1207,7 +1210,7 @@ xfs_mod_freecounter(
 	 */
 	if (has_resv_pool)
 		set_aside = xfs_fdblocks_unavailable(mp);
-	percpu_counter_add_batch(counter, delta, batch);
+	percpu_counter_add_batch(counter, -((int64_t)delta), batch);
 	if (__percpu_counter_compare(counter, set_aside,
 				     XFS_FDBLOCKS_BATCH) >= 0) {
 		/* we had space! */
@@ -1219,11 +1222,11 @@ xfs_mod_freecounter(
 	 * that took us to ENOSPC.
 	 */
 	spin_lock(&mp->m_sb_lock);
-	percpu_counter_add(counter, -delta);
+	percpu_counter_add(counter, delta);
 	if (!has_resv_pool || !rsvd)
 		goto fdblocks_enospc;
 
-	lcounter = (long long)mp->m_resblks_avail + delta;
+	lcounter = (long long)mp->m_resblks_avail - delta;
 	if (lcounter >= 0) {
 		mp->m_resblks_avail = lcounter;
 		spin_unlock(&mp->m_sb_lock);
```
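
For a concrete feel of the reserve-pool refill that xfs_add_freecounter() performs in the first hunk above, here is a minimal user-space simulation; the sim_* structure, harness, and numbers are invented for illustration and only mirror the fields (m_resblks, m_resblks_avail, m_fdblocks) used in the patch.

```c
/*
 * User-space simulation of the refill logic in xfs_add_freecounter():
 * returned blocks top up the reserve pool first, and only the remainder
 * reaches the free-space counter.  Illustrative only, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct sim_mount {
	uint64_t resblks;	/* reserve pool size (m_resblks) */
	uint64_t resblks_avail;	/* reserve pool fill level (m_resblks_avail) */
	int64_t  fdblocks;	/* free-space counter (m_fdblocks) */
};

static void sim_add_freecounter(struct sim_mount *mp, uint64_t delta)
{
	uint64_t res_used;

	if (mp->resblks == mp->resblks_avail) {
		mp->fdblocks += delta;		/* pool full: all blocks go back free */
		return;
	}

	res_used = mp->resblks - mp->resblks_avail;
	if (res_used > delta) {
		mp->resblks_avail += delta;	/* everything refills the pool */
	} else {
		delta -= res_used;
		mp->resblks_avail = mp->resblks;
		mp->fdblocks += delta;		/* remainder becomes free space */
	}
}

int main(void)
{
	struct sim_mount mp = { .resblks = 8192, .resblks_avail = 8000, .fdblocks = 100 };

	sim_add_freecounter(&mp, 300);
	/* 192 blocks refill the pool, the remaining 108 land in the free counter */
	printf("avail=%llu free=%lld\n",
	       (unsigned long long)mp.resblks_avail, (long long)mp.fdblocks);
	return 0;
}
```

With resblks = 8192 and resblks_avail = 8000, returning 300 blocks first covers the 192-block deficit in the pool and only the remaining 108 blocks show up as free space, matching the else branch of the refill code in the hunk above.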