author		Dave Chinner <dchinner@redhat.com>	2023-03-15 17:31:03 -0700
committer	Darrick J. Wong <djwong@kernel.org>	2023-03-19 10:02:04 -0700
commit		e9b60c7f97130795c7aa81a649ae4b93a172a277 (patch)
tree		10a41593e7fb0e16e113c35d2075f30f9751a45c /lib
parent		7ba85fba47bd89618fdb7dc322bdf823b1b56efb (diff)
pcpcntr: remove percpu_counter_sum_all()
percpu_counter_sum_all() is now redundant as the race condition it was
invented to handle is now dealt with by percpu_counter_sum() directly
and all users of percpu_counter_sum_all() have been removed.

Remove it.

This effectively reverts the changes made in f689054aace2
("percpu_counter: add percpu_counter_sum_all interface") except for the
cpumask iteration that fixes percpu_counter_sum() made earlier in this
series.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
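
[Reader aid, not part of the commit: the API affected here is the generic
percpu counter. A minimal caller sketch (hypothetical module code; the
name nr_items and the example_* helpers are invented for illustration)
showing why percpu_counter_sum() is now sufficient on its own:]

#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_items;	/* hypothetical counter */

static int __init example_init(void)
{
	/* percpu_counter_init() allocates the per-cpu storage */
	return percpu_counter_init(&nr_items, 0, GFP_KERNEL);
}

static void example_account(long delta)
{
	/* fast path: batches the delta into this CPU's local counter */
	percpu_counter_add(&nr_items, delta);
}

static s64 example_total(void)
{
	/*
	 * Exact sum. With this series, the iteration inside
	 * percpu_counter_sum() also covers dying CPUs, so callers no
	 * longer need a percpu_counter_sum_all() fallback.
	 */
	return percpu_counter_sum(&nr_items);
}

static void __exit example_exit(void)
{
	percpu_counter_destroy(&nr_items);
}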
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu_counter.c	40
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 0e096311e0c0..5004463c4f9f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -122,23 +122,6 @@ void percpu_counter_sync(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(percpu_counter_sync);
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
-				     const struct cpumask *cpu_mask)
-{
-	s64 ret;
-	int cpu;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&fbc->lock, flags);
-	ret = fbc->count;
-	for_each_cpu_or(cpu, cpu_online_mask, cpu_mask) {
-		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	raw_spin_unlock_irqrestore(&fbc->lock, flags);
-	return ret;
-}
-
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive().
@@ -153,22 +136,21 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
-	return __percpu_counter_sum_mask(fbc, cpu_dying_mask);
+	s64 ret;
+	int cpu;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	ret = fbc->count;
+	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
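
[Reader aid, not part of the commit: the comment retained in the second
hunk contrasts percpu_counter_sum() with percpu_counter_read_positive().
Continuing the hypothetical nr_items sketch above, the trade-off looks
like this:]

static s64 example_total_fast(void)
{
	/*
	 * O(1): returns the global count (clamped to >= 0) without
	 * taking the lock or walking other CPUs' counters, so it may
	 * lag behind per-cpu deltas that have not been batched back
	 * into fbc->count yet.
	 */
	return percpu_counter_read_positive(&nr_items);
}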