From bfc18e389c7a09fbbbed6bf4032396685b14246e Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 21 Jun 2018 13:13:04 +0100
Subject: atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()

While __atomic_add_unless() was originally intended as a building-block
for atomic_add_unless(), it's now used in a number of places around the
kernel. It's the only common atomic operation named __atomic*(), rather
than atomic_*(), and for consistency it would be better named
atomic_fetch_add_unless().

This lack of consistency is slightly confusing, and gets in the way of
scripting atomics. Given that, let's clean things up and promote it to
an official part of the atomics API, in the form of
atomic_fetch_add_unless().

This patch converts definitions and invocations over to the new name,
including the instrumented version, using the following script:

----
git grep -w __atomic_add_unless | while read line; do
sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----

Note that we do not have atomic{64,_long}_fetch_add_unless(), which
will be introduced by later patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Acked-by: Geert Uytterhoeven
Acked-by: Peter Zijlstra (Intel)
Acked-by: Palmer Dabbelt
Cc: Boqun Feng
Cc: Linus Torvalds
Cc: Thomas Gleixner
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar
---
 arch/alpha/include/asm/atomic.h    | 4 ++--
 arch/arc/include/asm/atomic.h      | 4 ++--
 arch/arm/include/asm/atomic.h      | 4 ++--
 arch/arm64/include/asm/atomic.h    | 2 +-
 arch/h8300/include/asm/atomic.h    | 2 +-
 arch/hexagon/include/asm/atomic.h  | 4 ++--
 arch/ia64/include/asm/atomic.h     | 2 +-
 arch/m68k/include/asm/atomic.h     | 2 +-
 arch/mips/include/asm/atomic.h     | 4 ++--
 arch/openrisc/include/asm/atomic.h | 4 ++--
 arch/parisc/include/asm/atomic.h   | 4 ++--
 arch/powerpc/include/asm/atomic.h  | 8 ++++----
 arch/riscv/include/asm/atomic.h    | 4 ++--
 arch/s390/include/asm/atomic.h     | 2 +-
 arch/sh/include/asm/atomic.h       | 4 ++--
 arch/sparc/include/asm/atomic_32.h | 2 +-
 arch/sparc/include/asm/atomic_64.h | 2 +-
 arch/sparc/lib/atomic32.c          | 4 ++--
 arch/x86/include/asm/atomic.h      | 4 ++--
 arch/xtensa/include/asm/atomic.h   | 4 ++--
 20 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 767bfdd42992..392b15a4dd4f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..67121b5ff3a3 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -309,7 +309,7 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 #undef ATOMIC_OP
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -317,7 +317,7 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v
  */
-#define __atomic_add_unless(v, a, u)					\
+#define atomic_fetch_add_unless(v, a, u)				\
 ({									\
 	int c, old;							\
 									\
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 66d0e215a773..9d56d0727c9b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 }
 #define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int oldval, newval;
 	unsigned long tmp;
@@ -215,7 +215,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..264d20339f74 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -125,7 +125,7 @@
 #define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_fetch_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
 #define atomic_andnot			atomic_andnot
 
 /*
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index b174dec099bf..5c856887fdf2 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -94,7 +94,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	h8300flags flags;
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index fb3dfb2a667e..287aa9f394f3 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer to value
  * @a: amount to add
  * @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
  *
  *
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int __oldval;
 	register int tmp;
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 2524fb60fbc2..9d2ddde5f9d5 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -215,7 +215,7 @@ ATOMIC64_FETCH_OP(xor, ^)
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e993e2860ee1..8022d9ea1213 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -211,7 +211,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	return c != 0;
 }
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0ab176bdb8e8..02fc1553cf9b 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -275,7 +275,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -283,7 +283,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index 146e1660f00e..b589fac39b92 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
  *
  * This is often used through atomic_inc_not_zero()
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int old, tmp;
 
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return old;
 }
-#define __atomic_add_unless	__atomic_add_unless
+#define atomic_fetch_add_unless	atomic_fetch_add_unless
 
 #include
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 88bae6676c9b..7748abced766 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -78,7 +78,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -86,7 +86,7 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..1483261080a1 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -218,7 +218,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -226,13 +226,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
+"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
@@ -538,7 +538,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
+"1:	ldarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 855115ace98c..739e810c857e 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -332,7 +332,7 @@ ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
 #undef ATOMIC_OP
 
 /* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int prev, rc;
 
@@ -381,7 +381,7 @@ static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static __always_inline int atomic_inc_not_zero(atomic_t *v)
 {
-	return __atomic_add_unless(v, 1, 0);
+	return atomic_fetch_add_unless(v, 1, 0);
 }
 
 #ifndef CONFIG_GENERIC_ATOMIC64
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 4b55532f15c4..c2858cdd8c29 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -90,7 +90,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 0fd0099f43cc..ef45931ebac5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -46,7 +46,7 @@
 #define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -54,7 +54,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9..a58f4b43bcc7 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -27,7 +27,7 @@ int atomic_fetch_or(int, atomic_t *);
 int atomic_fetch_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
 #define atomic_set_release(v, i)	atomic_set((v), (i))
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 28db058d471b..f416fd3d2708 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -89,7 +89,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
 	return xchg(&v->counter, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 465a901a0ada..281fa634bb1a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
 
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
 
 /* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 0db6bec95489..84ed0bd76aef 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -254,7 +254,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 }
 
 /**
- * __arch_atomic_add_unless - add unless the number is already a given value
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -262,7 +262,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c = arch_atomic_read(v);
 
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7a23f2a519a..4188e56c06c9 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -275,7 +275,7 @@ ATOMIC_OPS(xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -283,7 +283,7 @@ ATOMIC_OPS(xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
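
For context when reading the hunks above: the operation being renamed has
fetch-style semantics — return the value observed before the operation, and
only perform the add when that value differs from @u — and several of the
per-architecture implementations (ia64, m68k, mips, s390, sh, sparc64,
xtensa) realize it as the same cmpxchg() retry loop. Below is a minimal
user-space sketch of that loop using C11 atomics. It is illustrative only
and not part of the patch: fetch_add_unless() is a hypothetical stand-in for
the kernel's atomic_fetch_add_unless(), and atomic_compare_exchange_weak()
plays the role of the kernel's atomic_cmpxchg().

----
#include <stdatomic.h>
#include <stdio.h>

/*
 * Add 'a' to '*v' unless the current value is 'u'; return the value
 * observed before any add (the "fetch" in fetch_add_unless).
 */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/*
		 * On failure (including spurious failure of the weak CAS),
		 * 'c' is reloaded with the current value, so the u-check
		 * is re-evaluated on every trip around the loop.
		 */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c;
}

int main(void)
{
	atomic_int v = 1;

	printf("%d\n", fetch_add_unless(&v, 1, 0));	/* prints 1; v is now 2 */
	printf("%d\n", fetch_add_unless(&v, 1, 2));	/* prints 2; v unchanged */
	return 0;
}
----

The fetch-style return value is what makes the operation composable: as the
riscv hunk shows, atomic_inc_not_zero() simply returns
atomic_fetch_add_unless(v, 1, 0), relying on the old value being nonzero
exactly when the increment actually happened.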