author		Paul Burton <paul.burton@mips.com>	2019-10-01 21:53:30 +0000
committer	Paul Burton <paul.burton@mips.com>	2019-10-07 09:42:47 -0700
commit		6bbe043bd3f4766b089b7b51a80e75745868c038 (patch)
tree		4e2a4c6a1f357f3c92b6f5ed1ad3ee1f925562be
parent		27aab27259aec1f200cf1f84f02b8192d27abe64 (diff)
MIPS: bitops: Implement test_and_set_bit() in terms of _lock variant
The only difference between test_and_set_bit() & test_and_set_bit_lock()
is memory barrier semantics - the former provides a full barrier whilst
the latter only provides acquire semantics. We can therefore implement
test_and_set_bit() in terms of test_and_set_bit_lock() with the addition
of the extra memory barrier. Do this in order to avoid duplicating logic.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: linux-kernel@vger.kernel.org
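The pattern generalizes beyond MIPS. A minimal standalone sketch of the
same idea, assuming GCC/Clang __atomic builtins as a stand-in for the
LL/SC assembly and a sequentially-consistent fence standing in for
smp_mb__before_llsc(); the bit-indexing arithmetic here is a
simplification for illustration, not the kernel implementation:

#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Stand-in for the MIPS smp_mb__before_llsc() full memory barrier. */
#define smp_mb__before_llsc()	__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* Acquire-only variant: atomically set bit nr, return its old value. */
static inline bool test_and_set_bit_lock(unsigned long nr,
					 volatile unsigned long *addr)
{
	volatile unsigned long *p = addr + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);

	return (__atomic_fetch_or(p, mask, __ATOMIC_ACQUIRE) & mask) != 0;
}

/*
 * Full-barrier variant, implemented in terms of the _lock variant: a
 * full barrier before the operation, plus the acquire ordering the
 * _lock variant already provides after it, adds up to full-barrier
 * semantics without duplicating the atomic-update logic.
 */
static inline bool test_and_set_bit(unsigned long nr,
				    volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	return test_and_set_bit_lock(nr, addr);
}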
-rw-r--r--	arch/mips/include/asm/bitops.h	| 66
-rw-r--r--	arch/mips/lib/bitops.c		| 26
2 files changed, 13 insertions(+), 79 deletions(-)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 03532ae9f528..ea35a2e87b6d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -31,8 +31,6 @@
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
-int __mips_test_and_set_bit(unsigned long nr,
-			    volatile unsigned long *addr);
 int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
 int __mips_test_and_clear_bit(unsigned long nr,
@@ -236,24 +234,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /*
- * test_and_set_bit - Set a bit and return its old value
+ * test_and_set_bit_lock - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * This operation is atomic and implies acquire ordering semantics
+ * after the memory operation.
  */
-static inline int test_and_set_bit(unsigned long nr,
+static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
 	int bit = nr & SZLONG_MASK;
 	unsigned long res, temp;
 
-	smp_mb__before_llsc();
-
 	if (!kernel_uses_llsc) {
-		res = __mips_test_and_set_bit(nr, addr);
+		res = __mips_test_and_set_bit_lock(nr, addr);
 	} else if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		"	.set	push					\n"
@@ -264,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	pop					\n"
-		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+m" (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: __LLSC_CLOBBER);
 	} else {
@@ -291,56 +287,20 @@ static inline int test_and_set_bit(unsigned long nr,
 }
 
 /*
- * test_and_set_bit_lock - Set a bit and return its old value
+ * test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and implies acquire ordering semantics
- * after the memory operation.
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
  */
-static inline int test_and_set_bit_lock(unsigned long nr,
+static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
-	int bit = nr & SZLONG_MASK;
-	unsigned long res, temp;
-
-	if (!kernel_uses_llsc) {
-		res = __mips_test_and_set_bit_lock(nr, addr);
-	} else if (R10000_LLSC_WAR) {
-		__asm__ __volatile__(
-		"	.set	push					\n"
-		"	.set	arch=r4000				\n"
-		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
-		"	or	%2, %0, %3				\n"
-		"	" __SC	"%2, %1					\n"
-		"	beqzl	%2, 1b					\n"
-		"	and	%2, %0, %3				\n"
-		"	.set	pop					\n"
-		: "=&r" (temp), "+m" (*m), "=&r" (res)
-		: "r" (1UL << bit)
-		: __LLSC_CLOBBER);
-	} else {
-		do {
-			__asm__ __volatile__(
-			"	.set	push				\n"
-			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
-			"	" __LL "%0, %1	# test_and_set_bit	\n"
-			"	or	%2, %0, %3			\n"
-			"	" __SC	"%2, %1				\n"
-			"	.set	pop				\n"
-			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
-			: "r" (1UL << bit)
-			: __LLSC_CLOBBER);
-		} while (unlikely(!res));
-
-		res = temp & (1UL << bit);
-	}
-
-	smp_llsc_mb();
-
-	return res != 0;
+	smp_mb__before_llsc();
+	return test_and_set_bit_lock(nr, addr);
 }
+
 /*
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 3b2a1e78a543..fba402c0879d 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -78,32 +78,6 @@ EXPORT_SYMBOL(__mips_change_bit);
 
 
 /**
- * __mips_test_and_set_bit - Set a bit and return its old value. This is
- * called by test_and_set_bit() if it cannot find a faster solution.
- * @nr: Bit to set
- * @addr: Address to count from
- */
-int __mips_test_and_set_bit(unsigned long nr,
-			    volatile unsigned long *addr)
-{
-	unsigned long *a = (unsigned long *)addr;
-	unsigned bit = nr & SZLONG_MASK;
-	unsigned long mask;
-	unsigned long flags;
-	int res;
-
-	a += nr >> SZLONG_LOG;
-	mask = 1UL << bit;
-	raw_local_irq_save(flags);
-	res = (mask & *a) != 0;
-	*a |= mask;
-	raw_local_irq_restore(flags);
-	return res;
-}
-EXPORT_SYMBOL(__mips_test_and_set_bit);
-
-
-/**
  * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
  * called by test_and_set_bit_lock() if it cannot find a faster solution.
  * @nr: Bit to set
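Why keep two variants at all: callers taking a lock only need acquire
ordering, so they can use the cheaper test_and_set_bit_lock() directly
and pair it with clear_bit_unlock(), while test_and_set_bit() remains
for callers that depend on a full barrier. A sketch of that trylock
idiom (MY_LOCK_BIT and my_state are hypothetical names, not part of
this patch):

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_LOCK_BIT	0		/* hypothetical lock bit */

static unsigned long my_state;		/* hypothetical state word */

static bool my_trylock(void)
{
	/* Old value 0 means we took the lock; acquire ordering suffices. */
	return !test_and_set_bit_lock(MY_LOCK_BIT, &my_state);
}

static void my_unlock(void)
{
	/* Release ordering when dropping the lock. */
	clear_bit_unlock(MY_LOCK_BIT, &my_state);
}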