From 6006c0d8ce9441dd1363bf14f18a8e28d3588460 Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 9 Oct 2012 11:00:24 +0100
Subject: metag: Atomics, locks and bitops

Add header files to implement Meta hardware thread locks (used by some
other atomic operations), atomics, spinlocks, and bitops.

There are 2 main types of atomic primitives for metag (in addition to
IRQs off on UP):
 - LOCK instructions provide locking between hardware threads.
 - LNKGET/LNKSET instructions provide load-linked/store-conditional
   operations allowing for lighter weight atomics on Meta2.

LOCK instructions allow for hardware threads to acquire voluntary or
exclusive hardware thread locks:
 - LOCK0 releases exclusive and voluntary lock from the running hardware
   thread.
 - LOCK1 acquires the voluntary hardware lock, blocking until it becomes
   available.
 - LOCK2 implies LOCK1, and additionally acquires the exclusive hardware
   lock, blocking all other hardware threads from executing.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
---
 arch/metag/include/asm/atomic.h          |  53 +++++++
 arch/metag/include/asm/atomic_lnkget.h   | 234 +++++++++++++++++++++++++++++
 arch/metag/include/asm/atomic_lock1.h    | 160 ++++++++++++++++++++
 arch/metag/include/asm/bitops.h          | 132 ++++++++++++++++
 arch/metag/include/asm/cmpxchg.h         |  65 ++++++++
 arch/metag/include/asm/cmpxchg_irq.h     |  42 ++++++
 arch/metag/include/asm/cmpxchg_lnkget.h  |  86 +++++++++++
 arch/metag/include/asm/cmpxchg_lock1.h   |  48 ++++++
 arch/metag/include/asm/global_lock.h     | 100 +++++++++++++
 arch/metag/include/asm/spinlock.h        |  22 +++
 arch/metag/include/asm/spinlock_lnkget.h | 249 +++++++++++++++++++++++++++++++
 arch/metag/include/asm/spinlock_lock1.h  | 184 +++++++++++++++++++++++
 arch/metag/include/asm/spinlock_types.h  |  20 +++
 13 files changed, 1395 insertions(+)
 create mode 100644 arch/metag/include/asm/atomic.h
 create mode 100644 arch/metag/include/asm/atomic_lnkget.h
 create mode 100644 arch/metag/include/asm/atomic_lock1.h
 create mode 100644 arch/metag/include/asm/bitops.h
 create mode 100644 arch/metag/include/asm/cmpxchg.h
 create mode 100644 arch/metag/include/asm/cmpxchg_irq.h
 create mode 100644 arch/metag/include/asm/cmpxchg_lnkget.h
 create mode 100644 arch/metag/include/asm/cmpxchg_lock1.h
 create mode 100644 arch/metag/include/asm/global_lock.h
 create mode 100644 arch/metag/include/asm/spinlock.h
 create mode 100644 arch/metag/include/asm/spinlock_lnkget.h
 create mode 100644 arch/metag/include/asm/spinlock_lock1.h
 create mode 100644 arch/metag/include/asm/spinlock_types.h

diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
new file mode 100644
index 000000000000..307ecd2bd9a1
--- /dev/null
+++ b/arch/metag/include/asm/atomic.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_METAG_ATOMIC_H
+#define __ASM_METAG_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+/* The simple UP case. */
+#include <asm-generic/atomic.h>
+#else
+
+#if defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/atomic_lock1.h>
+#else
+#include <asm/atomic_lnkget.h>
+#endif
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+#define atomic_dec_return(v)	atomic_sub_return(1, (v))
+#define atomic_inc_return(v)	atomic_add_return(1, (v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v)	atomic_add(1, (v))
+#define atomic_dec(v)	atomic_sub(1, (v))
+
+#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
+
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif
+
+#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __ASM_METAG_ATOMIC_H */
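The macros above layer the generic atomic API over whichever
implementation Kconfig selects. As a purely illustrative sketch (not
part of the patch), this is the sort of refcounting pattern they
enable; struct thing and thing_destroy() are hypothetical:

  /* Illustrative only -- not part of the patch. */
  struct thing {
  	atomic_t refs;		/* starts life as ATOMIC_INIT(1) */
  };

  static bool thing_get(struct thing *t)
  {
  	/* Fails (returns 0) once the count has already hit zero. */
  	return atomic_inc_not_zero(&t->refs);
  }

  static void thing_put(struct thing *t)
  {
  	/* atomic_dec_and_test() returns true only for the final put. */
  	if (atomic_dec_and_test(&t->refs))
  		thing_destroy(t);
  }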
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
new file mode 100644
index 000000000000..d2e60a18986c
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -0,0 +1,234 @@
+#ifndef __ASM_METAG_ATOMIC_LNKGET_H
+#define __ASM_METAG_ATOMIC_LNKGET_H
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_set(v, i)	((v)->counter = (i))
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int atomic_read(const atomic_t *v)
+{
+	int temp;
+
+	asm volatile (
+		"LNKGETD %0, [%1]\n"
+		: "=da" (temp)
+		: "da" (&v->counter));
+
+	return temp;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	int temp;
+
+	asm volatile (
+		"1:	LNKGETD %0, [%1]\n"
+		"	ADD	%0, %0, %2\n"
+		"	LNKSETD	[%1], %0\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp)
+		: "da" (&v->counter), "bd" (i)
+		: "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	int temp;
+
+	asm volatile (
+		"1:	LNKGETD %0, [%1]\n"
+		"	SUB	%0, %0, %2\n"
+		"	LNKSETD	[%1], %0\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp)
+		: "da" (&v->counter), "bd" (i)
+		: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int result, temp;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	ADD	%1, %1, %3\n"
+		"	LNKSETD	[%2], %1\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp), "=&da" (result)
+		: "da" (&v->counter), "bd" (i)
+		: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int result, temp;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	SUB	%1, %1, %3\n"
+		"	LNKSETD	[%2], %1\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp), "=&da" (result)
+		: "da" (&v->counter), "bd" (i)
+		: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	int temp;
+
+	asm volatile (
+		"1:	LNKGETD %0, [%1]\n"
+		"	AND	%0, %0, %2\n"
+		"	LNKSETD	[%1], %0\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp)
+		: "da" (&v->counter), "bd" (~mask)
+		: "cc");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	int temp;
+
+	asm volatile (
+		"1:	LNKGETD %0, [%1]\n"
+		"	OR	%0, %0, %2\n"
+		"	LNKSETD	[%1], %0\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp)
+		: "da" (&v->counter), "bd" (mask)
+		: "cc");
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int result, temp;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	CMP	%1, %3\n"
+		"	LNKSETDEQ [%2], %4\n"
+		"	BNE	2f\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		"2:\n"
+		: "=&d" (temp), "=&d" (result)
+		: "da" (&v->counter), "bd" (old), "da" (new)
+		: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	int temp, old;
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	LNKSETD	[%2], %3\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		: "=&d" (temp), "=&d" (old)
+		: "da" (&v->counter), "da" (new)
+		: "cc");
+
+	return old;
+}
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int result, temp;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	CMP	%1, %3\n"
+		"	ADD	%0, %1, %4\n"
+		"	LNKSETDNE [%2], %0\n"
+		"	BEQ	2f\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		"2:\n"
+		: "=&d" (temp), "=&d" (result)
+		: "da" (&v->counter), "bd" (u), "bd" (a)
+		: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+	int result, temp;
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	SUBS	%1, %1, %3\n"
+		"	LNKSETDGE [%2], %1\n"
+		"	BLT	2f\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+		"2:\n"
+		: "=&d" (temp), "=&da" (result)
+		: "da" (&v->counter), "bd" (i)
+		: "cc");
+
+	return result;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LNKGET_H */
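For readers who don't speak Meta assembly, here is a rough C11
analogue (illustrative only; it uses <stdatomic.h>, nothing from this
patch) of the LNKGET/LNKSET retry loop used throughout the file above.
The DEFR/ANDT/CMPT sequence inspects TXSTAT to see whether the
conditional store succeeded, and BNZ retries if it did not -- exactly
the shape of a weak compare-and-swap loop:

  #include <stdatomic.h>

  /* model_add_return() is hypothetical and only for reading. */
  static inline int model_add_return(int i, _Atomic int *counter)
  {
  	int old, new;

  	do {
  		old = atomic_load_explicit(counter, memory_order_relaxed);
  		new = old + i;	/* the ADD between LNKGETD and LNKSETD */
  	} while (!atomic_compare_exchange_weak(counter, &old, new));

  	return new;
  }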
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
new file mode 100644
index 000000000000..e578955e674b
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -0,0 +1,160 @@
+#ifndef __ASM_METAG_ATOMIC_LOCK1_H
+#define __ASM_METAG_ATOMIC_LOCK1_H
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+static inline int atomic_read(const atomic_t *v)
+{
+	return (v)->counter;
+}
+
+/*
+ * atomic_set needs to take the lock to protect atomic_add_unless from a
+ * possible race, as it reads the counter twice:
+ *
+ *  CPU0                               CPU1
+ *  atomic_add_unless(1, 0)
+ *    ret = v->counter (non-zero)
+ *    if (ret != u)                    v->counter = 0
+ *      v->counter += 1 (counter set to 1)
+ *
+ * Making atomic_set take the lock ensures that ordering and logical
+ * consistency is preserved.
+ */
+static inline int atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+
+	__global_lock1(flags);
+	fence();
+	v->counter = i;
+	__global_unlock1(flags);
+	return i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	__global_lock1(flags);
+	fence();
+	v->counter += i;
+	__global_unlock1(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	__global_lock1(flags);
+	fence();
+	v->counter -= i;
+	__global_unlock1(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long result;
+	unsigned long flags;
+
+	__global_lock1(flags);
+	result = v->counter;
+	result += i;
+	fence();
+	v->counter = result;
+	__global_unlock1(flags);
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long result;
+	unsigned long flags;
+
+	__global_lock1(flags);
+	result = v->counter;
+	result -= i;
+	fence();
+	v->counter = result;
+	__global_unlock1(flags);
+
+	return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	__global_lock1(flags);
+	fence();
+	v->counter &= ~mask;
+	__global_unlock1(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	__global_lock1(flags);
+	fence();
+	v->counter |= mask;
+	__global_unlock1(flags);
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	__global_lock1(flags);
+	ret = v->counter;
+	if (ret == old) {
+		fence();
+		v->counter = new;
+	}
+	__global_unlock1(flags);
+
+	return ret;
+}
+
+#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	__global_lock1(flags);
+	ret = v->counter;
+	if (ret != u) {
+		fence();
+		v->counter += a;
+	}
+	__global_unlock1(flags);
+
+	return ret;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+	int ret;
+	unsigned long flags;
+
+	__global_lock1(flags);
+	ret = v->counter - i;
+	if (ret >= 0) {
+		fence();
+		v->counter = ret;
+	}
+	__global_unlock1(flags);
+
+	return ret;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LOCK1_H */
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
new file mode 100644
index 000000000000..c0d0df0d1378
--- /dev/null
+++ b/arch/metag/include/asm/bitops.h
@@ -0,0 +1,132 @@
+#ifndef __ASM_METAG_BITOPS_H
+#define __ASM_METAG_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+#ifdef CONFIG_SMP
+/*
+ * These functions are the basis of our bit ops.
+ */
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	fence();
+	*p |= mask;
+	__global_unlock1(flags);
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	fence();
+	*p &= ~mask;
+	__global_unlock1(flags);
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	fence();
+	*p ^= mask;
+	__global_unlock1(flags);
+}
+
+static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long old;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	old = *p;
+	if (!(old & mask)) {
+		fence();
+		*p = old | mask;
+	}
+	__global_unlock1(flags);
+
+	return (old & mask) != 0;
+}
+
+static inline int test_and_clear_bit(unsigned int bit,
+				     volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long old;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	old = *p;
+	if (old & mask) {
+		fence();
+		*p = old & ~mask;
+	}
+	__global_unlock1(flags);
+
+	return (old & mask) != 0;
+}
+
+static inline int test_and_change_bit(unsigned int bit,
+				      volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long old;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__global_lock1(flags);
+	fence();
+	old = *p;
+	*p = old ^ mask;
+	__global_unlock1(flags);
+
+	return (old & mask) != 0;
+}
+
+#else
+#include <asm-generic/bitops/atomic.h>
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_METAG_BITOPS_H */
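An illustrative sketch (not part of the patch) of how the
test_and_set_bit() defined above is typically used -- claiming a free
slot from a small bitmap. NSLOTS, slot_map and claim_slot() are
hypothetical:

  #define NSLOTS 32

  static unsigned long slot_map[BITS_TO_LONGS(NSLOTS)];

  static int claim_slot(void)
  {
  	int i;

  	for (i = 0; i < NSLOTS; i++) {
  		/* Returns the old bit value: 0 means we won this slot. */
  		if (!test_and_set_bit(i, slot_map))
  			return i;
  	}
  	return -1;	/* all slots busy; release with clear_bit() */
  }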
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..b1bc1be8540f
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_METAG_CMPXCHG_H
+#define __ASM_METAG_CMPXCHG_H
+
+#include <asm/barrier.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+#include <asm/cmpxchg_irq.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/cmpxchg_lock1.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
+#include <asm/cmpxchg_lnkget.h>
+#endif
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size)				\
+({							\
+	unsigned long __xchg__res;			\
+	volatile void *__xchg_ptr = (ptr);		\
+	switch (size) {					\
+	case 4:						\
+		__xchg__res = xchg_u32(__xchg_ptr, x);	\
+		break;					\
+	case 1:						\
+		__xchg__res = xchg_u8(__xchg_ptr, x);	\
+		break;					\
+	default:					\
+		__xchg_called_with_bad_pointer();	\
+		__xchg__res = x;			\
+		break;					\
+	}						\
+							\
+	__xchg__res;					\
+})
+
+#define xchg(ptr, x)	\
+	((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#define cmpxchg(ptr, o, n)						\
+	({								\
+		__typeof__(*(ptr)) _o_ = (o);				\
+		__typeof__(*(ptr)) _n_ = (n);				\
+		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+					       (unsigned long)_n_,	\
+					       sizeof(*(ptr)));		\
+	})
+
+#endif /* __ASM_METAG_CMPXCHG_H */
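A classic cmpxchg() retry loop, as an illustrative sketch (the
atomic_max() helper is hypothetical, not part of the patch): raise *v
to at least 'new' without ever losing a concurrent larger update.

  static inline void atomic_max(volatile unsigned int *v, unsigned int new)
  {
  	unsigned int old;

  	do {
  		old = *v;
  		if (old >= new)
  			return;	/* already at least 'new' */
  		/* cmpxchg() returns the value it found; if that is no
  		 * longer 'old', somebody raced us and we go round again. */
  	} while (cmpxchg(v, old, new) != old);
  }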
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h
new file mode 100644
index 000000000000..649573168b05
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_irq.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_METAG_CMPXCHG_IRQ_H
+#define __ASM_METAG_CMPXCHG_IRQ_H
+
+#include <asm/irqflags.h>
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val & 0xff;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);	/* implies memory barrier */
+	return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_IRQ_H */
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
new file mode 100644
index 000000000000..0154e2807ebb
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -0,0 +1,86 @@
+#ifndef __ASM_METAG_CMPXCHG_LNKGET_H
+#define __ASM_METAG_CMPXCHG_LNKGET_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	int temp, old;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	LNKSETD	[%2], %3\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+		"	DCACHE	[%2], %0\n"
+#endif
+		: "=&d" (temp), "=&d" (old)
+		: "da" (m), "da" (val)
+		: "cc"
+		);
+
+	smp_mb();
+
+	return old;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	int temp, old;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	LNKSETD	[%2], %3\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+		"	DCACHE	[%2], %0\n"
+#endif
+		: "=&d" (temp), "=&d" (old)
+		: "da" (m), "da" (val & 0xff)
+		: "cc"
+		);
+
+	smp_mb();
+
+	return old;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	__u32 retval, temp;
+
+	smp_mb();
+
+	asm volatile (
+		"1:	LNKGETD %1, [%2]\n"
+		"	CMP	%1, %3\n"
+		"	LNKSETDEQ [%2], %4\n"
+		"	BNE	2f\n"
+		"	DEFR	%0, TXSTAT\n"
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"
+		"	CMPT	%0, #HI(0x02000000)\n"
+		"	BNZ	1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+		"	DCACHE	[%2], %0\n"
+#endif
+		"2:\n"
+		: "=&d" (temp), "=&da" (retval)
+		: "da" (m), "bd" (old), "da" (new)
+		: "cc"
+		);
+
+	smp_mb();
+
+	return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LNKGET_H */
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h
new file mode 100644
index 000000000000..fd6850474969
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lock1.h
@@ -0,0 +1,48 @@
+#ifndef __ASM_METAG_CMPXCHG_LOCK1_H
+#define __ASM_METAG_CMPXCHG_LOCK1_H
+
+#include <asm/global_lock.h>
+
+/* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	__global_lock2(flags);
+	fence();
+	retval = *m;
+	*m = val;
+	__global_unlock2(flags);
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	__global_lock2(flags);
+	fence();
+	retval = *m;
+	*m = val & 0xff;
+	__global_unlock2(flags);
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	__global_lock2(flags);
+	retval = *m;
+	if (retval == old) {
+		fence();
+		*m = new;
+	}
+	__global_unlock2(flags);
+	return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LOCK1_H */
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h
new file mode 100644
index 000000000000..fc831c88c22a
--- /dev/null
+++ b/arch/metag/include/asm/global_lock.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_METAG_GLOBAL_LOCK_H
+#define __ASM_METAG_GLOBAL_LOCK_H
+
+#include <asm/metag_mem.h>
+
+/**
+ * __global_lock1() - Acquire global voluntary lock (LOCK1).
+ * @flags: Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock (LOCK1), also taking care to
+ * disable all triggers so we cannot be interrupted, and to enforce a
+ * compiler barrier so that the compiler cannot reorder memory accesses
+ * across the lock.
+ *
+ * No other hardware thread will be able to acquire the voluntary or
+ * exclusive locks until the voluntary lock is released with
+ * @__global_unlock1, but they may continue to execute as long as they
+ * aren't trying to acquire either of the locks.
+ */
+#define __global_lock1(flags) do {					\
+	unsigned int __trval;						\
+	asm volatile("MOV	%0,#0\n\t"				\
+		     "SWAP	%0,TXMASKI\n\t"				\
+		     "LOCK1"						\
+		     : "=r" (__trval)					\
+		     :							\
+		     : "memory");					\
+	(flags) = __trval;						\
+} while (0)
+
+/**
+ * __global_unlock1() - Release global voluntary lock (LOCK1).
+ * @flags: Variable to restore flags from.
+ *
+ * Releases the Meta global voluntary lock (LOCK1) acquired with
+ * @__global_lock1, also taking care to re-enable triggers, and to
+ * enforce a compiler barrier so that the compiler cannot reorder memory
+ * accesses across the unlock.
+ *
+ * This immediately allows another hardware thread to acquire the
+ * voluntary or exclusive locks.
+ */
+#define __global_unlock1(flags) do {					\
+	unsigned int __trval = (flags);					\
+	asm volatile("LOCK0\n\t"					\
+		     "MOV	TXMASKI,%0"				\
+		     :							\
+		     : "r" (__trval)					\
+		     : "memory");					\
+} while (0)
+
+/**
+ * __global_lock2() - Acquire global exclusive lock (LOCK2).
+ * @flags: Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock and global exclusive lock
+ * (LOCK2), also taking care to disable all triggers so we cannot be
+ * interrupted, to take the atomic lock (system event) and to enforce a
+ * compiler barrier so that the compiler cannot reorder memory accesses
+ * across the lock.
+ *
+ * No other hardware thread will be able to execute code until the locks
+ * are released with @__global_unlock2.
+ */
+#define __global_lock2(flags) do {					\
+	unsigned int __trval;						\
+	unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+	asm volatile("MOV	%0,#0\n\t"				\
+		     "SWAP	%0,TXMASKI\n\t"				\
+		     "LOCK2\n\t"					\
+		     "SETD	[%1+#0x40],D1RtP"			\
+		     : "=&r" (__trval)					\
+		     : "u" (__alock_hi)					\
+		     : "memory");					\
+	(flags) = __trval;						\
+} while (0)
+
+/**
+ * __global_unlock2() - Release global exclusive lock (LOCK2).
+ * @flags: Variable to restore flags from.
+ *
+ * Releases the Meta global exclusive lock (LOCK2) and global voluntary
+ * lock acquired with @__global_lock2, also taking care to release the
+ * atomic lock (system event), re-enable triggers, and to enforce a
+ * compiler barrier so that the compiler cannot reorder memory accesses
+ * across the unlock.
+ *
+ * This immediately allows other hardware threads to continue executing
+ * and one of them to acquire locks.
+ */
+#define __global_unlock2(flags) do {					\
+	unsigned int __trval = (flags);					\
+	unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+	asm volatile("SETD	[%1+#0x00],D1RtP\n\t"			\
+		     "LOCK0\n\t"					\
+		     "MOV	TXMASKI,%0"				\
+		     :							\
+		     : "r" (__trval),					\
+		       "u" (__alock_hi)					\
+		     : "memory");					\
+} while (0)
+
+#endif /* __ASM_METAG_GLOBAL_LOCK_H */
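The pattern that atomic_lock1.h and bitops.h build on these macros
looks like this in practice. An illustrative sketch (shared_stat and
stat_add() are hypothetical, not part of the patch), mirroring the
lock/fence/modify/unlock sequence those files use, where fence() comes
from <asm/barrier.h>:

  static unsigned int shared_stat;

  static inline void stat_add(unsigned int n)
  {
  	unsigned long flags;

  	__global_lock1(flags);		/* voluntary lock, triggers masked */
  	fence();			/* as the atomic_lock1.h code does */
  	shared_stat += n;
  	__global_unlock1(flags);	/* LOCK0, triggers restored */
  }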
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
new file mode 100644
index 000000000000..86a7cf3d1386
--- /dev/null
+++ b/arch/metag/include/asm/spinlock.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifdef CONFIG_METAG_ATOMICITY_LOCK1
+#include <asm/spinlock_lock1.h>
+#else
+#include <asm/spinlock_lnkget.h>
+#endif
+
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
new file mode 100644
index 000000000000..ad8436feed8d
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -0,0 +1,249 @@
+#ifndef __ASM_SPINLOCK_LNKGET_H
+#define __ASM_SPINLOCK_LNKGET_H
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	int ret;
+
+	asm volatile ("LNKGETD	%0, [%1]\n"
+		      "TST	%0, #1\n"
+		      "MOV	%0, #1\n"
+		      "XORZ	%0, %0, %0\n"
+		      : "=&d" (ret)
+		      : "da" (&lock->lock)
+		      : "cc");
+	return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	int tmp;
+
+	asm volatile ("1:	LNKGETD	%0,[%1]\n"
+		      "	TST	%0, #1\n"
+		      "	ADD	%0, %0, #1\n"
+		      "	LNKSETDZ [%1], %0\n"
+		      "	BNZ	1b\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	BNZ	1b\n"
+		      : "=&d" (tmp)
+		      : "da" (&lock->lock)
+		      : "cc");
+
+	smp_mb();
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	int tmp;
+
+	asm volatile ("	LNKGETD	%0,[%1]\n"
+		      "	TST	%0, #1\n"
+		      "	ADD	%0, %0, #1\n"
+		      "	LNKSETDZ [%1], %0\n"
+		      "	BNZ	1f\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	MOV	%0, #1\n"
+		      "1:	XORNZ	%0, %0, %0\n"
+		      : "=&d" (tmp)
+		      : "da" (&lock->lock)
+		      : "cc");
+
+	smp_mb();
+
+	return tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	asm volatile ("	SETD	[%0], %1\n"
+		      :
+		      : "da" (&lock->lock), "da" (0)
+		      : "memory");
+}
+
+/*
+ * RWLOCKS
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	asm volatile ("1:	LNKGETD	%0,[%1]\n"
+		      "	CMP	%0, #0\n"
+		      "	ADD	%0, %0, %2\n"
+		      "	LNKSETDZ [%1], %0\n"
+		      "	BNZ	1b\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	BNZ	1b\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock), "bd" (0x80000000)
+		      : "cc");
+
+	smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	asm volatile ("	LNKGETD	%0,[%1]\n"
+		      "	CMP	%0, #0\n"
+		      "	ADD	%0, %0, %2\n"
+		      "	LNKSETDZ [%1], %0\n"
+		      "	BNZ	1f\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	MOV	%0,#1\n"
+		      "1:	XORNZ	%0, %0, %0\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock), "bd" (0x80000000)
+		      : "cc");
+
+	smp_mb();
+
+	return tmp;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	smp_mb();
+
+	asm volatile ("	SETD	[%0], %1\n"
+		      :
+		      : "da" (&rw->lock), "da" (0)
+		      : "memory");
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+	int ret;
+
+	asm volatile ("LNKGETD	%0, [%1]\n"
+		      "CMP	%0, #0\n"
+		      "MOV	%0, #1\n"
+		      "XORNZ	%0, %0, %0\n"
+		      : "=&d" (ret)
+		      : "da" (&rw->lock)
+		      : "cc");
+	return ret;
+}
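An illustrative plain-C model (not part of the patch, and deliberately
not atomic -- it only shows the state machine) of the rwlock word used
by the write paths above and the read paths below: bit 31 is the
writer bit, bits 0..30 count readers.

  #define WRITER_BIT	0x80000000u

  /* model_read_trylock()/model_write_trylock() are hypothetical. */
  static inline int model_read_trylock(unsigned int *lock)
  {
  	if (*lock & WRITER_BIT)	/* writer present: (int)*lock < 0 */
  		return 0;
  	(*lock)++;		/* one more reader */
  	return 1;
  }

  static inline int model_write_trylock(unsigned int *lock)
  {
  	if (*lock != 0)		/* any reader or writer blocks us */
  		return 0;
  	*lock = WRITER_BIT;
  	return 1;
  }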
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	asm volatile ("1:	LNKGETD	%0,[%1]\n"
+		      "	ADDS	%0, %0, #1\n"
+		      "	LNKSETDPL [%1], %0\n"
+		      "	BMI	1b\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	BNZ	1b\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock)
+		      : "cc");
+
+	smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	smp_mb();
+
+	asm volatile ("1:	LNKGETD	%0,[%1]\n"
+		      "	SUB	%0, %0, #1\n"
+		      "	LNKSETD	[%1], %0\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	BNZ	1b\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock)
+		      : "cc", "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	asm volatile ("	LNKGETD	%0,[%1]\n"
+		      "	ADDS	%0, %0, #1\n"
+		      "	LNKSETDPL [%1], %0\n"
+		      "	BMI	1f\n"
+		      "	DEFR	%0, TXSTAT\n"
+		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
+		      "	CMPT	%0, #HI(0x02000000)\n"
+		      "	MOV	%0,#1\n"
+		      "	BZ	2f\n"
+		      "1:	MOV	%0,#0\n"
+		      "2:\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock)
+		      : "cc");
+
+	smp_mb();
+
+	return tmp;
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+	int tmp;
+
+	asm volatile ("LNKGETD	%0, [%1]\n"
+		      "CMP	%0, %2\n"
+		      "MOV	%0, #1\n"
+		      "XORZ	%0, %0, %0\n"
+		      : "=&d" (tmp)
+		      : "da" (&rw->lock), "bd" (0x80000000)
+		      : "cc");
+	return tmp;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SPINLOCK_LNKGET_H */
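Kernel code never calls these arch_* entry points directly -- they sit
underneath the generic spin_lock()/spin_trylock() wrappers. As an
illustrative sketch of the contract that both the LNKGET
implementation above and the LOCK1 implementation below must honour
(example_critical() is hypothetical):

  static inline void example_critical(arch_spinlock_t *l)
  {
  	if (!arch_spin_trylock(l))	/* 0 means someone else holds it */
  		arch_spin_lock(l);	/* so spin until acquired */
  	/* critical section: ordered by the barriers in lock/unlock */
  	arch_spin_unlock(l);
  }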
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
new file mode 100644
index 000000000000..c630444cffe9
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -0,0 +1,184 @@
+#ifndef __ASM_SPINLOCK_LOCK1_H
+#define __ASM_SPINLOCK_LOCK1_H
+
+#include <asm/bug.h>
+#include <asm/global_lock.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	int ret;
+
+	barrier();
+	ret = lock->lock;
+	WARN_ON(ret != 0 && ret != 1);
+	return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int we_won = 0;
+	unsigned long flags;
+
+again:
+	__global_lock1(flags);
+	if (lock->lock == 0) {
+		fence();
+		lock->lock = 1;
+		we_won = 1;
+	}
+	__global_unlock1(flags);
+	if (we_won == 0)
+		goto again;
+	WARN_ON(lock->lock != 1);
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	__global_lock1(flags);
+	ret = lock->lock;
+	if (ret == 0) {
+		fence();
+		lock->lock = 1;
+	}
+	__global_unlock1(flags);
+	return (ret == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	barrier();
+	WARN_ON(!lock->lock);
+	lock->lock = 0;
+}
+
+/*
+ * RWLOCKS
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	unsigned int we_won = 0;
+
+again:
+	__global_lock1(flags);
+	if (rw->lock == 0) {
+		fence();
+		rw->lock = 0x80000000;
+		we_won = 1;
+	}
+	__global_unlock1(flags);
+	if (we_won == 0)
+		goto again;
+	WARN_ON(rw->lock != 0x80000000);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	__global_lock1(flags);
+	ret = rw->lock;
+	if (ret == 0) {
+		fence();
+		rw->lock = 0x80000000;
+	}
+	__global_unlock1(flags);
+
+	return (ret == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	barrier();
+	WARN_ON(rw->lock != 0x80000000);
+	rw->lock = 0;
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+	unsigned int ret;
+
+	barrier();
+	ret = rw->lock;
+	return (ret == 0);
+}
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	unsigned int we_won = 0, ret;
+
+again:
+	__global_lock1(flags);
+	ret = rw->lock;
+	if (ret < 0x80000000) {
+		fence();
+		rw->lock = ret + 1;
+		we_won = 1;
+	}
+	__global_unlock1(flags);
+	if (!we_won)
+		goto again;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	__global_lock1(flags);
+	fence();
+	ret = rw->lock--;
+	__global_unlock1(flags);
+	WARN_ON(ret == 0);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	__global_lock1(flags);
+	ret = rw->lock;
+	if (ret < 0x80000000) {
+		fence();
+		rw->lock = ret + 1;
+	}
+	__global_unlock1(flags);
+	return (ret < 0x80000000);
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+	unsigned int ret;
+
+	barrier();
+	ret = rw->lock;
+	return (ret < 0x80000000);
+}
+
+#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..b76391405fea
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_METAG_SPINLOCK_TYPES_H
+#define _ASM_METAG_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif /* _ASM_METAG_SPINLOCK_TYPES_H */
--
cgit v1.2.3-58-ga151