author | Ingo Molnar <mingo@kernel.org> | 2015-08-12 11:44:30 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2015-08-12 11:44:30 +0200
commit | f52609fdabb1c726f8b9fdf64df3161781aa186e (patch) |
tree | 3b7d4a47505eddb097bab848a8ffdc4bbbf79bb7 /arch |
parent | 20f9ed1568c00bbd9e6af31341d25e06bc3d4a16 (diff) |
parent | 41b9e9fcc1c44b84a785115058ce9c703e3fca6e (diff) |
Merge branch 'locking/arch-atomic' into locking/core, because it's ready for upstream
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
52 files changed, 696 insertions, 916 deletions
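
For readers skimming the diff: the series replaces the per-arch atomic_set_mask()/atomic_clear_mask() helpers with the generic bitwise ops atomic_or(), atomic_and(), atomic_andnot() and atomic_xor(). As a rough illustration of the semantics callers now rely on (this is not kernel code; the my_* names and the use of GCC __atomic builtins below are invented for this sketch), a minimal user-space example:

/*
 * Hypothetical user-space sketch (not the kernel implementation):
 * models the old mask helpers on top of the new bitwise ops using
 * GCC __atomic builtins.  All my_* names are invented.
 */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for atomic_t */

static void my_atomic_or(int i, my_atomic_t *v)
{
	__atomic_fetch_or(&v->counter, i, __ATOMIC_RELAXED);
}

static void my_atomic_andnot(int i, my_atomic_t *v)
{
	__atomic_fetch_and(&v->counter, ~i, __ATOMIC_RELAXED);
}

int main(void)
{
	my_atomic_t flags = { .counter = 0 };

	/* old: atomic_set_mask(0x10, &flags)   -> new: atomic_or(0x10, &flags)     */
	my_atomic_or(0x10, &flags);

	/* old: atomic_clear_mask(0x10, &flags) -> new: atomic_andnot(0x10, &flags) */
	my_atomic_andnot(0x10, &flags);

	printf("flags = %#x\n", flags.counter);	/* prints 0 */
	return 0;
}

The non-returning kernel atomics carry no ordering guarantees, so a relaxed memory order is a fair stand-in here; in the diff below each architecture supplies its own implementation of these ops.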
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 8f8eafbedd7c..e8c956098424 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -29,13 +29,13 @@ * branch back to restart the operation. */ -#define ATOMIC_OP(op) \ +#define ATOMIC_OP(op, asm_op) \ static __inline__ void atomic_##op(int i, atomic_t * v) \ { \ unsigned long temp; \ __asm__ __volatile__( \ "1: ldl_l %0,%1\n" \ - " " #op "l %0,%2,%0\n" \ + " " #asm_op " %0,%2,%0\n" \ " stl_c %0,%1\n" \ " beq %0,2f\n" \ ".subsection 2\n" \ @@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ :"Ir" (i), "m" (v->counter)); \ } \ -#define ATOMIC_OP_RETURN(op) \ +#define ATOMIC_OP_RETURN(op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ long temp, result; \ smp_mb(); \ __asm__ __volatile__( \ "1: ldl_l %0,%1\n" \ - " " #op "l %0,%3,%2\n" \ - " " #op "l %0,%3,%0\n" \ + " " #asm_op " %0,%3,%2\n" \ + " " #asm_op " %0,%3,%0\n" \ " stl_c %0,%1\n" \ " beq %0,2f\n" \ ".subsection 2\n" \ @@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ return result; \ } -#define ATOMIC64_OP(op) \ +#define ATOMIC64_OP(op, asm_op) \ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ { \ unsigned long temp; \ __asm__ __volatile__( \ "1: ldq_l %0,%1\n" \ - " " #op "q %0,%2,%0\n" \ + " " #asm_op " %0,%2,%0\n" \ " stq_c %0,%1\n" \ " beq %0,2f\n" \ ".subsection 2\n" \ @@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ :"Ir" (i), "m" (v->counter)); \ } \ -#define ATOMIC64_OP_RETURN(op) \ +#define ATOMIC64_OP_RETURN(op, asm_op) \ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ { \ long temp, result; \ smp_mb(); \ __asm__ __volatile__( \ "1: ldq_l %0,%1\n" \ - " " #op "q %0,%3,%2\n" \ - " " #op "q %0,%3,%0\n" \ + " " #asm_op " %0,%3,%2\n" \ + " " #asm_op " %0,%3,%0\n" \ " stq_c %0,%1\n" \ " beq %0,2f\n" \ ".subsection 2\n" \ @@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ return result; \ } -#define ATOMIC_OPS(opg) \ - ATOMIC_OP(opg) \ - ATOMIC_OP_RETURN(opg) \ - ATOMIC64_OP(opg) \ - ATOMIC64_OP_RETURN(opg) +#define ATOMIC_OPS(op) \ + ATOMIC_OP(op, op##l) \ + ATOMIC_OP_RETURN(op, op##l) \ + ATOMIC64_OP(op, op##q) \ + ATOMIC64_OP_RETURN(op, op##q) ATOMIC_OPS(add) ATOMIC_OPS(sub) +#define atomic_andnot atomic_andnot +#define atomic64_andnot atomic64_andnot + +ATOMIC_OP(and, and) +ATOMIC_OP(andnot, bic) +ATOMIC_OP(or, bis) +ATOMIC_OP(xor, xor) +ATOMIC64_OP(and, and) +ATOMIC64_OP(andnot, bic) +ATOMIC64_OP(or, bis) +ATOMIC64_OP(xor, xor) + #undef ATOMIC_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03484cb4d16d..d8a85e706fba 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -143,9 +143,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -ATOMIC_OP(and, &=, and) -#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v)) +#define atomic_andnot atomic_andnot + +ATOMIC_OP(and, &=, and) +ATOMIC_OP(andnot, &= ~, bic) +ATOMIC_OP(or, |=, or) +ATOMIC_OP(xor, ^=, xor) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index e22c11970b7b..82b75a7cb762 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -194,6 +194,13 @@ static inline int 
__atomic_add_unless(atomic_t *v, int a, int u) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) +#define atomic_andnot atomic_andnot + +ATOMIC_OP(and, &=, and) +ATOMIC_OP(andnot, &= ~, bic) +ATOMIC_OP(or, |=, orr) +ATOMIC_OP(xor, ^=, eor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -321,6 +328,13 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, subs, sbc) +#define atomic64_andnot atomic64_andnot + +ATOMIC64_OP(and, and, and) +ATOMIC64_OP(andnot, bic, bic) +ATOMIC64_OP(or, orr, orr) +ATOMIC64_OP(xor, eor, eor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 7047051ded40..866a71fca9a3 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, sub) +#define atomic_andnot atomic_andnot + +ATOMIC_OP(and, and) +ATOMIC_OP(andnot, bic) +ATOMIC_OP(or, orr) +ATOMIC_OP(xor, eor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \ ATOMIC64_OPS(add, add) ATOMIC64_OPS(sub, sub) +#define atomic64_andnot atomic64_andnot + +ATOMIC64_OP(and, and) +ATOMIC64_OP(andnot, bic) +ATOMIC64_OP(or, orr) +ATOMIC64_OP(xor, eor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 2d07ce1c5327..97c9bdf83409 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OP_RETURN(sub, sub, rKs21) ATOMIC_OP_RETURN(add, add, r) +#define ATOMIC_OP(op, asm_op) \ +ATOMIC_OP_RETURN(op, asm_op, r) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + (void)__atomic_##op##_return(i, v); \ +} + +ATOMIC_OP(and, and) +ATOMIC_OP(or, or) +ATOMIC_OP(xor, eor) + +#undef ATOMIC_OP #undef ATOMIC_OP_RETURN /* diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index a107a98e9978..1c1c42330c99 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -16,19 +16,21 @@ #include <linux/types.h> asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); -asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); -asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value); -asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value); +asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value); + +asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value); +asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value); asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value); asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) -#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i) -#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i)) +#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) +#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) -#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m) -#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m) +#define atomic_or(i, v) 
(void)__raw_atomic_or_asm(&(v)->counter, i) +#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) +#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) #endif diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index c446591b961d..a401c27b69b4 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c @@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl); EXPORT_SYMBOL(insl_16); #ifdef CONFIG_SMP -EXPORT_SYMBOL(__raw_atomic_update_asm); -EXPORT_SYMBOL(__raw_atomic_clear_asm); -EXPORT_SYMBOL(__raw_atomic_set_asm); +EXPORT_SYMBOL(__raw_atomic_add_asm); +EXPORT_SYMBOL(__raw_atomic_and_asm); +EXPORT_SYMBOL(__raw_atomic_or_asm); EXPORT_SYMBOL(__raw_atomic_xor_asm); EXPORT_SYMBOL(__raw_atomic_test_asm); + EXPORT_SYMBOL(__raw_xchg_1_asm); EXPORT_SYMBOL(__raw_xchg_2_asm); EXPORT_SYMBOL(__raw_xchg_4_asm); diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S index 2a08df8e8c4c..26fccb5568b9 100644 --- a/arch/blackfin/mach-bf561/atomic.S +++ b/arch/blackfin/mach-bf561/atomic.S @@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm) * r0 = ptr * r1 = value * - * Add a signed value to a 32bit word and return the new value atomically. + * ADD a signed value to a 32bit word and return the new value atomically. * Clobbers: r3:0, p1:0 */ -ENTRY(___raw_atomic_update_asm) +ENTRY(___raw_atomic_add_asm) p1 = r0; r3 = r1; [--sp] = rets; @@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm) r0 = r3; rets = [sp++]; rts; -ENDPROC(___raw_atomic_update_asm) +ENDPROC(___raw_atomic_add_asm) /* * r0 = ptr * r1 = mask * - * Clear the mask bits from a 32bit word and return the old 32bit value + * AND the mask bits from a 32bit word and return the old 32bit value * atomically. * Clobbers: r3:0, p1:0 */ -ENTRY(___raw_atomic_clear_asm) +ENTRY(___raw_atomic_and_asm) p1 = r0; - r3 = ~r1; + r3 = r1; [--sp] = rets; call _get_core_lock; r2 = [p1]; @@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm) r0 = r3; rets = [sp++]; rts; -ENDPROC(___raw_atomic_clear_asm) +ENDPROC(___raw_atomic_and_asm) /* * r0 = ptr * r1 = mask * - * Set the mask bits into a 32bit word and return the old 32bit value + * OR the mask bits into a 32bit word and return the old 32bit value * atomically. 
* Clobbers: r3:0, p1:0 */ -ENTRY(___raw_atomic_set_asm) +ENTRY(___raw_atomic_or_asm) p1 = r0; r3 = r1; [--sp] = rets; @@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm) r0 = r3; rets = [sp++]; rts; -ENDPROC(___raw_atomic_set_asm) +ENDPROC(___raw_atomic_or_asm) /* * r0 = ptr @@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm) r2 = r1; r1 = 1; r1 <<= r2; - jump ___raw_atomic_set_asm + jump ___raw_atomic_or_asm ENDPROC(___raw_bit_set_asm) /* @@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm) * Clobbers: r3:0, p1:0 */ ENTRY(___raw_bit_clear_asm) - r2 = r1; - r1 = 1; - r1 <<= r2; - jump ___raw_atomic_clear_asm + r2 = 1; + r2 <<= r1; + r1 = ~r2; + jump ___raw_atomic_and_asm ENDPROC(___raw_bit_clear_asm) /* diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 1c7259597395..0030e21cfceb 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) local_irq_save(flags); for_each_cpu(cpu, cpumask) { bfin_ipi_data = &per_cpu(bfin_ipi, cpu); - atomic_set_mask((1 << msg), &bfin_ipi_data->bits); + atomic_or((1 << msg), &bfin_ipi_data->bits); atomic_inc(&bfin_ipi_data->count); } local_irq_restore(flags); diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 102190a61d65..0da689def4cc 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -15,7 +15,6 @@ #define _ASM_ATOMIC_H #include <linux/types.h> -#include <asm/spr-regs.h> #include <asm/cmpxchg.h> #include <asm/barrier.h> @@ -23,6 +22,8 @@ #error not SMP safe #endif +#include <asm/atomic_defs.h> + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. @@ -34,56 +35,26 @@ #define atomic_read(v) ACCESS_ONCE((v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) -#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS -static inline int atomic_add_return(int i, atomic_t *v) +static inline int atomic_inc_return(atomic_t *v) { - unsigned long val; + return __atomic_add_return(1, &v->counter); +} - asm("0: \n" - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ - " ckeq icc3,cc7 \n" - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ - " add%I2 %1,%2,%1 \n" - " cst.p %1,%M0 ,cc3,#1 \n" - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ - " beq icc3,#0,0b \n" - : "+U"(v->counter), "=&r"(val) - : "NPr"(i) - : "memory", "cc7", "cc3", "icc3" - ); +static inline int atomic_dec_return(atomic_t *v) +{ + return __atomic_sub_return(1, &v->counter); +} - return val; +static inline int atomic_add_return(int i, atomic_t *v) +{ + return __atomic_add_return(i, &v->counter); } static inline int atomic_sub_return(int i, atomic_t *v) { - unsigned long val; - - asm("0: \n" - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ - " ckeq icc3,cc7 \n" - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ - " sub%I2 %1,%2,%1 \n" - " cst.p %1,%M0 ,cc3,#1 \n" - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ - " beq icc3,#0,0b \n" - : "+U"(v->counter), "=&r"(val) - : "NPr"(i) - : "memory", "cc7", "cc3", "icc3" - ); - - return val; + return __atomic_sub_return(i, &v->counter); } -#else - -extern int atomic_add_return(int i, atomic_t *v); -extern int atomic_sub_return(int i, atomic_t *v); - -#endif - static inline int atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -101,17 +72,14 @@ static inline void atomic_sub(int i, 
atomic_t *v) static inline void atomic_inc(atomic_t *v) { - atomic_add_return(1, v); + atomic_inc_return(v); } static inline void atomic_dec(atomic_t *v) { - atomic_sub_return(1, v); + atomic_dec_return(v); } -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) @@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v) * 64-bit atomic ops */ typedef struct { - volatile long long counter; + long long counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } -static inline long long atomic64_read(atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v) { long long counter; asm("ldd%I1 %M1,%0" : "=e"(counter) : "m"(v->counter)); + return counter; } @@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i) : "e"(i)); } -extern long long atomic64_inc_return(atomic64_t *v); -extern long long atomic64_dec_return(atomic64_t *v); -extern long long atomic64_add_return(long long i, atomic64_t *v); -extern long long atomic64_sub_return(long long i, atomic64_t *v); +static inline long long atomic64_inc_return(atomic64_t *v) +{ + return __atomic64_add_return(1, &v->counter); +} + +static inline long long atomic64_dec_return(atomic64_t *v) +{ + return __atomic64_sub_return(1, &v->counter); +} + +static inline long long atomic64_add_return(long long i, atomic64_t *v) +{ + return __atomic64_add_return(i, &v->counter); +} + +static inline long long atomic64_sub_return(long long i, atomic64_t *v) +{ + return __atomic64_sub_return(i, &v->counter); +} static inline long long atomic64_add_negative(long long i, atomic64_t *v) { @@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) + #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) @@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } +#define ATOMIC_OP(op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + (void)__atomic32_fetch_##op(i, &v->counter); \ +} \ + \ +static inline void atomic64_##op(long long i, atomic64_t *v) \ +{ \ + (void)__atomic64_fetch_##op(i, &v->counter); \ +} + +ATOMIC_OP(or) +ATOMIC_OP(and) +ATOMIC_OP(xor) + +#undef ATOMIC_OP #endif /* _ASM_ATOMIC_H */ diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h new file mode 100644 index 000000000000..36e126d2f801 --- /dev/null +++ b/arch/frv/include/asm/atomic_defs.h @@ -0,0 +1,172 @@ + +#include <asm/spr-regs.h> + +#ifdef __ATOMIC_LIB__ + +#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS + +#define ATOMIC_QUALS +#define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x) + +#else /* !OUTOFLINE && LIB */ + +#define ATOMIC_OP_RETURN(op) +#define ATOMIC_FETCH_OP(op) + +#endif /* OUTOFLINE */ + +#else /* !__ATOMIC_LIB__ */ + +#define ATOMIC_EXPORT(x) + +#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS + +#define ATOMIC_OP_RETURN(op) \ +extern int __atomic_##op##_return(int i, int *v); \ +extern long long __atomic64_##op##_return(long long i, long long *v); + +#define ATOMIC_FETCH_OP(op) \ +extern int __atomic32_fetch_##op(int i, int *v); \ +extern long long 
__atomic64_fetch_##op(long long i, long long *v); + +#else /* !OUTOFLINE && !LIB */ + +#define ATOMIC_QUALS static inline + +#endif /* OUTOFLINE */ +#endif /* __ATOMIC_LIB__ */ + + +/* + * Note on the 64 bit inline asm variants... + * + * CSTD is a conditional instruction and needs a constrained memory reference. + * Normally 'U' provides the correct constraints for conditional instructions + * and this is used for the 32 bit version, however 'U' does not appear to work + * for 64 bit values (gcc-4.9) + * + * The exact constraint is that conditional instructions cannot deal with an + * immediate displacement in the memory reference, so what we do is we read the + * address through a volatile cast into a local variable in order to insure we + * _have_ to compute the correct address without displacement. This allows us + * to use the regular 'm' for the memory address. + * + * Furthermore, the %Ln operand, which prints the low word register (r+1), + * really only works for registers, this means we cannot allow immediate values + * for the 64 bit versions -- like we do for the 32 bit ones. + * + */ + +#ifndef ATOMIC_OP_RETURN +#define ATOMIC_OP_RETURN(op) \ +ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \ +{ \ + int val; \ + \ + asm volatile( \ + "0: \n" \ + " orcc gr0,gr0,gr0,icc3 \n" \ + " ckeq icc3,cc7 \n" \ + " ld.p %M0,%1 \n" \ + " orcr cc7,cc7,cc3 \n" \ + " "#op"%I2 %1,%2,%1 \n" \ + " cst.p %1,%M0 ,cc3,#1 \n" \ + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ + " beq icc3,#0,0b \n" \ + : "+U"(*v), "=&r"(val) \ + : "NPr"(i) \ + : "memory", "cc7", "cc3", "icc3" \ + ); \ + \ + return val; \ +} \ +ATOMIC_EXPORT(__atomic_##op##_return); \ + \ +ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \ +{ \ + long long *__v = READ_ONCE(v); \ + long long val; \ + \ + asm volatile( \ + "0: \n" \ + " orcc gr0,gr0,gr0,icc3 \n" \ + " ckeq icc3,cc7 \n" \ + " ldd.p %M0,%1 \n" \ + " orcr cc7,cc7,cc3 \n" \ + " "#op"cc %L1,%L2,%L1,icc0 \n" \ + " "#op"x %1,%2,%1,icc0 \n" \ + " cstd.p %1,%M0 ,cc3,#1 \n" \ + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ + " beq icc3,#0,0b \n" \ + : "+m"(*__v), "=&e"(val) \ + : "e"(i) \ + : "memory", "cc7", "cc3", "icc0", "icc3" \ + ); \ + \ + return val; \ +} \ +ATOMIC_EXPORT(__atomic64_##op##_return); +#endif + +#ifndef ATOMIC_FETCH_OP +#define ATOMIC_FETCH_OP(op) \ +ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \ +{ \ + int old, tmp; \ + \ + asm volatile( \ + "0: \n" \ + " orcc gr0,gr0,gr0,icc3 \n" \ + " ckeq icc3,cc7 \n" \ + " ld.p %M0,%1 \n" \ + " orcr cc7,cc7,cc3 \n" \ + " "#op"%I3 %1,%3,%2 \n" \ + " cst.p %2,%M0 ,cc3,#1 \n" \ + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ + " beq icc3,#0,0b \n" \ + : "+U"(*v), "=&r"(old), "=r"(tmp) \ + : "NPr"(i) \ + : "memory", "cc7", "cc3", "icc3" \ + ); \ + \ + return old; \ +} \ +ATOMIC_EXPORT(__atomic32_fetch_##op); \ + \ +ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \ +{ \ + long long *__v = READ_ONCE(v); \ + long long old, tmp; \ + \ + asm volatile( \ + "0: \n" \ + " orcc gr0,gr0,gr0,icc3 \n" \ + " ckeq icc3,cc7 \n" \ + " ldd.p %M0,%1 \n" \ + " orcr cc7,cc7,cc3 \n" \ + " "#op" %L1,%L3,%L2 \n" \ + " "#op" %1,%3,%2 \n" \ + " cstd.p %2,%M0 ,cc3,#1 \n" \ + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ + " beq icc3,#0,0b \n" \ + : "+m"(*__v), "=&e"(old), "=e"(tmp) \ + : "e"(i) \ + : "memory", "cc7", "cc3", "icc3" \ + ); \ + \ + return old; \ +} \ +ATOMIC_EXPORT(__atomic64_fetch_##op); +#endif + +ATOMIC_FETCH_OP(or) +ATOMIC_FETCH_OP(and) +ATOMIC_FETCH_OP(xor) + +ATOMIC_OP_RETURN(add) +ATOMIC_OP_RETURN(sub) + 
+#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_QUALS +#undef ATOMIC_EXPORT diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h index 96de220ef131..0df8e95e3715 100644 --- a/arch/frv/include/asm/bitops.h +++ b/arch/frv/include/asm/bitops.h @@ -25,109 +25,30 @@ #include <asm-generic/bitops/ffz.h> -#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS -static inline -unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) -{ - unsigned long old, tmp; - - asm volatile( - "0: \n" - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ - " ckeq icc3,cc7 \n" - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ - " and%I3 %1,%3,%2 \n" - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ - " beq icc3,#0,0b \n" - : "+U"(*v), "=&r"(old), "=r"(tmp) - : "NPr"(~mask) - : "memory", "cc7", "cc3", "icc3" - ); - - return old; -} - -static inline -unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) -{ - unsigned long old, tmp; - - asm volatile( - "0: \n" - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ - " ckeq icc3,cc7 \n" - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ - " or%I3 %1,%3,%2 \n" - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ - " beq icc3,#0,0b \n" - : "+U"(*v), "=&r"(old), "=r"(tmp) - : "NPr"(mask) - : "memory", "cc7", "cc3", "icc3" - ); - - return old; -} - -static inline -unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) -{ - unsigned long old, tmp; - - asm volatile( - "0: \n" - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ - " ckeq icc3,cc7 \n" - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ - " xor%I3 %1,%3,%2 \n" - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... 
clear ICC3.Z */ - " beq icc3,#0,0b \n" - : "+U"(*v), "=&r"(old), "=r"(tmp) - : "NPr"(mask) - : "memory", "cc7", "cc3", "icc3" - ); - - return old; -} - -#else - -extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); -extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); -extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); - -#endif - -#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) -#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) +#include <asm/atomic.h> static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) { - volatile unsigned long *ptr = addr; - unsigned long mask = 1UL << (nr & 31); + unsigned int *ptr = (void *)addr; + unsigned int mask = 1UL << (nr & 31); ptr += nr >> 5; - return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; + return (__atomic32_fetch_and(~mask, ptr) & mask) != 0; } static inline int test_and_set_bit(unsigned long nr, volatile void *addr) { - volatile unsigned long *ptr = addr; - unsigned long mask = 1UL << (nr & 31); + unsigned int *ptr = (void *)addr; + unsigned int mask = 1UL << (nr & 31); ptr += nr >> 5; - return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; + return (__atomic32_fetch_or(mask, ptr) & mask) != 0; } static inline int test_and_change_bit(unsigned long nr, volatile void *addr) { - volatile unsigned long *ptr = addr; - unsigned long mask = 1UL << (nr & 31); + unsigned int *ptr = (void *)addr; + unsigned int mask = 1UL << (nr & 31); ptr += nr >> 5; - return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; + return (__atomic32_fetch_xor(mask, ptr) & mask) != 0; } static inline void clear_bit(unsigned long nr, volatile void *addr) diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c index 156184e17e57..370dc9fa0b11 100644 --- a/arch/frv/kernel/dma.c +++ b/arch/frv/kernel/dma.c @@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = { static DEFINE_RWLOCK(frv_dma_channels_lock); -unsigned long frv_dma_inprogress; +unsigned int frv_dma_inprogress; #define frv_clear_dma_inprogress(channel) \ - atomic_clear_mask(1 << (channel), &frv_dma_inprogress); + (void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress); #define frv_set_dma_inprogress(channel) \ - atomic_set_mask(1 << (channel), &frv_dma_inprogress); + (void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress); /*****************************************************************************/ /* diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c index 86c516d96dcd..cdb4ce9960eb 100644 --- a/arch/frv/kernel/frv_ksyms.c +++ b/arch/frv/kernel/frv_ksyms.c @@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns); EXPORT_SYMBOL(__insl_ns); #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS -EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask); -EXPORT_SYMBOL(atomic_test_and_OR_mask); -EXPORT_SYMBOL(atomic_test_and_XOR_mask); -EXPORT_SYMBOL(atomic_add_return); -EXPORT_SYMBOL(atomic_sub_return); EXPORT_SYMBOL(__xchg_32); EXPORT_SYMBOL(__cmpxchg_32); #endif diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile index 4ff2fb1e6b16..970e8b4f1a02 100644 --- a/arch/frv/lib/Makefile +++ b/arch/frv/lib/Makefile @@ -5,4 +5,4 @@ lib-y := \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o 
atomic-lib.o diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c new file mode 100644 index 000000000000..4d1b887c248b --- /dev/null +++ b/arch/frv/lib/atomic-lib.c @@ -0,0 +1,7 @@ + +#include <linux/export.h> +#include <asm/atomic.h> + +#define __ATOMIC_LIB__ + +#include <asm/atomic_defs.h> diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S index 5e9e6ab5dd0e..b7439a960b5b 100644 --- a/arch/frv/lib/atomic-ops.S +++ b/arch/frv/lib/atomic-ops.S @@ -19,116 +19,6 @@ ############################################################################### # -# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); -# -############################################################################### - .globl atomic_test_and_ANDNOT_mask - .type atomic_test_and_ANDNOT_mask,@function -atomic_test_and_ANDNOT_mask: - not.p gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - and gr8,gr10,gr11 - cst.p gr11,@(gr9,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask - -############################################################################### -# -# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); -# -############################################################################### - .globl atomic_test_and_OR_mask - .type atomic_test_and_OR_mask,@function -atomic_test_and_OR_mask: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - or gr8,gr10,gr11 - cst.p gr11,@(gr9,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask - -############################################################################### -# -# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); -# -############################################################################### - .globl atomic_test_and_XOR_mask - .type atomic_test_and_XOR_mask,@function -atomic_test_and_XOR_mask: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - xor gr8,gr10,gr11 - cst.p gr11,@(gr9,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask - -############################################################################### -# -# int atomic_add_return(int i, atomic_t *v) -# -############################################################################### - .globl atomic_add_return - .type atomic_add_return,@function -atomic_add_return: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - add gr8,gr10,gr8 - cst.p gr8,@(gr9,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic_add_return, .-atomic_add_return - -############################################################################### -# -# int atomic_sub_return(int i, atomic_t *v) -# 
-############################################################################### - .globl atomic_sub_return - .type atomic_sub_return,@function -atomic_sub_return: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - sub gr8,gr10,gr8 - cst.p gr8,@(gr9,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic_sub_return, .-atomic_sub_return - -############################################################################### -# # uint32_t __xchg_32(uint32_t i, uint32_t *v) # ############################################################################### diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S index b6194eeac127..c4c472308a33 100644 --- a/arch/frv/lib/atomic64-ops.S +++ b/arch/frv/lib/atomic64-ops.S @@ -20,100 +20,6 @@ ############################################################################### # -# long long atomic64_inc_return(atomic64_t *v) -# -############################################################################### - .globl atomic64_inc_return - .type atomic64_inc_return,@function -atomic64_inc_return: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - addicc gr9,#1,gr9,icc0 - addxi gr8,#0,gr8,icc0 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic64_inc_return, .-atomic64_inc_return - -############################################################################### -# -# long long atomic64_dec_return(atomic64_t *v) -# -############################################################################### - .globl atomic64_dec_return - .type atomic64_dec_return,@function -atomic64_dec_return: - or.p gr8,gr8,gr10 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - subicc gr9,#1,gr9,icc0 - subxi gr8,#0,gr8,icc0 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic64_dec_return, .-atomic64_dec_return - -############################################################################### -# -# long long atomic64_add_return(long long i, atomic64_t *v) -# -############################################################################### - .globl atomic64_add_return - .type atomic64_add_return,@function -atomic64_add_return: - or.p gr8,gr8,gr4 - or gr9,gr9,gr5 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - addcc gr9,gr5,gr9,icc0 - addx gr8,gr4,gr8,icc0 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic64_add_return, .-atomic64_add_return - -############################################################################### -# -# long long atomic64_sub_return(long long i, atomic64_t *v) -# -############################################################################### - .globl atomic64_sub_return - .type atomic64_sub_return,@function -atomic64_sub_return: - or.p gr8,gr8,gr4 - or gr9,gr9,gr5 -0: - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ - ckeq icc3,cc7 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR 
must be atomic */ - orcr cc7,cc7,cc3 /* set CC3 to true */ - subcc gr9,gr5,gr9,icc0 - subx gr8,gr4,gr8,icc0 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ - beq icc3,#0,0b - bralr - - .size atomic64_sub_return, .-atomic64_sub_return - -############################################################################### -# # uint64_t __xchg_64(uint64_t i, uint64_t *v) # ############################################################################### diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 7ca73f8546cc..702ee539f87d 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -16,83 +16,52 @@ #include <linux/kernel.h> -static inline int atomic_add_return(int i, atomic_t *v) -{ - h8300flags flags; - int ret; - - flags = arch_local_irq_save(); - ret = v->counter += i; - arch_local_irq_restore(flags); - return ret; +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + h8300flags flags; \ + int ret; \ + \ + flags = arch_local_irq_save(); \ + ret = v->counter c_op i; \ + arch_local_irq_restore(flags); \ + return ret; \ } -#define atomic_add(i, v) atomic_add_return(i, v) -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -static inline int atomic_sub_return(int i, atomic_t *v) -{ - h8300flags flags; - int ret; - - flags = arch_local_irq_save(); - ret = v->counter -= i; - arch_local_irq_restore(flags); - return ret; +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + h8300flags flags; \ + \ + flags = arch_local_irq_save(); \ + v->counter c_op i; \ + arch_local_irq_restore(flags); \ } -#define atomic_sub(i, v) atomic_sub_return(i, v) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) +ATOMIC_OP_RETURN(add, +=) +ATOMIC_OP_RETURN(sub, -=) -static inline int atomic_inc_return(atomic_t *v) -{ - h8300flags flags; - int ret; +ATOMIC_OP(and, &=) +ATOMIC_OP(or, |=) +ATOMIC_OP(xor, ^=) - flags = arch_local_irq_save(); - v->counter++; - ret = v->counter; - arch_local_irq_restore(flags); - return ret; -} - -#define atomic_inc(v) atomic_inc_return(v) +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +#define atomic_add(i, v) (void)atomic_add_return(i, v) +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -static inline int atomic_dec_return(atomic_t *v) -{ - h8300flags flags; - int ret; +#define atomic_sub(i, v) (void)atomic_sub_return(i, v) +#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - flags = arch_local_irq_save(); - --v->counter; - ret = v->counter; - arch_local_irq_restore(flags); - return ret; -} +#define atomic_inc_return(v) atomic_add_return(1, v) +#define atomic_dec_return(v) atomic_sub_return(1, v) -#define atomic_dec(v) atomic_dec_return(v) +#define atomic_inc(v) (void)atomic_inc_return(v) +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -static inline int atomic_dec_and_test(atomic_t *v) -{ - h8300flags flags; - int ret; - - flags = arch_local_irq_save(); - --v->counter; - ret = v->counter; - arch_local_irq_restore(flags); - return ret == 0; -} +#define atomic_dec(v) (void)atomic_dec_return(v) +#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { @@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return ret; } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - unsigned char ccr; - unsigned long tmp; - - __asm__ __volatile__("stc ccr,%w3\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,%1\n\t" - "and.l %2,%1\n\t" - "mov.l %1,%0\n\t" - "ldc %w3,ccr" - : "=m"(*v), "=r"(tmp) - : "g"(~(mask)), "r"(ccr)); -} - -static inline void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - unsigned char ccr; - unsigned long tmp; - - __asm__ __volatile__("stc ccr,%w3\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,%1\n\t" - "or.l %2,%1\n\t" - "mov.l %1,%0\n\t" - "ldc %w3,ccr" - : "=m"(*v), "=r"(tmp) - : "g"(~(mask)), "r"(ccr)); -} - -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 93d07025f183..811d61f6422d 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 0bf03501fe5c..be4beeb77d57 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \ ATOMIC_OP(add, +) ATOMIC_OP(sub, -) -#undef ATOMIC_OP - #define atomic_add_return(i,v) \ ({ \ int __ia64_aar_i = (i); \ @@ -71,6 +69,16 @@ ATOMIC_OP(sub, -) : ia64_atomic_sub(__ia64_asr_i, v); \ }) +ATOMIC_OP(and, &) +ATOMIC_OP(or, |) +ATOMIC_OP(xor, ^) + +#define atomic_and(i,v) (void)ia64_atomic_and(i,v) +#define atomic_or(i,v) (void)ia64_atomic_or(i,v) +#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v) + +#undef ATOMIC_OP + #define ATOMIC64_OP(op, c_op) \ static __inline__ long \ ia64_atomic64_##op (__s64 i, atomic64_t *v) \ @@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \ ATOMIC64_OP(add, +) ATOMIC64_OP(sub, -) -#undef ATOMIC64_OP - #define atomic64_add_return(i,v) \ ({ \ long 
__ia64_aar_i = (i); \ @@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -) : ia64_atomic64_sub(__ia64_asr_i, v); \ }) +ATOMIC64_OP(and, &) +ATOMIC64_OP(or, |) +ATOMIC64_OP(xor, ^) + +#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v) +#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v) +#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v) + +#undef ATOMIC64_OP + #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 31bb74adba08..025e2a170493 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) -{ - unsigned long flags; - unsigned long tmp; - - local_irq_save(flags); - __asm__ __volatile__ ( - "# atomic_clear_mask \n\t" - DCACHE_CLEAR("%0", "r5", "%1") - M32R_LOCK" %0, @%1; \n\t" - "and %0, %2; \n\t" - M32R_UNLOCK" %0, @%1; \n\t" - : "=&r" (tmp) - : "r" (addr), "r" (~mask) - : "memory" - __ATOMIC_CLOBBER - ); - local_irq_restore(flags); -} - -static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) -{ - unsigned long flags; - unsigned long tmp; - - local_irq_save(flags); - __asm__ __volatile__ ( - "# atomic_set_mask \n\t" - DCACHE_CLEAR("%0", "r5", "%1") - M32R_LOCK" %0, @%1; \n\t" - "or %0, %2; \n\t" - M32R_UNLOCK" %0, @%1; \n\t" - : "=&r" (tmp) - : "r" (addr), "r" (mask) - : "memory" - __ATOMIC_CLOBBER - ); - local_irq_restore(flags); -} - #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index c18ddc74ef9a..62d6961e7f2b 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -156,7 +156,7 @@ void smp_flush_cache_all(void) cpumask_clear_cpu(smp_processor_id(), &cpumask); spin_lock(&flushcache_lock); mask=cpumask_bits(&cpumask); - atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); + atomic_or(*mask, (atomic_t *)&flushcache_cpumask); send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); _flush_cache_copyback_all(); while (flushcache_cpumask) @@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_vma = vma; flush_va = va; mask=cpumask_bits(&cpumask); - atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); + atomic_or(*mask, (atomic_t *)&flush_cpumask); /* * We have to send the IPI only to diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index e85f047fb072..039fac120cc0 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) +ATOMIC_OP(and, &=, and) +ATOMIC_OP(or, |=, or) +ATOMIC_OP(xor, ^=, eor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v) return c != 0; } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask))); -} - -static inline void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("orl %1,%0" : "+m" (*v) : 
ASM_DI (mask)); -} - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h index 948d8688643c..21c4c268b86c 100644 --- a/arch/metag/include/asm/atomic_lnkget.h +++ b/arch/metag/include/asm/atomic_lnkget.h @@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - int temp; - - asm volatile ( - "1: LNKGETD %0, [%1]\n" - " AND %0, %0, %2\n" - " LNKSETD [%1] %0\n" - " DEFR %0, TXSTAT\n" - " ANDT %0, %0, #HI(0x3f000000)\n" - " CMPT %0, #HI(0x02000000)\n" - " BNZ 1b\n" - : "=&d" (temp) - : "da" (&v->counter), "bd" (~mask) - : "cc"); -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - int temp; - - asm volatile ( - "1: LNKGETD %0, [%1]\n" - " OR %0, %0, %2\n" - " LNKSETD [%1], %0\n" - " DEFR %0, TXSTAT\n" - " ANDT %0, %0, #HI(0x3f000000)\n" - " CMPT %0, #HI(0x02000000)\n" - " BNZ 1b\n" - : "=&d" (temp) - : "da" (&v->counter), "bd" (mask) - : "cc"); -} - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int result, temp; diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h index f5d5898c1020..f8efe380fe8b 100644 --- a/arch/metag/include/asm/atomic_lock1.h +++ b/arch/metag/include/asm/atomic_lock1.h @@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) +ATOMIC_OP(and, &=) +ATOMIC_OP(or, |=) +ATOMIC_OP(xor, ^=) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - __global_lock1(flags); - fence(); - v->counter &= ~mask; - __global_unlock1(flags); -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - __global_lock1(flags); - fence(); - v->counter |= mask; - __global_unlock1(flags); -} - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 26d436336f2e..4c42fd9af777 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add, +=, addu) ATOMIC_OPS(sub, -=, subu) +ATOMIC_OP(and, &=, and) +ATOMIC_OP(or, |=, or) +ATOMIC_OP(xor, ^=, xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ ATOMIC64_OPS(add, +=, daddu) ATOMIC64_OPS(sub, -=, dsubu) +ATOMIC64_OP(and, &=, and) +ATOMIC64_OP(or, |=, or) +ATOMIC64_OP(xor, ^=, xor) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 5be655e83e70..375e59140c9c 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, 
new) (cmpxchg(&((v)->counter), (old), (new))) -/** - * atomic_clear_mask - Atomically clear bits in memory - * @mask: Mask of the bits to be cleared - * @v: pointer to word in memory - * - * Atomically clears the bits set in mask from the memory word specified. - */ -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ -#ifdef CONFIG_SMP - int status; - - asm volatile( - "1: mov %3,(_AAR,%2) \n" - " mov (_ADR,%2),%0 \n" - " and %4,%0 \n" - " mov %0,(_ADR,%2) \n" - " mov (_ADR,%2),%0 \n" /* flush */ - " mov (_ASR,%2),%0 \n" - " or %0,%0 \n" - " bne 1b \n" - : "=&r"(status), "=m"(*addr) - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask) - : "memory", "cc"); -#else - unsigned long flags; - - mask = ~mask; - flags = arch_local_cli_save(); - *addr &= mask; - arch_local_irq_restore(flags); -#endif -} - -/** - * atomic_set_mask - Atomically set bits in memory - * @mask: Mask of the bits to be set - * @v: pointer to word in memory - * - * Atomically sets the bits set in mask from the memory word specified. - */ -static inline void atomic_set_mask(unsigned long mask, unsigned long *addr) -{ -#ifdef CONFIG_SMP - int status; - - asm volatile( - "1: mov %3,(_AAR,%2) \n" - " mov (_ADR,%2),%0 \n" - " or %4,%0 \n" - " mov %0,(_ADR,%2) \n" - " mov (_ADR,%2),%0 \n" /* flush */ - " mov (_ASR,%2),%0 \n" - " or %0,%0 \n" - " bne 1b \n" - : "=&r"(status), "=m"(*addr) - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask) - : "memory", "cc"); -#else - unsigned long flags; - - flags = arch_local_cli_save(); - *addr |= mask; - arch_local_irq_restore(flags); -#endif -} - #endif /* __KERNEL__ */ #endif /* CONFIG_SMP */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index e5d0ef722bfa..9a39ea9031d4 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, flush_mm = mm; flush_va = va; #if NR_CPUS <= BITS_PER_LONG - atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]); + atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]); #else #error Not supported. 
#endif diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 226f8ca993f6..2536965d00ea 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) +ATOMIC_OP(and, &=) +ATOMIC_OP(or, |=) +ATOMIC_OP(xor, ^=) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) +ATOMIC64_OP(and, &=) +ATOMIC64_OP(or, |=) +ATOMIC64_OP(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 512d2782b043..55f106ed12bf 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \ ATOMIC_OPS(add, add) ATOMIC_OPS(sub, subf) +ATOMIC_OP(and, and) +ATOMIC_OP(or, or) +ATOMIC_OP(xor, xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \ ATOMIC64_OPS(add, add) ATOMIC64_OPS(sub, subf) +ATOMIC64_OP(and, and) +ATOMIC64_OP(or, or) +ATOMIC64_OP(xor, xor) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 7c6bb4b17b49..ed3ab509faca 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -596,25 +596,6 @@ _GLOBAL(copy_page) b 2b /* - * void atomic_clear_mask(atomic_t mask, atomic_t *addr) - * void atomic_set_mask(atomic_t mask, atomic_t *addr); - */ -_GLOBAL(atomic_clear_mask) -10: lwarx r5,0,r4 - andc r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr -_GLOBAL(atomic_set_mask) -10: lwarx r5,0,r4 - or r5,r5,r3 - PPC405_ERR77(0,r4) - stwcx. r5,0,r4 - bne- 10b - blr - -/* * Extended precision shifts. * * Updated to be valid for shift counts from 0 to 63 inclusive. 
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index adbe3802e377..117fa5c921c1 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -27,6 +27,7 @@ #define __ATOMIC_OR "lao" #define __ATOMIC_AND "lan" #define __ATOMIC_ADD "laa" +#define __ATOMIC_XOR "lax" #define __ATOMIC_BARRIER "bcr 14,0\n" #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ @@ -49,6 +50,7 @@ #define __ATOMIC_OR "or" #define __ATOMIC_AND "nr" #define __ATOMIC_ADD "ar" +#define __ATOMIC_XOR "xr" #define __ATOMIC_BARRIER "\n" #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ @@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v) #define atomic_dec_return(_v) atomic_sub_return(1, _v) #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER); +#define ATOMIC_OP(op, OP) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \ } -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER); -} +ATOMIC_OP(and, AND) +ATOMIC_OP(or, OR) +ATOMIC_OP(xor, XOR) + +#undef ATOMIC_OP #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) @@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define __ATOMIC64_OR "laog" #define __ATOMIC64_AND "lang" #define __ATOMIC64_ADD "laag" +#define __ATOMIC64_XOR "laxg" #define __ATOMIC64_BARRIER "bcr 14,0\n" #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ @@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define __ATOMIC64_OR "ogr" #define __ATOMIC64_AND "ngr" #define __ATOMIC64_ADD "agr" +#define __ATOMIC64_XOR "xgr" #define __ATOMIC64_BARRIER "\n" #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ @@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v) __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); } -static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) -{ - __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER); -} - -static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) -{ - __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER); -} - #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) static inline long long atomic64_cmpxchg(atomic64_t *v, @@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, return old; } +#define ATOMIC64_OP(op, OP) \ +static inline void atomic64_##op(long i, atomic64_t *v) \ +{ \ + __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ +} + +ATOMIC64_OP(and, AND) +ATOMIC64_OP(or, OR) +ATOMIC64_OP(xor, XOR) + +#undef ATOMIC64_OP #undef __ATOMIC64_LOOP static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 9e733d965e08..f5a0bd778ace 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy) * increase the "sequence" counter to avoid the race of an * etr event and the complete recovery against get_sync_clock. 
*/ - atomic_clear_mask(0x80000000, sw_ptr); + atomic_andnot(0x80000000, sw_ptr); atomic_inc(sw_ptr); } @@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy) static void enable_sync_clock(void) { atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); - atomic_set_mask(0x80000000, sw_ptr); + atomic_or(0x80000000, sw_ptr); } /* diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c98d89708e99..57309e9cdd80 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu) { - atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); } static void __unset_cpu_idle(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); } static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, - &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, + &vcpu->arch.sie_block->cpuflags); vcpu->arch.sie_block->lctl = 0x0000; vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); @@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) { - atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); + atomic_or(flag, &vcpu->arch.sie_block->cpuflags); } static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) @@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) spin_unlock(&li->lock); /* clear pending external calls set by sigp interpretation facility */ - atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); + atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; } @@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) li->irq.ext = irq->u.ext; set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id) /* another external call is pending */ return -EBUSY; } - atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); return 0; } @@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) return -EBUSY; *extcall = irq->u.extcall; - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) 0, 0, 2); set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; 
} @@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) 0, 0, 2); set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) spin_lock(&li->lock); switch (type) { case KVM_S390_MCHK: - atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); + atomic_or(CPUSTAT_STOP_INT, li->cpuflags); break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); + atomic_or(CPUSTAT_IO_INT, li->cpuflags); break; default: - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); break; } spin_unlock(&li->lock); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2078f92d15ac..b73302fb0507 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); - atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); if (test_kvm_facility(vcpu->kvm, 129)) { save_fp_ctl(&vcpu->run->s.regs.fpc); @@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED); if (test_kvm_facility(vcpu->kvm, 78)) - atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); else if (test_kvm_facility(vcpu->kvm, 8)) - atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); kvm_s390_vcpu_setup_model(vcpu); @@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) { - atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); + atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); exit_sie(vcpu); } void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) { - atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); + atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); } static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) { - atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); + atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); exit_sie(vcpu); } static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) { - atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); + atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); } /* @@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) * return immediately. 
*/ void exit_sie(struct kvm_vcpu *vcpu) { - atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) cpu_relax(); } @@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, if (dbg->control & KVM_GUESTDBG_ENABLE) { vcpu->guest_debug = dbg->control; /* enforce guest PER */ - atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); if (dbg->control & KVM_GUESTDBG_USE_HW_BP) rc = kvm_s390_import_bp_data(vcpu, dbg); } else { - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); vcpu->arch.guestdbg.last_bp = 0; } if (rc) { vcpu->guest_debug = 0; kvm_s390_clear_bp_data(vcpu); - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); } return rc; @@ -1771,7 +1771,7 @@ retry: if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { if (!ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); - atomic_set_mask(CPUSTAT_IBS, + atomic_or(CPUSTAT_IBS, &vcpu->arch.sie_block->cpuflags); } goto retry; @@ -1780,7 +1780,7 @@ retry: if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { if (ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); - atomic_clear_mask(CPUSTAT_IBS, + atomic_andnot(CPUSTAT_IBS, &vcpu->arch.sie_block->cpuflags); } goto retry; @@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) __disable_ibs_on_all_vcpus(vcpu->kvm); } - atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); /* * Another VCPU might have used IBS while we were offline. * Let's play safe and flush the VCPU at startup. 
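The s390 and KVM hunks above and below are a mechanical substitution: every atomic_set_mask(mask, v) becomes atomic_or(mask, v) and every atomic_clear_mask(mask, v) becomes atomic_andnot(mask, v), with unchanged semantics (set the bits in mask, respectively clear them). A minimal user-space sketch of that behaviour, using C11 atomics rather than the kernel's atomic_t (the demo_* names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the primitives used above: atomic_or() sets the bits in
 * 'mask', atomic_andnot() clears them, both as atomic read-modify-write. */
static void demo_atomic_or(atomic_uint *v, unsigned int mask)     { atomic_fetch_or(v, mask); }
static void demo_atomic_andnot(atomic_uint *v, unsigned int mask) { atomic_fetch_and(v, ~mask); }

int main(void)
{
	atomic_uint cpuflags = 0;

	demo_atomic_or(&cpuflags, 0x80000000u);     /* was atomic_set_mask()   */
	printf("%#x\n", atomic_load(&cpuflags));    /* prints 0x80000000 */

	demo_atomic_andnot(&cpuflags, 0x80000000u); /* was atomic_clear_mask() */
	printf("%#x\n", atomic_load(&cpuflags));    /* prints 0 */
	return 0;
}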
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ kvm_s390_clear_stop_irq(vcpu); - atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h index 97a5fda83450..b94df40e5f2d 100644 --- a/arch/sh/include/asm/atomic-grb.h +++ b/arch/sh/include/asm/atomic-grb.h @@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - int tmp; - unsigned int _mask = ~mask; - - __asm__ __volatile__ ( - " .align 2 \n\t" - " mova 1f, r0 \n\t" /* r0 = end point */ - " mov r15, r1 \n\t" /* r1 = saved sp */ - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ - " mov.l @%1, %0 \n\t" /* load old value */ - " and %2, %0 \n\t" /* add */ - " mov.l %0, @%1 \n\t" /* store new value */ - "1: mov r1, r15 \n\t" /* LOGOUT */ - : "=&r" (tmp), - "+r" (v) - : "r" (_mask) - : "memory" , "r0", "r1"); -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - int tmp; - - __asm__ __volatile__ ( - " .align 2 \n\t" - " mova 1f, r0 \n\t" /* r0 = end point */ - " mov r15, r1 \n\t" /* r1 = saved sp */ - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ - " mov.l @%1, %0 \n\t" /* load old value */ - " or %2, %0 \n\t" /* or */ - " mov.l %0, @%1 \n\t" /* store new value */ - "1: mov r1, r15 \n\t" /* LOGOUT */ - : "=&r" (tmp), - "+r" (v) - : "r" (mask) - : "memory" , "r0", "r1"); -} - #endif /* __ASM_SH_ATOMIC_GRB_H */ diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h index 61d107523f06..23fcdad5773e 100644 --- a/arch/sh/include/asm/atomic-irq.h +++ b/arch/sh/include/asm/atomic-irq.h @@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) +ATOMIC_OP(and, &=) +ATOMIC_OP(or, |=) +ATOMIC_OP(xor, ^=) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - raw_local_irq_save(flags); - v->counter &= ~mask; - raw_local_irq_restore(flags); -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - raw_local_irq_save(flags); - v->counter |= mask; - raw_local_irq_restore(flags); -} - #endif /* __ASM_SH_ATOMIC_IRQ_H */ diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h index 8575dccb9ef7..33d34b16d4d6 100644 --- a/arch/sh/include/asm/atomic-llsc.h +++ b/arch/sh/include/asm/atomic-llsc.h @@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_clear_mask \n" -" and %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (~mask), "r" (&v->counter) - : "t"); -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! 
atomic_set_mask \n" -" or %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (mask), "r" (&v->counter) - : "t"); -} - #endif /* __ASM_SH_ATOMIC_LLSC_H */ diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 0e69b7e7a439..7dcbebbcaec6 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -17,10 +17,12 @@ #include <asm/barrier.h> #include <asm-generic/atomic64.h> - #define ATOMIC_INIT(i) { (i) } int atomic_add_return(int, atomic_t *); +void atomic_and(int, atomic_t *); +void atomic_or(int, atomic_t *); +void atomic_xor(int, atomic_t *); int atomic_cmpxchg(atomic_t *, int, int); int atomic_xchg(atomic_t *, int); int __atomic_add_unless(atomic_t *, int, int); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 4082749913ce..917084ace49d 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *); ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 71cd65ab200c..b9d63c0a7aab 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy); #endif /* SMP */ -#define ATOMIC_OP(op, cop) \ +#define ATOMIC_OP_RETURN(op, c_op) \ int atomic_##op##_return(int i, atomic_t *v) \ { \ int ret; \ unsigned long flags; \ spin_lock_irqsave(ATOMIC_HASH(v), flags); \ \ - ret = (v->counter cop i); \ + ret = (v->counter c_op i); \ \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ return ret; \ } \ EXPORT_SYMBOL(atomic_##op##_return); -ATOMIC_OP(add, +=) +#define ATOMIC_OP(op, c_op) \ +void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ + \ + v->counter c_op i; \ + \ + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ +} \ +EXPORT_SYMBOL(atomic_##op); + +ATOMIC_OP_RETURN(add, +=) +ATOMIC_OP(and, &=) +ATOMIC_OP(or, |=) +ATOMIC_OP(xor, ^=) +#undef ATOMIC_OP_RETURN #undef ATOMIC_OP int atomic_xchg(atomic_t *v, int new) diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 05dac43907d1..d6b0363f345b 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return); ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN @@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return); ATOMIC64_OPS(add) ATOMIC64_OPS(sub) +ATOMIC64_OP(and) +ATOMIC64_OP(or) +ATOMIC64_OP(xor) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1d649a95660c..bb6005997268 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 1b109fad9fff..d320ce253d86 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v) _atomic_xchg_add(&v->counter, i); } +#define ATOMIC_OP(op) \ +unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \ +static inline void atomic_##op(int i, 
atomic_t *v) \ +{ \ + _atomic_##op((unsigned long *)&v->counter, i); \ +} + +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + +#undef ATOMIC_OP + /** * atomic_add_return - add integer and return * @v: pointer of type atomic_t @@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v) _atomic64_xchg_add(&v->counter, i); } +#define ATOMIC64_OP(op) \ +long long _atomic64_##op(long long *v, long long n); \ +static inline void atomic64_##op(long long i, atomic64_t *v) \ +{ \ + _atomic64_##op(&v->counter, i); \ +} + +ATOMIC64_OP(and) +ATOMIC64_OP(or) +ATOMIC64_OP(xor) + /** * atomic64_add_return - add integer and return * @v: pointer of type atomic64_t @@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); extern struct __get_user __atomic_xchg_add_unless(volatile int *p, int *lock, int o, int n); extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); +extern struct __get_user __atomic_and(volatile int *p, int *lock, int n); extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, @@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock, long long n); extern long long __atomic64_xchg_add_unless(volatile long long *p, int *lock, long long o, long long n); +extern long long __atomic64_and(volatile long long *p, int *lock, long long n); +extern long long __atomic64_or(volatile long long *p, int *lock, long long n); +extern long long __atomic64_xor(volatile long long *p, int *lock, long long n); /* Return failure from the atomic wrappers. */ struct __get_user __atomic_bad_address(int __user *addr); diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index 0496970cef82..096a56d6ead4 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return oldval; } +static inline void atomic_and(int i, atomic_t *v) +{ + __insn_fetchand4((void *)&v->counter, i); +} + +static inline void atomic_or(int i, atomic_t *v) +{ + __insn_fetchor4((void *)&v->counter, i); +} + +static inline void atomic_xor(int i, atomic_t *v) +{ + int guess, oldval = v->counter; + do { + guess = oldval; + __insn_mtspr(SPR_CMPEXCH_VALUE, guess); + oldval = __insn_cmpexch4(&v->counter, guess ^ i); + } while (guess != oldval); +} + /* Now the true 64-bit operations. 
*/ #define ATOMIC64_INIT(i) { (i) } @@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) return oldval != u; } +static inline void atomic64_and(long i, atomic64_t *v) +{ + __insn_fetchand((void *)&v->counter, i); +} + +static inline void atomic64_or(long i, atomic64_t *v) +{ + __insn_fetchor((void *)&v->counter, i); +} + +static inline void atomic64_xor(long i, atomic64_t *v) +{ + long guess, oldval = v->counter; + do { + guess = oldval; + __insn_mtspr(SPR_CMPEXCH_VALUE, guess); + oldval = __insn_cmpexch(&v->counter, guess ^ i); + } while (guess != oldval); +} + #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) #define atomic64_sub(i, v) atomic64_add(-(i), (v)) #define atomic64_inc_return(v) atomic64_add_return(1, (v)) diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c index c89b211fd9e7..298df1e9912a 100644 --- a/arch/tile/lib/atomic_32.c +++ b/arch/tile/lib/atomic_32.c @@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask) } EXPORT_SYMBOL(_atomic_or); +unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask) +{ + return __atomic_and((int *)p, __atomic_setup(p), mask).val; +} +EXPORT_SYMBOL(_atomic_and); + unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) { return __atomic_andn((int *)p, __atomic_setup(p), mask).val; @@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n) } EXPORT_SYMBOL(_atomic64_cmpxchg); +long long _atomic64_and(long long *v, long long n) +{ + return __atomic64_and(v, __atomic_setup(v), n); +} +EXPORT_SYMBOL(_atomic64_and); + +long long _atomic64_or(long long *v, long long n) +{ + return __atomic64_or(v, __atomic_setup(v), n); +} +EXPORT_SYMBOL(_atomic64_or); + +long long _atomic64_xor(long long *v, long long n) +{ + return __atomic64_xor(v, __atomic_setup(v), n); +} +EXPORT_SYMBOL(_atomic64_xor); /* * If any of the atomic or futex routines hit a bad address (not in diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index 6bda3132cd61..f611265633d6 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S @@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2" atomic_op _xchg_add_unless, 32, \ "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" atomic_op _or, 32, "or r24, r22, r2" +atomic_op _and, 32, "and r24, r22, r2" atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" atomic_op _xor, 32, "xor r24, r22, r2" @@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \ { bbns r26, 3f; add r24, r22, r4 }; \ { bbns r27, 3f; add r25, r23, r5 }; \ slt_u r26, r24, r22; add r25, r25, r26" +atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }" +atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }" +atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }" jrp lr /* happy backtracer */ diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index e9168955c42f..fb52aa644aab 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new) return xchg(&v->counter, new); } +#define ATOMIC_OP(op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + asm volatile(LOCK_PREFIX #op"l %1,%0" \ + : "+m" (v->counter) \ + : "ir" (i) \ + : "memory"); \ +} + +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + +#undef ATOMIC_OP + /** * __atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t 
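For reference, the new x86 ATOMIC_OP() macro in the arch/x86/include/asm/atomic.h hunk above stringizes the operation name into a single LOCK-prefixed instruction. A sketch of what ATOMIC_OP(and) expands to (approximate preprocessor output, not copied from a generated header):

/* Approximate expansion of ATOMIC_OP(and); LOCK_PREFIX becomes a lock
 * prefix on SMP kernels, so the andl is a single atomic RMW instruction. */
static inline void atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}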
@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v) return *v; } -/* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ - asm volatile(LOCK_PREFIX "andl %0,%1" \ - : : "r" (~(mask)), "m" (*(addr)) : "memory") - -#define atomic_set_mask(mask, addr) \ - asm volatile(LOCK_PREFIX "orl %0,%1" \ - : : "r" ((unsigned)(mask)), "m" (*(addr)) \ - : "memory") - #ifdef CONFIG_X86_32 # include <asm/atomic64_32.h> #else diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index b154de75c90c..a11c30b77fb5 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #undef alternative_atomic64 #undef __alternative_atomic64 +#define ATOMIC64_OP(op, c_op) \ +static inline void atomic64_##op(long long i, atomic64_t *v) \ +{ \ + long long old, c = 0; \ + while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \ + c = old; \ +} + +ATOMIC64_OP(and, &) +ATOMIC64_OP(or, |) +ATOMIC64_OP(xor, ^) + +#undef ATOMIC64_OP + #endif /* _ASM_X86_ATOMIC64_32_H */ diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index b965f9e03f2a..50e33eff58de 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) return dec; } +#define ATOMIC64_OP(op) \ +static inline void atomic64_##op(long i, atomic64_t *v) \ +{ \ + asm volatile(LOCK_PREFIX #op"q %1,%0" \ + : "+m" (v->counter) \ + : "er" (i) \ + : "memory"); \ +} + +ATOMIC64_OP(and) +ATOMIC64_OP(or) +ATOMIC64_OP(xor) + +#undef ATOMIC64_OP + #endif /* _ASM_X86_ATOMIC64_64_H */ diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 00b7d46b35b8..e0be67936990 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ -#if XCHAL_HAVE_S32C1I - unsigned long tmp; - int result; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " and %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (~mask), "a" (v) - : "memory" - ); -#else - unsigned int all_f = -1; - unsigned int vval; - - __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" - " l32i %0, %2, 0\n" - " xor %1, %4, %3\n" - " and %0, %0, %4\n" - " s32i %0, %2, 0\n" - " wsr a15, ps\n" - " rsync\n" - : "=&a" (vval), "=a" (mask) - : "a" (v), "a" (all_f), "1" (mask) - : "a15", "memory" - ); -#endif -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ -#if XCHAL_HAVE_S32C1I - unsigned long tmp; - int result; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " or %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (mask), "a" (v) - : "memory" - ); -#else - unsigned int vval; - - __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" - " l32i %0, %2, 0\n" - " or %0, %0, %1\n" - " s32i %0, %2, 0\n" - " wsr a15, ps\n" - " rsync\n" - : "=&a" (vval) - : "a" 
(mask), "a" (v) - : "a15", "memory" - ); -#endif -} - #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */
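Where the architecture has no single fetch-op instruction, the new operations fall back to a compare-and-exchange loop, as in the 32-bit x86 ATOMIC64_OP() and the tile atomic_xor()/atomic64_xor() hunks above. A self-contained sketch of that pattern with C11 atomics (illustrative names, not the kernel implementation):

#include <stdatomic.h>

/* cmpxchg-loop fallback: reread the current value until the CAS from
 * old to (old ^ i) succeeds, mirroring the x86-32 and tile hunks above. */
static void demo_atomic64_xor(_Atomic long long *v, long long i)
{
	long long old = atomic_load(v);
	while (!atomic_compare_exchange_weak(v, &old, old ^ i))
		;	/* on failure, 'old' now holds the fresh value; retry */
}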