author      Linus Torvalds <torvalds@linux-foundation.org>   2022-08-07 17:52:35 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2022-08-07 17:52:35 -0700
commit      4e23eeebb2e57f5a28b36221aa776b5a1122dde5 (patch)
tree        f8376df9b40f35576533e830f8c2a1b7e981c160 /arch
parent      3bc1bc0b59d04e997db25b84babf459ca1cd80b7 (diff)
parent      36d4b36b69590fed99356a4426c940a253a93800 (diff)
Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:
- fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)
- optimize out non-atomic bitops on compile-time constants (Alexander
  Lobakin); a sketch of the dispatch idea follows this list
- cleanup bitmap-related headers (Yury Norov)
- x86/olpc: fix 'logical not is only applied to the left hand side'
(Alexander Lobakin)
- lib/nodemask: inline wrappers around bitmap (Yury Norov); a model of
  the node_random() idea follows the commit list below
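For context on the compile-time-constants series: when both the bit number
and the word it indexes are known at compile time, the compiler should be
able to fold the whole bitop away instead of emitting the arch-optimized
(often inline-asm) version, which it cannot see through. The sketch below
is illustrative only — const_test_bit() and arch_test_bit() are simplified
stand-ins for the kernel's variants in include/linux/bitops.h, not the
exact upstream macro:

    #include <stdbool.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Plain C variant: the compiler can evaluate this at compile time. */
    static inline bool const_test_bit(unsigned long nr,
                                      const volatile unsigned long *addr)
    {
            const unsigned long *p =
                    (const unsigned long *)addr + nr / BITS_PER_LONG;

            return 1UL & (*p >> (nr % BITS_PER_LONG));
    }

    /* Stand-in for the arch-optimized variant (often inline asm). */
    static inline bool arch_test_bit(unsigned long nr,
                                     const volatile unsigned long *addr)
    {
            return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
    }

    /*
     * Dispatch: if the bit number and the pointed-to word are both
     * compile-time constants, pick the foldable C variant so the whole
     * call can be optimized out; otherwise use the arch variant.
     */
    #define test_bit(nr, addr)                                          \
            ((__builtin_constant_p(nr) &&                               \
              __builtin_constant_p(*(const unsigned long *)(addr))) ?   \
                    const_test_bit(nr, addr) : arch_test_bit(nr, addr))

With this in place, a call like test_bit(0, map) on a const-initialized map
can collapse to a constant branch at -O2.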
* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
lib/nodemask: inline next_node_in() and node_random()
powerpc: drop dependency on <asm/machdep.h> in archrandom.h
x86/olpc: fix 'logical not is only applied to the left hand side'
lib/cpumask: move some one-line wrappers to header file
headers/deps: mm: align MAINTAINERS and Docs with new gfp.h structure
headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
headers/deps: mm: Optimize <linux/gfp.h> header dependencies
lib/cpumask: move trivial wrappers around find_bit to the header
lib/cpumask: change return types to unsigned where appropriate
cpumask: change return types to bool where appropriate
lib/bitmap: change type of bitmap_weight to unsigned long
lib/bitmap: change return types to bool where appropriate
arm: align find_bit declarations with generic kernel
iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
lib/test_bitmap: test the tail after bitmap_to_arr64()
lib/bitmap: fix off-by-one in bitmap_to_arr64()
lib: test_bitmap: add compile-time optimization/evaluations assertions
bitmap: don't assume compiler evaluates small mem*() builtins calls
net/ice: fix initializing the bitmap in the switch code
bitops: let optimize out non-atomic bitops on compile-time constants
...
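On the nodemask side (first entry above), next_node_in() and node_random()
move out of lib/nodemask.c and into the header so trivial callers inline
down to the underlying bitmap search. The fragment below is a
self-contained model of the node_random() idea only — the helpers are
simplified, the RNG is injected as a callback, and the real implementation
reuses the kernel's bitmap helpers rather than open-coded loops:

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static inline int bit_at(const unsigned long *mask, unsigned int i)
    {
            return (mask[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1;
    }

    /* Pick a uniformly random set bit ("node"), or -1 if mask is empty. */
    static inline int node_random_sketch(const unsigned long *mask,
                                         unsigned int nbits,
                                         unsigned int (*rnd)(unsigned int))
    {
            unsigned int i, w = 0, target;

            for (i = 0; i < nbits; i++)     /* like nodes_weight() */
                    w += bit_at(mask, i);
            if (!w)
                    return -1;              /* like NUMA_NO_NODE */

            target = rnd(w);                /* uniform in [0, w) */
            for (i = 0; i < nbits; i++) {   /* find the target-th set bit */
                    if (bit_at(mask, i) && target-- == 0)
                            return (int)i;
            }
            return -1;                      /* unreachable for valid input */
    }

Inlining pays off here because nbits (MAX_NUMNODES in the kernel) is a
compile-time bound, so small configurations reduce to very little code.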
Diffstat (limited to 'arch')
-rw-r--r--   arch/alpha/include/asm/bitops.h          32
-rw-r--r--   arch/arm/include/asm/bitops.h            18
-rw-r--r--   arch/hexagon/include/asm/bitops.h        24
-rw-r--r--   arch/ia64/include/asm/bitops.h           42
-rw-r--r--   arch/ia64/include/asm/processor.h         2
-rw-r--r--   arch/m68k/include/asm/bitops.h           49
-rw-r--r--   arch/powerpc/include/asm/archrandom.h     9
-rw-r--r--   arch/powerpc/kernel/setup-common.c        8
-rw-r--r--   arch/s390/include/asm/bitops.h           61
-rw-r--r--   arch/sh/include/asm/bitops-op32.h        34
-rw-r--r--   arch/sparc/include/asm/bitops_32.h       18
-rw-r--r--   arch/sparc/lib/atomic32.c                12
-rw-r--r--   arch/x86/include/asm/bitops.h            22
-rw-r--r--   arch/x86/platform/olpc/olpc-xo1-sci.c     2
14 files changed, 189 insertions, 144 deletions
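Most of the per-architecture churn in the diff below follows one mechanical
pattern: each arch's non-atomic helpers are renamed with an arch_ prefix,
and the generic names (__set_bit() and friends) are reinstated by including
asm-generic/bitops/non-instrumented-non-atomic.h (or the instrumented
variant on sanitizer builds). A minimal sketch of that layering, with a
stubbed instrumentation hook and a hypothetical SANITIZERS_ENABLED switch
standing in for the kernel's real config machinery:

    #include <stddef.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Stub standing in for the kernel's KASAN/KCSAN reporting hook. */
    static inline void instrument_write(const volatile void *p, size_t sz) {}

    /* What each arch now provides: only the arch-prefixed primitive. */
    static inline void arch___set_bit(unsigned long nr,
                                      volatile unsigned long *addr)
    {
            addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    #ifdef SANITIZERS_ENABLED
    /* instrumented-non-atomic.h: report the access, then do the op. */
    static inline void __set_bit(unsigned long nr,
                                 volatile unsigned long *addr)
    {
            instrument_write(addr + nr / BITS_PER_LONG,
                             sizeof(unsigned long));
            arch___set_bit(nr, addr);
    }
    #else
    /* non-instrumented-non-atomic.h: the generic name is a plain alias. */
    #define __set_bit arch___set_bit
    #endif

Once every arch exposes arch_*() names, a single generic header can decide
whether calls are instrumented — which is why each file below gains the
same #include.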
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index e1d8483a45f2..492c7713ddae 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)

 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)

 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
         smp_mb();
-        __clear_bit(nr, addr);
+        arch___clear_bit(nr, addr);
 }

 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)

 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)

 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)

 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)

 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
         return (old & mask) != 0;
 }

-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
         return __ffs(tmp) + ofs;
 }

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 8e94fe7ab5eb..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);

 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);

 #ifndef CONFIG_SMP
 /*
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 75d6ba3643b8..da500471ac73 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_clear_bit(nr, addr);
 }

-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_set_bit(nr, addr);
 }

-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_change_bit(nr, addr);
 }

 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_clear_bit(nr, addr);
 }

-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_set_bit(nr, addr);
 }

-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_change_bit(nr, addr);
 }

-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         int retval;
@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
         return retval;
 }

-#define test_bit(nr, addr) __test_bit(nr, addr)
-
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }

 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>

 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 577be93c0818..9f62af7fd7c4 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
 }

 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
 }

 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
 }

 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit

 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 *p = (__u32 *) addr + (nr >> 5);
         __u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 *p = (__u32 *) addr + (nr >> 5);
         __u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 old, bit = (1 << (nr & 31));
         __u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
         return (old & bit) != 0;
 }

-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)

 #ifdef __KERNEL__

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 7cbce290f4e5..757c2f6d8d4b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
         unsigned int reg = vector / 64;
         unsigned int bit = vector % 64;
-        u64 irr;
+        unsigned long irr;

         switch (reg) {
         case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 87c2cd66a9ce..470aed978590 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
                                 bfset_mem_set_bit(nr, vaddr))
 #endif

-#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
-
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+        set_bit(nr, addr);
+}

 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
                                 bfclr_mem_clear_bit(nr, vaddr))
 #endif

-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
-
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+        clear_bit(nr, addr);
+}

 static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
                                 bfchg_mem_change_bit(nr, vaddr))
 #endif

-#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-
-
-static inline int test_bit(int nr, const volatile unsigned long *vaddr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+        change_bit(nr, addr);
 }

+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+        return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}

 static inline int bset_reg_test_and_set_bit(int nr,
                                             volatile unsigned long *vaddr)
@@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
                                 bfset_mem_test_and_set_bit(nr, vaddr))
 #endif

-#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+        return test_and_set_bit(nr, addr);
+}

 static inline int bclr_reg_test_and_clear_bit(int nr,
                                               volatile unsigned long *vaddr)
@@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
                                 bfclr_mem_test_and_clear_bit(nr, vaddr))
 #endif

-#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+        return test_and_clear_bit(nr, addr);
+}

 static inline int bchg_reg_test_and_change_bit(int nr,
                                                volatile unsigned long *vaddr)
@@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
                                 bfchg_mem_test_and_change_bit(nr, vaddr))
 #endif

-#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+        return test_and_change_bit(nr, addr);
+}

 /*
  * The true 68020 and more advanced processors support the "bfffo"
@@ -522,6 +540,7 @@ static inline unsigned long __fls(unsigned long x)
 #define clear_bit_unlock        clear_bit
 #define __clear_bit_unlock      clear_bit_unlock

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 564859e6a807..51b093f67528 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -2,19 +2,12 @@
 #ifndef _ASM_POWERPC_ARCHRANDOM_H
 #define _ASM_POWERPC_ARCHRANDOM_H

-#include <asm/machdep.h>
-
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
         return 0;
 }

-static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
-{
-        if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
-                return 1;
-        return 0;
-}
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);

 #ifdef CONFIG_PPC_POWERNV
 int pnv_get_random_long(unsigned long *v);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1a02629ec70b..dd98f43bd685 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -171,6 +171,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);

+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+        if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+                return 1;
+        return 0;
+}
+EXPORT_SYMBOL(arch_get_random_seed_longs);
+
 void machine_halt(void)
 {
         machine_shutdown();
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 191dc7898b0f..9a7d15da966e 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
         return old & mask;
 }

-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);

-        *addr |= mask;
+        *p |= mask;
 }

-static inline void arch___clear_bit(unsigned long nr,
-                                    volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);

-        *addr &= ~mask;
+        *p &= ~mask;
 }

-static inline void arch___change_bit(unsigned long nr,
-                                     volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);

-        *addr ^= mask;
+        *p ^= mask;
 }

-static inline bool arch___test_and_set_bit(unsigned long nr,
-                                           volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);
         unsigned long old;

-        old = *addr;
-        *addr |= mask;
+        old = *p;
+        *p |= mask;
         return old & mask;
 }

-static inline bool arch___test_and_clear_bit(unsigned long nr,
-                                             volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);
         unsigned long old;

-        old = *addr;
-        *addr &= ~mask;
+        old = *p;
+        *p &= ~mask;
         return old & mask;
 }

-static inline bool arch___test_and_change_bit(unsigned long nr,
-                                              volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-        unsigned long *addr = __bitops_word(nr, ptr);
+        unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);
         unsigned long old;

-        old = *addr;
-        *addr ^= mask;
+        old = *p;
+        *p ^= mask;
         return old & mask;
 }

-static inline bool arch_test_bit(unsigned long nr,
-                                 const volatile unsigned long *ptr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
-        const volatile unsigned long *addr = __bitops_word(nr, ptr);
+        const volatile unsigned long *p = __bitops_word(nr, addr);
         unsigned long mask = __bitops_mask(nr);

-        return *addr & mask;
+        return *p & mask;
 }

 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
index cfe5465acce7..565a85d8b7fb 100644
--- a/arch/sh/include/asm/bitops-op32.h
+++ b/arch/sh/include/asm/bitops-op32.h
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_BITOPS_OP32_H
 #define __ASM_SH_BITOPS_OP32_H

+#include <linux/bits.h>
+
 /*
  * The bit modifying instructions on SH-2A are only capable of working
  * with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
 #define BYTE_OFFSET(nr)         ((nr) % BITS_PER_BYTE)
 #endif

-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         if (__builtin_constant_p(nr)) {
                 __asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
         }
 }

-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         if (__builtin_constant_p(nr)) {
                 __asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         if (__builtin_constant_p(nr)) {
                 __asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }

 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-                                        volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
 }

 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 889afa9f990f..3448c191b484 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -19,9 +19,9 @@
 #error only <linux/bitops.h> can be included directly
 #endif

-unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);

 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
@@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        return ___set_bit(ADDR, mask) != 0;
+        return sp32___set_bit(ADDR, mask) != 0;
 }

 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
@@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        (void) ___set_bit(ADDR, mask);
+        (void) sp32___set_bit(ADDR, mask);
 }

 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
@@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        return ___clear_bit(ADDR, mask) != 0;
+        return sp32___clear_bit(ADDR, mask) != 0;
 }

 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        (void) ___clear_bit(ADDR, mask);
+        (void) sp32___clear_bit(ADDR, mask);
 }

 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
@@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        return ___change_bit(ADDR, mask) != 0;
+        return sp32___change_bit(ADDR, mask) != 0;
 }

 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
         ADDR = ((unsigned long *) addr) + (nr >> 5);
         mask = 1 << (nr & 31);

-        (void) ___change_bit(ADDR, mask);
+        (void) sp32___change_bit(ADDR, mask);
 }

 #include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 8b81d0f00c97..cf80d1ae352b 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
 }
 EXPORT_SYMBOL(arch_atomic_set);

-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;
@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)

         return old & mask;
 }
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);

-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;
@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)

         return old & mask;
 }
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);

-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;
@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)

         return old & mask;
 }
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);

 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 {
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index a288ecd230ab..973c6bd17f98 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___set_bit(long nr, volatile unsigned long *addr)
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___clear_bit(long nr, volatile unsigned long *addr)
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___change_bit(long nr, volatile unsigned long *addr)
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 }

 static __always_inline bool
-arch___test_and_set_bit(long nr, volatile unsigned long *addr)
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         bool oldbit;
@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
  * this without also updating arch/x86/kernel/kvm.c
  */
 static __always_inline bool
-arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         bool oldbit;
@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 }

 static __always_inline bool
-arch___test_and_change_bit(long nr, volatile unsigned long *addr)
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         bool oldbit;
@@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
         return oldbit;
 }

-#define arch_test_bit(nr, addr)                 \
-        (__builtin_constant_p((nr))             \
-         ? constant_test_bit((nr), (addr))      \
-         : variable_test_bit((nr), (addr)))
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+        return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
+                                          variable_test_bit(nr, addr);
+}

 /**
  * __ffs - find first set bit in word
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index f03a6883dcc6..89f25af4b3c3 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
                 return;
         }

-        if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+        if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
                 return; /* Nothing new to report. */

         input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
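The final hunk above is the 'logical not is only applied to the left hand
side' fix. With test_bit() now returning bool, the old form
!!test_bit(...) == state normalizes only the left operand; if state ever
holds a non-zero value other than 1, the comparison is wrong and clang
warns about the asymmetric negation. A standalone illustration with
made-up values (not the driver's actual inputs):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool sw = true;   /* switch bit currently reported as set */
            int state = 2;    /* any non-zero "set" value from the caller */

            /* Old form: 1 == 2 is false, so equal states look different. */
            printf("old: %d\n", !!sw == state);   /* prints 0 */

            /* Fixed form: normalize the int side before comparing. */
            printf("new: %d\n", sw == !!state);   /* prints 1 */

            return 0;
    }

Normalizing the int side keeps the early return ("nothing new to report")
working for any truthy state value.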