From dfd402a4c4baae42398ce9180ff424d589b8bffc Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Thu, 14 Nov 2019 19:02:54 +0100
Subject: kcsan: Add Kernel Concurrency Sanitizer infrastructure

Kernel Concurrency Sanitizer (KCSAN) is a dynamic data-race detector for
kernel space. KCSAN is a sampling watchpoint-based data-race detector. See
the included Documentation/dev-tools/kcsan.rst for more details.

This patch adds basic infrastructure, but does not yet enable KCSAN for
any architecture.

Signed-off-by: Marco Elver
Acked-by: Paul E. McKenney
Signed-off-by: Paul E. McKenney
---
 scripts/Makefile.kcsan | 6 ++++++
 scripts/Makefile.lib   | 10 ++++++++++
 2 files changed, 16 insertions(+)
 create mode 100644 scripts/Makefile.kcsan

(limited to 'scripts')

diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
new file mode 100644
index 000000000000..caf1111a28ae
--- /dev/null
+++ b/scripts/Makefile.kcsan
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KCSAN
+
+CFLAGS_KCSAN := -fsanitize=thread
+
+endif # CONFIG_KCSAN
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 179d55af5852..8952f909f7c9 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -152,6 +152,16 @@ _c_flags += $(if $(patsubst n%,, \
 		$(CFLAGS_KCOV))
 endif
 
+#
+# Enable KCSAN flags except some files or directories we don't want to check
+# (depends on variables KCSAN_SANITIZE_obj.o, KCSAN_SANITIZE)
+#
+ifeq ($(CONFIG_KCSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+	$(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \
+	$(CFLAGS_KCSAN))
+endif
+
 # $(srctree)/$(src) for including checkin headers from generated source files
 # $(objtree)/$(obj) for including generated headers from checkin source files
 ifeq ($(KBUILD_EXTMOD),)
-- cgit v1.2.3-58-ga151

From e75a6795ed132f26f69d08dea958630d5993056d Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Thu, 14 Nov 2019 19:03:02 +0100
Subject: locking/atomics, kcsan: Add KCSAN instrumentation

This adds KCSAN instrumentation to atomic-instrumented.h.

Signed-off-by: Marco Elver
Reviewed-by: Mark Rutland
Acked-by: Paul E. McKenney
Signed-off-by: Paul E.
McKenney --- include/asm-generic/atomic-instrumented.h | 393 +++++++++++++++--------------- scripts/atomic/gen-atomic-instrumented.sh | 17 +- 2 files changed, 218 insertions(+), 192 deletions(-) (limited to 'scripts') diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index e8730c6b9fe2..3dc0f38544f6 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -19,11 +19,24 @@ #include #include +#include + +static inline void __atomic_check_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +static inline void __atomic_check_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} static inline int atomic_read(const atomic_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic_read(v); } #define atomic_read atomic_read @@ -32,7 +45,7 @@ atomic_read(const atomic_t *v) static inline int atomic_read_acquire(const atomic_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } #define atomic_read_acquire atomic_read_acquire @@ -41,7 +54,7 @@ atomic_read_acquire(const atomic_t *v) static inline void atomic_set(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_set(v, i); } #define atomic_set atomic_set @@ -50,7 +63,7 @@ atomic_set(atomic_t *v, int i) static inline void atomic_set_release(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } #define atomic_set_release atomic_set_release @@ -59,7 +72,7 @@ atomic_set_release(atomic_t *v, int i) static inline void atomic_add(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_add(i, v); } #define atomic_add atomic_add @@ -68,7 +81,7 @@ atomic_add(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } #define atomic_add_return atomic_add_return @@ -78,7 +91,7 @@ atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire @@ -88,7 +101,7 @@ atomic_add_return_acquire(int i, atomic_t *v) static inline int atomic_add_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release @@ -98,7 +111,7 @@ atomic_add_return_release(int i, atomic_t *v) static inline int atomic_add_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed @@ -108,7 +121,7 @@ atomic_add_return_relaxed(int i, atomic_t *v) static inline int atomic_fetch_add(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add @@ -118,7 +131,7 @@ atomic_fetch_add(int i, atomic_t *v) static inline int 
atomic_fetch_add_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire @@ -128,7 +141,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) static inline int atomic_fetch_add_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release @@ -138,7 +151,7 @@ atomic_fetch_add_release(int i, atomic_t *v) static inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed @@ -147,7 +160,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) static inline void atomic_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub @@ -156,7 +169,7 @@ atomic_sub(int i, atomic_t *v) static inline int atomic_sub_return(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return @@ -166,7 +179,7 @@ atomic_sub_return(int i, atomic_t *v) static inline int atomic_sub_return_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire @@ -176,7 +189,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) static inline int atomic_sub_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define atomic_sub_return_release atomic_sub_return_release @@ -186,7 +199,7 @@ atomic_sub_return_release(int i, atomic_t *v) static inline int atomic_sub_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } #define atomic_sub_return_relaxed atomic_sub_return_relaxed @@ -196,7 +209,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) static inline int atomic_fetch_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub @@ -206,7 +219,7 @@ atomic_fetch_sub(int i, atomic_t *v) static inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire @@ -216,7 +229,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) static inline int atomic_fetch_sub_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release @@ -226,7 +239,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) static inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed @@ -236,7 +249,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) 
static inline void atomic_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc @@ -246,7 +259,7 @@ atomic_inc(atomic_t *v) static inline int atomic_inc_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return @@ -256,7 +269,7 @@ atomic_inc_return(atomic_t *v) static inline int atomic_inc_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire @@ -266,7 +279,7 @@ atomic_inc_return_acquire(atomic_t *v) static inline int atomic_inc_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release @@ -276,7 +289,7 @@ atomic_inc_return_release(atomic_t *v) static inline int atomic_inc_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed @@ -286,7 +299,7 @@ atomic_inc_return_relaxed(atomic_t *v) static inline int atomic_fetch_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc atomic_fetch_inc @@ -296,7 +309,7 @@ atomic_fetch_inc(atomic_t *v) static inline int atomic_fetch_inc_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire @@ -306,7 +319,7 @@ atomic_fetch_inc_acquire(atomic_t *v) static inline int atomic_fetch_inc_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release @@ -316,7 +329,7 @@ atomic_fetch_inc_release(atomic_t *v) static inline int atomic_fetch_inc_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed @@ -326,7 +339,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) static inline void atomic_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec @@ -336,7 +349,7 @@ atomic_dec(atomic_t *v) static inline int atomic_dec_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return @@ -346,7 +359,7 @@ atomic_dec_return(atomic_t *v) static inline int atomic_dec_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire @@ -356,7 +369,7 @@ atomic_dec_return_acquire(atomic_t *v) static inline int atomic_dec_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release @@ -366,7 +379,7 @@ atomic_dec_return_release(atomic_t 
*v) static inline int atomic_dec_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed @@ -376,7 +389,7 @@ atomic_dec_return_relaxed(atomic_t *v) static inline int atomic_fetch_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec @@ -386,7 +399,7 @@ atomic_fetch_dec(atomic_t *v) static inline int atomic_fetch_dec_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire @@ -396,7 +409,7 @@ atomic_fetch_dec_acquire(atomic_t *v) static inline int atomic_fetch_dec_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release @@ -406,7 +419,7 @@ atomic_fetch_dec_release(atomic_t *v) static inline int atomic_fetch_dec_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed @@ -415,7 +428,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) static inline void atomic_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and @@ -424,7 +437,7 @@ atomic_and(int i, atomic_t *v) static inline int atomic_fetch_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and @@ -434,7 +447,7 @@ atomic_fetch_and(int i, atomic_t *v) static inline int atomic_fetch_and_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire @@ -444,7 +457,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) static inline int atomic_fetch_and_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release @@ -454,7 +467,7 @@ atomic_fetch_and_release(int i, atomic_t *v) static inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed @@ -464,7 +477,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v) static inline void atomic_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot @@ -474,7 +487,7 @@ atomic_andnot(int i, atomic_t *v) static inline int atomic_fetch_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot @@ -484,7 +497,7 @@ atomic_fetch_andnot(int i, atomic_t *v) static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return 
arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire @@ -494,7 +507,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) static inline int atomic_fetch_andnot_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release @@ -504,7 +517,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed @@ -513,7 +526,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) static inline void atomic_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or @@ -522,7 +535,7 @@ atomic_or(int i, atomic_t *v) static inline int atomic_fetch_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or @@ -532,7 +545,7 @@ atomic_fetch_or(int i, atomic_t *v) static inline int atomic_fetch_or_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire @@ -542,7 +555,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) static inline int atomic_fetch_or_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release @@ -552,7 +565,7 @@ atomic_fetch_or_release(int i, atomic_t *v) static inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed @@ -561,7 +574,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) static inline void atomic_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor @@ -570,7 +583,7 @@ atomic_xor(int i, atomic_t *v) static inline int atomic_fetch_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define atomic_fetch_xor atomic_fetch_xor @@ -580,7 +593,7 @@ atomic_fetch_xor(int i, atomic_t *v) static inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire @@ -590,7 +603,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) static inline int atomic_fetch_xor_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release @@ -600,7 +613,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) static inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define 
atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed @@ -610,7 +623,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) static inline int atomic_xchg(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg @@ -620,7 +633,7 @@ atomic_xchg(atomic_t *v, int i) static inline int atomic_xchg_acquire(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire @@ -630,7 +643,7 @@ atomic_xchg_acquire(atomic_t *v, int i) static inline int atomic_xchg_release(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release @@ -640,7 +653,7 @@ atomic_xchg_release(atomic_t *v, int i) static inline int atomic_xchg_relaxed(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed @@ -650,7 +663,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg @@ -660,7 +673,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire @@ -670,7 +683,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release @@ -680,7 +693,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed @@ -690,8 +703,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg @@ -701,8 +714,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire @@ -712,8 +725,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); 
return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release @@ -723,8 +736,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed @@ -734,7 +747,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) static inline bool atomic_sub_and_test(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test @@ -744,7 +757,7 @@ atomic_sub_and_test(int i, atomic_t *v) static inline bool atomic_dec_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test @@ -754,7 +767,7 @@ atomic_dec_and_test(atomic_t *v) static inline bool atomic_inc_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test @@ -764,7 +777,7 @@ atomic_inc_and_test(atomic_t *v) static inline bool atomic_add_negative(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative @@ -774,7 +787,7 @@ atomic_add_negative(int i, atomic_t *v) static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define atomic_fetch_add_unless atomic_fetch_add_unless @@ -784,7 +797,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) static inline bool atomic_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless @@ -794,7 +807,7 @@ atomic_add_unless(atomic_t *v, int a, int u) static inline bool atomic_inc_not_zero(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero @@ -804,7 +817,7 @@ atomic_inc_not_zero(atomic_t *v) static inline bool atomic_inc_unless_negative(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative @@ -814,7 +827,7 @@ atomic_inc_unless_negative(atomic_t *v) static inline bool atomic_dec_unless_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive @@ -824,7 +837,7 @@ atomic_dec_unless_positive(atomic_t *v) static inline int atomic_dec_if_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive @@ -833,7 +846,7 @@ atomic_dec_if_positive(atomic_t *v) static inline s64 atomic64_read(const atomic64_t 
*v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read @@ -842,7 +855,7 @@ atomic64_read(const atomic64_t *v) static inline s64 atomic64_read_acquire(const atomic64_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire @@ -851,7 +864,7 @@ atomic64_read_acquire(const atomic64_t *v) static inline void atomic64_set(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set @@ -860,7 +873,7 @@ atomic64_set(atomic64_t *v, s64 i) static inline void atomic64_set_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release @@ -869,7 +882,7 @@ atomic64_set_release(atomic64_t *v, s64 i) static inline void atomic64_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add @@ -878,7 +891,7 @@ atomic64_add(s64 i, atomic64_t *v) static inline s64 atomic64_add_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return atomic64_add_return @@ -888,7 +901,7 @@ atomic64_add_return(s64 i, atomic64_t *v) static inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire @@ -898,7 +911,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release atomic64_add_return_release @@ -908,7 +921,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) static inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed @@ -918,7 +931,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add @@ -928,7 +941,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire @@ -938,7 +951,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -948,7 +961,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { - 
kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed @@ -957,7 +970,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) static inline void atomic64_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub @@ -966,7 +979,7 @@ atomic64_sub(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return @@ -976,7 +989,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire @@ -986,7 +999,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release @@ -996,7 +1009,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed @@ -1006,7 +1019,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } #define atomic64_fetch_sub atomic64_fetch_sub @@ -1016,7 +1029,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire @@ -1026,7 +1039,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } #define atomic64_fetch_sub_release atomic64_fetch_sub_release @@ -1036,7 +1049,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed @@ -1046,7 +1059,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) static inline void atomic64_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_inc(v); } #define atomic64_inc atomic64_inc @@ -1056,7 +1069,7 @@ atomic64_inc(atomic64_t *v) static inline s64 atomic64_inc_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } #define atomic64_inc_return atomic64_inc_return @@ -1066,7 +1079,7 @@ atomic64_inc_return(atomic64_t *v) static inline 
s64 atomic64_inc_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } #define atomic64_inc_return_acquire atomic64_inc_return_acquire @@ -1076,7 +1089,7 @@ atomic64_inc_return_acquire(atomic64_t *v) static inline s64 atomic64_inc_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } #define atomic64_inc_return_release atomic64_inc_return_release @@ -1086,7 +1099,7 @@ atomic64_inc_return_release(atomic64_t *v) static inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed @@ -1096,7 +1109,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) static inline s64 atomic64_fetch_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } #define atomic64_fetch_inc atomic64_fetch_inc @@ -1106,7 +1119,7 @@ atomic64_fetch_inc(atomic64_t *v) static inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire @@ -1116,7 +1129,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) static inline s64 atomic64_fetch_inc_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } #define atomic64_fetch_inc_release atomic64_fetch_inc_release @@ -1126,7 +1139,7 @@ atomic64_fetch_inc_release(atomic64_t *v) static inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed @@ -1136,7 +1149,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) static inline void atomic64_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_dec(v); } #define atomic64_dec atomic64_dec @@ -1146,7 +1159,7 @@ atomic64_dec(atomic64_t *v) static inline s64 atomic64_dec_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } #define atomic64_dec_return atomic64_dec_return @@ -1156,7 +1169,7 @@ atomic64_dec_return(atomic64_t *v) static inline s64 atomic64_dec_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } #define atomic64_dec_return_acquire atomic64_dec_return_acquire @@ -1166,7 +1179,7 @@ atomic64_dec_return_acquire(atomic64_t *v) static inline s64 atomic64_dec_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } #define atomic64_dec_return_release atomic64_dec_return_release @@ -1176,7 +1189,7 @@ atomic64_dec_return_release(atomic64_t *v) static inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed @@ -1186,7 +1199,7 @@ 
atomic64_dec_return_relaxed(atomic64_t *v) static inline s64 atomic64_fetch_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } #define atomic64_fetch_dec atomic64_fetch_dec @@ -1196,7 +1209,7 @@ atomic64_fetch_dec(atomic64_t *v) static inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire @@ -1206,7 +1219,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) static inline s64 atomic64_fetch_dec_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } #define atomic64_fetch_dec_release atomic64_fetch_dec_release @@ -1216,7 +1229,7 @@ atomic64_fetch_dec_release(atomic64_t *v) static inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed @@ -1225,7 +1238,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) static inline void atomic64_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_and(i, v); } #define atomic64_and atomic64_and @@ -1234,7 +1247,7 @@ atomic64_and(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } #define atomic64_fetch_and atomic64_fetch_and @@ -1244,7 +1257,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire @@ -1254,7 +1267,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } #define atomic64_fetch_and_release atomic64_fetch_and_release @@ -1264,7 +1277,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed @@ -1274,7 +1287,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) static inline void atomic64_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } #define atomic64_andnot atomic64_andnot @@ -1284,7 +1297,7 @@ atomic64_andnot(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } #define atomic64_fetch_andnot atomic64_fetch_andnot @@ -1294,7 +1307,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } #define atomic64_fetch_andnot_acquire 
atomic64_fetch_andnot_acquire @@ -1304,7 +1317,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release @@ -1314,7 +1327,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed @@ -1323,7 +1336,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) static inline void atomic64_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_or(i, v); } #define atomic64_or atomic64_or @@ -1332,7 +1345,7 @@ atomic64_or(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } #define atomic64_fetch_or atomic64_fetch_or @@ -1342,7 +1355,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire @@ -1352,7 +1365,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } #define atomic64_fetch_or_release atomic64_fetch_or_release @@ -1362,7 +1375,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed @@ -1371,7 +1384,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) static inline void atomic64_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } #define atomic64_xor atomic64_xor @@ -1380,7 +1393,7 @@ atomic64_xor(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } #define atomic64_fetch_xor atomic64_fetch_xor @@ -1390,7 +1403,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire @@ -1400,7 +1413,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } #define atomic64_fetch_xor_release atomic64_fetch_xor_release @@ -1410,7 +1423,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, 
sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed @@ -1420,7 +1433,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_xchg(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } #define atomic64_xchg atomic64_xchg @@ -1430,7 +1443,7 @@ atomic64_xchg(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } #define atomic64_xchg_acquire atomic64_xchg_acquire @@ -1440,7 +1453,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } #define atomic64_xchg_release atomic64_xchg_release @@ -1450,7 +1463,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } #define atomic64_xchg_relaxed atomic64_xchg_relaxed @@ -1460,7 +1473,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } #define atomic64_cmpxchg atomic64_cmpxchg @@ -1470,7 +1483,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire @@ -1480,7 +1493,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } #define atomic64_cmpxchg_release atomic64_cmpxchg_release @@ -1490,7 +1503,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed @@ -1500,8 +1513,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } #define atomic64_try_cmpxchg atomic64_try_cmpxchg @@ -1511,8 +1524,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire @@ -1522,8 +1535,8 @@ 
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release @@ -1533,8 +1546,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed @@ -1544,7 +1557,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } #define atomic64_sub_and_test atomic64_sub_and_test @@ -1554,7 +1567,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) static inline bool atomic64_dec_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } #define atomic64_dec_and_test atomic64_dec_and_test @@ -1564,7 +1577,7 @@ atomic64_dec_and_test(atomic64_t *v) static inline bool atomic64_inc_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } #define atomic64_inc_and_test atomic64_inc_and_test @@ -1574,7 +1587,7 @@ atomic64_inc_and_test(atomic64_t *v) static inline bool atomic64_add_negative(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } #define atomic64_add_negative atomic64_add_negative @@ -1584,7 +1597,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } #define atomic64_fetch_add_unless atomic64_fetch_add_unless @@ -1594,7 +1607,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } #define atomic64_add_unless atomic64_add_unless @@ -1604,7 +1617,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) static inline bool atomic64_inc_not_zero(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } #define atomic64_inc_not_zero atomic64_inc_not_zero @@ -1614,7 +1627,7 @@ atomic64_inc_not_zero(atomic64_t *v) static inline bool atomic64_inc_unless_negative(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } #define atomic64_inc_unless_negative atomic64_inc_unless_negative @@ -1624,7 +1637,7 @@ atomic64_inc_unless_negative(atomic64_t *v) static inline bool atomic64_dec_unless_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return 
arch_atomic64_dec_unless_positive(v); } #define atomic64_dec_unless_positive atomic64_dec_unless_positive @@ -1634,7 +1647,7 @@ atomic64_dec_unless_positive(atomic64_t *v) static inline s64 atomic64_dec_if_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } #define atomic64_dec_if_positive atomic64_dec_if_positive @@ -1644,7 +1657,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1653,7 +1666,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1662,7 +1675,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1671,7 +1684,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1680,7 +1693,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1689,7 +1702,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1698,7 +1711,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1707,7 +1720,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1716,7 +1729,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1725,7 +1738,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1734,7 +1747,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_release(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1743,7 +1756,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1751,28 +1764,28 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_double(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ }) @@ -1780,9 +1793,9 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_double_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// b29b625d5de9280f680e42c7be859b55b15e5f6a +// beea41c2a0f2c69e4958ed71bf26f59740fa4b12 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index e09812372b17..8b8b2a6f8d68 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -20,7 +20,7 @@ gen_param_check() # We don't write to constant parameters [ ${type#c} != ${type} ] && rw="read" - printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n" + printf "\t__atomic_check_${rw}(${name}, sizeof(*${name}));\n" } #gen_param_check(arg...) @@ -107,7 +107,7 @@ cat < #include +#include + +static inline void __atomic_check_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +static inline void __atomic_check_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} EOF -- cgit v1.2.3-58-ga151 From c020395b6634b7a674ee6aa91a971b08e268caba Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 26 Nov 2019 15:04:04 +0100 Subject: asm-generic/atomic: Use __always_inline for pure wrappers Prefer __always_inline for atomic wrappers. When building for size (CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to inline even relatively small static inline functions that are assumed to be inlinable such as atomic ops. This can cause problems, for example in UACCESS regions. By using __always_inline, we let the real implementation and not the wrapper determine the final inlining preference. 
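As a rough illustration of the wrapper pattern this refers to (a sketch with a hypothetical name, not text from the patch): the generated wrapper only adds the KASAN/KCSAN checks and forwards to the arch_*() implementation, so marking it __always_inline ensures the wrapper itself never survives as a separate out-of-line call, while the arch implementation keeps whatever inlining preference the architecture chose.

	/* Sketch only; mirrors the generated wrappers in atomic-instrumented.h. */
	static __always_inline int instrumented_atomic_read(const atomic_t *v)
	{
		__atomic_check_read(v, sizeof(*v));	/* KASAN + KCSAN checks */
		return arch_atomic_read(v);		/* real implementation */
	}
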
For x86 tinyconfig we observe: - vmlinux baseline: 1316204 - vmlinux with patch: 1315988 (-216 bytes) This came up when addressing UACCESS warnings with CC_OPTIMIZE_FOR_SIZE in the KCSAN runtime: http://lkml.kernel.org/r/58708908-84a0-0a81-a836-ad97e33dbb62@infradead.org Reported-by: Randy Dunlap Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- include/asm-generic/atomic-instrumented.h | 335 +++++++++++++++--------------- include/asm-generic/atomic-long.h | 331 ++++++++++++++--------------- scripts/atomic/gen-atomic-instrumented.sh | 7 +- scripts/atomic/gen-atomic-long.sh | 3 +- 4 files changed, 340 insertions(+), 336 deletions(-) (limited to 'scripts') diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 3dc0f38544f6..63869ded73ac 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -18,22 +18,23 @@ #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H #include +#include #include #include -static inline void __atomic_check_read(const volatile void *v, size_t size) +static __always_inline void __atomic_check_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_atomic_read(v, size); } -static inline void __atomic_check_write(const volatile void *v, size_t size) +static __always_inline void __atomic_check_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_atomic_write(v, size); } -static inline int +static __always_inline int atomic_read(const atomic_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -42,7 +43,7 @@ atomic_read(const atomic_t *v) #define atomic_read atomic_read #if defined(arch_atomic_read_acquire) -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -51,7 +52,7 @@ atomic_read_acquire(const atomic_t *v) #define atomic_read_acquire atomic_read_acquire #endif -static inline void +static __always_inline void atomic_set(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -60,7 +61,7 @@ atomic_set(atomic_t *v, int i) #define atomic_set atomic_set #if defined(arch_atomic_set_release) -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -69,7 +70,7 @@ atomic_set_release(atomic_t *v, int i) #define atomic_set_release atomic_set_release #endif -static inline void +static __always_inline void atomic_add(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -78,7 +79,7 @@ atomic_add(int i, atomic_t *v) #define atomic_add atomic_add #if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -88,7 +89,7 @@ atomic_add_return(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_acquire) -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -98,7 +99,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_release) -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -108,7 +109,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_relaxed) -static inline int +static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { 
__atomic_check_write(v, sizeof(*v)); @@ -118,7 +119,7 @@ atomic_add_return_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -128,7 +129,7 @@ atomic_fetch_add(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_acquire) -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -138,7 +139,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_release) -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -148,7 +149,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_relaxed) -static inline int +static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -157,7 +158,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #endif -static inline void +static __always_inline void atomic_sub(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -166,7 +167,7 @@ atomic_sub(int i, atomic_t *v) #define atomic_sub atomic_sub #if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -176,7 +177,7 @@ atomic_sub_return(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_acquire) -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -186,7 +187,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_release) -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -196,7 +197,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_relaxed) -static inline int +static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -206,7 +207,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -216,7 +217,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_acquire) -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -226,7 +227,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_release) -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -236,7 +237,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_relaxed) -static inline int +static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -246,7 +247,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) #endif #if defined(arch_atomic_inc) -static inline void +static __always_inline void atomic_inc(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -256,7 +257,7 @@ 
atomic_inc(atomic_t *v) #endif #if defined(arch_atomic_inc_return) -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -266,7 +267,7 @@ atomic_inc_return(atomic_t *v) #endif #if defined(arch_atomic_inc_return_acquire) -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -276,7 +277,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #if defined(arch_atomic_inc_return_release) -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -286,7 +287,7 @@ atomic_inc_return_release(atomic_t *v) #endif #if defined(arch_atomic_inc_return_relaxed) -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -296,7 +297,7 @@ atomic_inc_return_relaxed(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc) -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -306,7 +307,7 @@ atomic_fetch_inc(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_acquire) -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -316,7 +317,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_release) -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -326,7 +327,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_relaxed) -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -336,7 +337,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #endif #if defined(arch_atomic_dec) -static inline void +static __always_inline void atomic_dec(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -346,7 +347,7 @@ atomic_dec(atomic_t *v) #endif #if defined(arch_atomic_dec_return) -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -356,7 +357,7 @@ atomic_dec_return(atomic_t *v) #endif #if defined(arch_atomic_dec_return_acquire) -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -366,7 +367,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #if defined(arch_atomic_dec_return_release) -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -376,7 +377,7 @@ atomic_dec_return_release(atomic_t *v) #endif #if defined(arch_atomic_dec_return_relaxed) -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -386,7 +387,7 @@ atomic_dec_return_relaxed(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec) -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -396,7 +397,7 @@ atomic_fetch_dec(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_acquire) -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -406,7 +407,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_release) -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_check_write(v, 
sizeof(*v)); @@ -416,7 +417,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_relaxed) -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -425,7 +426,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic_and(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -434,7 +435,7 @@ atomic_and(int i, atomic_t *v) #define atomic_and atomic_and #if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -444,7 +445,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_acquire) -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -454,7 +455,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_release) -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -464,7 +465,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_relaxed) -static inline int +static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -474,7 +475,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v) #endif #if defined(arch_atomic_andnot) -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -484,7 +485,7 @@ atomic_andnot(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot) -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -494,7 +495,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_acquire) -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -504,7 +505,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_release) -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -514,7 +515,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_relaxed) -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -523,7 +524,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic_or(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -532,7 +533,7 @@ atomic_or(int i, atomic_t *v) #define atomic_or atomic_or #if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -542,7 +543,7 @@ atomic_fetch_or(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_or_acquire) -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -552,7 +553,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #if 
defined(arch_atomic_fetch_or_release) -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -562,7 +563,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_or_relaxed) -static inline int +static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -571,7 +572,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic_xor(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -580,7 +581,7 @@ atomic_xor(int i, atomic_t *v) #define atomic_xor atomic_xor #if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -590,7 +591,7 @@ atomic_fetch_xor(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_acquire) -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -600,7 +601,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_release) -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -610,7 +611,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_relaxed) -static inline int +static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -620,7 +621,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -630,7 +631,7 @@ atomic_xchg(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_acquire) -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -640,7 +641,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_release) -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -650,7 +651,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_relaxed) -static inline int +static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -660,7 +661,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) #endif #if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -670,7 +671,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_acquire) -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -680,7 +681,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_release) -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -690,7 +691,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_relaxed) -static inline int +static 
__always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -700,7 +701,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_try_cmpxchg) -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -711,7 +712,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_acquire) -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -722,7 +723,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -733,7 +734,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -744,7 +745,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_sub_and_test) -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -754,7 +755,7 @@ atomic_sub_and_test(int i, atomic_t *v) #endif #if defined(arch_atomic_dec_and_test) -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -764,7 +765,7 @@ atomic_dec_and_test(atomic_t *v) #endif #if defined(arch_atomic_inc_and_test) -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -774,7 +775,7 @@ atomic_inc_and_test(atomic_t *v) #endif #if defined(arch_atomic_add_negative) -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -784,7 +785,7 @@ atomic_add_negative(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_unless) -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { __atomic_check_write(v, sizeof(*v)); @@ -794,7 +795,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) #endif #if defined(arch_atomic_add_unless) -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { __atomic_check_write(v, sizeof(*v)); @@ -804,7 +805,7 @@ atomic_add_unless(atomic_t *v, int a, int u) #endif #if defined(arch_atomic_inc_not_zero) -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -814,7 +815,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #if defined(arch_atomic_inc_unless_negative) -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -824,7 +825,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #if defined(arch_atomic_dec_unless_positive) -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -834,7 +835,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #if defined(arch_atomic_dec_if_positive) -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ 
-843,7 +844,7 @@ atomic_dec_if_positive(atomic_t *v) #define atomic_dec_if_positive atomic_dec_if_positive #endif -static inline s64 +static __always_inline s64 atomic64_read(const atomic64_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -852,7 +853,7 @@ atomic64_read(const atomic64_t *v) #define atomic64_read atomic64_read #if defined(arch_atomic64_read_acquire) -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -861,7 +862,7 @@ atomic64_read_acquire(const atomic64_t *v) #define atomic64_read_acquire atomic64_read_acquire #endif -static inline void +static __always_inline void atomic64_set(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -870,7 +871,7 @@ atomic64_set(atomic64_t *v, s64 i) #define atomic64_set atomic64_set #if defined(arch_atomic64_set_release) -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -879,7 +880,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #define atomic64_set_release atomic64_set_release #endif -static inline void +static __always_inline void atomic64_add(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -888,7 +889,7 @@ atomic64_add(s64 i, atomic64_t *v) #define atomic64_add atomic64_add #if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -898,7 +899,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_acquire) -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -908,7 +909,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_release) -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -918,7 +919,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -928,7 +929,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -938,7 +939,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -948,7 +949,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_release) -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -958,7 +959,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -967,7 +968,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #endif -static inline void +static __always_inline void 
atomic64_sub(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -976,7 +977,7 @@ atomic64_sub(s64 i, atomic64_t *v) #define atomic64_sub atomic64_sub #if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -986,7 +987,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_acquire) -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -996,7 +997,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_release) -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1006,7 +1007,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1016,7 +1017,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1026,7 +1027,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1036,7 +1037,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_release) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1046,7 +1047,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1056,7 +1057,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_inc) -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1066,7 +1067,7 @@ atomic64_inc(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return) -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1076,7 +1077,7 @@ atomic64_inc_return(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_acquire) -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1086,7 +1087,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_release) -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1096,7 +1097,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1106,7 +1107,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc) -static inline s64 +static __always_inline s64 
atomic64_fetch_inc(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1116,7 +1117,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1126,7 +1127,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_release) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1136,7 +1137,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1146,7 +1147,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_dec) -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1156,7 +1157,7 @@ atomic64_dec(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return) -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1166,7 +1167,7 @@ atomic64_dec_return(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_acquire) -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1176,7 +1177,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_release) -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1186,7 +1187,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1196,7 +1197,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec) -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1206,7 +1207,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1216,7 +1217,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_release) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1226,7 +1227,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1235,7 +1236,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic64_and(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1244,7 +1245,7 @@ atomic64_and(s64 i, atomic64_t *v) #define atomic64_and atomic64_and #if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1254,7 +1255,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif #if 
defined(arch_atomic64_fetch_and_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1264,7 +1265,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_and_release) -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1274,7 +1275,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_and_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1284,7 +1285,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_andnot) -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1294,7 +1295,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1304,7 +1305,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1314,7 +1315,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_release) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1324,7 +1325,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1333,7 +1334,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic64_or(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1342,7 +1343,7 @@ atomic64_or(s64 i, atomic64_t *v) #define atomic64_or atomic64_or #if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1352,7 +1353,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1362,7 +1363,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_release) -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1372,7 +1373,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1381,7 +1382,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1390,7 
+1391,7 @@ atomic64_xor(s64 i, atomic64_t *v) #define atomic64_xor atomic64_xor #if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1400,7 +1401,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1410,7 +1411,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_release) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1420,7 +1421,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1430,7 +1431,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1440,7 +1441,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_acquire) -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1450,7 +1451,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_release) -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1460,7 +1461,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1470,7 +1471,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) #endif #if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1480,7 +1481,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_acquire) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1490,7 +1491,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_release) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1500,7 +1501,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1510,7 +1511,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg) -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1521,7 +1522,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_acquire) 
-static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1532,7 +1533,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1543,7 +1544,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1554,7 +1555,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_sub_and_test) -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1564,7 +1565,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_dec_and_test) -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1574,7 +1575,7 @@ atomic64_dec_and_test(atomic64_t *v) #endif #if defined(arch_atomic64_inc_and_test) -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1584,7 +1585,7 @@ atomic64_inc_and_test(atomic64_t *v) #endif #if defined(arch_atomic64_add_negative) -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1594,7 +1595,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_unless) -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { __atomic_check_write(v, sizeof(*v)); @@ -1604,7 +1605,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) #endif #if defined(arch_atomic64_add_unless) -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { __atomic_check_write(v, sizeof(*v)); @@ -1614,7 +1615,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) #endif #if defined(arch_atomic64_inc_not_zero) -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1624,7 +1625,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #if defined(arch_atomic64_inc_unless_negative) -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1634,7 +1635,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #if defined(arch_atomic64_dec_unless_positive) -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1644,7 +1645,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #if defined(arch_atomic64_dec_if_positive) -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1798,4 +1799,4 @@ atomic64_dec_if_positive(atomic64_t *v) }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// beea41c2a0f2c69e4958ed71bf26f59740fa4b12 +// 7b7e2af0e75c8ecb6f02298a7075f503f30d244c diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 881c7e27af28..073cf40f431b 100644 --- 
a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -6,6 +6,7 @@ #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H +#include #include #ifdef CONFIG_64BIT @@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t; #ifdef CONFIG_64BIT -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic64_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic64_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic64_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic64_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic64_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic64_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic64_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic64_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic64_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic64_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic64_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic64_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic64_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic64_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic64_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic64_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic64_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic64_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic64_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic64_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic64_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { 
return atomic64_inc_return(v); } -static inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic64_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic64_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic64_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic64_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic64_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic64_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic64_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic64_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic64_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic64_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic64_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic64_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic64_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic64_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic64_fetch_dec_release(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic64_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic64_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic64_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic64_fetch_and_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic64_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic64_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic64_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic64_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic64_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, 
atomic_long_t *v) { return atomic64_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic64_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic64_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic64_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic64_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic64_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic64_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic64_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic64_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic64_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic64_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic64_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic64_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_relaxed(v, old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_release(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic64_sub_and_test(i, v); } -static inline bool +static __always_inline bool 
atomic_long_dec_and_test(atomic_long_t *v) { return atomic64_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic64_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic64_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic64_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic64_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic64_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic64_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic64_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic64_dec_if_positive(v); @@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v) #else /* CONFIG_64BIT */ -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, 
atomic_long_t *v) { return atomic_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { return atomic_inc_return(v); } -static inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic_fetch_dec_release(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic_fetch_and_acquire(i, v); } -static inline long 
+static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic_cmpxchg(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_relaxed(v, old, new); } -static inline bool 
+static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_acquire(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_release(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_relaxed(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic_sub_and_test(i, v); } -static inline bool +static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic_dec_if_positive(v); @@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) #endif /* CONFIG_64BIT */ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// 77558968132ce4f911ad53f6f52ce423006f6268 +// a624200981f552b2c6be4f32fe44da8289f30d87 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index 8b8b2a6f8d68..fb4222548b22 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -84,7 +84,7 @@ gen_proto_order_variant() [ ! 
-z "${guard}" ] && printf "#if ${guard}\n" cat < +#include #include #include -static inline void __atomic_check_read(const volatile void *v, size_t size) +static __always_inline void __atomic_check_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_atomic_read(v, size); } -static inline void __atomic_check_write(const volatile void *v, size_t size) +static __always_inline void __atomic_check_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_atomic_write(v, size); diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh index c240a7231b2e..e318d3f92e53 100755 --- a/scripts/atomic/gen-atomic-long.sh +++ b/scripts/atomic/gen-atomic-long.sh @@ -46,7 +46,7 @@ gen_proto_order_variant() local retstmt="$(gen_ret_stmt "${meta}")" cat < #include #ifdef CONFIG_64BIT -- cgit v1.2.3-58-ga151 From 944bc9cca7c392879fa2c3f911bbef7422707679 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 26 Nov 2019 15:04:05 +0100 Subject: asm-generic/atomic: Use __always_inline for fallback wrappers Use __always_inline for atomic fallback wrappers. When building for size (CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to inline even relatively small static inline functions that are assumed to be inlinable such as atomic ops. This can cause problems, for example in UACCESS regions. While the fallback wrappers aren't pure wrappers, they are trivial nonetheless, and the function they wrap should determine the final inlining policy. For x86 tinyconfig we observe: - vmlinux baseline: 1315988 - vmlinux with patch: 1315928 (-60 bytes) Suggested-by: Mark Rutland Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- include/linux/atomic-fallback.h | 340 ++++++++++++++------------- scripts/atomic/fallbacks/acquire | 2 +- scripts/atomic/fallbacks/add_negative | 2 +- scripts/atomic/fallbacks/add_unless | 2 +- scripts/atomic/fallbacks/andnot | 2 +- scripts/atomic/fallbacks/dec | 2 +- scripts/atomic/fallbacks/dec_and_test | 2 +- scripts/atomic/fallbacks/dec_if_positive | 2 +- scripts/atomic/fallbacks/dec_unless_positive | 2 +- scripts/atomic/fallbacks/fence | 2 +- scripts/atomic/fallbacks/fetch_add_unless | 2 +- scripts/atomic/fallbacks/inc | 2 +- scripts/atomic/fallbacks/inc_and_test | 2 +- scripts/atomic/fallbacks/inc_not_zero | 2 +- scripts/atomic/fallbacks/inc_unless_negative | 2 +- scripts/atomic/fallbacks/read_acquire | 2 +- scripts/atomic/fallbacks/release | 2 +- scripts/atomic/fallbacks/set_release | 2 +- scripts/atomic/fallbacks/sub_and_test | 2 +- scripts/atomic/fallbacks/try_cmpxchg | 2 +- scripts/atomic/gen-atomic-fallback.sh | 2 + 21 files changed, 192 insertions(+), 188 deletions(-) (limited to 'scripts') diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index a7d240e465c0..656b5489b673 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -6,6 +6,8 @@ #ifndef _LINUX_ATOMIC_FALLBACK_H #define _LINUX_ATOMIC_FALLBACK_H +#include + #ifndef xchg_relaxed #define xchg_relaxed xchg #define xchg_acquire xchg @@ -76,7 +78,7 @@ #endif /* cmpxchg64_relaxed */ #ifndef atomic_read_acquire -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { return smp_load_acquire(&(v)->counter); @@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v) #endif #ifndef atomic_set_release -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { smp_store_release(&(v)->counter, i); 
@@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i) #else /* atomic_add_return_relaxed */ #ifndef atomic_add_return_acquire -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { int ret = atomic_add_return_relaxed(i, v); @@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_add_return_release -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #ifndef atomic_add_return -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { int ret; @@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v) #else /* atomic_fetch_add_relaxed */ #ifndef atomic_fetch_add_acquire -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { int ret = atomic_fetch_add_relaxed(i, v); @@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_add_release -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_add -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { int ret; @@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v) #else /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_acquire -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { int ret = atomic_sub_return_relaxed(i, v); @@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_sub_return_release -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #ifndef atomic_sub_return -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { int ret; @@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v) #else /* atomic_fetch_sub_relaxed */ #ifndef atomic_fetch_sub_acquire -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { int ret = atomic_fetch_sub_relaxed(i, v); @@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub_release -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { int ret; @@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ #ifndef atomic_inc -static inline void +static __always_inline void atomic_inc(atomic_t *v) { atomic_add(1, v); @@ -278,7 +280,7 @@ atomic_inc(atomic_t *v) #endif /* atomic_inc_return */ #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { return atomic_add_return(1, v); @@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v) #endif #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { return atomic_add_return_acquire(1, v); @@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int 
atomic_inc_return_release(atomic_t *v) { return atomic_add_return_release(1, v); @@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return_relaxed -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { return atomic_add_return_relaxed(1, v); @@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v) #else /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { int ret = atomic_inc_return_relaxed(v); @@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_release_fence(); @@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { int ret; @@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_fetch_inc */ #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { return atomic_fetch_add(1, v); @@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v) #endif #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { return atomic_fetch_add_acquire(1, v); @@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { return atomic_fetch_add_release(1, v); @@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc_relaxed -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { return atomic_fetch_add_relaxed(1, v); @@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #else /* atomic_fetch_inc_relaxed */ #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { int ret = atomic_fetch_inc_relaxed(v); @@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_release_fence(); @@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { int ret; @@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ #ifndef atomic_dec -static inline void +static __always_inline void atomic_dec(atomic_t *v) { atomic_sub(1, v); @@ -449,7 +451,7 @@ atomic_dec(atomic_t *v) #endif /* atomic_dec_return */ #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { return atomic_sub_return(1, v); @@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v) #endif #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { return atomic_sub_return_acquire(1, v); @@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { return atomic_sub_return_release(1, v); @@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return_relaxed -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { return 
atomic_sub_return_relaxed(1, v); @@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v) #else /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { int ret = atomic_dec_return_relaxed(v); @@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_release_fence(); @@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { int ret; @@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_fetch_dec */ #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { return atomic_fetch_sub(1, v); @@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v) #endif #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { return atomic_fetch_sub_acquire(1, v); @@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { return atomic_fetch_sub_release(1, v); @@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec_relaxed -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { return atomic_fetch_sub_relaxed(1, v); @@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #else /* atomic_fetch_dec_relaxed */ #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { int ret = atomic_fetch_dec_relaxed(v); @@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_release_fence(); @@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { int ret; @@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v) #else /* atomic_fetch_and_relaxed */ #ifndef atomic_fetch_and_acquire -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { int ret = atomic_fetch_and_relaxed(i, v); @@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_and_release -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_and -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { int ret; @@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ #ifndef atomic_andnot -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); @@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot */ #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { return atomic_fetch_and(~i, v); @@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { return 
atomic_fetch_and_acquire(~i, v); @@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { return atomic_fetch_and_release(~i, v); @@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_relaxed -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { return atomic_fetch_and_relaxed(~i, v); @@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #else /* atomic_fetch_andnot_relaxed */ #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { int ret = atomic_fetch_andnot_relaxed(i, v); @@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { int ret; @@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #else /* atomic_fetch_or_relaxed */ #ifndef atomic_fetch_or_acquire -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { int ret = atomic_fetch_or_relaxed(i, v); @@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_or_release -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_or -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { int ret; @@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v) #else /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_acquire -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { int ret = atomic_fetch_xor_relaxed(i, v); @@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor_release -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { int ret; @@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v) #else /* atomic_xchg_relaxed */ #ifndef atomic_xchg_acquire -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { int ret = atomic_xchg_relaxed(v, i); @@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #ifndef atomic_xchg_release -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_release_fence(); @@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #ifndef atomic_xchg -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { int ret; @@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i) #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { int ret = atomic_cmpxchg_relaxed(v, old, new); @@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t 
*v, int old, int new) #endif #ifndef atomic_cmpxchg_release -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_release_fence(); @@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_try_cmpxchg */ #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { int r, o = *old; @@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { int r, o = *old; @@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { int r, o = *old; @@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { int r, o = *old; @@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #else /* atomic_try_cmpxchg_relaxed */ #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { bool ret = atomic_try_cmpxchg_relaxed(v, old, new); @@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_release_fence(); @@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { bool ret; @@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { return atomic_sub_return(i, v) == 0; @@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { return atomic_dec_return(v) == 0; @@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { return atomic_inc_return(v) == 0; @@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. 
* Returns original value of @v */ -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c = atomic_read(v); @@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { return atomic_fetch_add_unless(v, a, u) != u; @@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. */ -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { return atomic_add_unless(v, 1, 0); @@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #ifndef atomic_inc_unless_negative -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { int c = atomic_read(v); @@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #ifndef atomic_dec_unless_positive -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { int c = atomic_read(v); @@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #ifndef atomic_dec_if_positive -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { int dec, c = atomic_read(v); @@ -1186,7 +1188,7 @@ atomic_dec_if_positive(atomic_t *v) #endif #ifndef atomic64_read_acquire -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { return smp_load_acquire(&(v)->counter); @@ -1195,7 +1197,7 @@ atomic64_read_acquire(const atomic64_t *v) #endif #ifndef atomic64_set_release -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { smp_store_release(&(v)->counter, i); @@ -1210,7 +1212,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #else /* atomic64_add_return_relaxed */ #ifndef atomic64_add_return_acquire -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_add_return_relaxed(i, v); @@ -1221,7 +1223,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return_release -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1231,7 +1233,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { s64 ret; @@ -1252,7 +1254,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_add_relaxed */ #ifndef atomic64_fetch_add_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_add_relaxed(i, v); @@ -1263,7 +1265,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add_release -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1273,7 +1275,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { s64 ret; @@ -1294,7 +1296,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #else /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_acquire -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { s64 
ret = atomic64_sub_return_relaxed(i, v); @@ -1305,7 +1307,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return_release -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1315,7 +1317,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { s64 ret; @@ -1336,7 +1338,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_fetch_sub_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_sub_relaxed(i, v); @@ -1347,7 +1349,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub_release -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1357,7 +1359,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { s64 ret; @@ -1372,7 +1374,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_inc -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { atomic64_add(1, v); @@ -1388,7 +1390,7 @@ atomic64_inc(atomic64_t *v) #endif /* atomic64_inc_return */ #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { return atomic64_add_return(1, v); @@ -1397,7 +1399,7 @@ atomic64_inc_return(atomic64_t *v) #endif #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { return atomic64_add_return_acquire(1, v); @@ -1406,7 +1408,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { return atomic64_add_return_release(1, v); @@ -1415,7 +1417,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return_relaxed -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { return atomic64_add_return_relaxed(1, v); @@ -1426,7 +1428,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #else /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { s64 ret = atomic64_inc_return_relaxed(v); @@ -1437,7 +1439,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1447,7 +1449,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { s64 ret; @@ -1469,7 +1471,7 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_fetch_inc */ #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { return atomic64_fetch_add(1, v); @@ -1478,7 +1480,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { return atomic64_fetch_add_acquire(1, v); @@ -1487,7 +1489,7 
@@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { return atomic64_fetch_add_release(1, v); @@ -1496,7 +1498,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { return atomic64_fetch_add_relaxed(1, v); @@ -1507,7 +1509,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #else /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_inc_relaxed(v); @@ -1518,7 +1520,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_release_fence(); @@ -1528,7 +1530,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { s64 ret; @@ -1543,7 +1545,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_dec -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { atomic64_sub(1, v); @@ -1559,7 +1561,7 @@ atomic64_dec(atomic64_t *v) #endif /* atomic64_dec_return */ #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { return atomic64_sub_return(1, v); @@ -1568,7 +1570,7 @@ atomic64_dec_return(atomic64_t *v) #endif #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { return atomic64_sub_return_acquire(1, v); @@ -1577,7 +1579,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { return atomic64_sub_return_release(1, v); @@ -1586,7 +1588,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return_relaxed -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { return atomic64_sub_return_relaxed(1, v); @@ -1597,7 +1599,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #else /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { s64 ret = atomic64_dec_return_relaxed(v); @@ -1608,7 +1610,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1618,7 +1620,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { s64 ret; @@ -1640,7 +1642,7 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_fetch_dec */ #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { return atomic64_fetch_sub(1, v); @@ -1649,7 +1651,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { return atomic64_fetch_sub_acquire(1, v); @@ -1658,7 +1660,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef 
atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { return atomic64_fetch_sub_release(1, v); @@ -1667,7 +1669,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { return atomic64_fetch_sub_relaxed(1, v); @@ -1678,7 +1680,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #else /* atomic64_fetch_dec_relaxed */ #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_dec_relaxed(v); @@ -1689,7 +1691,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_release_fence(); @@ -1699,7 +1701,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { s64 ret; @@ -1720,7 +1722,7 @@ atomic64_fetch_dec(atomic64_t *v) #else /* atomic64_fetch_and_relaxed */ #ifndef atomic64_fetch_and_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_and_relaxed(i, v); @@ -1731,7 +1733,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and_release -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1741,7 +1743,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { s64 ret; @@ -1756,7 +1758,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ #ifndef atomic64_andnot -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { atomic64_and(~i, v); @@ -1772,7 +1774,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot */ #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { return atomic64_fetch_and(~i, v); @@ -1781,7 +1783,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { return atomic64_fetch_and_acquire(~i, v); @@ -1790,7 +1792,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { return atomic64_fetch_and_release(~i, v); @@ -1799,7 +1801,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { return atomic64_fetch_and_relaxed(~i, v); @@ -1810,7 +1812,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #else /* atomic64_fetch_andnot_relaxed */ #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_andnot_relaxed(i, v); @@ -1821,7 +1823,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 
atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1831,7 +1833,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { s64 ret; @@ -1852,7 +1854,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #else /* atomic64_fetch_or_relaxed */ #ifndef atomic64_fetch_or_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_or_relaxed(i, v); @@ -1863,7 +1865,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or_release -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1873,7 +1875,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { s64 ret; @@ -1894,7 +1896,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #else /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_xor_relaxed(i, v); @@ -1905,7 +1907,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor_release -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1915,7 +1917,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { s64 ret; @@ -1936,7 +1938,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #else /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_acquire -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { s64 ret = atomic64_xchg_relaxed(v, i); @@ -1947,7 +1949,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg_release -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_release_fence(); @@ -1957,7 +1959,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { s64 ret; @@ -1978,7 +1980,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #else /* atomic64_cmpxchg_relaxed */ #ifndef atomic64_cmpxchg_acquire -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { s64 ret = atomic64_cmpxchg_relaxed(v, old, new); @@ -1989,7 +1991,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg_release -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_release_fence(); @@ -1999,7 +2001,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { s64 ret; @@ -2021,7 +2023,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_try_cmpxchg */ #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2034,7 +2036,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_acquire -static inline bool 
+static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2047,7 +2049,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2060,7 +2062,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2075,7 +2077,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #else /* atomic64_try_cmpxchg_relaxed */ #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); @@ -2086,7 +2088,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_release_fence(); @@ -2096,7 +2098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { bool ret; @@ -2120,7 +2122,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { return atomic64_sub_return(i, v) == 0; @@ -2137,7 +2139,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { return atomic64_dec_return(v) == 0; @@ -2154,7 +2156,7 @@ atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { return atomic64_inc_return(v) == 0; @@ -2172,7 +2174,7 @@ atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { return atomic64_add_return(i, v) < 0; @@ -2190,7 +2192,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 c = atomic64_read(v); @@ -2215,7 +2217,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { return atomic64_fetch_add_unless(v, a, u) != u; @@ -2231,7 +2233,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
*/ -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { return atomic64_add_unless(v, 1, 0); @@ -2240,7 +2242,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #ifndef atomic64_inc_unless_negative -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2256,7 +2258,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #ifndef atomic64_dec_unless_positive -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2272,7 +2274,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #ifndef atomic64_dec_if_positive -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 dec, c = atomic64_read(v); @@ -2292,4 +2294,4 @@ atomic64_dec_if_positive(atomic64_t *v) #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 25de4a2804d70f57e994fe3b419148658bb5378a +// baaf45f4c24ed88ceae58baca39d7fd80bb8101b diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire index e38871e64db6..ea489acc285e 100755 --- a/scripts/atomic/fallbacks/acquire +++ b/scripts/atomic/fallbacks/acquire @@ -1,5 +1,5 @@ cat <counter); diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release index 3f628a3802d9..730d2a6d3e07 100755 --- a/scripts/atomic/fallbacks/release +++ b/scripts/atomic/fallbacks/release @@ -1,5 +1,5 @@ cat <counter, i); diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test index 289ef17a2d7a..6cfe4ed49746 100755 --- a/scripts/atomic/fallbacks/sub_and_test +++ b/scripts/atomic/fallbacks/sub_and_test @@ -8,7 +8,7 @@ cat < + EOF for xchg in "xchg" "cmpxchg" "cmpxchg64"; do -- cgit v1.2.3-58-ga151 From ed8af2e4d2a71bd58f5776b7e5a477d136e32be4 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 21 Jan 2020 17:05:09 +0100 Subject: asm-generic, atomic-instrumented: Use generic instrumented.h This switches atomic-instrumented.h to use the generic instrumentation wrappers provided by instrumented.h. No functional change intended. Suggested-by: Arnd Bergmann Signed-off-by: Marco Elver Acked-by: Arnd Bergmann Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- include/asm-generic/atomic-instrumented.h | 395 +++++++++++++++--------------- scripts/atomic/gen-atomic-instrumented.sh | 19 +- 2 files changed, 194 insertions(+), 220 deletions(-) (limited to 'scripts') diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 63869ded73ac..379986e40159 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -19,25 +19,12 @@ #include #include -#include -#include - -static __always_inline void __atomic_check_read(const volatile void *v, size_t size) -{ - kasan_check_read(v, size); - kcsan_check_atomic_read(v, size); -} - -static __always_inline void __atomic_check_write(const volatile void *v, size_t size) -{ - kasan_check_write(v, size); - kcsan_check_atomic_write(v, size); -} +#include static __always_inline int atomic_read(const atomic_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read(v); } #define atomic_read atomic_read @@ -46,7 +33,7 @@ atomic_read(const atomic_t *v) static __always_inline int atomic_read_acquire(const atomic_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } #define atomic_read_acquire atomic_read_acquire @@ -55,7 +42,7 @@ atomic_read_acquire(const atomic_t *v) static __always_inline void atomic_set(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_set(v, i); } #define atomic_set atomic_set @@ -64,7 +51,7 @@ atomic_set(atomic_t *v, int i) static __always_inline void atomic_set_release(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } #define atomic_set_release atomic_set_release @@ -73,7 +60,7 @@ atomic_set_release(atomic_t *v, int i) static __always_inline void atomic_add(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_add(i, v); } #define atomic_add atomic_add @@ -82,7 +69,7 @@ atomic_add(int i, atomic_t *v) static __always_inline int atomic_add_return(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } #define atomic_add_return atomic_add_return @@ -92,7 +79,7 @@ atomic_add_return(int i, atomic_t *v) static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire @@ -102,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v) static __always_inline int atomic_add_return_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release @@ -112,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v) static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed @@ -122,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { - __atomic_check_write(v, 
sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add @@ -132,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v) static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire @@ -142,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release @@ -152,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v) static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed @@ -161,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) static __always_inline void atomic_sub(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub @@ -170,7 +157,7 @@ atomic_sub(int i, atomic_t *v) static __always_inline int atomic_sub_return(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return @@ -180,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v) static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire @@ -190,7 +177,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define atomic_sub_return_release atomic_sub_return_release @@ -200,7 +187,7 @@ atomic_sub_return_release(int i, atomic_t *v) static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } #define atomic_sub_return_relaxed atomic_sub_return_relaxed @@ -210,7 +197,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub @@ -220,7 +207,7 @@ atomic_fetch_sub(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire @@ -230,7 +217,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release 
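The wrappers above now call instrument_atomic_read()/instrument_atomic_write() from the generic instrumented.h header instead of the file-local __atomic_check_*() helpers being removed. Judging from those removed helpers, the generic ones are assumed to bundle the same KASAN and KCSAN checks; a sketch under that assumption, not the actual instrumented.h source:

	static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
	{
		kasan_check_read(v, size);	/* KASAN validity check */
		kcsan_check_atomic_read(v, size);	/* KCSAN: mark as atomic read */
	}

	static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
	{
		kasan_check_write(v, size);	/* KASAN validity check */
		kcsan_check_atomic_write(v, size);	/* KCSAN: mark as atomic write */
	}

Each generated wrapper thus remains a two-step affair: instrument the access, then defer to the arch_atomic_*() primitive, so no functional change is intended, only a move of the helpers into a shared header.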
@@ -240,7 +227,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed @@ -250,7 +237,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) static __always_inline void atomic_inc(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc @@ -260,7 +247,7 @@ atomic_inc(atomic_t *v) static __always_inline int atomic_inc_return(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return @@ -270,7 +257,7 @@ atomic_inc_return(atomic_t *v) static __always_inline int atomic_inc_return_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire @@ -280,7 +267,7 @@ atomic_inc_return_acquire(atomic_t *v) static __always_inline int atomic_inc_return_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release @@ -290,7 +277,7 @@ atomic_inc_return_release(atomic_t *v) static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed @@ -300,7 +287,7 @@ atomic_inc_return_relaxed(atomic_t *v) static __always_inline int atomic_fetch_inc(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc atomic_fetch_inc @@ -310,7 +297,7 @@ atomic_fetch_inc(atomic_t *v) static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire @@ -320,7 +307,7 @@ atomic_fetch_inc_acquire(atomic_t *v) static __always_inline int atomic_fetch_inc_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release @@ -330,7 +317,7 @@ atomic_fetch_inc_release(atomic_t *v) static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed @@ -340,7 +327,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) static __always_inline void atomic_dec(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec @@ -350,7 +337,7 @@ atomic_dec(atomic_t *v) static __always_inline int atomic_dec_return(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return @@ -360,7 +347,7 @@ atomic_dec_return(atomic_t *v) static __always_inline int 
atomic_dec_return_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire @@ -370,7 +357,7 @@ atomic_dec_return_acquire(atomic_t *v) static __always_inline int atomic_dec_return_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release @@ -380,7 +367,7 @@ atomic_dec_return_release(atomic_t *v) static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed @@ -390,7 +377,7 @@ atomic_dec_return_relaxed(atomic_t *v) static __always_inline int atomic_fetch_dec(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec @@ -400,7 +387,7 @@ atomic_fetch_dec(atomic_t *v) static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire @@ -410,7 +397,7 @@ atomic_fetch_dec_acquire(atomic_t *v) static __always_inline int atomic_fetch_dec_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release @@ -420,7 +407,7 @@ atomic_fetch_dec_release(atomic_t *v) static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed @@ -429,7 +416,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) static __always_inline void atomic_and(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and @@ -438,7 +425,7 @@ atomic_and(int i, atomic_t *v) static __always_inline int atomic_fetch_and(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and @@ -448,7 +435,7 @@ atomic_fetch_and(int i, atomic_t *v) static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire @@ -458,7 +445,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release @@ -468,7 +455,7 @@ atomic_fetch_and_release(int i, atomic_t *v) static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed @@ -478,7 +465,7 
@@ atomic_fetch_and_relaxed(int i, atomic_t *v) static __always_inline void atomic_andnot(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot @@ -488,7 +475,7 @@ atomic_andnot(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot @@ -498,7 +485,7 @@ atomic_fetch_andnot(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire @@ -508,7 +495,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release @@ -518,7 +505,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed @@ -527,7 +514,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) static __always_inline void atomic_or(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or @@ -536,7 +523,7 @@ atomic_or(int i, atomic_t *v) static __always_inline int atomic_fetch_or(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or @@ -546,7 +533,7 @@ atomic_fetch_or(int i, atomic_t *v) static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire @@ -556,7 +543,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release @@ -566,7 +553,7 @@ atomic_fetch_or_release(int i, atomic_t *v) static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed @@ -575,7 +562,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) static __always_inline void atomic_xor(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor @@ -584,7 +571,7 @@ atomic_xor(int i, atomic_t *v) static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define 
atomic_fetch_xor atomic_fetch_xor @@ -594,7 +581,7 @@ atomic_fetch_xor(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire @@ -604,7 +591,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release @@ -614,7 +601,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed @@ -624,7 +611,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) static __always_inline int atomic_xchg(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg @@ -634,7 +621,7 @@ atomic_xchg(atomic_t *v, int i) static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire @@ -644,7 +631,7 @@ atomic_xchg_acquire(atomic_t *v, int i) static __always_inline int atomic_xchg_release(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release @@ -654,7 +641,7 @@ atomic_xchg_release(atomic_t *v, int i) static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed @@ -664,7 +651,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg @@ -674,7 +661,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire @@ -684,7 +671,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release @@ -694,7 +681,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed @@ -704,8 +691,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) 
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg @@ -715,8 +702,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire @@ -726,8 +713,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release @@ -737,8 +724,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed @@ -748,7 +735,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test @@ -758,7 +745,7 @@ atomic_sub_and_test(int i, atomic_t *v) static __always_inline bool atomic_dec_and_test(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test @@ -768,7 +755,7 @@ atomic_dec_and_test(atomic_t *v) static __always_inline bool atomic_inc_and_test(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test @@ -778,7 +765,7 @@ atomic_inc_and_test(atomic_t *v) static __always_inline bool atomic_add_negative(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative @@ -788,7 +775,7 @@ atomic_add_negative(int i, atomic_t *v) static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define atomic_fetch_add_unless atomic_fetch_add_unless @@ -798,7 +785,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless @@ -808,7 +795,7 @@ 
atomic_add_unless(atomic_t *v, int a, int u) static __always_inline bool atomic_inc_not_zero(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero @@ -818,7 +805,7 @@ atomic_inc_not_zero(atomic_t *v) static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative @@ -828,7 +815,7 @@ atomic_inc_unless_negative(atomic_t *v) static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive @@ -838,7 +825,7 @@ atomic_dec_unless_positive(atomic_t *v) static __always_inline int atomic_dec_if_positive(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive @@ -847,7 +834,7 @@ atomic_dec_if_positive(atomic_t *v) static __always_inline s64 atomic64_read(const atomic64_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read @@ -856,7 +843,7 @@ atomic64_read(const atomic64_t *v) static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire @@ -865,7 +852,7 @@ atomic64_read_acquire(const atomic64_t *v) static __always_inline void atomic64_set(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set @@ -874,7 +861,7 @@ atomic64_set(atomic64_t *v, s64 i) static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release @@ -883,7 +870,7 @@ atomic64_set_release(atomic64_t *v, s64 i) static __always_inline void atomic64_add(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add @@ -892,7 +879,7 @@ atomic64_add(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return atomic64_add_return @@ -902,7 +889,7 @@ atomic64_add_return(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire @@ -912,7 +899,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release 
atomic64_add_return_release @@ -922,7 +909,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed @@ -932,7 +919,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add @@ -942,7 +929,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire @@ -952,7 +939,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -962,7 +949,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed @@ -971,7 +958,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_sub(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub @@ -980,7 +967,7 @@ atomic64_sub(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return @@ -990,7 +977,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire @@ -1000,7 +987,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release @@ -1010,7 +997,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed @@ -1020,7 +1007,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } #define 
atomic64_fetch_sub atomic64_fetch_sub @@ -1030,7 +1017,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire @@ -1040,7 +1027,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } #define atomic64_fetch_sub_release atomic64_fetch_sub_release @@ -1050,7 +1037,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed @@ -1060,7 +1047,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_inc(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_inc(v); } #define atomic64_inc atomic64_inc @@ -1070,7 +1057,7 @@ atomic64_inc(atomic64_t *v) static __always_inline s64 atomic64_inc_return(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } #define atomic64_inc_return atomic64_inc_return @@ -1080,7 +1067,7 @@ atomic64_inc_return(atomic64_t *v) static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } #define atomic64_inc_return_acquire atomic64_inc_return_acquire @@ -1090,7 +1077,7 @@ atomic64_inc_return_acquire(atomic64_t *v) static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } #define atomic64_inc_return_release atomic64_inc_return_release @@ -1100,7 +1087,7 @@ atomic64_inc_return_release(atomic64_t *v) static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed @@ -1110,7 +1097,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } #define atomic64_fetch_inc atomic64_fetch_inc @@ -1120,7 +1107,7 @@ atomic64_fetch_inc(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire @@ -1130,7 +1117,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } #define atomic64_fetch_inc_release atomic64_fetch_inc_release @@ -1140,7 +1127,7 @@ 
atomic64_fetch_inc_release(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed @@ -1150,7 +1137,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) static __always_inline void atomic64_dec(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_dec(v); } #define atomic64_dec atomic64_dec @@ -1160,7 +1147,7 @@ atomic64_dec(atomic64_t *v) static __always_inline s64 atomic64_dec_return(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } #define atomic64_dec_return atomic64_dec_return @@ -1170,7 +1157,7 @@ atomic64_dec_return(atomic64_t *v) static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } #define atomic64_dec_return_acquire atomic64_dec_return_acquire @@ -1180,7 +1167,7 @@ atomic64_dec_return_acquire(atomic64_t *v) static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } #define atomic64_dec_return_release atomic64_dec_return_release @@ -1190,7 +1177,7 @@ atomic64_dec_return_release(atomic64_t *v) static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed @@ -1200,7 +1187,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } #define atomic64_fetch_dec atomic64_fetch_dec @@ -1210,7 +1197,7 @@ atomic64_fetch_dec(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire @@ -1220,7 +1207,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } #define atomic64_fetch_dec_release atomic64_fetch_dec_release @@ -1230,7 +1217,7 @@ atomic64_fetch_dec_release(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed @@ -1239,7 +1226,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) static __always_inline void atomic64_and(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_and(i, v); } #define atomic64_and atomic64_and @@ -1248,7 +1235,7 @@ atomic64_and(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); 
return arch_atomic64_fetch_and(i, v); } #define atomic64_fetch_and atomic64_fetch_and @@ -1258,7 +1245,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire @@ -1268,7 +1255,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } #define atomic64_fetch_and_release atomic64_fetch_and_release @@ -1278,7 +1265,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed @@ -1288,7 +1275,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } #define atomic64_andnot atomic64_andnot @@ -1298,7 +1285,7 @@ atomic64_andnot(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } #define atomic64_fetch_andnot atomic64_fetch_andnot @@ -1308,7 +1295,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire @@ -1318,7 +1305,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release @@ -1328,7 +1315,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed @@ -1337,7 +1324,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_or(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_or(i, v); } #define atomic64_or atomic64_or @@ -1346,7 +1333,7 @@ atomic64_or(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } #define atomic64_fetch_or atomic64_fetch_or @@ -1356,7 +1343,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return 
arch_atomic64_fetch_or_acquire(i, v); } #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire @@ -1366,7 +1353,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } #define atomic64_fetch_or_release atomic64_fetch_or_release @@ -1376,7 +1363,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed @@ -1385,7 +1372,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } #define atomic64_xor atomic64_xor @@ -1394,7 +1381,7 @@ atomic64_xor(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } #define atomic64_fetch_xor atomic64_fetch_xor @@ -1404,7 +1391,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire @@ -1414,7 +1401,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } #define atomic64_fetch_xor_release atomic64_fetch_xor_release @@ -1424,7 +1411,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed @@ -1434,7 +1421,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } #define atomic64_xchg atomic64_xchg @@ -1444,7 +1431,7 @@ atomic64_xchg(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } #define atomic64_xchg_acquire atomic64_xchg_acquire @@ -1454,7 +1441,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } #define atomic64_xchg_release atomic64_xchg_release @@ -1464,7 +1451,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } #define 
atomic64_xchg_relaxed atomic64_xchg_relaxed @@ -1474,7 +1461,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } #define atomic64_cmpxchg atomic64_cmpxchg @@ -1484,7 +1471,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire @@ -1494,7 +1481,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } #define atomic64_cmpxchg_release atomic64_cmpxchg_release @@ -1504,7 +1491,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed @@ -1514,8 +1501,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } #define atomic64_try_cmpxchg atomic64_try_cmpxchg @@ -1525,8 +1512,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire @@ -1536,8 +1523,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release @@ -1547,8 +1534,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed @@ -1558,7 +1545,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } 
#define atomic64_sub_and_test atomic64_sub_and_test @@ -1568,7 +1555,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } #define atomic64_dec_and_test atomic64_dec_and_test @@ -1578,7 +1565,7 @@ atomic64_dec_and_test(atomic64_t *v) static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } #define atomic64_inc_and_test atomic64_inc_and_test @@ -1588,7 +1575,7 @@ atomic64_inc_and_test(atomic64_t *v) static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } #define atomic64_add_negative atomic64_add_negative @@ -1598,7 +1585,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } #define atomic64_fetch_add_unless atomic64_fetch_add_unless @@ -1608,7 +1595,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } #define atomic64_add_unless atomic64_add_unless @@ -1618,7 +1605,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } #define atomic64_inc_not_zero atomic64_inc_not_zero @@ -1628,7 +1615,7 @@ atomic64_inc_not_zero(atomic64_t *v) static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } #define atomic64_inc_unless_negative atomic64_inc_unless_negative @@ -1638,7 +1625,7 @@ atomic64_inc_unless_negative(atomic64_t *v) static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } #define atomic64_dec_unless_positive atomic64_dec_unless_positive @@ -1648,7 +1635,7 @@ atomic64_dec_unless_positive(atomic64_t *v) static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } #define atomic64_dec_if_positive atomic64_dec_if_positive @@ -1658,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1667,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_acquire(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1676,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1685,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1694,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1703,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1712,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1721,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1730,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1739,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1748,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1757,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1765,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_local(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_double(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ }) @@ -1794,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_double_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 7b7e2af0e75c8ecb6f02298a7075f503f30d244c +// 89bf97f3a7509b740845e51ddf31055b48a81f40 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index fb4222548b22..6afadf73da17 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -20,7 +20,7 @@ gen_param_check() # We don't write to constant parameters [ ${type#c} != ${type} ] && rw="read" - printf "\t__atomic_check_${rw}(${name}, sizeof(*${name}));\n" + printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n" } #gen_param_check(arg...) @@ -107,7 +107,7 @@ cat < #include -#include -#include - -static __always_inline void __atomic_check_read(const volatile void *v, size_t size) -{ - kasan_check_read(v, size); - kcsan_check_atomic_read(v, size); -} - -static __always_inline void __atomic_check_write(const volatile void *v, size_t size) -{ - kasan_check_write(v, size); - kcsan_check_atomic_write(v, size); -} +#include EOF -- cgit v1.2.3-58-ga151 From 5099a722e9727fe9a93fac51e961735f40e5b6c8 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 1 Apr 2020 12:17:14 +0200 Subject: checkpatch: Warn about data_race() without comment Warn about applications of data_race() without a comment, to encourage documenting the reasoning behind why it was deemed safe. Suggested-by: Will Deacon Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- scripts/checkpatch.pl | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a63380c6b0d2..48bb9508e300 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5833,6 +5833,14 @@ sub process { } } +# check for data_race without a comment. + if ($line =~ /\bdata_race\s*\(/) { + if (!ctx_has_comment($first_line, $linenr)) { + WARN("DATA_RACE", + "data_race without comment\n" . 
$herecurr); + } + } + # check for smp_read_barrier_depends and read_barrier_depends if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) { WARN("READ_BARRIER_DEPENDS", -- cgit v1.2.3-58-ga151 From 17168f5c1bef4ec7c239e2dd5e3e60b862727dd4 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 21 May 2020 16:20:38 +0200 Subject: kcsan: Avoid inserting __tsan_func_entry/exit if possible To avoid inserting __tsan_func_{entry,exit}, add option if supported by compiler. Currently only Clang can be told to not emit calls to these functions. It is safe to not emit these, since KCSAN does not rely on them. Note that, if we disable __tsan_func_{entry,exit}(), we need to disable tail-call optimization in sanitized compilation units, as otherwise we may skip frames in the stack trace; in particular when the tail called function is one of the KCSAN's runtime functions, and a report is generated, we might miss the function where the actual access occurred. Since __tsan_func_{entry,exit}() insertion effectively disabled tail-call optimization, there should be no observable change. This was caught and confirmed with kcsan-test & UNWINDER_ORC. Signed-off-by: Marco Elver Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Acked-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200521142047.169334-3-elver@google.com --- scripts/Makefile.kcsan | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan index caf1111a28ae..20337a7ecf54 100644 --- a/scripts/Makefile.kcsan +++ b/scripts/Makefile.kcsan @@ -1,6 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 ifdef CONFIG_KCSAN -CFLAGS_KCSAN := -fsanitize=thread +# GCC and Clang accept backend options differently. Do not wrap in cc-option, +# because Clang accepts "--param" even if it is unused. +ifdef CONFIG_CC_IS_CLANG +cc-param = -mllvm -$(1) +else +cc-param = --param -$(1) +endif + +CFLAGS_KCSAN := -fsanitize=thread \ + $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) endif # CONFIG_KCSAN -- cgit v1.2.3-58-ga151 From 75d75b7a4d5489cc6a5e91ace306f6c13f376f33 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 21 May 2020 16:20:39 +0200 Subject: kcsan: Support distinguishing volatile accesses In the kernel, the "volatile" keyword is used in various concurrent contexts, whether in low-level synchronization primitives or for legacy reasons. If supported by the compiler, it will be assumed that aligned volatile accesses up to sizeof(long long) (matching compiletime_assert_rwonce_type()) are atomic. Recent versions of Clang [1] (GCC tentative [2]) can instrument volatile accesses differently. Add the option (required) to enable the instrumentation, and provide the necessary runtime functions. None of the updated compilers are widely available yet (Clang 11 will be the first release to support the feature). [1] https://github.com/llvm/llvm-project/commit/5a2c31116f412c3b6888be361137efd705e05814 [2] https://gcc.gnu.org/pipermail/gcc-patches/2020-April/544452.html This change allows removing of any explicit checks in primitives such as READ_ONCE() and WRITE_ONCE(). [ bp: Massage commit message a bit. 
] Signed-off-by: Marco Elver Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Acked-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200521142047.169334-4-elver@google.com --- kernel/kcsan/core.c | 43 +++++++++++++++++++++++++++++++++++++++++++ scripts/Makefile.kcsan | 5 ++++- 2 files changed, 47 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index a73a66cf79df..15f67949d11e 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -789,6 +789,49 @@ void __tsan_write_range(void *ptr, size_t size) } EXPORT_SYMBOL(__tsan_write_range); +/* + * Use of explicit volatile is generally disallowed [1], however, volatile is + * still used in various concurrent context, whether in low-level + * synchronization primitives or for legacy reasons. + * [1] https://lwn.net/Articles/233479/ + * + * We only consider volatile accesses atomic if they are aligned and would pass + * the size-check of compiletime_assert_rwonce_type(). + */ +#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \ + void __tsan_volatile_read##size(void *ptr) \ + { \ + const bool is_atomic = size <= sizeof(long long) && \ + IS_ALIGNED((unsigned long)ptr, size); \ + if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \ + return; \ + check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \ + } \ + EXPORT_SYMBOL(__tsan_volatile_read##size); \ + void __tsan_unaligned_volatile_read##size(void *ptr) \ + __alias(__tsan_volatile_read##size); \ + EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \ + void __tsan_volatile_write##size(void *ptr) \ + { \ + const bool is_atomic = size <= sizeof(long long) && \ + IS_ALIGNED((unsigned long)ptr, size); \ + if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \ + return; \ + check_access(ptr, size, \ + KCSAN_ACCESS_WRITE | \ + (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \ + } \ + EXPORT_SYMBOL(__tsan_volatile_write##size); \ + void __tsan_unaligned_volatile_write##size(void *ptr) \ + __alias(__tsan_volatile_write##size); \ + EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size) + +DEFINE_TSAN_VOLATILE_READ_WRITE(1); +DEFINE_TSAN_VOLATILE_READ_WRITE(2); +DEFINE_TSAN_VOLATILE_READ_WRITE(4); +DEFINE_TSAN_VOLATILE_READ_WRITE(8); +DEFINE_TSAN_VOLATILE_READ_WRITE(16); + /* * The below are not required by KCSAN, but can still be emitted by the * compiler. diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan index 20337a7ecf54..75d2942b9437 100644 --- a/scripts/Makefile.kcsan +++ b/scripts/Makefile.kcsan @@ -9,7 +9,10 @@ else cc-param = --param -$(1) endif +# Keep most options here optional, to allow enabling more compilers if absence +# of some options does not break KCSAN nor causes false positive reports. CFLAGS_KCSAN := -fsanitize=thread \ - $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) + $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \ + $(call cc-param,tsan-distinguish-volatile=1) endif # CONFIG_KCSAN -- cgit v1.2.3-58-ga151 From d31d4d6bb256c3e784709e4aaf075de87c3390fc Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 21 May 2020 16:20:40 +0200 Subject: kcsan: Pass option tsan-instrument-read-before-write to Clang Clang (unlike GCC) removes reads before writes with matching addresses in the same basic block. This is an optimization for TSAN, since writes will always cause conflict if the preceding read would have. 
However, for KCSAN we cannot rely on this option, because we apply several special rules to writes, in particular when the KCSAN_ASSUME_PLAIN_WRITES_ATOMIC option is selected. To avoid missing potential data races, pass the -tsan-instrument-read-before-write option to Clang if it is available [1]. [1] https://github.com/llvm/llvm-project/commit/151ed6aa38a3ec6c01973b35f684586b6e1c0f7e Signed-off-by: Marco Elver Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Acked-by: Will Deacon Link: https://lkml.kernel.org/r/20200521142047.169334-5-elver@google.com --- scripts/Makefile.kcsan | 1 + 1 file changed, 1 insertion(+) (limited to 'scripts') diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan index 75d2942b9437..bd4da1af5953 100644 --- a/scripts/Makefile.kcsan +++ b/scripts/Makefile.kcsan @@ -13,6 +13,7 @@ endif # of some options does not break KCSAN nor causes false positive reports. CFLAGS_KCSAN := -fsanitize=thread \ $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \ + $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \ $(call cc-param,tsan-distinguish-volatile=1) endif # CONFIG_KCSAN -- cgit v1.2.3-58-ga151
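Editor's note: the checkpatch change earlier in this series only checks that a comment appears near a data_race() use; what that comment should say is left to authors. The sketch below is illustrative only and not part of the patch series. It models data_race() with a trivial userspace stand-in (the kernel's real macro, defined in its compiler headers, additionally tells KCSAN to ignore the access), purely to show the kind of justifying comment the new DATA_RACE warning expects.

/*
 * Illustrative userspace sketch (not from the patch series).  The macro
 * below only mimics the kernel's data_race() annotation so that the
 * commenting convention can be demonstrated; it does not suppress
 * instrumentation.
 */
#include <pthread.h>
#include <stdio.h>

#define data_race(expr) (expr)	/* simplified stand-in for the kernel macro */

static long hits;	/* incremented by workers without any locking */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		hits++;		/* intentionally racy in this toy example */
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	/*
	 * Diagnostic snapshot only: a stale or torn value is harmless here,
	 * which is why the read is wrapped in data_race().  A comment like
	 * this is what the new checkpatch DATA_RACE warning asks for.
	 */
	printf("approx hits so far: %ld\n", data_race(hits));

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	printf("final hits: %ld\n", hits);
	return 0;
}

Built with "cc -pthread", the snapshot read is the kind of access a race detector would flag; in kernel code the same pattern, annotated with data_race() but lacking the comment, now triggers checkpatch's DATA_RACE warning.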