author     Mark Rutland <mark.rutland@arm.com>      2023-06-05 08:01:17 +0100
committer  Peter Zijlstra <peterz@infradead.org>    2023-06-05 09:57:21 +0200
commit     9257959a6e5b4fca6fc8e985790bff62c2046f20 (patch)
tree       1b12bc04830f3339277701a6406e555db6163128 /scripts/atomic/fallbacks
parent     1815da1718aa4c062b94cf3fc09432f552e25768 (diff)
locking/atomic: scripts: restructure fallback ifdeffery
Currently, the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed variants sharing
common ifdeffery, with several potential definitions of each ordering
variant in different branches of that shared ifdeffery.
As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find the relevant definition, and there is no good location
to place anything common to all definitions of an ordering variant
(e.g. kerneldoc).
Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback with
redundant barriers, and avoiding that would otherwise require a lot of
baroque ifdeffery.
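For contrast, the old grouped structure looked roughly like the
following; this is an abridged sketch rather than the exact generated
code:

| #ifndef arch_atomic_fetch_andnot_relaxed
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
| #else /* arch_atomic_fetch_andnot_relaxed */
|
| #ifndef arch_atomic_fetch_andnot_acquire
| static __always_inline int
| arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #endif
|
| /* ... release and full-ordered fallbacks under further ifdefs ... */
|
| #endif /* arch_atomic_fetch_andnot_relaxed */

Here each ordering variant's definition is spread across branches of
ifdeffery shared by the whole group.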
With recent changes we no longer need to fill in the missing atomics in
the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.
Restructure the fallbacks in this way, with each ordering variant having
its own ifdeffery of the form:
| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| __atomic_acquire_fence();
| return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif
Note that the full-ordered arch_atomic*_<op>() can stand in directly
for an acquire/release variant, as full ordering is strictly stronger.
Where there's no relevant arch_atomic*_<op>() ordering variant at all,
we'll define the operation in terms of a distinct
raw_atomic*_<otherop>(), as this itself might have been filled in with
a fallback.
As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers, so they are removed.
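For reference, each removed wrapper simply forwarded a
raw_atomic*_<op>() to its arch_ counterpart, along these lines (a
sketch of the shape, not the exact generated code):

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| }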
This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
Diffstat (limited to 'scripts/atomic/fallbacks')
-rwxr-xr-x  scripts/atomic/fallbacks/acquire               2
-rwxr-xr-x  scripts/atomic/fallbacks/add_negative          4
-rwxr-xr-x  scripts/atomic/fallbacks/add_unless            4
-rwxr-xr-x  scripts/atomic/fallbacks/andnot                4
-rw-r--r--  scripts/atomic/fallbacks/cmpxchg               4
-rwxr-xr-x  scripts/atomic/fallbacks/dec                   4
-rwxr-xr-x  scripts/atomic/fallbacks/dec_and_test          4
-rwxr-xr-x  scripts/atomic/fallbacks/dec_if_positive       6
-rwxr-xr-x  scripts/atomic/fallbacks/dec_unless_positive   6
-rwxr-xr-x  scripts/atomic/fallbacks/fence                 2
-rwxr-xr-x  scripts/atomic/fallbacks/fetch_add_unless      6
-rwxr-xr-x  scripts/atomic/fallbacks/inc                   4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_and_test          4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_not_zero          4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_unless_negative   6
-rwxr-xr-x  scripts/atomic/fallbacks/read_acquire          4
-rwxr-xr-x  scripts/atomic/fallbacks/release               2
-rwxr-xr-x  scripts/atomic/fallbacks/set_release           4
-rwxr-xr-x  scripts/atomic/fallbacks/sub_and_test          4
-rwxr-xr-x  scripts/atomic/fallbacks/try_cmpxchg           4
-rw-r--r--  scripts/atomic/fallbacks/xchg                  4

21 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
index ef764085c79a..b0f732a5c46e 100755
--- a/scripts/atomic/fallbacks/acquire
+++ b/scripts/atomic/fallbacks/acquire
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
+raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
index d0bd2dfbb244..16876118019e 100755
--- a/scripts/atomic/fallbacks/add_negative
+++ b/scripts/atomic/fallbacks/add_negative
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_add_return${order}(i, v) < 0;
+	return raw_${atomic}_add_return${order}(i, v) < 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index cf79b9da38db..88593e28b163 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -1,7 +1,7 @@
 cat << EOF
 static __always_inline bool
-arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
+	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
index 5a42f54a3595..5b83bb63f728 100755
--- a/scripts/atomic/fallbacks/andnot
+++ b/scripts/atomic/fallbacks/andnot
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
+	${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/cmpxchg b/scripts/atomic/fallbacks/cmpxchg
index 87cd010f98d5..312ee67f1743 100644
--- a/scripts/atomic/fallbacks/cmpxchg
+++ b/scripts/atomic/fallbacks/cmpxchg
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
+raw_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
 {
-	return arch_cmpxchg${order}(&v->counter, old, new);
+	return raw_cmpxchg${order}(&v->counter, old, new);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
index 8c144c818e9e..a660ac65994b 100755
--- a/scripts/atomic/fallbacks/dec
+++ b/scripts/atomic/fallbacks/dec
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
index 3f6b6a8b4773..521dfcae03f2 100755
--- a/scripts/atomic/fallbacks/dec_and_test
+++ b/scripts/atomic/fallbacks/dec_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_and_test(${atomic}_t *v)
+raw_${atomic}_dec_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_dec_return(v) == 0;
+	return raw_${atomic}_dec_return(v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
index 86bdced3428d..7acb205e6ce3 100755
--- a/scripts/atomic/fallbacks/dec_if_positive
+++ b/scripts/atomic/fallbacks/dec_if_positive
@@ -1,14 +1,14 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_dec_if_positive(${atomic}_t *v)
+raw_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-	${int} dec, c = arch_${atomic}_read(v);
+	${int} dec, c = raw_${atomic}_read(v);
 
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
 
 	return dec;
 }
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
index c531d5afecc4..bcb4f27945ea 100755
--- a/scripts/atomic/fallbacks/dec_unless_positive
+++ b/scripts/atomic/fallbacks/dec_unless_positive
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_unless_positive(${atomic}_t *v)
+raw_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c > 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
 
 	return true;
 }
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
index 07757d8e338e..067eea553f5e 100755
--- a/scripts/atomic/fallbacks/fence
+++ b/scripts/atomic/fallbacks/fence
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}(${params})
+raw_${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;
 	__atomic_pre_full_fence();
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index 81d2834f03d2..c18b940153df 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -1,13 +1,13 @@
 cat << EOF
 static __always_inline ${int}
-arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
 
 	return c;
 }
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
index 3c2c3739169e..7d838f0b6639 100755
--- a/scripts/atomic/fallbacks/inc
+++ b/scripts/atomic/fallbacks/inc
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
index c726a6d0634d..de25aebee715 100755
--- a/scripts/atomic/fallbacks/inc_and_test
+++ b/scripts/atomic/fallbacks/inc_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_and_test(${atomic}_t *v)
+raw_${atomic}_inc_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_inc_return(v) == 0;
+	return raw_${atomic}_inc_return(v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
index 97603591aac2..e02206d017f6 100755
--- a/scripts/atomic/fallbacks/inc_not_zero
+++ b/scripts/atomic/fallbacks/inc_not_zero
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_not_zero(${atomic}_t *v)
+raw_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-	return arch_${atomic}_add_unless(v, 1, 0);
+	return raw_${atomic}_add_unless(v, 1, 0);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
index 95d8ce48233f..7b85cc5b00d2 100755
--- a/scripts/atomic/fallbacks/inc_unless_negative
+++ b/scripts/atomic/fallbacks/inc_unless_negative
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_unless_negative(${atomic}_t *v)
+raw_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c < 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
 
 	return true;
 }
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
index a0ea1d26e6b2..26d15ad92d04 100755
--- a/scripts/atomic/fallbacks/read_acquire
+++ b/scripts/atomic/fallbacks/read_acquire
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_read_acquire(const ${atomic}_t *v)
+raw_${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	${int} ret;
 
 	if (__native_word(${atomic}_t)) {
 		ret = smp_load_acquire(&(v)->counter);
 	} else {
-		ret = arch_${atomic}_read(v);
+		ret = raw_${atomic}_read(v);
 		__atomic_acquire_fence();
 	}
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
index b46feb56d69c..cbbff708129b 100755
--- a/scripts/atomic/fallbacks/release
+++ b/scripts/atomic/fallbacks/release
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_release(${params})
+raw_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();
 	${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
index 05cdb7f42477..104693bc3c66 100755
--- a/scripts/atomic/fallbacks/set_release
+++ b/scripts/atomic/fallbacks/set_release
@@ -1,12 +1,12 @@
 cat <<EOF
 static __always_inline void
-arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
+raw_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	if (__native_word(${atomic}_t)) {
 		smp_store_release(&(v)->counter, i);
 	} else {
 		__atomic_release_fence();
-		arch_${atomic}_set(v, i);
+		raw_${atomic}_set(v, i);
 	}
 }
 EOF
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
index da8a049c9b02..8975a496d495 100755
--- a/scripts/atomic/fallbacks/sub_and_test
+++ b/scripts/atomic/fallbacks/sub_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+raw_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_sub_return(i, v) == 0;
+	return raw_${atomic}_sub_return(i, v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
index 890f850ede37..4c911a6cced9 100755
--- a/scripts/atomic/fallbacks/try_cmpxchg
+++ b/scripts/atomic/fallbacks/try_cmpxchg
@@ -1,9 +1,9 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+raw_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;
-	r = arch_${atomic}_cmpxchg${order}(v, o, new);
+	r = raw_${atomic}_cmpxchg${order}(v, o, new);
 	if (unlikely(r != o))
 		*old = r;
 	return likely(r == o);
diff --git a/scripts/atomic/fallbacks/xchg b/scripts/atomic/fallbacks/xchg
index 733b8980b2f3..bdd788aa575f 100644
--- a/scripts/atomic/fallbacks/xchg
+++ b/scripts/atomic/fallbacks/xchg
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
+raw_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
 {
-	return arch_xchg${order}(&v->counter, new);
+	return raw_xchg${order}(&v->counter, new);
 }
 EOF
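For illustration, instantiating the updated add_negative template with
${atomic}=atomic, ${int}=int and an empty ${order} would emit roughly
the following (a hand-expanded sketch, not text copied from the
generated headers):

| static __always_inline bool
| raw_atomic_add_negative(int i, atomic_t *v)
| {
| 	return raw_atomic_add_return(i, v) < 0;
| }

Each fallback file is a shell fragment emitting a here-document, so the
same template also generates the atomic64_t/s64 and
_acquire/_release/_relaxed forms when the generator scripts substitute
different values for ${atomic}, ${int} and ${order}.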